1//=== ARMCallingConv.cpp - ARM Custom CC Routines ---------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the custom routines for the ARM Calling Convention that
10// aren't done by tablegen, and includes the table generated implementations.
11//
12//===----------------------------------------------------------------------===//
13
14#include "ARMCallingConv.h"
15#include "ARM.h"
16#include "ARMSubtarget.h"
17using namespace llvm;
18
19// APCS f64 is in register pairs, possibly split to stack
20static bool f64AssignAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
21 CCValAssign::LocInfo LocInfo,
22 CCState &State, bool CanFail) {
23 static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
24
25 // Try to get the first register.
26 if (MCRegister Reg = State.AllocateReg(Regs: RegList))
27 State.addLoc(V: CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, HTP: LocInfo));
28 else {
29 // For the 2nd half of a v2f64, do not fail.
30 if (CanFail)
31 return false;
32
33 // Put the whole thing on the stack.
34 State.addLoc(V: CCValAssign::getCustomMem(
35 ValNo, ValVT, Offset: State.AllocateStack(Size: 8, Alignment: Align(4)), LocVT, HTP: LocInfo));
36 return true;
37 }
38
39 // Try to get the second register.
40 if (MCRegister Reg = State.AllocateReg(Regs: RegList))
41 State.addLoc(V: CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, HTP: LocInfo));
42 else
43 State.addLoc(V: CCValAssign::getCustomMem(
44 ValNo, ValVT, Offset: State.AllocateStack(Size: 4, Alignment: Align(4)), LocVT, HTP: LocInfo));
45 return true;
46}
47
48static bool CC_ARM_APCS_Custom_f64(unsigned ValNo, MVT ValVT, MVT LocVT,
49 CCValAssign::LocInfo LocInfo,
50 ISD::ArgFlagsTy ArgFlags,
51 CCState &State) {
52 if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, CanFail: true))
53 return false;
54 if (LocVT == MVT::v2f64 &&
55 !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, CanFail: false))
56 return false;
57 return true; // we handled it
58}
59
60// AAPCS f64 is in aligned register pairs
61static bool f64AssignAAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
62 CCValAssign::LocInfo LocInfo,
63 CCState &State, bool CanFail) {
64 static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
65 static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
66 static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 };
67 static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
68
69 MCRegister Reg = State.AllocateReg(Regs: HiRegList, ShadowRegs: ShadowRegList);
70 if (!Reg) {
71
72 // If we had R3 unallocated only, now we still must to waste it.
73 Reg = State.AllocateReg(Regs: GPRArgRegs);
74 assert((!Reg || Reg == ARM::R3) && "Wrong GPRs usage for f64");
75
76 // For the 2nd half of a v2f64, do not just fail.
77 if (CanFail)
78 return false;
79
80 // Put the whole thing on the stack.
81 State.addLoc(V: CCValAssign::getCustomMem(
82 ValNo, ValVT, Offset: State.AllocateStack(Size: 8, Alignment: Align(8)), LocVT, HTP: LocInfo));
83 return true;
84 }
85
86 unsigned i;
87 for (i = 0; i < 2; ++i)
88 if (HiRegList[i] == Reg)
89 break;
90
91 MCRegister T = State.AllocateReg(Reg: LoRegList[i]);
92 (void)T;
93 assert(T == LoRegList[i] && "Could not allocate register");
94
95 State.addLoc(V: CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, HTP: LocInfo));
96 State.addLoc(V: CCValAssign::getCustomReg(ValNo, ValVT, Reg: LoRegList[i],
97 LocVT, HTP: LocInfo));
98 return true;
99}
100
101static bool CC_ARM_AAPCS_Custom_f64(unsigned ValNo, MVT ValVT, MVT LocVT,
102 CCValAssign::LocInfo LocInfo,
103 ISD::ArgFlagsTy ArgFlags,
104 CCState &State) {
105 if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, CanFail: true))
106 return false;
107 if (LocVT == MVT::v2f64 &&
108 !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, CanFail: false))
109 return false;
110 return true; // we handled it
111}
112
113static bool f64RetAssign(unsigned ValNo, MVT ValVT, MVT LocVT,
114 CCValAssign::LocInfo LocInfo, CCState &State) {
115 static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
116 static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
117
118 MCRegister Reg = State.AllocateReg(Regs: HiRegList, ShadowRegs: LoRegList);
119 if (!Reg)
120 return false; // we didn't handle it
121
122 unsigned i;
123 for (i = 0; i < 2; ++i)
124 if (HiRegList[i] == Reg)
125 break;
126
127 State.addLoc(V: CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, HTP: LocInfo));
128 State.addLoc(V: CCValAssign::getCustomReg(ValNo, ValVT, Reg: LoRegList[i],
129 LocVT, HTP: LocInfo));
130 return true;
131}
132
// Custom APCS handler for f64/v2f64 return values: one register pair per
// f64, two pairs for v2f64. Returns false when a pair cannot be allocated.
static bool RetCC_ARM_APCS_Custom_f64(unsigned ValNo, MVT ValVT, MVT LocVT,
                                      CCValAssign::LocInfo LocInfo,
                                      ISD::ArgFlagsTy ArgFlags,
                                      CCState &State) {
  if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  // v2f64 needs a second pair for its upper half.
  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  return true; // we handled it
}
143
// AAPCS f64 returns use the same register pairing as APCS returns, so
// simply delegate to the APCS handler.
static bool RetCC_ARM_AAPCS_Custom_f64(unsigned ValNo, MVT ValVT, MVT LocVT,
                                       CCValAssign::LocInfo LocInfo,
                                       ISD::ArgFlagsTy ArgFlags,
                                       CCState &State) {
  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                   State);
}
151
// Argument-register lists shared by the custom routines below.

// Core integer argument registers (r0-r3).
static const MCPhysReg RRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

// VFP single-precision argument registers (s0-s15).
static const MCPhysReg SRegList[] = { ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                      ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                      ARM::S8, ARM::S9, ARM::S10, ARM::S11,
                                      ARM::S12, ARM::S13, ARM::S14, ARM::S15 };
// VFP double-precision argument registers (d0-d7).
static const MCPhysReg DRegList[] = { ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                      ARM::D4, ARM::D5, ARM::D6, ARM::D7 };
// Quad (128-bit) argument registers (q0-q3).
static const MCPhysReg QRegList[] = { ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3 };
161
162
163// Allocate part of an AAPCS HFA or HVA. We assume that each member of the HA
164// has InConsecutiveRegs set, and that the last member also has
165// InConsecutiveRegsLast set. We must process all members of the HA before
166// we can allocate it, as we need to know the total number of registers that
167// will be needed in order to (attempt to) allocate a contiguous block.
168static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned ValNo, MVT ValVT,
169 MVT LocVT,
170 CCValAssign::LocInfo LocInfo,
171 ISD::ArgFlagsTy ArgFlags,
172 CCState &State) {
173 SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
174
175 // AAPCS HFAs must have 1-4 elements, all of the same type
176 if (PendingMembers.size() > 0)
177 assert(PendingMembers[0].getLocVT() == LocVT);
178
179 // Add the argument to the list to be allocated once we know the size of the
180 // aggregate. Store the type's required alignment as extra info for later: in
181 // the [N x i64] case all trace has been removed by the time we actually get
182 // to do allocation.
183 PendingMembers.push_back(Elt: CCValAssign::getPending(
184 ValNo, ValVT, LocVT, HTP: LocInfo, ExtraInfo: ArgFlags.getNonZeroOrigAlign().value()));
185
186 if (!ArgFlags.isInConsecutiveRegsLast())
187 return true;
188
189 const MachineFunction &MF = State.getMachineFunction();
190 // Try to allocate a contiguous block of registers, each of the correct
191 // size to hold one member.
192 auto &DL = MF.getDataLayout();
193 const MaybeAlign StackAlign = DL.getStackAlignment();
194 assert(StackAlign && "data layout string is missing stack alignment");
195 const Align FirstMemberAlign(PendingMembers[0].getExtraInfo());
196 Align Alignment = std::min(a: FirstMemberAlign, b: *StackAlign);
197
198 ArrayRef<MCPhysReg> RegList;
199 switch (LocVT.SimpleTy) {
200 case MVT::i32: {
201 RegList = RRegList;
202 unsigned RegIdx = State.getFirstUnallocated(Regs: RegList);
203
204 // First consume all registers that would give an unaligned object. Whether
205 // we go on stack or in regs, no-one will be using them in future.
206 unsigned RegAlign = alignTo(Value: Alignment.value(), Align: 4) / 4;
207 while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
208 State.AllocateReg(Reg: RegList[RegIdx++]);
209
210 break;
211 }
212 case MVT::f16:
213 case MVT::bf16:
214 case MVT::f32:
215 RegList = SRegList;
216 break;
217 case MVT::v4f16:
218 case MVT::v4bf16:
219 case MVT::f64:
220 RegList = DRegList;
221 break;
222 case MVT::v8f16:
223 case MVT::v8bf16:
224 case MVT::v2f64:
225 RegList = QRegList;
226 break;
227 default:
228 llvm_unreachable("Unexpected member type for block aggregate");
229 break;
230 }
231
232 ArrayRef<MCPhysReg> RegResult =
233 State.AllocateRegBlock(Regs: RegList, RegsRequired: PendingMembers.size());
234 if (!RegResult.empty()) {
235 for (const auto &[PendingMember, Reg] : zip(t&: PendingMembers, u&: RegResult)) {
236 PendingMember.convertToReg(Reg);
237 State.addLoc(V: PendingMember);
238 }
239 PendingMembers.clear();
240 return true;
241 }
242
243 // Register allocation failed, we'll be needing the stack
244 unsigned Size = LocVT.getSizeInBits() / 8;
245 if (LocVT == MVT::i32 && State.getStackSize() == 0) {
246 // If nothing else has used the stack until this point, a non-HFA aggregate
247 // can be split between regs and stack.
248 unsigned RegIdx = State.getFirstUnallocated(Regs: RegList);
249 for (auto &It : PendingMembers) {
250 if (RegIdx >= RegList.size())
251 It.convertToMem(Offset: State.AllocateStack(Size, Alignment: Align(Size)));
252 else
253 It.convertToReg(Reg: State.AllocateReg(Reg: RegList[RegIdx++]));
254
255 State.addLoc(V: It);
256 }
257 PendingMembers.clear();
258 return true;
259 }
260
261 if (LocVT != MVT::i32)
262 RegList = SRegList;
263
264 // Mark all regs as unavailable (AAPCS rule C.2.vfp for VFP, C.6 for core)
265 for (auto Reg : RegList)
266 State.AllocateReg(Reg);
267
268 // Clamp the alignment between 4 and 8.
269 if (MF.getTarget().getTargetTriple().isTargetAEABI())
270 Alignment = ArgFlags.getNonZeroMemAlign() <= 4 ? Align(4) : Align(8);
271
272 // After the first item has been allocated, the rest are packed as tightly as
273 // possible. (E.g. an incoming i64 would have starting Align of 8, but we'll
274 // be allocating a bunch of i32 slots).
275 for (auto &It : PendingMembers) {
276 It.convertToMem(Offset: State.AllocateStack(Size, Alignment));
277 State.addLoc(V: It);
278 Alignment = Align(1);
279 }
280
281 // All pending members have now been allocated
282 PendingMembers.clear();
283
284 // This will be allocated by the last member of the aggregate
285 return true;
286}
287
288static bool CustomAssignInRegList(unsigned ValNo, MVT ValVT, MVT LocVT,
289 CCValAssign::LocInfo LocInfo, CCState &State,
290 ArrayRef<MCPhysReg> RegList) {
291 MCRegister Reg = State.AllocateReg(Regs: RegList);
292 if (Reg) {
293 State.addLoc(V: CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, HTP: LocInfo));
294 return true;
295 }
296 return false;
297}
298
299static bool CC_ARM_AAPCS_Custom_f16(unsigned ValNo, MVT ValVT, MVT LocVT,
300 CCValAssign::LocInfo LocInfo,
301 ISD::ArgFlagsTy ArgFlags, CCState &State) {
302 // f16 and bf16 arguments are extended to i32 and assigned to a register in
303 // [r0, r3].
304 return CustomAssignInRegList(ValNo, ValVT, LocVT: MVT::i32, LocInfo, State,
305 RegList: RRegList);
306}
307
308static bool CC_ARM_AAPCS_VFP_Custom_f16(unsigned ValNo, MVT ValVT, MVT LocVT,
309 CCValAssign::LocInfo LocInfo,
310 ISD::ArgFlagsTy ArgFlags,
311 CCState &State) {
312 // f16 and bf16 arguments are extended to f32 and assigned to a register in
313 // [s0, s15].
314 return CustomAssignInRegList(ValNo, ValVT, LocVT: MVT::f32, LocInfo, State,
315 RegList: SRegList);
316}
317
318static bool CC_ARM_AAPCS_Common_Custom_f16_Stack(unsigned ValNo, MVT ValVT,
319 MVT LocVT,
320 CCValAssign::LocInfo LocInfo,
321 ISD::ArgFlagsTy ArgFlags,
322 CCState &State) {
323 // f16 and bf16 (if not passed in a register) are assigned to a 32-bit stack
324 // slot, with the most-significant 16 bits unspecified. The 32-bit slot is
325 // important to make sure that the byte ordering is correct for big endian
326 // targets.
327 State.addLoc(V: CCValAssign::getCustomMem(
328 ValNo, ValVT, Offset: State.AllocateStack(Size: 4, Alignment: Align(4)), LocVT: MVT::i32, HTP: LocInfo));
329 return true;
330}
331
332// Include the table generated calling convention implementations.
333#include "ARMGenCallingConv.inc"
334