//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.
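//
// For example, under a hard-float ABI such as LP64D, a struct containing one
// double and one int32_t may be passed as two separate arguments (one FPR and
// one GPR) while both register classes still have free registers; once either
// class is exhausted, the struct is passed according to the size-based rules
// above instead.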

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2,  RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

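// Return the set of GPRs used for argument passing under the given ABI:
// a0-a7 (x10-x17) for the standard ABIs, a0-a5 (x10-x15) for ILP32E/LP64E.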
ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

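// Return the 16-bit (X*_H) subregister views of the argument GPRs, used when
// passing f16 values in GPRs under Zhinx/Zhinxmin.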
static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_H, RISCV::X11_H, RISCV::X12_H,
                                       RISCV::X13_H, RISCV::X14_H, RISCV::X15_H,
                                       RISCV::X16_H, RISCV::X17_H};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                       RISCV::X12_H, RISCV::X13_H,
                                       RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

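// Return the 32-bit (X*_W) subregister views of the argument GPRs, used when
// passing f32 values in GPRs under Zfinx.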
static ArrayRef<MCPhysReg> getArgGPR32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_W, RISCV::X11_W, RISCV::X12_W,
                                       RISCV::X13_W, RISCV::X14_W, RISCV::X15_W,
                                       RISCV::X16_W, RISCV::X17_W};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                       RISCV::X12_W, RISCV::X13_W,
                                       RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be used
  // by the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be used
  // by the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
      RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
      RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                          RISCV::X12_H, RISCV::X13_H,
                                          RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be used
  // by the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
      RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
      RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                          RISCV::X12_W, RISCV::X13_W,
                                          RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have 4-byte
    // alignment. This behavior may be changed when RV32E/ILP32E is ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

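// Allocate a vector argument register (or register group/tuple) for ValVT
// from the list that matches its register class, returning an invalid
// MCRegister if none are left.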
static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = Subtarget.getXLen();
  MVT XLenVT = Subtarget.getXLenVT();

  if (ArgFlags.isNest()) {
    // Static chain parameter must not be passed in normal argument registers,
    // so we assign t2/t3 for it as done in GCC's
    // __builtin_call_with_static_chain.
    bool HasCFBranch =
        Subtarget.hasStdExtZicfilp() &&
        MF.getFunction().getParent()->getModuleFlag("cf-protection-branch");

    // Normal: t2, branch control flow protection: t3.
    const auto StaticChainReg = HasCFBranch ? RISCV::X28 : RISCV::X7;

    RISCVABI::ABI ABI = Subtarget.getTargetABI();
    if (HasCFBranch &&
        (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E))
      reportFatalUsageError(
          "Nested functions with control flow protection are not "
          "usable with ILP32E or LP64E ABI.");
    if (MCRegister Reg = State.AllocateReg(StaticChainReg)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if ((!LocVT.isVector() || Subtarget.isPExtPackedType(ValVT)) && IsRet &&
      ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = ArgFlags.isVarArg();
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = ArgFlags.isVarArg();
    UseGPRForF64 = ArgFlags.isVarArg();
    break;
  }

  if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && !UseGPRForF64) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  // Zdinx uses GPRs without a bitcast when possible.
  if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // FP types smaller than XLen use a custom GPR location.
  if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      (LocVT == MVT::f32 && XLen == 64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Bitcast FP to GPR if we can use a GPR register.
  if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (ArgFlags.isVarArg() && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft-float ABI or when floating point
  // registers are exhausted.
  if (XLen == 32 && LocVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      int64_t StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      int64_t StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if ((ValVT.isScalarInteger() || Subtarget.isPExtPackedType(ValVT)) &&
      ArgFlags.isSplitEnd() && PendingLocs.size() <= 1) {
    assert(PendingLocs.size() == 1 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors, excluding P extension packed vectors (see
  // isPExtPackedType), are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if ((ValVT.isScalarInteger() || Subtarget.isPExtPackedType(ValVT)) &&
      (ArgFlags.isSplit() || !PendingLocs.empty())) {
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  // FIXME: If the P extension and the V extension are enabled at the same
  // time, which one should be handled first?
  if (!Subtarget.isPExtPackedType(ValVT) &&
      (ValVT.isVector() || ValVT.isRISCVVectorTuple())) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (Reg) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
    } else {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  int64_t StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        State.addLoc(CCValAssign::getReg(It.getValNo(), It.getValVT(), Reg,
                                         XLenVT, CCValAssign::Indirect));
      else
        State.addLoc(CCValAssign::getMem(It.getValNo(), It.getValVT(),
                                         StackOffset, XLenVT,
                                         CCValAssign::Indirect));
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(((ValVT.isFloatingPoint() && !ValVT.isVector()) || LocVT == XLenVT ||
          Subtarget.isPExtPackedType(LocVT) ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

// FastCC shows less than a 1% performance improvement on some particular
// benchmarks, but in theory it may benefit some cases.
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet,
                           Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
      (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Check if there is an available GPRF16 before hitting the stack.
  if (LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPRF32 before hitting the stack.
  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = XLenVT;
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI);

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }

    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
    // have a free GPR.
    if (LocVT.isScalableVector() ||
        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
      LocInfo = CCValAssign::Indirect;
      LocVT = XLenVT;
    }
  }

  if (LocVT == XLenVT) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

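  // No register was available, so pass the value on the stack.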
  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
    Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

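// Implements the GHC calling convention: arguments are passed in a fixed set
// of registers and never on the stack; running out of registers is a fatal
// error.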
bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        Type *OrigTy, CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    static const MCPhysReg GPR32List[] = {
        RISCV::X9_W,  RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
        RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
        RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
    if (MCRegister Reg = State.AllocateReg(GPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}