//===-- RISCVCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallLowering.h"
#include "RISCVCallingConv.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"

using namespace llvm;

namespace {

struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn stored
  // by OutgoingValueAssigner since RISC-V implements its CC using a custom
  // function with a different signature.
  RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args for a return.
  bool IsRet;

public:
  RISCVOutgoingValueAssigner(RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
      : CallLowering::OutgoingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State, Info.IsFixed,
                      IsRet, Info.Ty))
      return true;

    StackSize = State.getStackSize();
    return false;
  }
};

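// Handler for materializing outgoing values: it copies values into the
// physical argument registers (recorded as implicit uses on the call or
// return instruction) and stores values into the outgoing argument area,
// addressed relative to sp/x2 at the call site.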
struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                            MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, Subtarget.getXLen());
    LLT sXLen = LLT::scalar(Subtarget.getXLen());

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(sXLen, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();

    // TODO: Move StackAlignment to subtarget and share with FrameLowering.
    auto MMO =
        MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                commonAlignment(Align(16), LocMemOffset));

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

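  // Custom assignment covers two RISC-V cases: an f16/f32 value passed in a
  // wider integer register (any-extended to the register width), and, on RV32
  // ABIs that pass f64 in GPRs, an f64 split into two i32 halves. As a rough
  // sketch (illustrative, not exact output), the split case with both halves
  // in registers lowers to:
  //   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %val:_(s64)
  //   $x10 = COPY %lo(s32)
  //   $x11 = COPY %hi(s32)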
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    const CCValAssign &VA = VAs[0];
    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
        (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
      Register PhysReg = VA.getLocReg();

      auto assignFunc = [=]() {
        auto Ext = MIRBuilder.buildAnyExt(LLT(VA.getLocVT()), Arg.Regs[0]);
        MIRBuilder.buildCopy(PhysReg, Ext);
        MIB.addUse(PhysReg, RegState::Implicit);
      };

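      // Here and in the split case below, handing the work back via *Thunk
      // defers the physreg copies: the caller emits all stack stores first
      // and runs the thunks afterwards, so copies into argument registers
      // land immediately before the call.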
      if (Thunk) {
        *Thunk = assignFunc;
        return 1;
      }

      assignFunc();
      return 1;
    }

    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    // When only a single argument register was left, the high half of the f64
    // is passed on the stack instead.
    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    auto assignFunc = [=]() {
      assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
      if (VAHi.isRegLoc())
        assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
    };

    if (Thunk) {
      *Thunk = assignFunc;
      return 2;
    }

    assignFunc();
    return 2;
  }

private:
  MachineInstrBuilder MIB;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  const RISCVSubtarget &Subtarget;
};

struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn stored
  // by IncomingValueAssigner since RISC-V implements its CC using a custom
  // function with a different signature.
  RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args from a return.
  bool IsRet;

public:
  RISCVIncomingValueAssigner(RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
      : CallLowering::IncomingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    MachineFunction &MF = State.getMachineFunction();

    if (LocVT.isScalableVector())
      MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();

    if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State,
                      /*IsFixed=*/true, IsRet, Info.Ty))
      return true;

    StackSize = State.getStackSize();
    return false;
  }
};

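// Handler for materializing incoming values: it loads stack-passed (pieces
// of) arguments from fixed frame objects and copies register-passed values
// out of physical registers, which subclasses mark either as block live-ins
// (formal arguments) or as implicit defs of the call (returned values).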
struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
  RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(MemSize, Offset, /*IsImmutable=*/true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    return MIRBuilder.buildFrameIndex(LLT::pointer(0, Subtarget.getXLen()), FI)
        .getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

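  // Mirror of the outgoing custom assignment: truncate an f16/f32 value out
  // of the wider integer register it arrived in, or reassemble an f64 from
  // two i32 halves (register and/or stack) with a merge-like instruction.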
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    const CCValAssign &VA = VAs[0];
    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
        (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
      Register PhysReg = VA.getLocReg();

      markPhysRegUsed(PhysReg);

      LLT LocTy(VA.getLocVT());
      auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);

      MIRBuilder.buildTrunc(Arg.Regs[0], Copy.getReg(0));
      return 1;
    }

    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    if (VAHi.isRegLoc())
      assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);

    MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);

    return 2;
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in) and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;

private:
  const RISCVSubtarget &Subtarget;
};

struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
  RISCVFormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : RISCVIncomingValueHandler(B, MRI) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
  RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                         MachineInstrBuilder &MIB)
      : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

} // namespace

RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
    : CallLowering(&TLI) {}

/// Return true if a scalable vector with the given element type is legal for
/// RVV lowering.
static bool isLegalElementTypeForRVV(Type *EltTy,
                                     const RISCVSubtarget &Subtarget) {
  if (EltTy->isPointerTy())
    return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
  if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
      EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
    return true;
  if (EltTy->isIntegerTy(64))
    return Subtarget.hasVInstructionsI64();
  if (EltTy->isHalfTy())
    return Subtarget.hasVInstructionsF16();
  if (EltTy->isBFloatTy())
    return Subtarget.hasVInstructionsBF16Minimal();
  if (EltTy->isFloatTy())
    return Subtarget.hasVInstructionsF32();
  if (EltTy->isDoubleTy())
    return Subtarget.hasVInstructionsF64();
  return false;
}

// TODO: Support all argument types.
// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
                                    bool IsLowerArgs = false) {
  if (T->isIntegerTy())
    return true;
  if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy() || T->isFP128Ty())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isArrayTy())
    return isSupportedArgumentType(T->getArrayElementType(), Subtarget,
                                   IsLowerArgs);
  // TODO: Support fixed vector types.
  if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
      T->isScalableTy() &&
      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
    return true;
  return false;
}

// TODO: Only integer, floating-point, pointer and aggregate types are
// supported now.
// TODO: Remove IsLowerRetVal argument by adding support for vectors in
// lowerCall.
static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget,
                                  bool IsLowerRetVal = false) {
  if (T->isIntegerTy() || T->isFloatingPointTy() || T->isPointerTy())
    return true;

  if (T->isArrayTy())
    return isSupportedReturnType(T->getArrayElementType(), Subtarget);

  if (T->isStructTy()) {
    auto StructT = cast<StructType>(T);
    for (unsigned i = 0, e = StructT->getNumElements(); i != e; ++i)
      if (!isSupportedReturnType(StructT->getElementType(i), Subtarget))
        return false;
    return true;
  }

  if (IsLowerRetVal && T->isVectorTy() && Subtarget.hasVInstructions() &&
      T->isScalableTy() &&
      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
    return true;

  return false;
}

bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                    const Value *Val, ArrayRef<Register> VRegs,
                                    FunctionLoweringInfo &FLI) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");
  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);

  if (!FLI.CanLowerReturn) {
    insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
  } else if (!VRegs.empty()) {
    const RISCVSubtarget &Subtarget =
        MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
    if (!isSupportedReturnType(Val->getType(), Subtarget,
                               /*IsLowerRetVal=*/true))
      return false;

    MachineFunction &MF = MIRBuilder.getMF();
    const DataLayout &DL = MF.getDataLayout();
    const Function &F = MF.getFunction();
    CallingConv::ID CC = F.getCallingConv();

    ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
    setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

    SmallVector<ArgInfo, 4> SplitRetInfos;
    splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);

    RISCVOutgoingValueAssigner Assigner(
        CC == CallingConv::Fast ? CC_RISCV_FastCC : CC_RISCV,
        /*IsRet=*/true);
    RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
                                       MIRBuilder, CC, F.isVarArg()))
      return false;
  }

  MIRBuilder.insertInstr(Ret);
  return true;
}

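// canLowerReturn decides whether all return values fit in the return
// registers under the calling convention; when it returns false, generic
// CallLowering demotes the return to a hidden sret pointer, which is what the
// insertSRetStores/insertSRetLoads paths above and below handle.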
bool RISCVCallLowering::canLowerReturn(MachineFunction &MF,
                                       CallingConv::ID CallConv,
                                       SmallVectorImpl<BaseArgInfo> &Outs,
                                       bool IsVarArg) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  std::optional<unsigned> FirstMaskArgument = std::nullopt;
  // Preassign the first mask argument.
  if (Subtarget.hasVInstructions()) {
    for (const auto &ArgIdx : enumerate(Outs)) {
      MVT ArgVT = MVT::getVT(ArgIdx.value().Ty);
      if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
        FirstMaskArgument = ArgIdx.index();
    }
  }

  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
    MVT VT = MVT::getVT(Outs[I].Ty);
    if (CC_RISCV(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo,
                 /*IsFixed=*/true, /*IsRet=*/true, nullptr))
      return false;
  }
  return true;
}

/// If there are varargs that were passed in a0-a7, the data in those registers
/// must be copied to the varargs save area on the stack.
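/// For example (an illustrative case, not taken from the code below): for
/// "void f(int a, ...)" on ILP32, only a0 is fixed, so a1-a7 are spilled into
/// a 28-byte save area at negative offsets from the incoming stack pointer,
/// plus one extra XLEN-sized pad slot so the frame pointer stays
/// 2*XLEN-aligned.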
void RISCVCallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs(Subtarget.getTargetABI());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Size of the vararg save area. For now, the varargs save area is either
  // zero or large enough to hold a0-a7.
  int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
  int FI;

  // If all registers are allocated, then all varargs must be passed on the
  // stack and we don't need to save any argregs.
  if (VarArgsSaveSize == 0) {
    int VaArgOffset = Assigner.StackSize;
    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, /*IsImmutable=*/true);
  } else {
    int VaArgOffset = -VarArgsSaveSize;
    FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset,
                               /*IsImmutable=*/true);

    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes,
                            VaArgOffset - static_cast<int>(XLenInBytes),
                            /*IsImmutable=*/true);
      VarArgsSaveSize += XLenInBytes;
    }

    const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
                                Subtarget.getXLen());
    const LLT sXLen = LLT::scalar(Subtarget.getXLen());

    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
    auto Offset = MIRBuilder.buildConstant(
        MRI.createGenericVirtualRegister(sXLen), XLenInBytes);

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    const MVT XLenVT = Subtarget.getXLenVT();
    for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
      const Register VReg = MRI.createGenericVirtualRegister(sXLen);
      Handler.assignValueToReg(
          VReg, ArgRegs[I],
          CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
                              ArgRegs[I], XLenVT, CCValAssign::Full));
      auto MPO =
          MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);
      MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }

  // Record the frame index of the first variable argument, which is needed by
  // G_VASTART.
  RVFI->setVarArgsFrameIndex(FI);
  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
}

bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                             const Function &F,
                                             ArrayRef<ArrayRef<Register>> VRegs,
                                             FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = MIRBuilder.getMF();

  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
                                 /*IsLowerArgs=*/true))
      return false;
  }

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();
  CallingConv::ID CC = F.getCallingConv();

  SmallVector<ArgInfo, 32> SplitArgInfos;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgInfos, FLI.DemoteRegister, MRI, DL);

  unsigned Index = 0;
  for (auto &Arg : F.args()) {
    // Construct the ArgInfo object from destination register and argument type.
    ArgInfo AInfo(VRegs[Index], Arg.getType(), Index);
    setArgFlags(AInfo, Index + AttributeList::FirstArgIndex, DL, F);

    // Handle any required merging from split value types from physical
    // registers into the desired VReg. ArgInfo objects are constructed
    // correspondingly and appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);

    ++Index;
  }

  RISCVIncomingValueAssigner Assigner(CC == CallingConv::Fast ? CC_RISCV_FastCC
                                                              : CC_RISCV,
                                      /*IsRet=*/false);
  RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (F.isVarArg())
    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);

  return true;
}

bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                  CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  CallingConv::ID CC = Info.CallConv;

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &AInfo : Info.OrigArgs) {
    if (!isSupportedArgumentType(AInfo.Ty, Subtarget))
      return false;
    if (AInfo.Flags[0].isByVal())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() &&
      !isSupportedReturnType(Info.OrigRet.Ty, Subtarget))
    return false;

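  // Build ADJCALLSTACKDOWN now but leave its operands empty: the amount of
  // outgoing stack space is only known once all arguments have been assigned,
  // so the immediates are appended afterwards (see CallSeqStart.addImm below).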
  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(RISCV::ADJCALLSTACKDOWN);

  SmallVector<ArgInfo, 32> SplitArgInfos;
  for (auto &AInfo : Info.OrigArgs) {
    // Handle any required unmerging of split value types from a given VReg into
    // physical registers. ArgInfo objects are constructed correspondingly and
    // appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
  }

  // TODO: Support tail calls.
  Info.IsTailCall = false;

  // Select the recommended relocation type R_RISCV_CALL_PLT.
  if (!Info.Callee.isReg())
    Info.Callee.setTargetFlags(RISCVII::MO_CALL);

  MachineInstrBuilder Call =
      MIRBuilder
          .buildInstrNoInsert(Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
                                                  : RISCV::PseudoCALL)
          .add(Info.Callee);
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  RISCVOutgoingValueAssigner ArgAssigner(
      CC == CallingConv::Fast ? CC_RISCV_FastCC : CC_RISCV,
      /*IsRet=*/false);
  RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  MIRBuilder.insertInstr(Call);

  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(RISCV::ADJCALLSTACKUP)
      .addImm(ArgAssigner.StackSize)
      .addImm(0);

  // If Callee is a reg, it is used by a target-specific instruction and so
  // must have a register class matching the constraint of that instruction.
  if (Call->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MF.getRegInfo(),
                             *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *Call,
                             Call->getDesc(), Call->getOperand(0), 0);

  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    SmallVector<ArgInfo, 4> SplitRetInfos;
    splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);

    RISCVIncomingValueAssigner RetAssigner(
        CC == CallingConv::Fast ? CC_RISCV_FastCC : CC_RISCV,
        /*IsRet=*/true);
    RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
    if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                       MIRBuilder, CC, Info.IsVarArg))
      return false;
  }

  if (!Info.CanLowerReturn)
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);

  return true;
}