//===-- RISCVCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallLowering.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"

using namespace llvm;

namespace {

27struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
28private:
29 // The function used internally to assign args - we ignore the AssignFn stored
30 // by OutgoingValueAssigner since RISC-V implements its CC using a custom
31 // function with a different signature.
32 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;
33
34 // Whether this is assigning args for a return.
35 bool IsRet;
36
37 RVVArgDispatcher &RVVDispatcher;
38
39public:
40 RISCVOutgoingValueAssigner(
41 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
42 RVVArgDispatcher &RVVDispatcher)
43 : CallLowering::OutgoingValueAssigner(nullptr),
44 RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
45 RVVDispatcher(RVVDispatcher) {}
46
47 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
48 CCValAssign::LocInfo LocInfo,
49 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
50 CCState &State) override {
51 MachineFunction &MF = State.getMachineFunction();
52 const DataLayout &DL = MF.getDataLayout();
53 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
54
55 if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
56 LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
57 *Subtarget.getTargetLowering(), RVVDispatcher))
58 return true;
59
60 StackSize = State.getStackSize();
61 return false;
62 }
63};
64
/// Outgoing-value handler: materializes call arguments / return values into
/// their assigned ABI locations. Register copies are recorded as implicit
/// uses of \p MIB (the call or return instruction); stack-located values are
/// stored relative to SP.
struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                            MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  // Compute the address of the outgoing stack slot at \p Offset from the
  // stack pointer, filling in \p MPO to describe it.
  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, Subtarget.getXLen());
    LLT sXLen = LLT::scalar(Subtarget.getXLen());

    // X2 is the stack pointer; copy it once per call site and reuse it for
    // every subsequent stack-passed value.
    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(sXLen, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  // Store (a possibly extended copy of) \p ValVReg to the stack address
  // produced by getStackAddress.
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();

    // TODO: Move StackAlignment to subtarget and share with FrameLowering.
    auto MMO =
        MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                commonAlignment(Align(16), LocMemOffset));

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  // Copy \p ValVReg into \p PhysReg and record the physical register as an
  // implicit use of the call/return instruction.
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    // If we're passing a smaller fp value into a larger integer register,
    // anyextend before copying.
    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
        ((VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::i64) &&
         VA.getValVT() == MVT::f16)) {
      LLT DstTy = LLT::scalar(VA.getLocVT().getSizeInBits());
      ValVReg = MIRBuilder.buildAnyExt(DstTy, ValVReg).getReg(0);
    }

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  // Handle an f64 split across two i32 locations (asserted below): unmerge it
  // into two 32-bit halves and assign each half to its own location. Returns
  // the number of VAs consumed (always 2 here).
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VALo = VAs[0];
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VALo.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    // The high half may have been assigned to the stack; store it immediately
    // (register assignments below may be deferred through the thunk).
    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    // The register assignments; the caller may ask (via non-null Thunk) to
    // run these later rather than right away.
    auto assignFunc = [=]() {
      assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
      if (VAHi.isRegLoc())
        assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
    };

    if (Thunk) {
      *Thunk = assignFunc;
      return 2;
    }

    assignFunc();
    return 2;
  }

private:
  // The call or return instruction; argument registers are added to it as
  // implicit uses in assignValueToReg.
  MachineInstrBuilder MIB;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  const RISCVSubtarget &Subtarget;
};

173struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
174private:
175 // The function used internally to assign args - we ignore the AssignFn stored
176 // by IncomingValueAssigner since RISC-V implements its CC using a custom
177 // function with a different signature.
178 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;
179
180 // Whether this is assigning args from a return.
181 bool IsRet;
182
183 RVVArgDispatcher &RVVDispatcher;
184
185public:
186 RISCVIncomingValueAssigner(
187 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
188 RVVArgDispatcher &RVVDispatcher)
189 : CallLowering::IncomingValueAssigner(nullptr),
190 RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
191 RVVDispatcher(RVVDispatcher) {}
192
193 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
194 CCValAssign::LocInfo LocInfo,
195 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
196 CCState &State) override {
197 MachineFunction &MF = State.getMachineFunction();
198 const DataLayout &DL = MF.getDataLayout();
199 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
200
201 if (LocVT.isScalableVector())
202 MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
203
204 if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
205 LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
206 *Subtarget.getTargetLowering(), RVVDispatcher))
207 return true;
208
209 StackSize = State.getStackSize();
210 return false;
211 }
212};
213
/// Incoming-value handler: moves incoming arguments / call results from their
/// ABI locations (physical registers or fixed stack slots) into virtual
/// registers. How a used physical register is recorded is left to subclasses
/// via markPhysRegUsed.
struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
  RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  // Create a fixed frame object for the incoming stack argument at \p Offset
  // and return the address of that slot.
  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(MemSize, Offset, /*Immutable=*/true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    return MIRBuilder.buildFrameIndex(LLT::pointer(0, Subtarget.getXLen()), FI)
        .getReg(0);
  }

  // Load an incoming stack-passed value into \p ValVReg.
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  // Record the physical register as used (live-in or implicit-def, depending
  // on subclass), then let the base class emit the copy/truncation.
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  // Handle an f64 split across two i32 locations (asserted below): read each
  // half from its location and merge them back into the destination vreg.
  // Returns the number of VAs consumed (always 2 here).
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VALo = VAs[0];
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VALo.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    // The high half may have been passed on the stack.
    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
    if (VAHi.isRegLoc())
      assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);

    // Recombine the two 32-bit halves into the original f64 vreg.
    MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);

    return 2;
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;

private:
  const RISCVSubtarget &Subtarget;
};

292struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
293 RISCVFormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
294 : RISCVIncomingValueHandler(B, MRI) {}
295
296 void markPhysRegUsed(MCRegister PhysReg) override {
297 MIRBuilder.getMRI()->addLiveIn(Reg: PhysReg);
298 MIRBuilder.getMBB().addLiveIn(PhysReg);
299 }
300};
301
302struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
303 RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
304 MachineInstrBuilder &MIB)
305 : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}
306
307 void markPhysRegUsed(MCRegister PhysReg) override {
308 MIB.addDef(RegNo: PhysReg, Flags: RegState::Implicit);
309 }
310
311 MachineInstrBuilder MIB;
312};

} // namespace

// Forward the target lowering to the generic CallLowering base so its helpers
// (splitToValueTypes, determineAndHandleAssignments, ...) can use it.
RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
    : CallLowering(&TLI) {}

319/// Return true if scalable vector with ScalarTy is legal for lowering.
320static bool isLegalElementTypeForRVV(Type *EltTy,
321 const RISCVSubtarget &Subtarget) {
322 if (EltTy->isPointerTy())
323 return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
324 if (EltTy->isIntegerTy(Bitwidth: 1) || EltTy->isIntegerTy(Bitwidth: 8) ||
325 EltTy->isIntegerTy(Bitwidth: 16) || EltTy->isIntegerTy(Bitwidth: 32))
326 return true;
327 if (EltTy->isIntegerTy(Bitwidth: 64))
328 return Subtarget.hasVInstructionsI64();
329 if (EltTy->isHalfTy())
330 return Subtarget.hasVInstructionsF16();
331 if (EltTy->isBFloatTy())
332 return Subtarget.hasVInstructionsBF16();
333 if (EltTy->isFloatTy())
334 return Subtarget.hasVInstructionsF32();
335 if (EltTy->isDoubleTy())
336 return Subtarget.hasVInstructionsF64();
337 return false;
338}
339
340// TODO: Support all argument types.
341// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
342static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
343 bool IsLowerArgs = false) {
344 if (T->isIntegerTy())
345 return true;
346 if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy())
347 return true;
348 if (T->isPointerTy())
349 return true;
350 // TODO: Support fixed vector types.
351 if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
352 T->isScalableTy() &&
353 isLegalElementTypeForRVV(EltTy: T->getScalarType(), Subtarget))
354 return true;
355 return false;
356}
357
358// TODO: Only integer, pointer and aggregate types are supported now.
359// TODO: Remove IsLowerRetVal argument by adding support for vectors in
360// lowerCall.
361static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget,
362 bool IsLowerRetVal = false) {
363 // TODO: Integers larger than 2*XLen are passed indirectly which is not
364 // supported yet.
365 if (T->isIntegerTy())
366 return T->getIntegerBitWidth() <= Subtarget.getXLen() * 2;
367 if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy())
368 return true;
369 if (T->isPointerTy())
370 return true;
371
372 if (T->isArrayTy())
373 return isSupportedReturnType(T: T->getArrayElementType(), Subtarget);
374
375 if (T->isStructTy()) {
376 auto StructT = cast<StructType>(Val: T);
377 for (unsigned i = 0, e = StructT->getNumElements(); i != e; ++i)
378 if (!isSupportedReturnType(T: StructT->getElementType(N: i), Subtarget))
379 return false;
380 return true;
381 }
382
383 if (IsLowerRetVal && T->isVectorTy() && Subtarget.hasVInstructions() &&
384 T->isScalableTy() &&
385 isLegalElementTypeForRVV(EltTy: T->getScalarType(), Subtarget))
386 return true;
387
388 return false;
389}
390
/// Lower the (possibly absent) return value \p Val into the not-yet-inserted
/// return instruction \p Ret: split it per the calling convention and copy
/// each piece into its assigned return register or stack slot.
///
/// \return true on success (or when there is no value to lower); false when
/// the return type is unsupported, signalling a SelectionDAG fallback.
bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                       const Value *Val,
                                       ArrayRef<Register> VRegs,
                                       MachineInstrBuilder &Ret) const {
  // Nothing to lower for a void return.
  if (!Val)
    return true;

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  if (!isSupportedReturnType(Val->getType(), Subtarget, /*IsLowerRetVal=*/true))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();
  CallingConv::ID CC = F.getCallingConv();

  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  // Split aggregates / illegal types into CC-assignable pieces.
  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);

  RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
                              ArrayRef(F.getReturnType())};
  RISCVOutgoingValueAssigner Assigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/true, Dispatcher);
  RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
  return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
                                       MIRBuilder, CC, F.isVarArg());
}

424bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
425 const Value *Val, ArrayRef<Register> VRegs,
426 FunctionLoweringInfo &FLI) const {
427 assert(!Val == VRegs.empty() && "Return value without a vreg");
428 MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Opcode: RISCV::PseudoRET);
429
430 if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
431 return false;
432
433 MIRBuilder.insertInstr(MIB: Ret);
434 return true;
435}
436
/// If there are varargs that were passed in a0-a7, the data in those registers
/// must be copied to the varargs save area on the stack.
void RISCVCallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs(Subtarget.getTargetABI());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // Index of the first argument GPR not consumed by the fixed arguments.
  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Size of the vararg save area. For now, the varargs save area is either
  // zero or large enough to hold a0-a7.
  int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
  int FI;

  // If all registers are allocated, then all varargs must be passed on the
  // stack and we don't need to save any argregs.
  if (VarArgsSaveSize == 0) {
    int VaArgOffset = Assigner.StackSize;
    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
  } else {
    // The save area sits immediately below the incoming-argument area
    // (negative offset from the incoming SP).
    int VaArgOffset = -VarArgsSaveSize;
    FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registered remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes,
                            VaArgOffset - static_cast<int>(XLenInBytes), true);
      VarArgsSaveSize += XLenInBytes;
    }

    const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
                                Subtarget.getXLen());
    const LLT sXLen = LLT::scalar(Subtarget.getXLen());

    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
    // Constant XLEN-in-bytes stride used to step between save slots.
    auto Offset = MIRBuilder.buildConstant(
        MRI.createGenericVirtualRegister(sXLen), XLenInBytes);

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    const MVT XLenVT = Subtarget.getXLenVT();
    for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
      const Register VReg = MRI.createGenericVirtualRegister(sXLen);
      // NOTE(review): the ValNo passed here looks synthetic — presumably it
      // only needs to not collide with the fixed arguments' value numbers;
      // confirm before relying on it.
      Handler.assignValueToReg(
          VReg, ArgRegs[I],
          CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
                              ArgRegs[I], XLenVT, CCValAssign::Full));
      auto MPO =
          MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);
      MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
      // Advance the store address to the next XLEN-sized slot.
      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }

  // Record the frame index of the first variable argument which is a value
  // necessary to G_VASTART.
  RVFI->setVarArgsFrameIndex(FI);
  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
}

/// Lower the incoming formal arguments of \p F into \p VRegs and, for vararg
/// functions, spill the unused argument GPRs to the vararg save area.
/// \return false to fall back to SelectionDAG for unsupported argument types.
bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                             const Function &F,
                                             ArrayRef<ArrayRef<Register>> VRegs,
                                             FunctionLoweringInfo &FLI) const {
  // Early exit if there are no arguments. varargs are not part of F.args() but
  // must be lowered.
  if (F.arg_empty() && !F.isVarArg())
    return true;

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
                                 /*IsLowerArgs=*/true))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  CallingConv::ID CC = F.getCallingConv();

  SmallVector<ArgInfo, 32> SplitArgInfos;
  SmallVector<Type *, 4> TypeList;
  unsigned Index = 0;
  for (auto &Arg : F.args()) {
    // Construct the ArgInfo object from destination register and argument type.
    ArgInfo AInfo(VRegs[Index], Arg.getType(), Index);
    setArgFlags(AInfo, Index + AttributeList::FirstArgIndex, DL, F);

    // Handle any required merging from split value types from physical
    // registers into the desired VReg. ArgInfo objects are constructed
    // correspondingly and appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);

    TypeList.push_back(Arg.getType());

    ++Index;
  }

  RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
                              ArrayRef(TypeList)};
  RISCVIncomingValueAssigner Assigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/false, Dispatcher);
  RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());

  // Assign locations first, then emit the copies/loads; CCInfo is kept so
  // saveVarArgRegisters can see which argument GPRs remain unallocated.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (F.isVarArg())
    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);

  return true;
}

/// Lower a call described by \p Info: bracket it with ADJCALLSTACKDOWN/UP,
/// assign arguments to their ABI locations, emit the call pseudo, and lower
/// any returned value. \return false to fall back to SelectionDAG.
bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                  CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();
  CallingConv::ID CC = F.getCallingConv();

  // Bail out (fallback) if any argument or the result type is unsupported.
  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &AInfo : Info.OrigArgs) {
    if (!isSupportedArgumentType(AInfo.Ty, Subtarget))
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() &&
      !isSupportedReturnType(Info.OrigRet.Ty, Subtarget))
    return false;

  // Open the call sequence now; its stack-size immediates are filled in after
  // the argument assigner has computed the outgoing stack usage.
  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(RISCV::ADJCALLSTACKDOWN);

  SmallVector<ArgInfo, 32> SplitArgInfos;
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<Type *, 4> TypeList;
  for (auto &AInfo : Info.OrigArgs) {
    // Handle any required unmerging of split value types from a given VReg into
    // physical registers. ArgInfo objects are constructed correspondingly and
    // appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
    TypeList.push_back(AInfo.Ty);
  }

  // TODO: Support tail calls.
  Info.IsTailCall = false;

  // Select the recommended relocation type R_RISCV_CALL_PLT.
  if (!Info.Callee.isReg())
    Info.Callee.setTargetFlags(RISCVII::MO_CALL);

  // Build the call off to the side so argument copies can be emitted first;
  // it is inserted after the argument assignments below.
  MachineInstrBuilder Call =
      MIRBuilder
          .buildInstrNoInsert(Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
                                                  : RISCV::PseudoCALL)
          .add(Info.Callee);
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  RVVArgDispatcher ArgDispatcher{&MF, getTLI<RISCVTargetLowering>(),
                                 ArrayRef(TypeList)};
  RISCVOutgoingValueAssigner ArgAssigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/false, ArgDispatcher);
  RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  MIRBuilder.insertInstr(Call);

  // Close the call sequence with the now-known outgoing stack size.
  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(RISCV::ADJCALLSTACKUP)
      .addImm(ArgAssigner.StackSize)
      .addImm(0);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Call->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MF.getRegInfo(),
                             *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *Call,
                             Call->getDesc(), Call->getOperand(0), 0);

  if (Info.OrigRet.Ty->isVoidTy())
    return true;

  // Lower the returned value(s) from their ABI locations into the result
  // vregs, mirroring the argument path above.
  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);

  RVVArgDispatcher RetDispatcher{&MF, getTLI<RISCVTargetLowering>(),
                                 ArrayRef(F.getReturnType())};
  RISCVIncomingValueAssigner RetAssigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/true, RetDispatcher);
  RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  return true;
}
