//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

/// Helper function which updates \p Flags when \p AttrFn returns true.
static void
addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
                    const std::function<bool(Attribute::AttrKind)> &AttrFn) {
  // TODO: There are missing flags. Add them here.
  if (AttrFn(Attribute::SExt))
    Flags.setSExt();
  if (AttrFn(Attribute::ZExt))
    Flags.setZExt();
  if (AttrFn(Attribute::InReg))
    Flags.setInReg();
  if (AttrFn(Attribute::StructRet))
    Flags.setSRet();
  if (AttrFn(Attribute::Nest))
    Flags.setNest();
  if (AttrFn(Attribute::ByVal))
    Flags.setByVal();
  if (AttrFn(Attribute::ByRef))
    Flags.setByRef();
  if (AttrFn(Attribute::Preallocated))
    Flags.setPreallocated();
  if (AttrFn(Attribute::InAlloca))
    Flags.setInAlloca();
  if (AttrFn(Attribute::Returned))
    Flags.setReturned();
  if (AttrFn(Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (AttrFn(Attribute::SwiftAsync))
    Flags.setSwiftAsync();
  if (AttrFn(Attribute::SwiftError))
    Flags.setSwiftError();
}
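
// For example (illustrative IR, not taken from any particular test): lowering
// a call to
//   declare void @f(ptr sret(%T) %out, i32 signext %x)
// runs this helper once per operand, producing Flags with isSRet() set for
// %out and Flags with isSExt() set for %x.
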
66
67ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
68 unsigned ArgIdx) const {
69 ISD::ArgFlagsTy Flags;
70 addFlagsUsingAttrFn(Flags, AttrFn: [&Call, &ArgIdx](Attribute::AttrKind Attr) {
71 return Call.paramHasAttr(ArgNo: ArgIdx, Kind: Attr);
72 });
73 return Flags;
74}
75
76ISD::ArgFlagsTy
77CallLowering::getAttributesForReturn(const CallBase &Call) const {
78 ISD::ArgFlagsTy Flags;
79 addFlagsUsingAttrFn(Flags, AttrFn: [&Call](Attribute::AttrKind Attr) {
80 return Call.hasRetAttr(Kind: Attr);
81 });
82 return Flags;
83}
84
85void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
86 const AttributeList &Attrs,
87 unsigned OpIdx) const {
88 addFlagsUsingAttrFn(Flags, AttrFn: [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
89 return Attrs.hasAttributeAtIndex(Index: OpIdx, Kind: Attr);
90 });
91}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::optional<PtrAuthInfo> PAI,
                             Register ConvergenceCtrlToken,
                             std::function<Register()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool CanBeTailCalled = CB.isTailCall() &&
                         isInTailCallPosition(CB, MF.getTarget()) &&
                         (MF.getFunction()
                              .getFnAttribute("disable-tail-calls")
                              .getValueAsString() != "true");

  CallingConv::ID CallConv = CB.getCallingConv();
  Type *RetTy = CB.getType();
  bool IsVarArg = CB.getFunctionType()->isVarArg();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);

  Info.IsConvergent = CB.isConvergent();

  if (!Info.CanLowerReturn) {
    // Callee requires sret demotion.
    insertSRetOutgoingArgument(MIRBuilder, CB, Info);

    // The sret demotion isn't compatible with tail-calls, since the sret
    // argument points into the caller's stack frame.
    CanBeTailCalled = false;
  }

  // The first step is to marshal all the function's parameters into the
  // correct physregs and memory locations. Gather the sequence of argument
  // types that we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (const auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i)};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    if (i >= NumFixedArgs)
      OrigArg.Flags[0].setVarArg();

    // If we have an explicit sret argument that is an Instruction (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
      CanBeTailCalled = false;

    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();

  // If IRTranslator chose to drop the ptrauth info, we can turn this into
  // a direct call.
  if (!PAI && CB.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
    CalleeV = cast<ConstantPtrAuth>(CalleeV)->getPointer();
    assert(isa<Function>(CalleeV));
  }

  if (const Function *F = dyn_cast<Function>(CalleeV)) {
    if (F->hasFnAttribute(Attribute::NonLazyBind)) {
      LLT Ty = getLLTForType(*F->getType(), DL);
      Register Reg = MIRBuilder.buildGlobalValue(Ty, F).getReg(0);
      Info.Callee = MachineOperand::CreateReg(Reg, false);
    } else {
      Info.Callee = MachineOperand::CreateGA(F, 0);
    }
  } else if (isa<GlobalIFunc>(CalleeV) || isa<GlobalAlias>(CalleeV)) {
    // IR IFuncs and Aliases can't be forward declared (only defined), so the
    // callee must be in the same TU and therefore we can direct-call it without
    // worrying about it being out of range.
    Info.Callee = MachineOperand::CreateGA(cast<GlobalValue>(CalleeV), 0);
  } else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Register ReturnHintAlignReg;
  Align ReturnHintAlign;

  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, getAttributesForReturn(CB)};

  if (!Info.OrigRet.Ty->isVoidTy()) {
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

    if (MaybeAlign Alignment = CB.getRetAlign()) {
      if (*Alignment > Align(1)) {
        ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
        Info.OrigRet.Regs[0] = ReturnHintAlignReg;
        ReturnHintAlign = *Alignment;
      }
    }
  }

  auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi);
  if (Bundle && CB.isIndirectCall()) {
    Info.CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
    assert(Info.CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
  }

  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol)) {
    Info.DeactivationSymbol = cast<GlobalValue>(Bundle->Inputs[0]);
  }

  Info.CB = &CB;
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CallConv;
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.PAI = PAI;
  Info.ConvergenceCtrlToken = ConvergenceCtrlToken;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall = CanBeTailCalled;
  Info.IsVarArg = IsVarArg;
  if (!lowerCall(MIRBuilder, Info))
    return false;

  if (ReturnHintAlignReg && !Info.LoweredTailCall) {
    MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
                                ReturnHintAlign);
  }

  return true;
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);

  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
  if (PtrTy) {
    Flags.setPointer();
    Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
  }

  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
      Flags.isByRef()) {
    assert(OpIdx >= AttributeList::FirstArgIndex);
    unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;

    Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamByRefType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);

    assert(ElementTy && "Must have byval, inalloca or preallocated type");

    uint64_t MemSize = DL.getTypeAllocSize(ElementTy);
    if (Flags.isByRef())
      Flags.setByRefSize(MemSize);
    else
      Flags.setByValSize(MemSize);

    // For ByVal, the alignment should be passed from the frontend. The
    // backend will guess if this info is missing, but there are cases it
    // cannot get right.
    if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
      MemAlign = *ParamAlign;
    else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
      MemAlign = *ParamAlign;
    else
      MemAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
  } else if (OpIdx >= AttributeList::FirstArgIndex) {
    if (auto ParamAlign =
            FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
      MemAlign = *ParamAlign;
  }
  Flags.setMemAlign(MemAlign);
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in x0.
  if (Flags.isSwiftSelf())
    Flags.setReturned(false);
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                     SmallVectorImpl<ArgInfo> &SplitArgs,
                                     const DataLayout &DL,
                                     CallingConv::ID CallConv,
                                     SmallVectorImpl<TypeSize> *Offsets) const {
  SmallVector<Type *, 4> SplitTys;
  ComputeValueTypes(DL, OrigArg.Ty, SplitTys, Offsets);

  if (SplitTys.size() == 0)
    return;

  if (SplitTys.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitTys[0], OrigArg.OrigArgIndex,
                           OrigArg.Flags[0], OrigArg.OrigValue);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
  assert(OrigArg.Regs.size() == SplitTys.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false, DL);
  for (unsigned i = 0, e = SplitTys.size(); i < e; ++i) {
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTys[i], OrigArg.OrigArgIndex,
                           OrigArg.Flags[0]);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}
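
// Illustrative sketch (types chosen arbitrarily): splitting an argument of IR
// type {double, double} yields two ArgInfos, one per member, both inheriting
// the original argument's index and flags:
//   SplitArgs[0] = ArgInfo(OrigArg.Regs[0], double, OrigArg.OrigArgIndex, ...)
//   SplitArgs[1] = ArgInfo(OrigArg.Regs[1], double, OrigArg.OrigArgIndex, ...)
// If the target reports that the pieces need consecutive registers, each part
// is additionally marked InConsecutiveRegs, and the final part is marked
// InConsecutiveRegsLast.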

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder
mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getCoverTy(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);

    SmallVector<Register, 8> ConcatRegs(SrcRegs.size());
    llvm::copy(SrcRegs, ConcatRegs.begin());

    if (LLTy.getScalarType() != PartLLT.getScalarType())
      for (size_t I = 0, E = SrcRegs.size(); I != E; ++I) {
        auto BitcastDst =
            MRI.getType(SrcRegs[I]).changeElementType(LLTy.getScalarType());
        ConcatRegs[I] = B.buildBitcast(BitcastDst, SrcRegs[I]).getReg(0);
      }

    return B.buildConcatVectors(DstRegs[0], ConcatRegs);
  }

  // We need to create an unmerge to the result registers, which may require
  // widening the original value.
  Register UnmergeSrcReg;
  if (LCMTy.getSizeInBits() != PartLLT.getSizeInBits()) {
    assert(DstRegs.size() == 1);
    return B.buildDeleteTrailingVectorElements(
        DstRegs[0], B.buildMergeLikeInstr(LCMTy, SrcRegs));
  } else {
    // We don't need to widen anything if we're extracting a scalar which was
    // promoted to a vector e.g. s8 -> v4s8 -> s8
    assert(SrcRegs.size() == 1);
    UnmergeSrcReg = SrcRegs[0];
  }

  size_t NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  llvm::copy(DstRegs, PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (size_t I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  if (PartLLT != LCMTy)
    UnmergeSrcReg = B.buildBitcast(LCMTy, UnmergeSrcReg).getReg(0);

  if (PadDstRegs.size() == 1)
    return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
}
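
// Worked example for the v3s16 case mentioned above (a sketch; the exact
// instruction sequence depends on the builder helpers): with DstRegs =
// {<3 x s16>} and SrcRegs = {<2 x s16>, <2 x s16>}, the cover type is
// <4 x s16>, so the two parts are concatenated into a <4 x s16> value and the
// trailing element is then dropped to produce the <3 x s16> result.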

void CallLowering::buildCopyFromRegs(MachineIRBuilder &B,
                                     ArrayRef<Register> OrigRegs,
                                     ArrayRef<Register> Regs, LLT LLTy,
                                     LLT PartLLT, const ISD::ArgFlagsTy Flags) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (PartLLT == LLTy) {
    // We should have avoided introducing a new virtual register, and just
    // directly assigned here.
    assert(OrigRegs[0] == Regs[0]);
    return;
  }

  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
      Regs.size() == 1) {
    B.buildBitcast(OrigRegs[0], Regs[0]);
    return;
  }

  // A vector PartLLT needs extending to LLTy's element size.
  // E.g. <2 x s64> = G_SEXT <2 x s32>.
  if (PartLLT.isVector() == LLTy.isVector() &&
      PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
      (!PartLLT.isVector() ||
       PartLLT.getElementCount() == LLTy.getElementCount()) &&
      OrigRegs.size() == 1 && Regs.size() == 1) {
    Register SrcReg = Regs[0];

    LLT LocTy = MRI.getType(SrcReg);

    if (Flags.isSExt()) {
      SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    } else if (Flags.isZExt()) {
      SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    }

    // Sometimes pointers are passed zero extended.
    LLT OrigTy = MRI.getType(OrigRegs[0]);
    if (OrigTy.isPointer()) {
      LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
      B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
      return;
    }

    B.buildTrunc(OrigRegs[0], SrcReg);
    return;
  }

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMergeValues(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMergeLikeInstr(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    SmallVector<Register> CastRegs(Regs);

    // If PartLLT is a mismatched vector in both number of elements and element
    // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
    // have the same elt type, i.e. v4s32.
    // TODO: Extend this coercion to element multiples other than just 2.
    if (TypeSize::isKnownGT(PartLLT.getSizeInBits(), LLTy.getSizeInBits()) &&
        PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
        Regs.size() == 1) {
      LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
                      .changeElementCount(PartLLT.getElementCount() * 2);
      CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
      PartLLT = NewTy;
    }

    if (LLTy.getScalarSizeInBits() == PartLLT.getScalarSizeInBits()) {
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    } else {
      unsigned I = 0;
      LLT GCDTy = getGCDType(LLTy, PartLLT);

      // We are both splitting a vector, and bitcasting its element types. Cast
      // the source pieces into the appropriate number of pieces with the result
      // element type.
      for (Register SrcReg : CastRegs)
        CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    }

    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt =
        divideCeil(DstEltTy.getSizeInBits(), PartLLT.getSizeInBits());
    LLT ExtendedPartTy = LLT::scalar(PartLLT.getSizeInBits() * PartsPerElt);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge =
          B.buildMergeLikeInstr(ExtendedPartTy, Regs.take_front(PartsPerElt));
      if (ExtendedPartTy.getSizeInBits() > RealDstEltTy.getSizeInBits())
        Merge = B.buildTrunc(RealDstEltTy, Merge);
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    // FIXME: Should handle floating point promotions.
    unsigned NumElts = LLTy.getNumElements();
    LLT BVType = LLT::fixed_vector(NumElts, PartLLT);

    Register BuildVec;
    if (NumElts == Regs.size())
      BuildVec = B.buildBuildVector(BVType, Regs).getReg(0);
    else {
      // Vector elements are packed in the inputs.
      // e.g. we have a <4 x s16> but 2 x s32 in regs.
      assert(NumElts > Regs.size());
      LLT SrcEltTy = MRI.getType(Regs[0]);

      LLT OriginalEltTy = MRI.getType(OrigRegs[0]).getElementType();

      // Input registers contain packed elements.
      // Determine how many elements per reg.
      assert((SrcEltTy.getSizeInBits() % OriginalEltTy.getSizeInBits()) == 0);
      unsigned EltPerReg =
          (SrcEltTy.getSizeInBits() / OriginalEltTy.getSizeInBits());

      SmallVector<Register, 0> BVRegs;
      BVRegs.reserve(Regs.size() * EltPerReg);
      for (Register R : Regs) {
        auto Unmerge = B.buildUnmerge(OriginalEltTy, R);
        for (unsigned K = 0; K < EltPerReg; ++K)
          BVRegs.push_back(B.buildAnyExt(PartLLT, Unmerge.getReg(K)).getReg(0));
      }

      // We may have some more elements in BVRegs, e.g. if we have 2 s32 pieces
      // for a <3 x s16> vector. We should have fewer than EltPerReg extra
      // items.
      if (BVRegs.size() > NumElts) {
        assert((BVRegs.size() - NumElts) < EltPerReg);
        BVRegs.truncate(NumElts);
      }
      BuildVec = B.buildBuildVector(BVType, BVRegs).getReg(0);
    }
    B.buildTrunc(OrigRegs[0], BuildVec);
  }
}
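
// Example of the scalar re-packing path above (assuming an s128 value passed
// in two s64 parts): with OrigRegs = {%orig:_(s128)} and Regs = {%lo:_(s64),
// %hi:_(s64)}, this emits
//   %orig:_(s128) = G_MERGE_VALUES %lo, %hi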

void CallLowering::buildCopyToRegs(MachineIRBuilder &B,
                                   ArrayRef<Register> DstRegs, Register SrcReg,
                                   LLT SrcTy, LLT PartTy, unsigned ExtendOp) {
  // We could just insert a regular copy, but this is unreachable at the moment.
  assert(SrcTy != PartTy && "identical part types shouldn't reach here");

  const TypeSize PartSize = PartTy.getSizeInBits();

  if (PartSize == SrcTy.getSizeInBits() && DstRegs.size() == 1) {
    // TODO: Handle int<->ptr casts. It just happens the ABI lowering
    // assignments are not pointer aware.
    B.buildBitcast(DstRegs[0], SrcReg);
    return;
  }

  if (PartTy.isVector() == SrcTy.isVector() &&
      PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
    assert(DstRegs.size() == 1);
    B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      TypeSize::isKnownGT(PartSize, SrcTy.getElementType().getSizeInBits()) &&
      SrcTy.getElementCount() == ElementCount::getFixed(DstRegs.size())) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcTy.isVector() && PartTy.isVector() &&
      PartTy.getSizeInBits() == SrcTy.getSizeInBits() &&
      ElementCount::isKnownLT(SrcTy.getElementCount(),
                              PartTy.getElementCount())) {
    // A coercion like: v2f32 -> v4f32 or nxv2f32 -> nxv4f32
    Register DstReg = DstRegs.front();
    B.buildPadVectorWithUndefElements(DstReg, SrcReg);
    return;
  }

  LLT GCDTy = getGCDType(SrcTy, PartTy);
  if (GCDTy == PartTy) {
    // If this is already evenly divisible, we can create a simple unmerge.
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      SrcTy.getScalarSizeInBits() > PartTy.getSizeInBits()) {
    LLT ExtTy =
        LLT::vector(SrcTy.getElementCount(),
                    LLT::scalar(PartTy.getScalarSizeInBits() * DstRegs.size() /
                                SrcTy.getNumElements()));
    auto Ext = B.buildAnyExt(ExtTy, SrcReg);
    B.buildUnmerge(DstRegs, Ext);
    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();
  LLT DstTy = MRI.getType(DstRegs[0]);
  LLT CoverTy = getCoverTy(SrcTy, PartTy);
  if (SrcTy.isVector() && DstRegs.size() > 1) {
    TypeSize FullCoverSize =
        DstTy.getSizeInBits().multiplyCoefficientBy(DstRegs.size());

    LLT EltTy = SrcTy.getElementType();
    TypeSize EltSize = EltTy.getSizeInBits();
    if (FullCoverSize.isKnownMultipleOf(EltSize)) {
      TypeSize VecSize = FullCoverSize.divideCoefficientBy(EltSize);
      CoverTy =
          LLT::vector(ElementCount::get(VecSize, VecSize.isScalable()), EltTy);
    }
  }

  if (PartTy.isVector() && CoverTy == PartTy) {
    assert(DstRegs.size() == 1);
    B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
    return;
  }

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned CoveringSize = CoverTy.getSizeInBits();

  Register UnmergeSrc = SrcReg;

  if (!CoverTy.isVector() && CoveringSize != SrcSize) {
    // For scalars, it's common to be able to use a simple extension.
    if (SrcTy.isScalar() && DstTy.isScalar()) {
      CoveringSize = alignTo(SrcSize, DstSize);
      LLT CoverTy = LLT::scalar(CoveringSize);
      UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
    } else {
      // Widen to the common type.
      // FIXME: This should respect the extend type
      Register Undef = B.buildUndef(SrcTy).getReg(0);
      SmallVector<Register, 8> MergeParts(1, SrcReg);
      for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
        MergeParts.push_back(Undef);
      UnmergeSrc = B.buildMergeLikeInstr(CoverTy, MergeParts).getReg(0);
    }
  }

  if (CoverTy.isVector() && CoveringSize != SrcSize)
    UnmergeSrc = B.buildPadVectorWithUndefElements(CoverTy, SrcReg).getReg(0);

  B.buildUnmerge(DstRegs, UnmergeSrc);
}
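
// Example of the outgoing direction (the mirror of the buildCopyFromRegs
// sketch above, same assumed s128-in-two-s64 scenario): SrcTy = s128,
// PartTy = s64, DstRegs = {%lo, %hi}. getGCDType(s128, s64) == s64 == PartTy,
// so the evenly-divisible path emits a single
//   %lo:_(s64), %hi:_(s64) = G_UNMERGE_VALUES %src(s128)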

bool CallLowering::determineAndHandleAssignments(
    ValueHandler &Handler, ValueAssigner &Assigner,
    SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
    CallingConv::ID CallConv, bool IsVarArg,
    ArrayRef<Register> ThisReturnRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;

  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, Args, CCInfo))
    return false;

  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
                           ThisReturnRegs);
}

static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
  if (Flags.isSExt())
    return TargetOpcode::G_SEXT;
  if (Flags.isZExt())
    return TargetOpcode::G_ZEXT;
  return TargetOpcode::G_ANYEXT;
}

bool CallLowering::determineAssignments(ValueAssigner &Assigner,
                                        SmallVectorImpl<ArgInfo> &Args,
                                        CCState &CCInfo) const {
  LLVMContext &Ctx = CCInfo.getContext();
  const DataLayout &DL = CCInfo.getMachineFunction().getDataLayout();
  const CallingConv::ID CallConv = CCInfo.getCallingConv();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = TLI->getValueType(DL, Args[i].Ty);

    MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);

    if (NumParts == 1) {
      // Try to use the register type if we couldn't assign the VT.
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[0], CCInfo))
        return false;
      continue;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.

    // We're handling an incoming arg which is split over multiple regs.
    // E.g. passing an s128 on AArch64.
    ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
    Args[i].Flags.clear();

    for (unsigned Part = 0; Part < NumParts; ++Part) {
      ISD::ArgFlagsTy Flags = OrigFlags;
      if (Part == 0) {
        Flags.setSplit();
      } else {
        Flags.setOrigAlign(Align(1));
        if (Part == NumParts - 1)
          Flags.setSplitEnd();
      }

      Args[i].Flags.push_back(Flags);
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[Part], CCInfo)) {
        // Still couldn't assign this smaller part type for some reason.
        return false;
      }
    }
  }

  return true;
}
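
// For the s128-on-AArch64 example mentioned above (a sketch): CurVT is i128,
// the register type is i64, and NumParts == 2, so Args[i].Flags ends up with
// one entry per part:
//   Flags[0]: Split set
//   Flags[1]: OrigAlign == 1, SplitEnd set
// and assignArg is invoked once per part.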

bool CallLowering::handleAssignments(ValueHandler &Handler,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<Register> ThisReturnRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getDataLayout();

  const unsigned NumArgs = Args.size();

  // Stores thunks for outgoing register assignments. This is used so we delay
  // generating register copies until mem loc assignments are done. We do this
  // so that if the target is using the delayed stack protector feature, we can
  // find the split point of the block accurately. E.g. if we have:
  // G_STORE %val, %memloc
  // $x0 = COPY %foo
  // $x1 = COPY %bar
  // CALL func
  // ... then the split point for the block will correctly be at, and including,
  // the copy to $x0. If instead the G_STORE instruction immediately precedes
  // the CALL, then we'd prematurely choose the CALL as the split point, thus
  // generating a split block with a CALL that uses undefined physregs.
  SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;

  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");
    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      std::function<void()> Thunk;
      unsigned NumArgRegs = Handler.assignCustomValue(
          Args[i], ArrayRef(ArgLocs).slice(j), &Thunk);
      if (Thunk)
        DelayedOutgoingRegAssignments.emplace_back(Thunk);
      if (!NumArgRegs)
        return false;
      j += (NumArgRegs - 1);
      continue;
    }

    auto AllocaAddressSpace = MF.getDataLayout().getAllocaAddrSpace();

    const MVT ValVT = VA.getValVT();
    const MVT LocVT = VA.getLocVT();

    const LLT LocTy = getLLTForMVT(LocVT);
    const LLT ValTy = getLLTForMVT(ValVT);
    const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
    const EVT OrigVT = TLI->getValueType(DL, Args[i].Ty);
    // Use the EVT here to strip pointerness.
    const LLT OrigTy = getLLTForType(*OrigVT.getTypeForEVT(F.getContext()), DL);
    const LLT PointerTy = LLT::pointer(
        AllocaAddressSpace, DL.getPointerSizeInBits(AllocaAddressSpace));

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    // This should be the same as getNumRegistersForCallingConv
    const unsigned NumParts = Args[i].Flags.size();

    // Now split the registers into the assigned types.
    Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());

    if (NumParts != 1 || NewLLT != OrigTy) {
      // If we can't directly assign the register, we need one or more
      // intermediate values.
      Args[i].Regs.resize(NumParts);

      // When we have indirect parameter passing we are receiving a pointer,
      // that points to the actual value, so we need one "temporary" pointer.
      if (VA.getLocInfo() == CCValAssign::Indirect) {
        if (Handler.isIncomingArgumentHandler())
          Args[i].Regs[0] = MRI.createGenericVirtualRegister(PointerTy);
      } else {
        // For each split register, create and assign a vreg that will store
        // the incoming component of the larger value. These will later be
        // merged to form the final vreg.
        for (unsigned Part = 0; Part < NumParts; ++Part)
          Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
      }
    }

    assert((j + (NumParts - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");

    // Coerce into outgoing value types before register assignment.
    if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy &&
        VA.getLocInfo() != CCValAssign::Indirect) {
      assert(Args[i].OrigRegs.size() == 1);
      buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
                      ValTy, extendOpFromFlags(Args[i].Flags[0]));
    }

    bool IndirectParameterPassingHandled = false;
    bool BigEndianPartOrdering = TLI->hasBigEndianPartOrdering(OrigVT, DL);
    for (unsigned Part = 0; Part < NumParts; ++Part) {
      assert((VA.getLocInfo() != CCValAssign::Indirect || Part == 0) &&
             "Only the first parameter should be processed when "
             "handling indirect passing!");
      Register ArgReg = Args[i].Regs[Part];
      // There should be Regs.size() ArgLocs per argument.
      unsigned Idx = BigEndianPartOrdering ? NumParts - 1 - Part : Part;
      CCValAssign &VA = ArgLocs[j + Idx];
      const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];

      // We found an indirect parameter passing, and we have an
      // OutgoingValueHandler as our handler (so we are at the call site or the
      // return value). In this case, start the construction of the following
      // GMIR, that is responsible for the preparation of indirect parameter
      // passing:
      //
      // %1(indirectly passed type) = The value to pass
      // %3(pointer) = G_FRAME_INDEX %stack.0
      // G_STORE %1, %3 :: (store (s128), align 8)
      //
      // After this GMIR, the remaining part of the loop body will decide how
      // to get the value to the caller and we break out of the loop.
      if (VA.getLocInfo() == CCValAssign::Indirect &&
          !Handler.isIncomingArgumentHandler()) {
        Align AlignmentForStored = DL.getPrefTypeAlign(Args[i].Ty);
        MachineFrameInfo &MFI = MF.getFrameInfo();
        // Get some space on the stack for the value, so later we can pass it
        // as a reference.
        int FrameIdx = MFI.CreateStackObject(OrigTy.getScalarSizeInBits(),
                                             AlignmentForStored, false);
        Register PointerToStackReg =
            MIRBuilder.buildFrameIndex(PointerTy, FrameIdx).getReg(0);
        MachinePointerInfo StackPointerMPO =
            MachinePointerInfo::getFixedStack(MF, FrameIdx);
        // Store the value in the previously created stack space.
        MIRBuilder.buildStore(Args[i].OrigRegs[Part], PointerToStackReg,
                              StackPointerMPO,
                              inferAlignFromPtrInfo(MF, StackPointerMPO));

        ArgReg = PointerToStackReg;
        IndirectParameterPassingHandled = true;
      }

      if (VA.isMemLoc() && !Flags.isByVal()) {
        // Individual pieces may have been spilled to the stack and others
        // passed in registers.

        // TODO: The memory size may be larger than the value we need to
        // store. We may need to adjust the offset for big endian targets.
        LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);

        MachinePointerInfo MPO;
        Register StackAddr =
            Handler.getStackAddress(VA.getLocInfo() == CCValAssign::Indirect
                                        ? PointerTy.getSizeInBytes()
                                        : MemTy.getSizeInBytes(),
                                    VA.getLocMemOffset(), MPO, Flags);

        // Finish the handling of indirect passing from the passers
        // (OutgoingParameterHandler) side. This branch is needed so that the
        // pointer to the value, rather than the value itself, ends up at the
        // stack location.
        if (VA.getLocInfo() == CCValAssign::Indirect)
          Handler.assignValueToAddress(ArgReg, StackAddr, PointerTy, MPO, VA);
        else
          Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO,
                                       VA);
      } else if (VA.isMemLoc() && Flags.isByVal()) {
        assert(Args[i].Regs.size() == 1 && "didn't expect split byval pointer");

        if (Handler.isIncomingArgumentHandler()) {
          // We just need to copy the frame index value to the pointer.
          MachinePointerInfo MPO;
          Register StackAddr = Handler.getStackAddress(
              Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
          MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
        } else {
          // For outgoing byval arguments, insert the implicit copy byval
          // implies, such that writes in the callee do not modify the caller's
          // value.
          uint64_t MemSize = Flags.getByValSize();
          int64_t Offset = VA.getLocMemOffset();

          MachinePointerInfo DstMPO;
          Register StackAddr =
              Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);

          MachinePointerInfo SrcMPO(Args[i].OrigValue);
          if (!Args[i].OrigValue) {
            // We still need to accurately track the stack address space if we
            // don't know the underlying value.
            const LLT PtrTy = MRI.getType(StackAddr);
            SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
          }

          Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, DstMPO));

          Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, SrcMPO));

          Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
                                     DstMPO, DstAlign, SrcMPO, SrcAlign,
                                     MemSize, VA);
        }
      } else if (i == 0 && !ThisReturnRegs.empty() &&
                 Handler.isIncomingArgumentHandler() &&
                 isTypeIsValidForThisReturn(ValVT)) {
        Handler.assignValueToReg(ArgReg, ThisReturnRegs[Part], VA, Flags);
      } else if (Handler.isIncomingArgumentHandler()) {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA, Flags);
      } else {
        DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
          Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA, Flags);
        });
      }

      // Finish the handling of indirect parameter passing when receiving
      // the value (we are in the called function or the caller when receiving
      // the return value).
      if (VA.getLocInfo() == CCValAssign::Indirect &&
          Handler.isIncomingArgumentHandler()) {
        Align Alignment = DL.getABITypeAlign(Args[i].Ty);
        MachinePointerInfo MPO = MachinePointerInfo::getUnknownStack(MF);

        // Since we are doing indirect parameter passing, we know that the value
        // in the temporary register is not the value passed to the function,
        // but rather a pointer to that value. Let's load that value into the
        // virtual register where the parameter should go.
        MIRBuilder.buildLoad(Args[i].OrigRegs[0], Args[i].Regs[0], MPO,
                             Alignment);

        IndirectParameterPassingHandled = true;
      }

      if (IndirectParameterPassingHandled)
        break;
    }

    // Now that all pieces have been assigned, re-pack the register typed values
    // into the original value typed registers. This is only necessary when the
    // value was passed in multiple registers, not indirectly.
    if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT &&
        !IndirectParameterPassingHandled) {
      // Merge the split registers into the expected larger result vregs of
      // the original call.
      buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
                        LocTy, Args[i].Flags[0]);
    }

    j += NumParts - 1;
  }
  for (auto &Fn : DelayedOutgoingRegAssignments)
    Fn();

  return true;
}
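
// Putting the pieces together for an outgoing split argument (illustrative
// sketch; $x0/$x1 stand in for whatever registers the target's calling
// convention assigns):
//   %lo:_(s64), %hi:_(s64) = G_UNMERGE_VALUES %val(s128)
//   ... any G_STOREs for mem locs ...
//   $x0 = COPY %lo   ; emitted last, via DelayedOutgoingRegAssignments
//   $x1 = COPY %hi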

void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                   ArrayRef<Register> VRegs, Register DemoteReg,
                                   int FI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, /*MemVTs=*/nullptr, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  Type *RetPtrTy =
      PointerType::get(RetTy->getContext(), DL.getAllocaAddrSpace());
  LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetPtrTy), DL);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializeObjectPtrOffset(Addr, DemoteReg, OffsetLLTy,
                                          Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                        MRI.getType(VRegs[I]),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                    ArrayRef<Register> VRegs,
                                    Register DemoteReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, /*MemVTs=*/nullptr, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  unsigned AS = DL.getAllocaAddrSpace();
  LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetTy->getContext(), AS), DL);

  MachinePointerInfo PtrInfo(AS);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializeObjectPtrOffset(Addr, DemoteReg, OffsetLLTy,
                                          Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                        MRI.getType(VRegs[I]),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetIncomingArgument(
    const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
    MachineRegisterInfo &MRI, const DataLayout &DL) const {
  unsigned AS = DL.getAllocaAddrSpace();
  DemoteReg = MRI.createGenericVirtualRegister(
      LLT::pointer(AS, DL.getPointerSizeInBits(AS)));

  Type *PtrTy = PointerType::get(F.getContext(), AS);

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);

  // NOTE: Assume that a pointer won't get split into more than one VT.
  assert(ValueVTs.size() == 1);

  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
                    ArgInfo::NoArgIndex);
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
  DemoteArg.Flags[0].setSRet();
  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
}
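
// Sketch of the sret demotion this enables on the callee side (assuming a
// struct return the target cannot lower in registers): a function written as
//   define { i64, i64, i64 } @f()
// is lowered as if it were
//   define void @f(ptr sret(...) %demote)
// with DemoteReg holding the incoming pointer; insertSRetStores later writes
// the return values through it, and the caller reads them back with
// insertSRetLoads.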

void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
                                              const CallBase &CB,
                                              CallLoweringInfo &Info) const {
  const DataLayout &DL = MIRBuilder.getDataLayout();
  Type *RetTy = CB.getType();
  unsigned AS = DL.getAllocaAddrSpace();
  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));

  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
      DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);

  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy->getContext(), AS),
                    ArgInfo::NoArgIndex);
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
  DemoteArg.Flags[0].setSRet();

  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
  Info.DemoteStackIndex = FI;
  Info.DemoteRegister = DemoteReg;
}

bool CallLowering::checkReturn(CCState &CCInfo,
                               SmallVectorImpl<BaseArgInfo> &Outs,
                               CCAssignFn *Fn) const {
  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
    MVT VT = MVT::getVT(Outs[I].Ty);
    if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], Outs[I].Ty, CCInfo))
      return false;
  }
  return true;
}

void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
                                 AttributeList Attrs,
                                 SmallVectorImpl<BaseArgInfo> &Outs,
                                 const DataLayout &DL) const {
  LLVMContext &Context = RetTy->getContext();
  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
  addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);

  for (EVT VT : SplitVTs) {
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
    MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
    Type *PartTy = EVT(RegVT).getTypeForEVT(Context);

    for (unsigned I = 0; I < NumParts; ++I) {
      Outs.emplace_back(PartTy, Flags);
    }
  }
}

bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
  const auto &F = MF.getFunction();
  Type *ReturnType = F.getReturnType();
  CallingConv::ID CallConv = F.getCallingConv();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
                MF.getDataLayout());
  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
}

bool CallLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &OutLocs,
    const SmallVectorImpl<ArgInfo> &OutArgs) const {
  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    const auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc())
      continue;

    MCRegister PhysReg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    const ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != PhysReg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}
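
// The only pattern this accepts is (illustrative, using a hypothetical
// callee-saved register $x20): an outgoing argument assigned to $x20 must be
// defined by
//   %v:_(s64) = COPY $x20
// i.e. the prospective tail call simply passes the caller's own incoming
// value through unchanged; anything else could clobber a register the caller
// is required to preserve.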

bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     ValueAssigner &CalleeAssigner,
                                     ValueAssigner &CallerAssigner) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

LLT CallLowering::ValueHandler::getStackValueStoreType(
    const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
  const MVT ValVT = VA.getValVT();
  if (ValVT != MVT::iPTR) {
    LLT ValTy(ValVT);

    // We lost the pointeriness going through CCValAssign, so try to restore it
    // based on the flags.
    if (Flags.isPointer()) {
      LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
                               ValTy.getScalarSizeInBits());
      if (ValVT.isVector() && ValVT.getVectorNumElements() != 1)
        return LLT::vector(ValTy.getElementCount(), PtrTy);
      return PtrTy;
    }

    return ValTy;
  }

  unsigned AddrSpace = Flags.getPointerAddrSpace();
  return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
}
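
// Example (assuming 64-bit pointers in address space 0): a <2 x ptr> argument
// reaches here as ValVT == v2i64 with Flags.isPointer() set, and is restored
// to a <2 x p0> LLT so the stack access keeps its pointer type.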

void CallLowering::ValueHandler::copyArgumentMemory(
    const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
    const MachinePointerInfo &DstPtrInfo, Align DstAlign,
    const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
    CCValAssign &VA) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
      SrcPtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
      SrcAlign);

  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
      DstPtrInfo,
      MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
      MemSize, DstAlign);

  const LLT PtrTy = MRI.getType(DstPtr);
  const LLT SizeTy = LLT::integer(PtrTy.getSizeInBits());

  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    const CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy{VA.getValVT()};

  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  const LLT ValRegTy = MRI.getType(ValReg);
  if (ValRegTy.isPointer()) {
    // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
    // we have to cast to do the extension.
    LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
    ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
  }

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}
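
// Example (hypothetical types): an s8 value promoted to a 32-bit location
// with LocInfo == SExt becomes
//   %ext:_(s32) = G_SEXT %val(s8)
// and %ext is what ultimately gets copied into the physical register.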

void CallLowering::ValueAssigner::anchor() {}

Register CallLowering::IncomingValueHandler::buildExtensionHint(
    const CCValAssign &VA, Register SrcReg, LLT NarrowTy) {
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::ZExt: {
    return MIRBuilder
        .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  case CCValAssign::LocInfo::SExt: {
    return MIRBuilder
        .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  default:
    return SrcReg;
  }
}

/// Check if we can use a basic COPY instruction between the two types.
///
/// We're currently building on top of the infrastructure using MVT, which
/// loses pointer information in the CCValAssign. We accept copies from
/// physical registers that have been reported as integers if it's to an
/// equivalent sized pointer LLT.
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
  if (SrcTy == DstTy)
    return true;

  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
    return false;

  SrcTy = SrcTy.getScalarType();
  DstTy = DstTy.getScalarType();

  return (SrcTy.isPointer() && DstTy.isScalar()) ||
         (DstTy.isPointer() && SrcTy.isScalar());
}
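
// For example, a copy between p0 and s64 (on a 64-bit target) is accepted,
// while s32 <-> s64 is not: the sizes differ, so an explicit extension or
// truncation is needed instead of a plain COPY.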

void CallLowering::IncomingValueHandler::assignValueToReg(
    Register ValVReg, Register PhysReg, const CCValAssign &VA,
    ISD::ArgFlagsTy Flags) {
  const MVT LocVT = VA.getLocVT();
  const LLT LocTy = getLLTForMVT(LocVT);
  const LLT RegTy = MRI.getType(ValVReg);

  if (isCopyCompatibleType(RegTy, LocTy)) {
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    return;
  }

  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
  MIRBuilder.buildTrunc(ValVReg, Hint);
}
1430