1//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file implements the lowering of LLVM calls to machine code calls for
11/// GlobalISel.
12//
13//===----------------------------------------------------------------------===//
14
#include "ARMCallLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <utility>
50
51using namespace llvm;
52
// Whether big-endian GlobalISel lowering is enabled. Defaults to off; can be
// enabled for testing via -enable-arm-gisel-bigendian.
static cl::opt<bool>
    EnableGISelBigEndian("enable-arm-gisel-bigendian", cl::Hidden,
                         cl::init(false),
                         cl::desc("Enable Global-ISel Big Endian Lowering"));
59
// The base CallLowering keeps a pointer to the target's TLI for querying
// calling-convention assignment functions and value types.
ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}
62
63static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
64 Type *T) {
65 if (T->isArrayTy())
66 return isSupportedType(DL, TLI, T: T->getArrayElementType());
67
68 if (T->isStructTy()) {
69 // For now we only allow homogeneous structs that we can manipulate with
70 // G_MERGE_VALUES and G_UNMERGE_VALUES
71 auto StructT = cast<StructType>(Val: T);
72 for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
73 if (StructT->getElementType(N: i) != StructT->getElementType(N: 0))
74 return false;
75 return isSupportedType(DL, TLI, T: StructT->getElementType(N: 0));
76 }
77
78 EVT VT = TLI.getValueType(DL, Ty: T, AllowUnknown: true);
79 if (!VT.isSimple() || VT.isVector() ||
80 !(VT.isInteger() || VT.isFloatingPoint()))
81 return false;
82
83 unsigned VTSize = VT.getSimpleVT().getSizeInBits();
84
85 if (VTSize == 64)
86 // FIXME: Support i64 too
87 return VT.isFloatingPoint();
88
89 return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
90}
91
92namespace {
93
94/// Helper class for values going out through an ABI boundary (used for handling
95/// function return values and call parameters).
96struct ARMOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
97 ARMOutgoingValueHandler(MachineIRBuilder &MIRBuilder,
98 MachineRegisterInfo &MRI, MachineInstrBuilder &MIB)
99 : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB) {}
100
101 Register getStackAddress(uint64_t Size, int64_t Offset,
102 MachinePointerInfo &MPO,
103 ISD::ArgFlagsTy Flags) override {
104 assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
105 "Unsupported size");
106
107 LLT p0 = LLT::pointer(AddressSpace: 0, SizeInBits: 32);
108 LLT s32 = LLT::scalar(SizeInBits: 32);
109 auto SPReg = MIRBuilder.buildCopy(Res: p0, Op: Register(ARM::SP));
110
111 auto OffsetReg = MIRBuilder.buildConstant(Res: s32, Val: Offset);
112
113 auto AddrReg = MIRBuilder.buildPtrAdd(Res: p0, Op0: SPReg, Op1: OffsetReg);
114
115 MPO = MachinePointerInfo::getStack(MF&: MIRBuilder.getMF(), Offset);
116 return AddrReg.getReg(Idx: 0);
117 }
118
119 void assignValueToReg(Register ValVReg, Register PhysReg,
120 const CCValAssign &VA,
121 ISD::ArgFlagsTy Flags = {}) override {
122 assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
123 assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
124
125 assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
126 assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");
127
128 Register ExtReg = extendRegister(ValReg: ValVReg, VA);
129 MIRBuilder.buildCopy(Res: PhysReg, Op: ExtReg);
130 MIB.addUse(RegNo: PhysReg, Flags: RegState::Implicit);
131 }
132
133 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
134 const MachinePointerInfo &MPO,
135 const CCValAssign &VA) override {
136 Register ExtReg = extendRegister(ValReg: ValVReg, VA);
137 auto MMO = MIRBuilder.getMF().getMachineMemOperand(
138 PtrInfo: MPO, f: MachineMemOperand::MOStore, MemTy, base_alignment: Align(1));
139 MIRBuilder.buildStore(Val: ExtReg, Addr, MMO&: *MMO);
140 }
141
142 unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
143 ArrayRef<CCValAssign> VAs,
144 std::function<void()> *Thunk) override {
145 assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");
146
147 const CCValAssign &VA = VAs[0];
148 assert(VA.needsCustom() && "Value doesn't need custom handling");
149
150 // Custom lowering for other types, such as f16, is currently not supported
151 if (VA.getValVT() != MVT::f64)
152 return 0;
153
154 const CCValAssign &NextVA = VAs[1];
155 assert(NextVA.needsCustom() && "Value doesn't need custom handling");
156 assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");
157
158 assert(VA.getValNo() == NextVA.getValNo() &&
159 "Values belong to different arguments");
160
161 assert(VA.isRegLoc() && "Value should be in reg");
162 assert(NextVA.isRegLoc() && "Value should be in reg");
163
164 Register NewRegs[] = {MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32)),
165 MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32))};
166 MIRBuilder.buildUnmerge(Res: NewRegs, Op: Arg.Regs[0]);
167
168 bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
169 if (!IsLittle)
170 std::swap(a&: NewRegs[0], b&: NewRegs[1]);
171
172 if (Thunk) {
173 *Thunk = [=]() {
174 assignValueToReg(ValVReg: NewRegs[0], PhysReg: VA.getLocReg(), VA);
175 assignValueToReg(ValVReg: NewRegs[1], PhysReg: NextVA.getLocReg(), VA: NextVA);
176 };
177 return 2;
178 }
179 assignValueToReg(ValVReg: NewRegs[0], PhysReg: VA.getLocReg(), VA);
180 assignValueToReg(ValVReg: NewRegs[1], PhysReg: NextVA.getLocReg(), VA: NextVA);
181 return 2;
182 }
183
184 MachineInstrBuilder MIB;
185};
186
187} // end anonymous namespace
188
/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();

  const auto &DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  // Returning false here makes the caller fall back to SelectionDAG.
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  // Break aggregate return values down into their scalar components.
  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, F.getCallingConv());

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  OutgoingValueAssigner RetAssigner(AssignFn);
  // The handler copies each value into its return register and adds that
  // register as an implicit use of \p Ret.
  ARMOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
  return determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                       MIRBuilder, F.getCallingConv(),
                                       F.isVarArg());
}
221
222bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
223 const Value *Val, ArrayRef<Register> VRegs,
224 FunctionLoweringInfo &FLI) const {
225 assert(!Val == VRegs.empty() && "Return value without a vreg");
226
227 auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
228 unsigned Opcode = ST.getReturnOpcode();
229 auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(MOs: predOps(Pred: ARMCC::AL));
230
231 if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
232 return false;
233
234 MIRBuilder.insertInstr(MIB: Ret);
235 return true;
236}
237
238namespace {
239
240/// Helper class for values coming in through an ABI boundary (used for handling
241/// formal arguments and call return values).
242struct ARMIncomingValueHandler : public CallLowering::IncomingValueHandler {
243 ARMIncomingValueHandler(MachineIRBuilder &MIRBuilder,
244 MachineRegisterInfo &MRI)
245 : IncomingValueHandler(MIRBuilder, MRI) {}
246
247 Register getStackAddress(uint64_t Size, int64_t Offset,
248 MachinePointerInfo &MPO,
249 ISD::ArgFlagsTy Flags) override {
250 assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
251 "Unsupported size");
252
253 auto &MFI = MIRBuilder.getMF().getFrameInfo();
254
255 // Byval is assumed to be writable memory, but other stack passed arguments
256 // are not.
257 const bool IsImmutable = !Flags.isByVal();
258
259 int FI = MFI.CreateFixedObject(Size, SPOffset: Offset, IsImmutable);
260 MPO = MachinePointerInfo::getFixedStack(MF&: MIRBuilder.getMF(), FI);
261
262 return MIRBuilder.buildFrameIndex(Res: LLT::pointer(AddressSpace: MPO.getAddrSpace(), SizeInBits: 32), Idx: FI)
263 .getReg(Idx: 0);
264 }
265
266 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
267 const MachinePointerInfo &MPO,
268 const CCValAssign &VA) override {
269 if (VA.getLocInfo() == CCValAssign::SExt ||
270 VA.getLocInfo() == CCValAssign::ZExt) {
271 // If the value is zero- or sign-extended, its size becomes 4 bytes, so
272 // that's what we should load.
273 MemTy = LLT::scalar(SizeInBits: 32);
274 assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");
275
276 auto LoadVReg = buildLoad(Res: LLT::scalar(SizeInBits: 32), Addr, MemTy, MPO);
277 MIRBuilder.buildTrunc(Res: ValVReg, Op: LoadVReg);
278 } else {
279 // If the value is not extended, a simple load will suffice.
280 buildLoad(Res: ValVReg, Addr, MemTy, MPO);
281 }
282 }
283
284 MachineInstrBuilder buildLoad(const DstOp &Res, Register Addr, LLT MemTy,
285 const MachinePointerInfo &MPO) {
286 MachineFunction &MF = MIRBuilder.getMF();
287
288 auto MMO = MF.getMachineMemOperand(PtrInfo: MPO, f: MachineMemOperand::MOLoad, MemTy,
289 base_alignment: inferAlignFromPtrInfo(MF, MPO));
290 return MIRBuilder.buildLoad(Res, Addr, MMO&: *MMO);
291 }
292
293 void assignValueToReg(Register ValVReg, Register PhysReg,
294 const CCValAssign &VA,
295 ISD::ArgFlagsTy Flags = {}) override {
296 assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
297 assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
298
299 uint64_t ValSize = VA.getValVT().getFixedSizeInBits();
300 uint64_t LocSize = VA.getLocVT().getFixedSizeInBits();
301
302 assert(ValSize <= 64 && "Unsupported value size");
303 assert(LocSize <= 64 && "Unsupported location size");
304
305 markPhysRegUsed(PhysReg);
306 if (ValSize == LocSize) {
307 MIRBuilder.buildCopy(Res: ValVReg, Op: PhysReg);
308 } else {
309 assert(ValSize < LocSize && "Extensions not supported");
310
311 // We cannot create a truncating copy, nor a trunc of a physical register.
312 // Therefore, we need to copy the content of the physical register into a
313 // virtual one and then truncate that.
314 auto PhysRegToVReg = MIRBuilder.buildCopy(Res: LLT::scalar(SizeInBits: LocSize), Op: PhysReg);
315 MIRBuilder.buildTrunc(Res: ValVReg, Op: PhysRegToVReg);
316 }
317 }
318
319 unsigned assignCustomValue(ARMCallLowering::ArgInfo &Arg,
320 ArrayRef<CCValAssign> VAs,
321 std::function<void()> *Thunk) override {
322 assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");
323
324 const CCValAssign &VA = VAs[0];
325 assert(VA.needsCustom() && "Value doesn't need custom handling");
326
327 // Custom lowering for other types, such as f16, is currently not supported
328 if (VA.getValVT() != MVT::f64)
329 return 0;
330
331 const CCValAssign &NextVA = VAs[1];
332 assert(NextVA.needsCustom() && "Value doesn't need custom handling");
333 assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");
334
335 assert(VA.getValNo() == NextVA.getValNo() &&
336 "Values belong to different arguments");
337
338 assert(VA.isRegLoc() && "Value should be in reg");
339 assert(NextVA.isRegLoc() && "Value should be in reg");
340
341 Register NewRegs[] = {MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32)),
342 MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32))};
343
344 assignValueToReg(ValVReg: NewRegs[0], PhysReg: VA.getLocReg(), VA);
345 assignValueToReg(ValVReg: NewRegs[1], PhysReg: NextVA.getLocReg(), VA: NextVA);
346
347 bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
348 if (!IsLittle)
349 std::swap(a&: NewRegs[0], b&: NewRegs[1]);
350
351 MIRBuilder.buildMergeLikeInstr(Res: Arg.Regs[0], Ops: NewRegs);
352
353 return 2;
354 }
355
356 /// Marking a physical register as used is different between formal
357 /// parameters, where it's a basic block live-in, and call returns, where it's
358 /// an implicit-def of the call instruction.
359 virtual void markPhysRegUsed(unsigned PhysReg) = 0;
360};
361
/// Incoming-value handler for formal arguments: the argument registers are
/// live into the entry block.
struct FormalArgHandler : public ARMIncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : ARMIncomingValueHandler(MIRBuilder, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    // Record the live-in on both the function's register info and the
    // current basic block.
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};
371
372} // end anonymous namespace
373
/// Lower the incoming (formal) arguments of \p F into the vregs in \p VRegs.
/// Returns false to fall back to SelectionDAG for anything unsupported
/// (Thumb1, varargs, unsupported argument types).
bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                           const Function &F,
                                           ArrayRef<ArrayRef<Register>> VRegs,
                                           FunctionLoweringInfo &FLI) const {
  auto &TLI = *getTLI<ARMTargetLowering>();
  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb1Only())
    return false;

  // Quick exit if there aren't any args
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto &MBB = MIRBuilder.getMBB();
  const auto &DL = MF.getDataLayout();

  // Bail out early on any argument we can't handle yet.
  for (auto &Arg : F.args()) {
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;
    if (Arg.hasPassPointeeByValueCopyAttr())
      return false;
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  OutgoingValueAssigner ArgAssigner(AssignFn);
  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo());

  // Split aggregate arguments into their scalar components.
  SmallVector<ArgInfo, 8> SplitArgInfos;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {
    ArgInfo OrigArgInfo(VRegs[Idx], Arg.getType(), Idx);

    setArgFlags(OrigArgInfo, Idx + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(OrigArgInfo, SplitArgInfos, DL, F.getCallingConv());

    Idx++;
  }

  // Emit the argument-lowering code at the top of the entry block, before any
  // instructions already present.
  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                     MIRBuilder, F.getCallingConv(),
                                     F.isVarArg()))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);
  return true;
}
431
432namespace {
433
/// Incoming-value handler for call return values: the returned registers are
/// implicit defs of the call instruction itself.
struct CallReturnHandler : public ARMIncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : ARMIncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  // The call instruction that defines the returned registers.
  MachineInstrBuilder MIB;
};
445
446// FIXME: This should move to the ARMSubtarget when it supports all the opcodes.
447unsigned getCallOpcode(const MachineFunction &MF, const ARMSubtarget &STI,
448 bool isDirect) {
449 if (isDirect)
450 return STI.isThumb() ? ARM::tBL : ARM::BL;
451
452 if (STI.isThumb())
453 return gettBLXrOpcode(MF);
454
455 if (STI.hasV5TOps())
456 return getBLXOpcode(MF);
457
458 if (STI.hasV4TOps())
459 return ARM::BX_CALL;
460
461 return ARM::BMOVPCRX_CALL;
462}
463} // end anonymous namespace
464
/// Lower an outgoing call described by \p Info. Returns false to fall back to
/// SelectionDAG for anything unsupported (long calls, Thumb1, byval args,
/// unsupported types).
bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const auto &TLI = *getTLI<ARMTargetLowering>();
  const auto &DL = MF.getDataLayout();
  const auto &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (STI.genLongCalls())
    return false;

  if (STI.isThumb1Only())
    return false;

  // Open the call sequence; the stack size immediate is filled in below once
  // the argument assignments are known.
  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);

  // Create the call instruction so we can add the implicit uses of arg
  // registers, but don't insert it yet.
  bool IsDirect = !Info.Callee.isReg();
  auto CallOpcode = getCallOpcode(MF, STI, IsDirect);
  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);

  bool IsThumb = STI.isThumb();
  if (IsThumb)
    MIB.add(predOps(ARMCC::AL));

  MIB.add(Info.Callee);
  if (!IsDirect) {
    auto CalleeReg = Info.Callee.getReg();
    if (CalleeReg && !CalleeReg.isPhysical()) {
      // The callee operand index depends on the predicate operands added for
      // Thumb above. Constrain the virtual callee register to the class the
      // instruction expects.
      unsigned CalleeIdx = IsThumb ? 2 : 0;
      MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
          *MIB.getInstr(), MIB->getDesc(), Info.Callee, CalleeIdx));
    }
  }

  MIB.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  // Split each argument into scalar components, bailing on anything we can't
  // handle.
  SmallVector<ArgInfo, 8> ArgInfos;
  for (auto Arg : Info.OrigArgs) {
    if (!isSupportedType(DL, TLI, Arg.Ty))
      return false;

    if (Arg.Flags[0].isByVal())
      return false;

    splitToValueTypes(Arg, ArgInfos, DL, Info.CallConv);
  }

  auto ArgAssignFn = TLI.CCAssignFnForCall(Info.CallConv, Info.IsVarArg);
  OutgoingValueAssigner ArgAssigner(ArgAssignFn);
  ARMOutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB);
  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, ArgInfos,
                                     MIRBuilder, Info.CallConv, Info.IsVarArg))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // Lower the return value, if any, by copying it out of the registers the
  // call implicitly defines.
  if (!Info.OrigRet.Ty->isVoidTy()) {
    if (!isSupportedType(DL, TLI, Info.OrigRet.Ty))
      return false;

    ArgInfos.clear();
    splitToValueTypes(Info.OrigRet, ArgInfos, DL, Info.CallConv);
    auto RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, Info.IsVarArg);
    OutgoingValueAssigner Assigner(RetAssignFn);
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(RetHandler, Assigner, ArgInfos,
                                       MIRBuilder, Info.CallConv,
                                       Info.IsVarArg))
      return false;
  }

  // We now know the size of the stack - update the ADJCALLSTACKDOWN
  // accordingly.
  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0).add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(ArgAssigner.StackSize)
      .addImm(-1ULL)
      .add(predOps(ARMCC::AL));

  return true;
}
551
552bool ARMCallLowering::enableBigEndian() const { return EnableGISelBigEndian; }
553