//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//
13
14#include "llvm/CodeGen/CallingConvLower.h"
15#include "llvm/CodeGen/MachineFrameInfo.h"
16#include "llvm/CodeGen/MachineFunction.h"
17#include "llvm/CodeGen/TargetLowering.h"
18#include "llvm/CodeGen/TargetRegisterInfo.h"
19#include "llvm/CodeGen/TargetSubtargetInfo.h"
20#include "llvm/MC/MCRegisterInfo.h"
21#include "llvm/Support/Debug.h"
22#include "llvm/Support/ErrorHandling.h"
23#include "llvm/Support/SaveAndRestore.h"
24#include "llvm/Support/raw_ostream.h"
25
26using namespace llvm;
27
28CCState::CCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
29 SmallVectorImpl<CCValAssign> &Locs, LLVMContext &Context,
30 bool NegativeOffsets)
31 : CallingConv(CC), IsVarArg(IsVarArg), MF(MF),
32 TRI(*MF.getSubtarget().getRegisterInfo()), Locs(Locs), Context(Context),
33 NegativeOffsets(NegativeOffsets) {
34
35 // No stack is used.
36 StackSize = 0;
37
38 clearByValRegsInfo();
39 UsedRegs.resize(N: (TRI.getNumRegs()+31)/32);
40}
41
42/// Allocate space on the stack large enough to pass an argument by value.
43/// The size and alignment information of the argument is encoded in
44/// its parameter attribute.
45void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
46 CCValAssign::LocInfo LocInfo, int MinSize,
47 Align MinAlign, ISD::ArgFlagsTy ArgFlags) {
48 Align Alignment = ArgFlags.getNonZeroByValAlign();
49 unsigned Size = ArgFlags.getByValSize();
50 if (MinSize > (int)Size)
51 Size = MinSize;
52 if (MinAlign > Alignment)
53 Alignment = MinAlign;
54 ensureMaxAlignment(Alignment);
55 MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Alignment);
56 Size = unsigned(alignTo(Size, A: MinAlign));
57 uint64_t Offset = AllocateStack(Size, Alignment);
58 addLoc(V: CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, HTP: LocInfo));
59}
60
61/// Mark a register and all of its aliases as allocated.
62void CCState::MarkAllocated(MCPhysReg Reg) {
63 for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
64 UsedRegs[(*AI).id() / 32] |= 1 << ((*AI).id() & 31);
65}
66
67void CCState::MarkUnallocated(MCPhysReg Reg) {
68 for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
69 UsedRegs[(*AI).id() / 32] &= ~(1 << ((*AI).id() & 31));
70}
71
72bool CCState::IsShadowAllocatedReg(MCRegister Reg) const {
73 if (!isAllocated(Reg))
74 return false;
75
76 for (auto const &ValAssign : Locs)
77 if (ValAssign.isRegLoc() && TRI.regsOverlap(RegA: ValAssign.getLocReg(), RegB: Reg))
78 return false;
79 return true;
80}
81
82/// Analyze an array of argument values,
83/// incorporating info about the formals into this state.
84void
85CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
86 CCAssignFn Fn) {
87 unsigned NumArgs = Ins.size();
88
89 for (unsigned i = 0; i != NumArgs; ++i) {
90 MVT ArgVT = Ins[i].VT;
91 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
92 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Ins[i].OrigTy, *this))
93 report_fatal_error(reason: "unable to allocate function argument #" + Twine(i));
94 }
95}
96
97/// Analyze the return values of a function, returning true if the return can
98/// be performed without sret-demotion and false otherwise.
99bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
100 CCAssignFn Fn) {
101 // Determine which register each value should be copied into.
102 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
103 MVT VT = Outs[i].VT;
104 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
105 if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy, *this))
106 return false;
107 }
108 return true;
109}
110
111/// Analyze the returned values of a return,
112/// incorporating info about the result values into this state.
113void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
114 CCAssignFn Fn) {
115 // Determine which register each value should be copied into.
116 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
117 MVT VT = Outs[i].VT;
118 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
119 if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy, *this))
120 report_fatal_error(reason: "unable to allocate function return #" + Twine(i));
121 }
122}
123
124/// Analyze the outgoing arguments to a call,
125/// incorporating info about the passed values into this state.
126void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
127 CCAssignFn Fn) {
128 unsigned NumOps = Outs.size();
129 for (unsigned i = 0; i != NumOps; ++i) {
130 MVT ArgVT = Outs[i].VT;
131 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
132 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy,
133 *this)) {
134#ifndef NDEBUG
135 dbgs() << "Call operand #" << i << " has unhandled type "
136 << ArgVT << '\n';
137#endif
138 llvm_unreachable(nullptr);
139 }
140 }
141}
142
143/// Same as above except it takes vectors of types and argument flags.
144void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
145 SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
146 SmallVectorImpl<Type *> &OrigTys,
147 CCAssignFn Fn) {
148 unsigned NumOps = ArgVTs.size();
149 for (unsigned i = 0; i != NumOps; ++i) {
150 MVT ArgVT = ArgVTs[i];
151 ISD::ArgFlagsTy ArgFlags = Flags[i];
152 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, OrigTys[i], *this)) {
153#ifndef NDEBUG
154 dbgs() << "Call operand #" << i << " has unhandled type "
155 << ArgVT << '\n';
156#endif
157 llvm_unreachable(nullptr);
158 }
159 }
160}
161
162/// Analyze the return values of a call, incorporating info about the passed
163/// values into this state.
164void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
165 CCAssignFn Fn) {
166 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
167 MVT VT = Ins[i].VT;
168 ISD::ArgFlagsTy Flags = Ins[i].Flags;
169 if (Fn(i, VT, VT, CCValAssign::Full, Flags, Ins[i].OrigTy, *this)) {
170#ifndef NDEBUG
171 dbgs() << "Call result #" << i << " has unhandled type "
172 << VT << '\n';
173#endif
174 llvm_unreachable(nullptr);
175 }
176 }
177}
178
179/// Same as above except it's specialized for calls that produce a single value.
180void CCState::AnalyzeCallResult(MVT VT, Type *OrigTy, CCAssignFn Fn) {
181 if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), OrigTy, *this)) {
182#ifndef NDEBUG
183 dbgs() << "Call result has unhandled type "
184 << VT << '\n';
185#endif
186 llvm_unreachable(nullptr);
187 }
188}
189
190void CCState::ensureMaxAlignment(Align Alignment) {
191 if (!AnalyzingMustTailForwardedRegs)
192 MF.getFrameInfo().ensureMaxAlignment(Alignment);
193}
194
195static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
196 if (VT.isVector())
197 return true; // Assume -msse-regparm might be in effect.
198 if (!VT.isInteger())
199 return false;
200 return (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall);
201}
202
203void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCRegister> &Regs,
204 MVT VT, CCAssignFn Fn) {
205 uint64_t SavedStackSize = StackSize;
206 Align SavedMaxStackArgAlign = MaxStackArgAlign;
207 unsigned NumLocs = Locs.size();
208
209 // Set the 'inreg' flag if it is used for this calling convention.
210 ISD::ArgFlagsTy Flags;
211 if (isValueTypeInRegForCC(CC: CallingConv, VT))
212 Flags.setInReg();
213
214 // Allocate something of this value type repeatedly until we get assigned a
215 // location in memory.
216 bool HaveRegParm;
217 do {
218 Type *OrigTy = EVT(VT).getTypeForEVT(Context);
219 if (Fn(0, VT, VT, CCValAssign::Full, Flags, OrigTy, *this)) {
220#ifndef NDEBUG
221 dbgs() << "Call has unhandled type " << VT
222 << " while computing remaining regparms\n";
223#endif
224 llvm_unreachable(nullptr);
225 }
226 HaveRegParm = Locs.back().isRegLoc();
227 } while (HaveRegParm);
228
229 // Copy all the registers from the value locations we added.
230 assert(NumLocs < Locs.size() && "CC assignment failed to add location");
231 for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
232 if (Locs[I].isRegLoc())
233 Regs.push_back(Elt: Locs[I].getLocReg());
234
235 // Clear the assigned values and stack memory. We leave the registers marked
236 // as allocated so that future queries don't return the same registers, i.e.
237 // when i64 and f64 are both passed in GPRs.
238 StackSize = SavedStackSize;
239 MaxStackArgAlign = SavedMaxStackArgAlign;
240 Locs.truncate(N: NumLocs);
241}
242
243void CCState::analyzeMustTailForwardedRegisters(
244 SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
245 CCAssignFn Fn) {
246 // Oftentimes calling conventions will not user register parameters for
247 // variadic functions, so we need to assume we're not variadic so that we get
248 // all the registers that might be used in a non-variadic call.
249 SaveAndRestore SavedVarArg(IsVarArg, false);
250 SaveAndRestore SavedMustTail(AnalyzingMustTailForwardedRegs, true);
251
252 for (MVT RegVT : RegParmTypes) {
253 SmallVector<MCRegister, 8> RemainingRegs;
254 getRemainingRegParmsForType(Regs&: RemainingRegs, VT: RegVT, Fn);
255 const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
256 const TargetRegisterClass *RC = TL->getRegClassFor(VT: RegVT);
257 for (MCRegister PReg : RemainingRegs) {
258 Register VReg = MF.addLiveIn(PReg, RC);
259 Forwards.push_back(Elt: ForwardedRegister(VReg, PReg, RegVT));
260 }
261 }
262}
263
264bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
265 CallingConv::ID CallerCC, MachineFunction &MF,
266 LLVMContext &C,
267 const SmallVectorImpl<ISD::InputArg> &Ins,
268 CCAssignFn CalleeFn, CCAssignFn CallerFn) {
269 if (CalleeCC == CallerCC)
270 return true;
271 SmallVector<CCValAssign, 4> RVLocs1;
272 CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
273 CCInfo1.AnalyzeCallResult(Ins, Fn: CalleeFn);
274
275 SmallVector<CCValAssign, 4> RVLocs2;
276 CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
277 CCInfo2.AnalyzeCallResult(Ins, Fn: CallerFn);
278
279 auto AreCompatible = [](const CCValAssign &Loc1, const CCValAssign &Loc2) {
280 assert(!Loc1.isPendingLoc() && !Loc2.isPendingLoc() &&
281 "The location must have been decided by now");
282 // Must fill the same part of their locations.
283 if (Loc1.getLocInfo() != Loc2.getLocInfo())
284 return false;
285 // Must both be in the same registers, or both in memory at the same offset.
286 if (Loc1.isRegLoc() && Loc2.isRegLoc())
287 return Loc1.getLocReg() == Loc2.getLocReg();
288 if (Loc1.isMemLoc() && Loc2.isMemLoc())
289 return Loc1.getLocMemOffset() == Loc2.getLocMemOffset();
290 llvm_unreachable("Unknown location kind");
291 };
292
293 return llvm::equal(LRange&: RVLocs1, RRange&: RVLocs2, P: AreCompatible);
294}
295