//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPU.h"
#include "AMDGPULaneMaskUtils.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableSpillVGPRToAGPR(
    "amdgpu-spill-vgpr-to-agpr",
    cl::desc("Enable spilling VGPRs to AGPRs"),
    cl::ReallyHidden,
    cl::init(true));

// Find a register matching \p RC from \p LiveUnits which is unused and
// available throughout the function. On failure, returns AMDGPU::NoRegister.
// TODO: Rewrite the loop here to iterate over MCRegUnits instead of
// MCRegisters. This should reduce the number of iterations and avoid redundant
// checking.
static MCRegister findUnusedRegister(MachineRegisterInfo &MRI,
                                     const LiveRegUnits &LiveUnits,
                                     const TargetRegisterClass &RC) {
  for (MCRegister Reg : RC) {
    if (!MRI.isPhysRegUsed(Reg) && LiveUnits.available(Reg) &&
        !MRI.isReserved(Reg))
      return Reg;
  }
  return MCRegister();
}

// Find a scratch register that we can use in the prologue. We avoid using
// callee-save registers since they may appear to be free when this is called
// from canUseAsPrologue (during shrink wrapping), but then no longer be free
// when this is called from emitPrologue.
static MCRegister findScratchNonCalleeSaveRegister(
    MachineRegisterInfo &MRI, LiveRegUnits &LiveUnits,
    const TargetRegisterClass &RC, bool Unused = false) {
  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveUnits.addReg(CSRegs[i]);

  // We are looking for a register that can be used throughout the entire
  // function, so any use is unacceptable.
  if (Unused)
    return findUnusedRegister(MRI, LiveUnits, RC);

  for (MCRegister Reg : RC) {
    if (LiveUnits.available(Reg) && !MRI.isReserved(Reg))
      return Reg;
  }

  return MCRegister();
}

/// Query target location for spilling SGPRs
/// \p IncludeScratchCopy : Also look for free scratch SGPRs
static void getVGPRSpillLaneOrTempRegister(
    MachineFunction &MF, LiveRegUnits &LiveUnits, Register SGPR,
    const TargetRegisterClass &RC = AMDGPU::SReg_32_XM0_XEXECRegClass,
    bool IncludeScratchCopy = true) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  unsigned Size = TRI->getSpillSize(RC);
  Align Alignment = TRI->getSpillAlign(RC);

  // We need to save and restore the given SGPR.

  Register ScratchSGPR;
  // 1: Try to save the given register into an unused scratch SGPR. The
  // LiveUnits should have all the callee saved registers marked as used. For
  // certain cases we skip copy to scratch SGPR.
  if (IncludeScratchCopy)
    ScratchSGPR = findUnusedRegister(MF.getRegInfo(), LiveUnits, RC);

  if (!ScratchSGPR) {
    int FI = FrameInfo.CreateStackObject(Size, Alignment, true, nullptr,
                                         TargetStackID::SGPRSpill);

    if (TRI->spillSGPRToVGPR() &&
        MFI->allocateSGPRSpillToVGPRLane(MF, FI, /*SpillToPhysVGPRLane=*/true,
                                         /*IsPrologEpilog=*/true)) {
      // 2: There's no free lane to spill, and no free register to save the
      // SGPR, so we're forced to take another VGPR to use for the spill.
      MFI->addToPrologEpilogSGPRSpills(
          SGPR, PrologEpilogSGPRSaveRestoreInfo(
                    SGPRSaveKind::SPILL_TO_VGPR_LANE, FI));

      LLVM_DEBUG(auto Spill = MFI->getSGPRSpillToPhysicalVGPRLanes(FI).front();
                 dbgs() << printReg(SGPR, TRI) << " requires fallback spill to "
                        << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
                        << '\n';);
    } else {
      // Remove dead <FI> index
      MF.getFrameInfo().RemoveStackObject(FI);
      // 3: If all else fails, spill the register to memory.
      FI = FrameInfo.CreateSpillStackObject(Size, Alignment);
      MFI->addToPrologEpilogSGPRSpills(
          SGPR,
          PrologEpilogSGPRSaveRestoreInfo(SGPRSaveKind::SPILL_TO_MEM, FI));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FI << " for spilling "
                        << printReg(SGPR, TRI) << '\n');
    }
  } else {
    MFI->addToPrologEpilogSGPRSpills(
        SGPR, PrologEpilogSGPRSaveRestoreInfo(
                  SGPRSaveKind::COPY_TO_SCRATCH_SGPR, ScratchSGPR));
    LiveUnits.addReg(ScratchSGPR);
    LLVM_DEBUG(dbgs() << "Saving " << printReg(SGPR, TRI) << " with copy to "
                      << printReg(ScratchSGPR, TRI) << '\n');
  }
}

// We need to emit stack operations specially here because a different frame
// register is used than the one getFrameRegister would return for the rest of
// the function.
static void buildPrologSpill(const GCNSubtarget &ST, const SIRegisterInfo &TRI,
                             const SIMachineFunctionInfo &FuncInfo,
                             LiveRegUnits &LiveUnits, MachineFunction &MF,
                             MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I, const DebugLoc &DL,
                             Register SpillReg, int FI, Register FrameReg,
                             int64_t DwordOff = 0) {
  unsigned Opc = ST.hasFlatScratchEnabled() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
                                            : AMDGPU::BUFFER_STORE_DWORD_OFFSET;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FI),
      FrameInfo.getObjectAlign(FI));
  LiveUnits.addReg(SpillReg);
  bool IsKill = !MBB.isLiveIn(SpillReg);
  TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, IsKill, FrameReg,
                          DwordOff, MMO, nullptr, &LiveUnits);
  if (IsKill)
    LiveUnits.removeReg(SpillReg);
}

static void buildEpilogRestore(const GCNSubtarget &ST,
                               const SIRegisterInfo &TRI,
                               const SIMachineFunctionInfo &FuncInfo,
                               LiveRegUnits &LiveUnits, MachineFunction &MF,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL, Register SpillReg, int FI,
                               Register FrameReg, int64_t DwordOff = 0) {
  unsigned Opc = ST.hasFlatScratchEnabled() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
                                            : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FI),
      FrameInfo.getObjectAlign(FI));
  TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, false, FrameReg,
                          DwordOff, MMO, nullptr, &LiveUnits);
}

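// Materialize the 64-bit pointer to the Global Information Table in
// \p TargetReg: the low half comes from the GIT-pointer-low argument SGPR, and
// the high half from the amdgpu-git-ptr-high function attribute when it is
// set, otherwise from the program counter via s_getpc.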
static void buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, const SIInstrInfo *TII,
                        Register TargetReg) {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
  Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);

  if (MFI->getGITPtrHigh() != 0xffffffff) {
    BuildMI(MBB, I, DL, SMovB32, TargetHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(TargetReg, RegState::ImplicitDefine);
  } else {
    const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64_pseudo);
    BuildMI(MBB, I, DL, GetPC64, TargetReg);
  }
  Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
  MF->getRegInfo().addLiveIn(GitPtrLo);
  MBB.addLiveIn(GitPtrLo);
  BuildMI(MBB, I, DL, SMovB32, TargetLo)
      .addReg(GitPtrLo);
}

static void initLiveUnits(LiveRegUnits &LiveUnits, const SIRegisterInfo &TRI,
                          const SIMachineFunctionInfo *FuncInfo,
                          MachineFunction &MF, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, bool IsProlog) {
  if (LiveUnits.empty()) {
    LiveUnits.init(TRI);
    if (IsProlog) {
      LiveUnits.addLiveIns(MBB);
    } else {
      // In epilog.
      LiveUnits.addLiveOuts(MBB);
      LiveUnits.stepBackward(*MBBI);
    }
  }
}

namespace llvm {

// SpillBuilder to save/restore special SGPR spills like the one needed for FP,
// BP, etc. These spills are delayed until the current function's frame is
// finalized. For a given register, the builder uses the
// PrologEpilogSGPRSaveRestoreInfo to decide the spill method.
class PrologEpilogSGPRSpillBuilder {
  MachineBasicBlock::iterator MI;
  MachineBasicBlock &MBB;
  MachineFunction &MF;
  const GCNSubtarget &ST;
  MachineFrameInfo &MFI;
  SIMachineFunctionInfo *FuncInfo;
  const SIInstrInfo *TII;
  const SIRegisterInfo &TRI;
  Register SuperReg;
  const PrologEpilogSGPRSaveRestoreInfo SI;
  LiveRegUnits &LiveUnits;
  const DebugLoc &DL;
  Register FrameReg;
  ArrayRef<int16_t> SplitParts;
  unsigned NumSubRegs;
  unsigned EltSize = 4;

  void saveToMemory(const int FI) const {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    assert(!MFI.isDeadObjectIndex(FI));

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

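    // An SGPR cannot be stored to scratch memory directly; stage each 32-bit
    // piece through the temporary VGPR and store that to the stack slot.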
    for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
          .addReg(SubReg);

      buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL, TmpVGPR,
                       FI, FrameReg, DwordOff);
      DwordOff += 4;
    }
  }

  void saveToVGPRLane(const int FI) const {
    assert(!MFI.isDeadObjectIndex(FI));

    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
        FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
    assert(Spill.size() == NumSubRegs);

    for (unsigned I = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_S32_TO_VGPR),
              Spill[I].VGPR)
          .addReg(SubReg)
          .addImm(Spill[I].Lane)
          .addReg(Spill[I].VGPR, RegState::Undef);
    }
  }

  void copyToScratchSGPR(Register DstReg) const {
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(SuperReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  void restoreFromMemory(const int FI) {
    MachineRegisterInfo &MRI = MF.getRegInfo();

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ false);
    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

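    // Reload each dword into the temporary VGPR and move it back into the SGPR
    // with v_readfirstlane, the reverse of the staging done in saveToMemory.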
    for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));

      buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL,
                         TmpVGPR, FI, FrameReg, DwordOff);
      assert(SubReg.isPhysical());

      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
          .addReg(TmpVGPR, RegState::Kill);
      DwordOff += 4;
    }
  }

  void restoreFromVGPRLane(const int FI) {
    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
        FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
    assert(Spill.size() == NumSubRegs);

    for (unsigned I = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_RESTORE_S32_FROM_VGPR), SubReg)
          .addReg(Spill[I].VGPR)
          .addImm(Spill[I].Lane);
    }
  }

  void copyFromScratchSGPR(Register SrcReg) const {
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), SuperReg)
        .addReg(SrcReg)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

public:
  PrologEpilogSGPRSpillBuilder(Register Reg,
                               const PrologEpilogSGPRSaveRestoreInfo SI,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, const SIInstrInfo *TII,
                               const SIRegisterInfo &TRI,
                               LiveRegUnits &LiveUnits, Register FrameReg)
      : MI(MI), MBB(MBB), MF(*MBB.getParent()),
        ST(MF.getSubtarget<GCNSubtarget>()), MFI(MF.getFrameInfo()),
        FuncInfo(MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
        SuperReg(Reg), SI(SI), LiveUnits(LiveUnits), DL(DL),
        FrameReg(FrameReg) {
    const TargetRegisterClass *RC = TRI.getPhysRegBaseClass(SuperReg);
    SplitParts = TRI.getRegSplitParts(RC, EltSize);
    NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

    assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
  }

  void save() {
    switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
      return saveToMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
      return saveToVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
      return copyToScratchSGPR(SI.getReg());
    }
  }

  void restore() {
    switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
      return restoreFromMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
      return restoreFromVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
      return copyFromScratchSGPR(SI.getReg());
    }
  }
};

} // namespace llvm

// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  Register FlatScrInitLo;
  Register FlatScrInitHi;

  if (ST.isAmdPalOS()) {
    // Extract the scratch offset from the descriptor in the GIT
    LiveRegUnits LiveUnits;
    LiveUnits.init(*TRI);
    LiveUnits.addLiveIns(MBB);

    // Find unused reg to load flat scratch init into
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register FlatScrInit = AMDGPU::NoRegister;
    ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
    unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
    AllSGPR64s = AllSGPR64s.slice(
        std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPR64s) {
      if (LiveUnits.available(Reg) && !MRI.isReserved(Reg) &&
          MRI.isAllocatable(Reg) && !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
        FlatScrInit = Reg;
        break;
      }
    }
    assert(FlatScrInit && "Failed to find free register for scratch init");

    FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);

    buildGitPtr(MBB, I, DL, TII, FlatScrInit);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        8, Align(4));
    unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
        .addReg(FlatScrInit)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addMemOperand(MMO);

    // Mask the offset in [47:0] of the descriptor
    const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
    auto And = BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
                   .addReg(FlatScrInitHi)
                   .addImm(0xffff);
    And->getOperand(3).setIsDead(); // Mark SCC as dead.
  } else {
    Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
    assert(FlatScratchInitReg);

    MachineRegisterInfo &MRI = MF.getRegInfo();
    MRI.addLiveIn(FlatScratchInitReg);
    MBB.addLiveIn(FlatScratchInitReg);

    FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  }

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
          .addReg(FlatScrInitLo)
          .addReg(ScratchWaveOffsetReg);
      auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
                          FlatScrInitHi)
                      .addReg(FlatScrInitHi)
                      .addImm(0);
      Addc->getOperand(3).setIsDead(); // Mark SCC as dead.

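      // On GFX10+ the flat scratch base is not exposed as addressable SGPRs;
      // program it through the FLAT_SCR_LO/HI hardware registers instead.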
      using namespace AMDGPU::Hwreg;
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitLo)
          .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_LO, 0, 32)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitHi)
          .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_HI, 0, 32)));
      return;
    }

    // For GFX9.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
    auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
                        AMDGPU::FLAT_SCR_HI)
                    .addReg(FlatScrInitHi)
                    .addImm(0);
    Addc->getOperand(3).setIsDead(); // Mark SCC as dead.

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), FlatScrInitLo)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  auto LShr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32),
                      AMDGPU::FLAT_SCR_HI)
                  .addReg(FlatScrInitLo, RegState::Kill)
                  .addImm(8);
  LShr->getOperand(3).setIsDead(); // Mark SCC as dead.
}

// Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
// memory. They should have been removed by now.
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

// Shift down registers reserved for the scratch RSRC.
Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
    MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  assert(MFI->isEntryFunction());

  Register ScratchRsrcReg = MFI->getScratchRSrcReg();

  if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
    return Register();

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed. Also for PAL, make sure we don't clobber
    // the GIT pointer passed in SGPR0 or SGPR8.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
        (!GITPtrLoReg || !TRI->isSubRegisterEq(Reg, GITPtrLoReg))) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      MRI.reserveReg(Reg, TRI);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

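// Without flat scratch, the SP/FP registers hold unswizzled per-wave byte
// offsets, so per-lane frame sizes must be scaled by the wavefront size.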
static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
  return ST.hasFlatScratchEnabled() ? 1 : ST.getWavefrontSize();
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  // FIXME: If we only have SGPR spills, we won't actually be using scratch
  // memory since these spill to VGPRs. We should be cleaning up these unused
  // SGPR spill frame indices somewhere.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  // FIXME: Remove all of the isPhysRegUsed checks

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  assert(MFI->isEntryFunction());

  Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.hasFlatScratchEnabled())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC that we found clobbers the scratch
  // wave offset, which may be in a fixed SGPR or a free SGPR chosen by
  // SITargetLowering::allocateSystemSGPRs, COPY the scratch wave offset to a
  // free SGPR.
  Register ScratchWaveOffsetReg;
  if (PreloadedScratchWaveOffsetReg &&
      TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
    ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
    unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
    AllSGPRs = AllSGPRs.slice(
        std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPRs) {
      if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
        ScratchWaveOffsetReg = Reg;
        BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
            .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
        break;
      }
    }

    // FIXME: We can spill incoming arguments and restore at the end of the
    // prolog.
    if (!ScratchWaveOffsetReg)
      report_fatal_error(
          "could not find temporary scratch offset register in prolog");
  } else {
    ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
  }
  assert(ScratchWaveOffsetReg || !PreloadedScratchWaveOffsetReg);

  unsigned Offset = FrameInfo.getStackSize() * getScratchScaleFactor(ST);
  if (!mayReserveScratchForCWSR(MF)) {
    if (hasFP(MF)) {
      Register FPReg = MFI->getFrameOffsetReg();
      assert(FPReg != AMDGPU::FP_REG);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
    }

    if (requiresStackPointerReference(MF)) {
      Register SPReg = MFI->getStackPtrOffsetReg();
      assert(SPReg != AMDGPU::SP_REG);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg).addImm(Offset);
    }
  } else {
    // We need to check if we're on a compute queue - if we are, then the CWSR
    // trap handler may need to store some VGPRs on the stack. The first VGPR
    // block is saved separately, so we only need to allocate space for any
    // additional VGPR blocks used. For now, we will make sure there's enough
    // room for the theoretical maximum number of VGPRs that can be allocated.
    // FIXME: Figure out if the shader uses fewer VGPRs in practice.
    assert(hasFP(MF));
    Register FPReg = MFI->getFrameOffsetReg();
    assert(FPReg != AMDGPU::FP_REG);
    unsigned VGPRSize = llvm::alignTo(
        (ST.getAddressableNumVGPRs(MFI->getDynamicVGPRBlockSize()) -
         AMDGPU::IsaInfo::getVGPRAllocGranule(
             &ST, MFI->getDynamicVGPRBlockSize())) *
            4,
        FrameInfo.getMaxAlign());
    MFI->setScratchReservedForDynamicVGPRs(VGPRSize);

    BuildMI(MBB, I, DL, TII->get(AMDGPU::GET_STACK_BASE), FPReg);
    if (requiresStackPointerReference(MF)) {
      Register SPReg = MFI->getStackPtrOffsetReg();
      assert(SPReg != AMDGPU::SP_REG);

      // If at least one of the constants can be inlined, then we can use
      // s_cselect. Otherwise, use a mov and cmovk.
      if (AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm()) ||
          AMDGPU::isInlinableLiteral32(Offset + VGPRSize,
                                       ST.hasInv2PiInlineImm())) {
        BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CSELECT_B32), SPReg)
            .addImm(Offset + VGPRSize)
            .addImm(Offset);
      } else {
        BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg).addImm(Offset);
        BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CMOVK_I32), SPReg)
            .addImm(Offset + VGPRSize);
      }
    }
  }

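  // Flat scratch needs to be initialized if FLAT_SCR is explicitly referenced,
  // if there are calls (callees may rely on a valid flat scratch base), or if
  // live stack objects are accessed through flat scratch addressing.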
  bool NeedsFlatScratchInit =
      MFI->getUserSGPRInfo().hasFlatScratchInit() &&
      (MRI.isPhysRegUsed(AMDGPU::FLAT_SCR) || FrameInfo.hasCalls() ||
       (!allStackObjectsAreDead(FrameInfo) && ST.hasFlatScratchEnabled()));

  if ((NeedsFlatScratchInit || ScratchRsrcReg) &&
      PreloadedScratchWaveOffsetReg && !ST.hasArchitectedFlatScratch()) {
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (NeedsFlatScratchInit) {
    emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
  }

  if (ScratchRsrcReg) {
    emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
                                         PreloadedScratchRsrcReg,
                                         ScratchRsrcReg, ScratchWaveOffsetReg);
  }

  if (ST.hasWaitXcnt()) {
    // Set REPLAY_MODE (bit 25) in MODE register to enable multi-group XNACK
    // replay. This aligns hardware behavior with the compiler's s_wait_xcnt
    // insertion logic, which assumes multi-group mode by default.
    unsigned RegEncoding =
        AMDGPU::Hwreg::HwregEncoding::encode(AMDGPU::Hwreg::ID_MODE, 25, 1);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
        .addImm(1)
        .addImm(RegEncoding);
  }
}

// Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register PreloadedScratchRsrcReg,
    Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const Function &Fn = MF.getFunction();

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
    Register Rsrc03 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    buildGitPtr(MBB, I, DL, TII, Rsrc01);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        16, Align(4));
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
        .addReg(Rsrc01)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
        .addMemOperand(MMO);

    // The driver will always set the SRD for wave 64 (bits 118:117 of
    // descriptor / bits 22:21 of third sub-reg will be 0b11)
    // If the shader is actually wave32 we have to modify the const_index_stride
    // field of the descriptor 3rd sub-reg (bits 22:21) to 0b10 (stride=32). The
    // reason the driver does this is that there can be cases where it presents
    // 2 shaders with different wave size (e.g. VsFs).
    // TODO: convert to using SCRATCH instructions or multiple SRD buffers
    if (ST.isWave32()) {
      const MCInstrDesc &SBitsetB32 = TII->get(AMDGPU::S_BITSET0_B32);
      BuildMI(MBB, I, DL, SBitsetB32, Rsrc03)
          .addImm(21)
          .addReg(Rsrc03);
    }
  } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->getUserSGPRInfo().hasImplicitBufferPtr()) {
      Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
        auto *MMO = MF.getMachineMemOperand(
            PtrInfo,
            MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                MachineMemOperand::MODereferenceable,
            8, Align(4));
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addImm(0) // offset
            .addImm(0) // cpol
            .addMemOperand(MMO)
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
          .addExternalSymbol("SCRATCH_RSRC_DWORD0")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
          .addExternalSymbol("SCRATCH_RSRC_DWORD1")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
        .addImm(Lo_32(Rsrc23))
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
        .addImm(Hi_32(Rsrc23))
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  } else if (ST.isAmdHsaOrMesa(Fn)) {
    assert(PreloadedScratchRsrcReg);

    if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
          .addReg(PreloadedScratchRsrcReg, RegState::Kill);
    }
  }

  // Add the scratch wave offset into the scratch RSRC.
  //
  // We only want to update the first 48 bits, which is the base address
  // pointer, without touching the adjacent 16 bits of flags. We know this add
  // cannot carry-out from bit 47, otherwise the scratch allocation would be
  // impossible to fit in the 48-bit global address space.
  //
  // TODO: Evaluate if it is better to just construct an SRD using the flat
  // scratch init and some constants rather than update the one we are passed.
  Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
  Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

  // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
  // the kernel body via inreg arguments.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
      .addReg(ScratchRsrcSub0)
      .addReg(ScratchWaveOffsetReg)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
                  .addReg(ScratchRsrcSub1)
                  .addImm(0)
                  .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  Addc->getOperand(3).setIsDead(); // Mark SCC as dead.
}

bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  case TargetStackID::ScalableVector:
  case TargetStackID::ScalablePredicateVector:
  case TargetStackID::WasmLocal:
    return false;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}

// Activate only the inactive lanes when \p EnableInactiveLanes is true.
// Otherwise, activate all lanes. Returns the register holding the saved EXEC
// mask.
static Register buildScratchExecCopy(LiveRegUnits &LiveUnits,
                                     MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const DebugLoc &DL, bool IsProlog,
                                     bool EnableInactiveLanes) {
  Register ScratchExecCopy;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, IsProlog);

  if (FuncInfo->isWholeWaveFunction()) {
    // Whole wave functions already have a copy of the original EXEC mask that
    // we can use.
    assert(IsProlog && "Epilog should look at return, not setup");
    ScratchExecCopy =
        TII->getWholeWaveFunctionSetup(MF)->getOperand(0).getReg();
    assert(ScratchExecCopy && "Couldn't find copy of EXEC");
  } else {
    ScratchExecCopy = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, *TRI.getWaveMaskRegClass());
  }

  if (!ScratchExecCopy)
    report_fatal_error("failed to find free scratch register");

  LiveUnits.addReg(ScratchExecCopy);

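  // With an all-ones operand, s_xor_saveexec activates exactly the lanes that
  // were inactive, while s_or_saveexec activates all lanes; both write the
  // original EXEC mask into ScratchExecCopy first.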
  const unsigned SaveExecOpc =
      ST.isWave32() ? (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B32
                                           : AMDGPU::S_OR_SAVEEXEC_B32)
                    : (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B64
                                           : AMDGPU::S_OR_SAVEEXEC_B64);
  auto SaveExec =
      BuildMI(MBB, MBBI, DL, TII->get(SaveExecOpc), ScratchExecCopy).addImm(-1);
  SaveExec->getOperand(3).setIsDead(); // Mark SCC as dead.

  return ScratchExecCopy;
}

void SIFrameLowering::emitCSRSpillStores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
    Register FrameReg, Register FramePtrRegScratchCopy) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);

  // Spill Whole-Wave Mode VGPRs. Save only the inactive lanes of the scratch
  // registers. However, save all lanes of callee-saved VGPRs. Due to this, we
  // might end up flipping the EXEC bits twice.
  Register ScratchExecCopy;
  SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
  FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
  if (!WWMScratchRegs.empty())
    ScratchExecCopy =
        buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                             /*IsProlog*/ true, /*EnableInactiveLanes*/ true);

  auto StoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
        for (const auto &Reg : WWMRegs) {
          Register VGPR = Reg.first;
          int FI = Reg.second;
          buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
                           VGPR, FI, FrameReg);
        }
      };

  for (const Register Reg : make_first_range(WWMScratchRegs)) {
    if (!MRI.isReserved(Reg)) {
      MRI.addLiveIn(Reg);
      MBB.addLiveIn(Reg);
    }
  }
  StoreWWMRegisters(WWMScratchRegs);

  auto EnableAllLanes = [&]() {
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
  };

  if (!WWMCalleeSavedRegs.empty()) {
    if (ScratchExecCopy) {
      EnableAllLanes();
    } else {
      ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                                             /*IsProlog*/ true,
                                             /*EnableInactiveLanes*/ false);
    }
  }

  StoreWWMRegisters(WWMCalleeSavedRegs);
  if (FuncInfo->isWholeWaveFunction()) {
    // If we have already saved some WWM CSR registers, then the EXEC is
    // already -1 and we don't need to do anything else. Otherwise, set EXEC
    // to -1 here.
    if (!ScratchExecCopy)
      buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL, /*IsProlog*/ true,
                           /*EnableInactiveLanes*/ true);
    else if (WWMCalleeSavedRegs.empty())
      EnableAllLanes();
  } else if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg)
        .addReg(ScratchExecCopy, RegState::Kill);
    LiveUnits.addReg(ScratchExecCopy);
  }

  Register FramePtrReg = FuncInfo->getFrameOffsetReg();

  for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
    // Special handling for the FP spill:
    // Skip if FP is saved to a scratch SGPR; that save has already been
    // emitted. Otherwise, FP has been moved to a temporary register, so spill
    // that register instead.
    Register Reg =
        Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
    if (!Reg)
      continue;

    PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
                                    LiveUnits, FrameReg);
    SB.save();
  }

  // If a copy to scratch SGPR has been chosen for any of the SGPR spills, make
  // such scratch registers live throughout the function.
  SmallVector<Register, 1> ScratchSGPRs;
  FuncInfo->getAllScratchSGPRCopyDstRegs(ScratchSGPRs);
  if (!ScratchSGPRs.empty()) {
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : ScratchSGPRs)
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }
    if (!LiveUnits.empty()) {
      for (MCPhysReg Reg : ScratchSGPRs)
        LiveUnits.addReg(Reg);
    }
  }
}

void SIFrameLowering::emitCSRSpillRestores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
    Register FrameReg, Register FramePtrRegScratchCopy) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();

  for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
    // Special handling for the FP restore:
    // Skip if FP needs to be restored from the scratch SGPR. Otherwise,
    // restore the FP value into a temporary register. The frame pointer should
    // be overwritten only at the end, once all other spills have been restored
    // from the current frame.
    Register Reg =
        Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
    if (!Reg)
      continue;

    PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
                                    LiveUnits, FrameReg);
    SB.restore();
  }

  // Restore Whole-Wave Mode VGPRs. Restore only the inactive lanes of the
  // scratch registers. However, restore all lanes of callee-saved VGPRs. Due
  // to this, we might end up flipping the EXEC bits twice.
  Register ScratchExecCopy;
  SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
  FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
  auto RestoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
        for (const auto &Reg : WWMRegs) {
          Register VGPR = Reg.first;
          int FI = Reg.second;
          buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
                             VGPR, FI, FrameReg);
        }
      };

  if (FuncInfo->isWholeWaveFunction()) {
    // For whole wave functions, the EXEC is already -1 at this point.
    // Therefore, we can restore the CSR WWM registers right away.
    RestoreWWMRegisters(WWMCalleeSavedRegs);

    // The original EXEC is the first operand of the return instruction.
    MachineInstr &Return = MBB.instr_back();
    unsigned Opcode = Return.getOpcode();
    switch (Opcode) {
    case AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN:
      Opcode = AMDGPU::SI_RETURN;
      break;
    case AMDGPU::SI_TCRETURN_GFX_WholeWave:
      Opcode = AMDGPU::SI_TCRETURN_GFX;
      break;
    default:
      llvm_unreachable("Unexpected return inst");
    }
    Register OrigExec = Return.getOperand(0).getReg();

    if (!WWMScratchRegs.empty()) {
      BuildMI(MBB, MBBI, DL, TII->get(LMC.XorOpc), LMC.ExecReg)
          .addReg(OrigExec)
          .addImm(-1);
      RestoreWWMRegisters(WWMScratchRegs);
    }

    // Restore original EXEC.
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addReg(OrigExec);

    // Drop the first operand and update the opcode.
    Return.removeOperand(0);
    Return.setDesc(TII->get(Opcode));

    return;
  }

  if (!WWMScratchRegs.empty()) {
    ScratchExecCopy =
        buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                             /*IsProlog=*/false, /*EnableInactiveLanes=*/true);
  }
  RestoreWWMRegisters(WWMScratchRegs);
  if (!WWMCalleeSavedRegs.empty()) {
    if (ScratchExecCopy) {
      BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
    } else {
      ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                                             /*IsProlog*/ false,
                                             /*EnableInactiveLanes*/ false);
    }
  }

  RestoreWWMRegisters(WWMCalleeSavedRegs);
  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg)
        .addReg(ScratchExecCopy, RegState::Kill);
  }
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  LiveRegUnits LiveUnits;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  // DebugLoc must be unknown since the first instruction with DebugLoc is used
  // to determine the end of the prologue.
  DebugLoc DL;

  if (FuncInfo->isChainFunction()) {
    // Functions with the amdgpu_cs_chain[_preserve] CC don't receive a SP, but
    // are free to set one up if they need it.
    bool UseSP = requiresStackPointerReference(MF);
    if (UseSP) {
      assert(StackPtrReg != AMDGPU::SP_REG);

      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B32), StackPtrReg)
          .addImm(MFI.getStackSize() * getScratchScaleFactor(ST));
    }
  }

  bool HasFP = false;
  bool HasBP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;

  if (TRI.hasStackRealignment(MF))
    HasFP = true;

  Register FramePtrRegScratchCopy;
  if (!HasFP && !hasFP(MF)) {
    // Emit the CSR spill stores with SP base register.
    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits,
                       FuncInfo->isChainFunction() ? Register() : StackPtrReg,
                       FramePtrRegScratchCopy);
  } else {
    // CSR spill stores will use FP as base register.
    Register SGPRForFPSaveRestoreCopy =
        FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ true);
    if (SGPRForFPSaveRestoreCopy) {
      // Copy FP to the scratch register now and emit the CFI entry. It avoids
      // the extra FP copy needed in the other two cases when FP is spilled to
      // memory or to a VGPR lane.
      PrologEpilogSGPRSpillBuilder SB(
          FramePtrReg,
          FuncInfo->getPrologEpilogSGPRSaveRestoreInfo(FramePtrReg), MBB, MBBI,
          DL, TII, TRI, LiveUnits, FramePtrReg);
      SB.save();
      LiveUnits.addReg(SGPRForFPSaveRestoreCopy);
    } else {
      // Copy FP into a new scratch register so that its previous value can be
      // spilled after setting up the new frame.
      FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
          MRI, LiveUnits, AMDGPU::SReg_32_XM0_XEXECRegClass);
      if (!FramePtrRegScratchCopy)
        report_fatal_error("failed to find free scratch register");

      LiveUnits.addReg(FramePtrRegScratchCopy);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrRegScratchCopy)
          .addReg(FramePtrReg);
    }
  }

  if (HasFP) {
    const unsigned Alignment = MFI.getMaxAlign().value();

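    // Over-allocate by one alignment's worth so that the full NumBytes remain
    // usable below the frame pointer after it is rounded up to the required
    // alignment.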
    RoundedSize += Alignment;
    if (LiveUnits.empty()) {
      LiveUnits.init(TRI);
      LiveUnits.addLiveIns(MBB);
    }

    // s_add_i32 s33, s32, NumBytes
    // s_and_b32 s33, s33, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), FramePtrReg)
        .addReg(StackPtrReg)
        .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    auto And = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
                   .addReg(FramePtrReg, RegState::Kill)
                   .addImm(-Alignment * getScratchScaleFactor(ST))
                   .setMIFlag(MachineInstr::FrameSetup);
    And->getOperand(3).setIsDead(); // Mark SCC as dead.
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If FP is used, emit the CSR spills with FP base register.
  if (HasFP) {
    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits, FramePtrReg,
                       FramePtrRegScratchCopy);
    if (FramePtrRegScratchCopy)
      LiveUnits.removeReg(FramePtrRegScratchCopy);
  }

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // the incoming arguments.
  if ((HasBP = TRI.hasBasePointer(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg)
                   .addReg(StackPtrReg)
                   .addImm(RoundedSize * getScratchScaleFactor(ST))
                   .setMIFlag(MachineInstr::FrameSetup);
    Add->getOperand(3).setIsDead(); // Mark SCC as dead.
  }

  bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);
  (void)FPSaved;
  assert((!HasFP || FPSaved) &&
         "Needed to save FP but didn't save it anywhere");

  // If we allow spilling to AGPRs we may have saved FP but then spill
  // everything into AGPRs instead of the stack.
  assert((HasFP || !FPSaved || EnableSpillVGPRToAGPR) &&
         "Saved FP but didn't need it");

  bool BPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(BasePtrReg);
  (void)BPSaved;
  assert((!HasBP || BPSaved) &&
         "Needed to save BP but didn't save it anywhere");

  assert((HasBP || !BPSaved) && "Saved BP but didn't need it");

  if (FuncInfo->isWholeWaveFunction()) {
    // SI_WHOLE_WAVE_FUNC_SETUP has outlived its purpose.
    TII->getWholeWaveFunctionSetup(MF)->eraseFromParent();
  }
}

1351void SIFrameLowering::emitEpilogue(MachineFunction &MF,
1352 MachineBasicBlock &MBB) const {
1353 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1354 if (FuncInfo->isEntryFunction())
1355 return;
1356
1357 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1358 const SIInstrInfo *TII = ST.getInstrInfo();
1359 const SIRegisterInfo &TRI = TII->getRegisterInfo();
1360 MachineRegisterInfo &MRI = MF.getRegInfo();
1361 LiveRegUnits LiveUnits;
  // Get the insert location for the epilogue: the first terminator, or the
  // end of the block if there is none. The debug location is taken from the
  // last non-debug instruction.
1364 MachineBasicBlock::iterator MBBI = MBB.end();
1365 DebugLoc DL;
1366 if (!MBB.empty()) {
1367 MBBI = MBB.getLastNonDebugInstr();
1368 if (MBBI != MBB.end())
1369 DL = MBBI->getDebugLoc();
1370
1371 MBBI = MBB.getFirstTerminator();
1372 }
1373
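  // Recompute the frame size the prologue may have allocated, including any
  // realignment padding, to decide whether SP needs to be restored.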
1374 const MachineFrameInfo &MFI = MF.getFrameInfo();
1375 uint32_t NumBytes = MFI.getStackSize();
1376 uint32_t RoundedSize = FuncInfo->isStackRealigned()
1377 ? NumBytes + MFI.getMaxAlign().value()
1378 : NumBytes;
1379 const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
1380 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1381 bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(Reg: FramePtrReg);
1382
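  // If the prologue moved SP, restore it from the base pointer copy when one
  // exists, otherwise from the frame pointer.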
1383 if (RoundedSize != 0) {
1384 if (TRI.hasBasePointer(MF)) {
1385 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: AMDGPU::COPY), DestReg: StackPtrReg)
1386 .addReg(RegNo: TRI.getBaseRegister())
1387 .setMIFlag(MachineInstr::FrameDestroy);
1388 } else if (hasFP(MF)) {
1389 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: AMDGPU::COPY), DestReg: StackPtrReg)
1390 .addReg(RegNo: FramePtrReg)
1391 .setMIFlag(MachineInstr::FrameDestroy);
1392 }
1393 }
1394
1395 Register FramePtrRegScratchCopy;
1396 Register SGPRForFPSaveRestoreCopy =
1397 FuncInfo->getScratchSGPRCopyDstReg(Reg: FramePtrReg);
1398 if (FPSaved) {
    // CSR spill restores should use FP as the base register. If
    // SGPRForFPSaveRestoreCopy is not valid, restore the previous value of FP
    // into a new scratch register, and copy it back into FP later, once the
    // other registers have been restored from the current stack frame.
1403 initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ false);
1404 if (SGPRForFPSaveRestoreCopy) {
1405 LiveUnits.addReg(Reg: SGPRForFPSaveRestoreCopy);
1406 } else {
1407 FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
1408 MRI, LiveUnits, RC: AMDGPU::SReg_32_XM0_XEXECRegClass);
1409 if (!FramePtrRegScratchCopy)
1410 report_fatal_error(reason: "failed to find free scratch register");
1411
1412 LiveUnits.addReg(Reg: FramePtrRegScratchCopy);
1413 }
1414
1415 emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits, FrameReg: FramePtrReg,
1416 FramePtrRegScratchCopy);
1417 }
1418
1419 if (FPSaved) {
1420 // Insert the copy to restore FP.
1421 Register SrcReg = SGPRForFPSaveRestoreCopy ? SGPRForFPSaveRestoreCopy
1422 : FramePtrRegScratchCopy;
1423 MachineInstrBuilder MIB =
1424 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: AMDGPU::COPY), DestReg: FramePtrReg)
1425 .addReg(RegNo: SrcReg);
1426 if (SGPRForFPSaveRestoreCopy)
1427 MIB.setMIFlag(MachineInstr::FrameDestroy);
1428 } else {
1429 // Insert the CSR spill restores with SP as the base register.
1430 emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits,
1431 FrameReg: FuncInfo->isChainFunction() ? Register() : StackPtrReg,
1432 FramePtrRegScratchCopy);
1433 }
1434}
1435
1436#ifndef NDEBUG
1437static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
1438 const MachineFrameInfo &MFI = MF.getFrameInfo();
1439 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1440 for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
1441 I != E; ++I) {
1442 if (!MFI.isDeadObjectIndex(I) &&
1443 MFI.getStackID(I) == TargetStackID::SGPRSpill &&
1444 !FuncInfo->checkIndexInPrologEpilogSGPRSpills(I)) {
1445 return false;
1446 }
1447 }
1448
1449 return true;
1450}
1451#endif
1452
1453StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
1454 int FI,
1455 Register &FrameReg) const {
1456 const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
1457
1458 FrameReg = RI->getFrameRegister(MF);
1459 return StackOffset::getFixed(Fixed: MF.getFrameInfo().getObjectOffset(ObjectIdx: FI));
1460}
1461
1462void SIFrameLowering::processFunctionBeforeFrameFinalized(
1463 MachineFunction &MF,
1464 RegScavenger *RS) const {
1465 MachineFrameInfo &MFI = MF.getFrameInfo();
1466
1467 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1468 const SIInstrInfo *TII = ST.getInstrInfo();
1469 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1470 MachineRegisterInfo &MRI = MF.getRegInfo();
1471 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1472
1473 const bool SpillVGPRToAGPR = ST.hasMAIInsts() && FuncInfo->hasSpilledVGPRs()
1474 && EnableSpillVGPRToAGPR;
1475
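  // Rewrite VGPR spill pseudos to use AGPRs where possible so that their
  // stack slots can be marked dead before the frame is finalized.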
1476 if (SpillVGPRToAGPR) {
1477 // To track the spill frame indices handled in this pass.
1478 BitVector SpillFIs(MFI.getObjectIndexEnd(), false);
1479 BitVector NonVGPRSpillFIs(MFI.getObjectIndexEnd(), false);
1480
1481 bool SeenDbgInstr = false;
1482
1483 for (MachineBasicBlock &MBB : MF) {
1484 for (MachineInstr &MI : llvm::make_early_inc_range(Range&: MBB)) {
1485 int FrameIndex;
1486 if (MI.isDebugInstr())
1487 SeenDbgInstr = true;
1488
1489 if (TII->isVGPRSpill(MI)) {
1490 // Try to eliminate stack used by VGPR spills before frame
1491 // finalization.
1492 unsigned FIOp = AMDGPU::getNamedOperandIdx(Opcode: MI.getOpcode(),
1493 Name: AMDGPU::OpName::vaddr);
1494 int FI = MI.getOperand(i: FIOp).getIndex();
1495 Register VReg =
1496 TII->getNamedOperand(MI, OperandName: AMDGPU::OpName::vdata)->getReg();
1497 if (FuncInfo->allocateVGPRSpillToAGPR(MF, FI,
1498 isAGPRtoVGPR: TRI->isAGPR(MRI, Reg: VReg))) {
1499 assert(RS != nullptr);
1500 RS->enterBasicBlockEnd(MBB);
1501 RS->backward(I: std::next(x: MI.getIterator()));
1502 TRI->eliminateFrameIndex(MI, SPAdj: 0, FIOperandNum: FIOp, RS);
1503 SpillFIs.set(FI);
1504 continue;
1505 }
1506 } else if (TII->isStoreToStackSlot(MI, FrameIndex) ||
1507 TII->isLoadFromStackSlot(MI, FrameIndex))
1508 if (!MFI.isFixedObjectIndex(ObjectIdx: FrameIndex))
1509 NonVGPRSpillFIs.set(FrameIndex);
1510 }
1511 }
1512
1513 // Stack slot coloring may assign different objects to the same stack slot.
1514 // If not, then the VGPR to AGPR spill slot is dead.
1515 for (unsigned FI : SpillFIs.set_bits())
1516 if (!NonVGPRSpillFIs.test(Idx: FI))
1517 FuncInfo->setVGPRToAGPRSpillDead(FI);
1518
1519 for (MachineBasicBlock &MBB : MF) {
1520 for (MCPhysReg Reg : FuncInfo->getVGPRSpillAGPRs())
1521 MBB.addLiveIn(PhysReg: Reg);
1522
1523 for (MCPhysReg Reg : FuncInfo->getAGPRSpillVGPRs())
1524 MBB.addLiveIn(PhysReg: Reg);
1525
1526 MBB.sortUniqueLiveIns();
1527
1528 if (!SpillFIs.empty() && SeenDbgInstr) {
        // FIXME: The dead frame indices are replaced with a null register from
        // the debug value instructions. We should instead update it with the
        // correct register value, but it is not clear that the register value
        // alone is enough to recover the debug expression.
1532 for (MachineInstr &MI : MBB) {
1533 if (MI.isDebugValue()) {
1534 uint32_t StackOperandIdx = MI.isDebugValueList() ? 2 : 0;
1535 if (MI.getOperand(i: StackOperandIdx).isFI() &&
1536 !MFI.isFixedObjectIndex(
1537 ObjectIdx: MI.getOperand(i: StackOperandIdx).getIndex()) &&
1538 SpillFIs[MI.getOperand(i: StackOperandIdx).getIndex()]) {
1539 MI.getOperand(i: StackOperandIdx)
1540 .ChangeToRegister(Reg: Register(), isDef: false /*isDef*/);
1541 }
1542 }
1543 }
1544 }
1545 }
1546 }
1547
1548 // At this point we've already allocated all spilled SGPRs to VGPRs if we
1549 // can. Any remaining SGPR spills will go to memory, so move them back to the
1550 // default stack.
1551 bool HaveSGPRToVMemSpill =
1552 FuncInfo->removeDeadFrameIndices(MFI, /*ResetSGPRSpillStackIDs*/ true);
1553 assert(allSGPRSpillsAreDead(MF) &&
1554 "SGPR spill should have been removed in SILowerSGPRSpills");
1555
1556 // FIXME: The other checks should be redundant with allStackObjectsAreDead,
1557 // but currently hasNonSpillStackObjects is set only from source
1558 // allocas. Stack temps produced from legalization are not counted currently.
1559 if (!allStackObjectsAreDead(MFI)) {
1560 assert(RS && "RegScavenger required if spilling");
1561
1562 // Add an emergency spill slot
1563 RS->addScavengingFrameIndex(FI: FuncInfo->getScavengeFI(MFI, TRI: *TRI));
1564
1565 // If we are spilling SGPRs to memory with a large frame, we may need a
1566 // second VGPR emergency frame index.
1567 if (HaveSGPRToVMemSpill &&
1568 allocateScavengingFrameIndexesNearIncomingSP(MF)) {
1569 RS->addScavengingFrameIndex(FI: MFI.CreateSpillStackObject(Size: 4, Alignment: Align(4)));
1570 }
1571 }
1572}
1573
1574void SIFrameLowering::processFunctionBeforeFrameIndicesReplaced(
1575 MachineFunction &MF, RegScavenger *RS) const {
1576 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1577 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1578 MachineRegisterInfo &MRI = MF.getRegInfo();
1579 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1580
1581 if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
    // On gfx908, we initially reserved the highest available VGPR for the
    // AGPR copy. Now that RA is done, check whether there exists an unused
    // VGPR lower than the one reserved before RA. If one exists, use it for
    // the AGPR copy instead.
1586 Register VGPRForAGPRCopy = FuncInfo->getVGPRForAGPRCopy();
1587 Register UnusedLowVGPR =
1588 TRI->findUnusedRegister(MRI, RC: &AMDGPU::VGPR_32RegClass, MF);
1589 if (UnusedLowVGPR && (TRI->getHWRegIndex(Reg: UnusedLowVGPR) <
1590 TRI->getHWRegIndex(Reg: VGPRForAGPRCopy))) {
      // Reserve this newly identified VGPR for the AGPR copy. Reserved
      // registers should already be frozen at this point, so we can avoid
      // calling MRI.freezeReservedRegs and just use MRI.reserveReg.
1595 FuncInfo->setVGPRForAGPRCopy(UnusedLowVGPR);
1596 MRI.reserveReg(PhysReg: UnusedLowVGPR, TRI);
1597 }
1598 }
  // We initially reserved the highest available SGPR pair for long branches.
  // Now, after RA, shift down to a lower unused pair if one exists.
1601 Register LongBranchReservedReg = FuncInfo->getLongBranchReservedReg();
1602 Register UnusedLowSGPR =
1603 TRI->findUnusedRegister(MRI, RC: &AMDGPU::SGPR_64RegClass, MF);
  // If LongBranchReservedReg is null, we didn't find a long branch and never
  // reserved a register to begin with, so there is nothing to shift down. If
  // UnusedLowSGPR is null, there is no lower register available, so keep the
  // original one.
1608 if (LongBranchReservedReg && UnusedLowSGPR) {
1609 FuncInfo->setLongBranchReservedReg(UnusedLowSGPR);
1610 MRI.reserveReg(PhysReg: UnusedLowSGPR, TRI);
1611 }
1612}
1613
// The special SGPR spills, like the ones needed for FP, BP, or any reserved
// registers, are delayed until frame lowering.
1616void SIFrameLowering::determinePrologEpilogSGPRSaves(
1617 MachineFunction &MF, BitVector &SavedVGPRs,
1618 bool NeedExecCopyReservedReg) const {
1619 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
1620 MachineRegisterInfo &MRI = MF.getRegInfo();
1621 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1622 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1623 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1624 LiveRegUnits LiveUnits;
1625 LiveUnits.init(TRI: *TRI);
1626 // Initially mark callee saved registers as used so we will not choose them
1627 // while looking for scratch SGPRs.
1628 const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
1629 for (unsigned I = 0; CSRegs[I]; ++I)
1630 LiveUnits.addReg(Reg: CSRegs[I]);
1631
1632 const TargetRegisterClass &RC = *TRI->getWaveMaskRegClass();
1633
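  // Handle the SGPR reserved for saving EXEC around WWM operations: remap it
  // to a truly unused scratch SGPR if one exists, otherwise arrange for it to
  // be spilled, or drop the reservation entirely if it turned out to be
  // unneeded.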
1634 Register ReservedRegForExecCopy = MFI->getSGPRForEXECCopy();
1635 if (NeedExecCopyReservedReg ||
1636 (ReservedRegForExecCopy &&
1637 MRI.isPhysRegUsed(PhysReg: ReservedRegForExecCopy, /*SkipRegMaskTest=*/true))) {
1638 MRI.reserveReg(PhysReg: ReservedRegForExecCopy, TRI);
1639 Register UnusedScratchReg = findUnusedRegister(MRI, LiveUnits, RC);
1640 if (UnusedScratchReg) {
1641 // If found any unused scratch SGPR, reserve the register itself for Exec
1642 // copy and there is no need for any spills in that case.
1643 MFI->setSGPRForEXECCopy(UnusedScratchReg);
1644 MRI.replaceRegWith(FromReg: ReservedRegForExecCopy, ToReg: UnusedScratchReg);
1645 LiveUnits.addReg(Reg: UnusedScratchReg);
1646 } else {
1647 // Needs spill.
1648 assert(!MFI->hasPrologEpilogSGPRSpillEntry(ReservedRegForExecCopy) &&
1649 "Re-reserving spill slot for EXEC copy register");
1650 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, SGPR: ReservedRegForExecCopy, RC,
1651 /*IncludeScratchCopy=*/false);
1652 }
1653 } else if (ReservedRegForExecCopy) {
    // Reset it at this point; no whole-wave copies or spills were
    // encountered.
1656 MFI->setSGPRForEXECCopy(AMDGPU::NoRegister);
1657 }
1658
1659 // hasFP only knows about stack objects that already exist. We're now
1660 // determining the stack slots that will be created, so we have to predict
1661 // them. Stack objects force FP usage with calls.
1662 //
1663 // Note a new VGPR CSR may be introduced if one is used for the spill, but we
1664 // don't want to report it here.
1665 //
1666 // FIXME: Is this really hasReservedCallFrame?
1667 const bool WillHaveFP =
1668 FrameInfo.hasCalls() &&
1669 (SavedVGPRs.any() || !allStackObjectsAreDead(MFI: FrameInfo));
1670
1671 if (WillHaveFP || hasFP(MF)) {
1672 Register FramePtrReg = MFI->getFrameOffsetReg();
1673 assert(!MFI->hasPrologEpilogSGPRSpillEntry(FramePtrReg) &&
1674 "Re-reserving spill slot for FP");
1675 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, SGPR: FramePtrReg);
1676 }
1677
1678 if (TRI->hasBasePointer(MF)) {
1679 Register BasePtrReg = TRI->getBaseRegister();
1680 assert(!MFI->hasPrologEpilogSGPRSpillEntry(BasePtrReg) &&
1681 "Re-reserving spill slot for BP");
1682 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, SGPR: BasePtrReg);
1683 }
1684}
1685
1686// Only report VGPRs to generic code.
1687void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
1688 BitVector &SavedVGPRs,
1689 RegScavenger *RS) const {
1690 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1691
1692 // If this is a function with the amdgpu_cs_chain[_preserve] calling
1693 // convention and it doesn't contain any calls to llvm.amdgcn.cs.chain, then
1694 // we don't need to save and restore anything.
1695 if (MFI->isChainFunction() && !MF.getFrameInfo().hasTailCall())
1696 return;
1697
1698 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs&: SavedVGPRs, RS);
1699
1700 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1701 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1702 const SIInstrInfo *TII = ST.getInstrInfo();
1703 bool NeedExecCopyReservedReg = false;
1704
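  // Scan for WWM register spill pseudos, which require a register reserved
  // for copying EXEC, and remember the return instruction so that registers
  // holding the return value can be dropped from the saved set below.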
1705 MachineInstr *ReturnMI = nullptr;
1706 for (MachineBasicBlock &MBB : MF) {
1707 for (MachineInstr &MI : MBB) {
1708 // TODO: Walking through all MBBs here would be a bad heuristic. Better
1709 // handle them elsewhere.
1710 if (TII->isWWMRegSpillOpcode(Opcode: MI.getOpcode()))
1711 NeedExecCopyReservedReg = true;
1712 else if (MI.getOpcode() == AMDGPU::SI_RETURN ||
1713 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
1714 MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN ||
1715 (MFI->isChainFunction() &&
1716 TII->isChainCallOpcode(Opcode: MI.getOpcode()))) {
        // We expect all returns to be the same size.
1718 assert(!ReturnMI ||
1719 (count_if(MI.operands(), [](auto Op) { return Op.isReg(); }) ==
1720 count_if(ReturnMI->operands(), [](auto Op) { return Op.isReg(); })));
1721 ReturnMI = &MI;
1722 }
1723 }
1724 }
1725
1726 SmallVector<Register> SortedWWMVGPRs;
1727 for (Register Reg : MFI->getWWMReservedRegs()) {
    // The shift-back is needed only for the VGPRs used for SGPR spills, which
    // are 32 bits in size. The SIPreAllocateWWMRegs pass can add tuples to the
    // WWM reserved registers.
1731 const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
1732 if (TRI->getRegSizeInBits(RC: *RC) != 32)
1733 continue;
1734 SortedWWMVGPRs.push_back(Elt: Reg);
1735 }
1736
1737 sort(C&: SortedWWMVGPRs, Comp: std::greater<Register>());
1738 MFI->shiftWwmVGPRsToLowestRange(MF, WWMVGPRs&: SortedWWMVGPRs, SavedVGPRs);
1739
1740 if (MFI->isEntryFunction())
1741 return;
1742
1743 if (MFI->isWholeWaveFunction()) {
1744 // In practice, all the VGPRs are WWM registers, and we will need to save at
1745 // least their inactive lanes. Add them to WWMReservedRegs.
1746 assert(!NeedExecCopyReservedReg &&
1747 "Whole wave functions can use the reg mapped for their i1 argument");
1748
1749 // FIXME: Be more efficient!
1750 unsigned NumArchVGPRs = ST.has1024AddressableVGPRs() ? 1024 : 256;
1751 for (MCRegister Reg :
1752 AMDGPU::VGPR_32RegClass.getRegisters().take_front(N: NumArchVGPRs))
1753 if (MF.getRegInfo().isPhysRegModified(PhysReg: Reg)) {
1754 MFI->reserveWWMRegister(Reg);
1755 MF.begin()->addLiveIn(PhysReg: Reg);
1756 }
1757 MF.begin()->sortUniqueLiveIns();
1758 }
1759
  // Remove any VGPRs used in the return value; these do not need to be saved,
  // and saving them as CSRs would mean the CSR restore clobbers the return
  // value.
1762 if (ReturnMI) {
1763 for (auto &Op : ReturnMI->operands()) {
1764 if (Op.isReg())
1765 SavedVGPRs.reset(Idx: Op.getReg());
1766 }
1767 }
1768
1769 // Create the stack objects for WWM registers now.
1770 for (Register Reg : MFI->getWWMReservedRegs()) {
1771 const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
1772 MFI->allocateWWMSpill(MF, VGPR: Reg, Size: TRI->getSpillSize(RC: *RC),
1773 Alignment: TRI->getSpillAlign(RC: *RC));
1774 }
1775
1776 // Ignore the SGPRs the default implementation found.
1777 SavedVGPRs.clearBitsNotInMask(Mask: TRI->getAllVectorRegMask());
1778
  // Do not save AGPRs prior to GFX90A because there was no easy way to do so.
  // gfx908 has no direct AGPR loads and stores, so spilling AGPRs there would
  // also require a temporary VGPR.
1782 if (!ST.hasGFX90AInsts())
1783 SavedVGPRs.clearBitsInMask(Mask: TRI->getAllAGPRRegMask());
1784
1785 determinePrologEpilogSGPRSaves(MF, SavedVGPRs, NeedExecCopyReservedReg);
1786
1787 // The Whole-Wave VGPRs need to be specially inserted in the prolog, so don't
1788 // allow the default insertion to handle them.
1789 for (auto &Reg : MFI->getWWMSpills())
1790 SavedVGPRs.reset(Idx: Reg.first);
1791}
1792
1793void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
1794 BitVector &SavedRegs,
1795 RegScavenger *RS) const {
1796 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1797 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1798 if (MFI->isEntryFunction())
1799 return;
1800
1801 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1802 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1803
1804 // The SP is specifically managed and we don't want extra spills of it.
1805 SavedRegs.reset(Idx: MFI->getStackPtrOffsetReg());
1806
1807 const BitVector AllSavedRegs = SavedRegs;
1808 SavedRegs.clearBitsInMask(Mask: TRI->getAllVectorRegMask());
1809
  // We have to anticipate introducing CSR VGPR spills, or a spill of the
  // caller-saved VGPR reserved for SGPR spills, since we now always create a
  // stack entry for it even if there are no other stack objects: an FP is
  // required whenever there is a call and a stack. A VGPR is allocated for
  // SGPR spills whenever there are any SGPR spills, whether they are CSR
  // spills or otherwise.
1815 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
1816 const bool WillHaveFP =
1817 FrameInfo.hasCalls() && (AllSavedRegs.any() || MFI->hasSpilledSGPRs());
1818
1819 // FP will be specially managed like SP.
1820 if (WillHaveFP || hasFP(MF))
1821 SavedRegs.reset(Idx: MFI->getFrameOffsetReg());
1822
  // The return address use in a return instruction is hidden through the
  // SI_RETURN pseudo. Because of that, and because IPRA computes actual
  // register usage rather than consulting the CSR list, the clobbering of the
  // return address by function calls (D117243) or otherwise (D120922) is not
  // seen by IPRA's register usage collection. Setting these bits here ensures
  // the return address is saved and restored in those scenarios.
1829 const MachineRegisterInfo &MRI = MF.getRegInfo();
1830 Register RetAddrReg = TRI->getReturnAddressReg(MF);
1831 if (!MFI->isEntryFunction() &&
1832 (FrameInfo.hasCalls() || MRI.isPhysRegModified(PhysReg: RetAddrReg))) {
1833 SavedRegs.set(TRI->getSubReg(Reg: RetAddrReg, Idx: AMDGPU::sub0));
1834 SavedRegs.set(TRI->getSubReg(Reg: RetAddrReg, Idx: AMDGPU::sub1));
1835 }
1836}
1837
1838static void assignSlotsUsingVGPRBlocks(MachineFunction &MF,
1839 const GCNSubtarget &ST,
1840 std::vector<CalleeSavedInfo> &CSI) {
1841 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1842 MachineFrameInfo &MFI = MF.getFrameInfo();
1843 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1844
1845 assert(
1846 llvm::is_sorted(CSI,
1847 [](const CalleeSavedInfo &A, const CalleeSavedInfo &B) {
1848 return A.getReg() < B.getReg();
1849 }) &&
1850 "Callee saved registers not sorted");
1851
1852 auto CanUseBlockOps = [&](const CalleeSavedInfo &CSI) {
1853 return !CSI.isSpilledToReg() &&
1854 TRI->getPhysRegBaseClass(Reg: CSI.getReg()) == &AMDGPU::VGPR_32RegClass &&
1855 !FuncInfo->isWWMReservedRegister(Reg: CSI.getReg());
1856 };
1857
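  // Greedily group callee-saved VGPRs into register blocks of up to 32
  // consecutive registers, recording a mask of which registers in each block
  // actually need to be saved.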
1858 auto CSEnd = CSI.end();
1859 for (auto CSIt = CSI.begin(); CSIt != CSEnd; ++CSIt) {
1860 Register Reg = CSIt->getReg();
1861 if (!CanUseBlockOps(*CSIt))
1862 continue;
1863
    // Find all the regs that will fit in a 32-bit mask starting at the
    // current reg and build said mask. It should have a 1 for every register
    // that's included, with the current register as the least significant bit.
1867 uint32_t Mask = 1;
1868 CSEnd = std::remove_if(
1869 first: CSIt + 1, last: CSEnd, pred: [&](const CalleeSavedInfo &CSI) -> bool {
1870 if (CanUseBlockOps(CSI) && CSI.getReg() < Reg + 32) {
1871 Mask |= 1 << (CSI.getReg() - Reg);
1872 return true;
1873 } else {
1874 return false;
1875 }
1876 });
1877
1878 const TargetRegisterClass *BlockRegClass = TRI->getRegClassForBlockOp(MF);
1879 Register RegBlock =
1880 TRI->getMatchingSuperReg(Reg, SubIdx: AMDGPU::sub0, RC: BlockRegClass);
1881 if (!RegBlock) {
1882 // We couldn't find a super register for the block. This can happen if
1883 // the register we started with is too high (e.g. v232 if the maximum is
1884 // v255). We therefore try to get the last register block and figure out
1885 // the mask from there.
1886 Register LastBlockStart =
1887 AMDGPU::VGPR0 + alignDown(Value: Reg - AMDGPU::VGPR0, Align: 32);
1888 RegBlock =
1889 TRI->getMatchingSuperReg(Reg: LastBlockStart, SubIdx: AMDGPU::sub0, RC: BlockRegClass);
1890 assert(RegBlock && TRI->isSubRegister(RegBlock, Reg) &&
1891 "Couldn't find super register");
1892 int RegDelta = Reg - LastBlockStart;
1893 assert(RegDelta > 0 && llvm::countl_zero(Mask) >= RegDelta &&
1894 "Bad shift amount");
1895 Mask <<= RegDelta;
1896 }
1897
1898 FuncInfo->setMaskForVGPRBlockOps(RegisterBlock: RegBlock, Mask);
1899
1900 // The stack objects can be a bit smaller than the register block if we know
1901 // some of the high bits of Mask are 0. This may happen often with calling
1902 // conventions where the caller and callee-saved VGPRs are interleaved at
1903 // a small boundary (e.g. 8 or 16).
1904 int UnusedBits = llvm::countl_zero(Val: Mask);
1905 unsigned BlockSize = TRI->getSpillSize(RC: *BlockRegClass) - UnusedBits * 4;
1906 int FrameIdx =
1907 MFI.CreateStackObject(Size: BlockSize, Alignment: TRI->getSpillAlign(RC: *BlockRegClass),
1908 /*isSpillSlot=*/true);
1909 MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FrameIdx, IsCalleeSaved: true);
1910
1911 CSIt->setFrameIdx(FrameIdx);
1912 CSIt->setReg(RegBlock);
1913 }
1914 CSI.erase(first: CSEnd, last: CSI.end());
1915}
1916
1917bool SIFrameLowering::assignCalleeSavedSpillSlots(
1918 MachineFunction &MF, const TargetRegisterInfo *TRI,
1919 std::vector<CalleeSavedInfo> &CSI) const {
1920 if (CSI.empty())
1921 return true; // Early exit if no callee saved registers are modified!
1922
1923 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1924 bool UseVGPRBlocks = ST.useVGPRBlockOpsForCSR();
1925
1926 if (UseVGPRBlocks)
1927 assignSlotsUsingVGPRBlocks(MF, ST, CSI);
1928
1929 return assignCalleeSavedSpillSlotsImpl(MF, TRI, CSI) || UseVGPRBlocks;
1930}
1931
1932bool SIFrameLowering::assignCalleeSavedSpillSlotsImpl(
1933 MachineFunction &MF, const TargetRegisterInfo *TRI,
1934 std::vector<CalleeSavedInfo> &CSI) const {
1935 if (CSI.empty())
1936 return true; // Early exit if no callee saved registers are modified!
1937
1938 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1939 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1940 const SIRegisterInfo *RI = ST.getRegisterInfo();
1941 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1942 Register BasePtrReg = RI->getBaseRegister();
1943 Register SGPRForFPSaveRestoreCopy =
1944 FuncInfo->getScratchSGPRCopyDstReg(Reg: FramePtrReg);
1945 Register SGPRForBPSaveRestoreCopy =
1946 FuncInfo->getScratchSGPRCopyDstReg(Reg: BasePtrReg);
1947 if (!SGPRForFPSaveRestoreCopy && !SGPRForBPSaveRestoreCopy)
1948 return false;
1949
1950 unsigned NumModifiedRegs = 0;
1951
1952 if (SGPRForFPSaveRestoreCopy)
1953 NumModifiedRegs++;
1954 if (SGPRForBPSaveRestoreCopy)
1955 NumModifiedRegs++;
1956
1957 for (auto &CS : CSI) {
1958 if (CS.getReg() == FramePtrReg.asMCReg() && SGPRForFPSaveRestoreCopy) {
1959 CS.setDstReg(SGPRForFPSaveRestoreCopy);
1960 if (--NumModifiedRegs)
1961 break;
1962 } else if (CS.getReg() == BasePtrReg.asMCReg() &&
1963 SGPRForBPSaveRestoreCopy) {
1964 CS.setDstReg(SGPRForBPSaveRestoreCopy);
1965 if (--NumModifiedRegs)
1966 break;
1967 }
1968 }
1969
1970 return false;
1971}
1972
1973bool SIFrameLowering::allocateScavengingFrameIndexesNearIncomingSP(
1974 const MachineFunction &MF) const {
1975
1976 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1977 const MachineFrameInfo &MFI = MF.getFrameInfo();
1978 const SIInstrInfo *TII = ST.getInstrInfo();
1979 uint64_t EstStackSize = MFI.estimateStackSize(MF);
1980 uint64_t MaxOffset = EstStackSize - 1;
1981
1982 // We need the emergency stack slots to be allocated in range of the
1983 // MUBUF/flat scratch immediate offset from the base register, so assign these
1984 // first at the incoming SP position.
1985 //
  // TODO: We could try sorting the objects to find a hole in the first bytes
  // rather than allocating as close as possible. This could save a lot of
  // space on frames with alignment requirements.
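  // If the worst-case frame offset still fits in the scratch instruction's
  // immediate field, the default placement of the scavenging slots is fine.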
1989 if (ST.hasFlatScratchEnabled()) {
1990 if (TII->isLegalFLATOffset(Offset: MaxOffset, AddrSpace: AMDGPUAS::PRIVATE_ADDRESS,
1991 FlatVariant: SIInstrFlags::FlatScratch))
1992 return false;
1993 } else {
1994 if (TII->isLegalMUBUFImmOffset(Imm: MaxOffset))
1995 return false;
1996 }
1997
1998 return true;
1999}
2000
2001bool SIFrameLowering::spillCalleeSavedRegisters(
2002 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2003 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2004 MachineFunction *MF = MBB.getParent();
2005 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2006 if (!ST.useVGPRBlockOpsForCSR())
2007 return false;
2008
2009 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
2010 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
2011 const SIInstrInfo *TII = ST.getInstrInfo();
2012 SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
2013
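  // Registers grouped into blocks by assignSlotsUsingVGPRBlocks are saved
  // with a single masked block store; anything else falls back to the default
  // per-register spill.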
2014 const TargetRegisterClass *BlockRegClass =
2015 static_cast<const SIRegisterInfo *>(TRI)->getRegClassForBlockOp(MF: *MF);
2016 for (const CalleeSavedInfo &CS : CSI) {
2017 Register Reg = CS.getReg();
2018 if (!BlockRegClass->contains(Reg) ||
2019 !FuncInfo->hasMaskForVGPRBlockOps(RegisterBlock: Reg)) {
2020 spillCalleeSavedRegister(SaveBlock&: MBB, MI, CS, TII, TRI);
2021 continue;
2022 }
2023
2024 // Build a scratch block store.
2025 uint32_t Mask = FuncInfo->getMaskForVGPRBlockOps(RegisterBlock: Reg);
2026 int FrameIndex = CS.getFrameIdx();
2027 MachinePointerInfo PtrInfo =
2028 MachinePointerInfo::getFixedStack(MF&: *MF, FI: FrameIndex);
2029 MachineMemOperand *MMO =
2030 MF->getMachineMemOperand(PtrInfo, F: MachineMemOperand::MOStore,
2031 Size: FrameInfo.getObjectSize(ObjectIdx: FrameIndex),
2032 BaseAlignment: FrameInfo.getObjectAlign(ObjectIdx: FrameIndex));
2033
2034 BuildMI(BB&: MBB, I: MI, MIMD: MI->getDebugLoc(),
2035 MCID: TII->get(Opcode: AMDGPU::SI_BLOCK_SPILL_V1024_SAVE))
2036 .addReg(RegNo: Reg, Flags: getKillRegState(B: false))
2037 .addFrameIndex(Idx: FrameIndex)
2038 .addReg(RegNo: MFI->getStackPtrOffsetReg())
2039 .addImm(Val: 0)
2040 .addImm(Val: Mask)
2041 .addMemOperand(MMO);
2042
2043 FuncInfo->setHasSpilledVGPRs();
2044
2045 // Add the register to the liveins. This is necessary because if any of the
2046 // VGPRs in the register block is reserved (e.g. if it's a WWM register),
2047 // then the whole block will be marked as reserved and `updateLiveness` will
2048 // skip it.
2049 MBB.addLiveIn(PhysReg: Reg);
2050 }
2051 MBB.sortUniqueLiveIns();
2052
2053 return true;
2054}
2055
2056bool SIFrameLowering::restoreCalleeSavedRegisters(
2057 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2058 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2059 MachineFunction *MF = MBB.getParent();
2060 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2061 if (!ST.useVGPRBlockOpsForCSR())
2062 return false;
2063
2064 SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
2065 MachineFrameInfo &MFI = MF->getFrameInfo();
2066 const SIInstrInfo *TII = ST.getInstrInfo();
2067 const SIRegisterInfo *SITRI = static_cast<const SIRegisterInfo *>(TRI);
2068 const TargetRegisterClass *BlockRegClass = SITRI->getRegClassForBlockOp(MF: *MF);
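  // Mirror spillCalleeSavedRegisters: block registers with a recorded mask
  // are reloaded with a single masked block load; everything else uses the
  // default per-register restore.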
2069 for (const CalleeSavedInfo &CS : reverse(C&: CSI)) {
2070 Register Reg = CS.getReg();
2071 if (!BlockRegClass->contains(Reg) ||
2072 !FuncInfo->hasMaskForVGPRBlockOps(RegisterBlock: Reg)) {
2073 restoreCalleeSavedRegister(MBB, MI, CS, TII, TRI);
2074 continue;
2075 }
2076
2077 // Build a scratch block load.
2078 uint32_t Mask = FuncInfo->getMaskForVGPRBlockOps(RegisterBlock: Reg);
2079 int FrameIndex = CS.getFrameIdx();
2080 MachinePointerInfo PtrInfo =
2081 MachinePointerInfo::getFixedStack(MF&: *MF, FI: FrameIndex);
2082 MachineMemOperand *MMO = MF->getMachineMemOperand(
2083 PtrInfo, F: MachineMemOperand::MOLoad, Size: MFI.getObjectSize(ObjectIdx: FrameIndex),
2084 BaseAlignment: MFI.getObjectAlign(ObjectIdx: FrameIndex));
2085
2086 auto MIB = BuildMI(BB&: MBB, I: MI, MIMD: MI->getDebugLoc(),
2087 MCID: TII->get(Opcode: AMDGPU::SI_BLOCK_SPILL_V1024_RESTORE), DestReg: Reg)
2088 .addFrameIndex(Idx: FrameIndex)
2089 .addReg(RegNo: FuncInfo->getStackPtrOffsetReg())
2090 .addImm(Val: 0)
2091 .addImm(Val: Mask)
2092 .addMemOperand(MMO);
2093 SITRI->addImplicitUsesForBlockCSRLoad(MIB, BlockReg: Reg);
2094
2095 // Add the register to the liveins. This is necessary because if any of the
2096 // VGPRs in the register block is reserved (e.g. if it's a WWM register),
2097 // then the whole block will be marked as reserved and `updateLiveness` will
2098 // skip it.
2099 MBB.addLiveIn(PhysReg: Reg);
2100 }
2101
2102 MBB.sortUniqueLiveIns();
2103 return true;
2104}
2105
2106MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
2107 MachineFunction &MF,
2108 MachineBasicBlock &MBB,
2109 MachineBasicBlock::iterator I) const {
2110 int64_t Amount = I->getOperand(i: 0).getImm();
2111 if (Amount == 0)
2112 return MBB.erase(I);
2113
2114 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
2115 const SIInstrInfo *TII = ST.getInstrInfo();
2116 const DebugLoc &DL = I->getDebugLoc();
2117 unsigned Opc = I->getOpcode();
2118 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
2119 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(i: 1).getImm() : 0;
2120
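  // Without a reserved call frame, the call frame pseudos must actually
  // adjust SP: align the byte amount and scale it into the units used by the
  // scratch stack pointer.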
2121 if (!hasReservedCallFrame(MF)) {
2122 Amount = alignTo(Size: Amount, A: getStackAlign());
2123 assert(isUInt<32>(Amount) && "exceeded stack address space size");
2124 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2125 Register SPReg = MFI->getStackPtrOffsetReg();
2126
2127 Amount *= getScratchScaleFactor(ST);
2128 if (IsDestroy)
2129 Amount = -Amount;
2130 auto Add = BuildMI(BB&: MBB, I, MIMD: DL, MCID: TII->get(Opcode: AMDGPU::S_ADD_I32), DestReg: SPReg)
2131 .addReg(RegNo: SPReg)
2132 .addImm(Val: Amount);
2133 Add->getOperand(i: 3).setIsDead(); // Mark SCC as dead.
2134 } else if (CalleePopAmount != 0) {
2135 llvm_unreachable("is this used?");
2136 }
2137
2138 return MBB.erase(I);
2139}
2140
2141/// Returns true if the frame will require a reference to the stack pointer.
2142///
2143/// This is the set of conditions common to setting up the stack pointer in a
2144/// kernel, and for using a frame pointer in a callable function.
2145///
2146/// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
2147/// references SP.
2148static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
2149 return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
2150}
2151
// The FP for kernels is always known to be 0, so we never really need to set
// up an explicit register for it. However, DisableFramePointerElim will force
// us to use a register for it.
2155bool SIFrameLowering::hasFPImpl(const MachineFunction &MF) const {
2156 const MachineFrameInfo &MFI = MF.getFrameInfo();
2157
2158 // For entry & chain functions we can use an immediate offset in most cases,
2159 // so the presence of calls doesn't imply we need a distinct frame pointer.
2160 if (MFI.hasCalls() &&
2161 !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
2162 !MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) {
2163 // All offsets are unsigned, so need to be addressed in the same direction
2164 // as stack growth.
2165
2166 // FIXME: This function is pretty broken, since it can be called before the
2167 // frame layout is determined or CSR spills are inserted.
2168 return MFI.getStackSize() != 0;
2169 }
2170
2171 return (frameTriviallyRequiresSP(MFI) &&
2172 !MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) ||
2173 MFI.isFrameAddressTaken() ||
2174 MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->hasStackRealignment(
2175 MF) ||
2176 mayReserveScratchForCWSR(MF) ||
2177 MF.getTarget().Options.DisableFramePointerElim(MF);
2178}
2179
2180bool SIFrameLowering::mayReserveScratchForCWSR(
2181 const MachineFunction &MF) const {
2182 return MF.getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled() &&
2183 AMDGPU::isEntryFunctionCC(CC: MF.getFunction().getCallingConv()) &&
2184 AMDGPU::isCompute(CC: MF.getFunction().getCallingConv());
2185}
2186
2187// This is essentially a reduced version of hasFP for entry functions. Since the
// stack pointer is known to be 0 on entry to kernels, we never really need an FP
2189// register. We may need to initialize the stack pointer depending on the frame
2190// properties, which logically overlaps many of the cases where an ordinary
2191// function would require an FP.
2192// Also used for chain functions. While not technically entry functions, chain
2193// functions may need to set up a stack pointer in some situations.
2194bool SIFrameLowering::requiresStackPointerReference(
2195 const MachineFunction &MF) const {
2196 // Callable functions always require a stack pointer reference.
2197 assert((MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() ||
2198 MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) &&
2199 "only expected to call this for entry points and chain functions");
2200
2201 const MachineFrameInfo &MFI = MF.getFrameInfo();
2202
2203 // Entry points ordinarily don't need to initialize SP. We have to set it up
2204 // for callees if there are any. Also note tail calls are impossible/don't
2205 // make any sense for kernels.
2206 if (MFI.hasCalls())
2207 return true;
2208
2209 // We still need to initialize the SP if we're doing anything weird that
2210 // references the SP, like variable sized stack objects.
2211 return frameTriviallyRequiresSP(MFI);
2212}
2213