1//===-- RISCVFrameLowering.cpp - RISC-V Frame Information -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of TargetFrameLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVFrameLowering.h"
14#include "MCTargetDesc/RISCVBaseInfo.h"
15#include "RISCVMachineFunctionInfo.h"
16#include "RISCVSubtarget.h"
17#include "llvm/BinaryFormat/Dwarf.h"
18#include "llvm/CodeGen/CFIInstBuilder.h"
19#include "llvm/CodeGen/LivePhysRegs.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/RegisterScavenging.h"
25#include "llvm/IR/DiagnosticInfo.h"
26#include "llvm/MC/MCDwarf.h"
27#include "llvm/Support/LEB128.h"
28
29#include <algorithm>
30
31#define DEBUG_TYPE "riscv-frame"
32
33using namespace llvm;
34
35static Align getABIStackAlignment(RISCVABI::ABI ABI) {
36 if (ABI == RISCVABI::ABI_ILP32E)
37 return Align(4);
38 if (ABI == RISCVABI::ABI_LP64E)
39 return Align(8);
40 return Align(16);
41}
42
// Both the main and the transient stack alignment are dictated by the target
// ABI (see getABIStackAlignment); the local area offset is always zero.
RISCVFrameLowering::RISCVFrameLowering(const RISCVSubtarget &STI)
    : TargetFrameLowering(
          StackGrowsDown, getABIStackAlignment(STI.getTargetABI()),
          /*LocalAreaOffset=*/0,
          /*TransientStackAlignment=*/getABIStackAlignment(STI.getTargetABI())),
      STI(STI) {}
49
// The register used to hold the frame pointer.
static constexpr MCPhysReg FPReg = RISCV::X8;

// The register used to hold the stack pointer.
static constexpr MCPhysReg SPReg = RISCV::X2;

// The register used to hold the return address.
static constexpr MCPhysReg RAReg = RISCV::X1;

// List of CSRs that are given a fixed location by save/restore libcalls or
// Zcmp/Xqccmp Push/Pop. The order in this table indicates the order the
// registers are saved on the stack. Zcmp and Xqccmp lay the registers out on
// the stack in the reverse of this order, but this is handled when offsets
// are calculated.
static const MCPhysReg FixedCSRFIMap[] = {
    /*ra*/ RAReg,      /*s0*/ FPReg,      /*s1*/ RISCV::X9,
    /*s2*/ RISCV::X18, /*s3*/ RISCV::X19, /*s4*/ RISCV::X20,
    /*s5*/ RISCV::X21, /*s6*/ RISCV::X22, /*s7*/ RISCV::X23,
    /*s8*/ RISCV::X24, /*s9*/ RISCV::X25, /*s10*/ RISCV::X26,
    /*s11*/ RISCV::X27};

// The number of stack bytes allocated by `QC.C.MIENTER(.NEST)` and popped by
// `QC.C.MILEAVERET`.
static constexpr uint64_t QCIInterruptPushAmount = 96;

// Registers saved by `QC.C.MIENTER(.NEST)` paired with their fixed (negative)
// slot index within that 96-byte frame; the commented-out indexes are slots
// the hardware reserves or fills with CSRs (mepc/mnepc, qc.mcause).
static const std::pair<MCPhysReg, int8_t> FixedCSRFIQCIInterruptMap[] = {
    /* -1 is a gap for mepc/mnepc */
    {/*fp*/ FPReg, -2},
    /* -3 is a gap for qc.mcause */
    {/*ra*/ RAReg, -4},
    /* -5 is reserved */
    {/*t0*/ RISCV::X5, -6},
    {/*t1*/ RISCV::X6, -7},
    {/*t2*/ RISCV::X7, -8},
    {/*a0*/ RISCV::X10, -9},
    {/*a1*/ RISCV::X11, -10},
    {/*a2*/ RISCV::X12, -11},
    {/*a3*/ RISCV::X13, -12},
    {/*a4*/ RISCV::X14, -13},
    {/*a5*/ RISCV::X15, -14},
    {/*a6*/ RISCV::X16, -15},
    {/*a7*/ RISCV::X17, -16},
    {/*t3*/ RISCV::X28, -17},
    {/*t4*/ RISCV::X29, -18},
    {/*t5*/ RISCV::X30, -19},
    {/*t6*/ RISCV::X31, -20},
    /* -21, -22, -23, -24 are reserved */
};
97
/// Returns true if DWARF CFI instructions ("frame moves") should be emitted.
/// Single query point so the prologue/epilogue emitters agree on the policy.
static bool needsDwarfCFI(const MachineFunction &MF) {
  return MF.needsFrameMoves();
}
102
// For now we use x3, a.k.a gp, as pointer to shadow call stack.
// User should not use x3 in their asm.
//
// Emits the shadow-call-stack prologue: pushes RA either via the Zimop
// hardware shadow stack (sspush/c.sspush) or onto a software shadow stack
// addressed through gp, plus the CFI escape needed to unwind the latter.
static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  // We check Zimop instead of (Zimop || Zcmop) to determine whether HW shadow
  // stack is available despite the fact that sspush/sspopchk both have a
  // compressed form, because if only Zcmop is available, we would need to
  // reserve X5 due to c.sspopchk only takes X5 and we currently do not support
  // using X5 as the return address register.
  // However, we can still aggressively use c.sspush x1 if zcmop is available.
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo();

  // Do not save RA to the SCS if it's not saved to the regular stack,
  // i.e. RA is not at risk of being overwritten.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    // Prefer the compressed push when Zcmop provides it; c.sspush is
    // hard-wired to X1, hence the static_assert.
    if (STI.hasStdExtZcmop()) {
      static_assert(RAReg == RISCV::X1, "C.SSPUSH only accepts X1");
      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_C_SSPUSH));
    } else {
      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPUSH)).addReg(RAReg);
    }
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Store return address to shadow call stack
  // addi    gp, gp, [4|8]
  // s[w|d]  ra, -[4|8](gp)
  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
      .addReg(SCSPReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(SlotSize)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
      .addReg(RAReg)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
      .setMIFlag(MachineInstr::FrameSetup);

  if (!needsDwarfCFI(MF))
    return;

  // Emit a CFI instruction that causes SlotSize to be subtracted from the value
  // of the shadow stack pointer when unwinding past this frame.
  char DwarfSCSReg = TRI->getDwarfRegNum(SCSPReg, /*IsEH*/ true);
  assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X3).");

  // -SlotSize is -4 or -8, so it fits in a single (sign-extended) SLEB128
  // byte once masked to 7 bits.
  char Offset = static_cast<char>(-SlotSize) & 0x7f;
  const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      DwarfSCSReg, // register
      2,           // length
      static_cast<char>(unsigned(dwarf::DW_OP_breg0 + DwarfSCSReg)),
      Offset, // addend (sleb128)
  };

  CFIInstBuilder(MBB, MI, MachineInstr::FrameSetup)
      .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
}
180
// Emits the shadow-call-stack epilogue: verifies RA against the hardware
// shadow stack (sspopchk) or reloads it from the gp-based software shadow
// stack, mirroring emitSCSPrologue.
static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  // See emitSCSPrologue() above.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPOPCHK)).addReg(RAReg);
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Load return address from shadow call stack
  // l[w|d]  ra, -[4|8](gp)
  // addi    gp, gp, -[4|8]
  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::LD : RISCV::LW))
      .addReg(RAReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
      .setMIFlag(MachineInstr::FrameDestroy);
  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
      .addReg(SCSPReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
      .setMIFlag(MachineInstr::FrameDestroy);
  if (needsDwarfCFI(MF)) {
    // Restore the SCS pointer
    CFIInstBuilder(MBB, MI, MachineInstr::FrameDestroy).buildRestore(SCSPReg);
  }
}
226
// Insert instruction to swap mscratchsw with sp
//
// For SiFive stack-swapping interrupts, exchanges SP with the
// sf.mscratchcsw CSR via a single csrrw so the handler runs on its own
// stack. Emitted in both prologue and epilogue (the swap is its own
// inverse); requires the XSfmclic vendor extension.
static void emitSiFiveCLICStackSwap(MachineFunction &MF, MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFiveStackSwapInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  assert(STI.hasVendorXSfmclic() && "Stack Swapping Requires XSfmclic");

  // csrrw sp, sf.mscratchcsw, sp
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(SPReg, RegState::Define)
      .addImm(RISCVSysReg::sf_mscratchcsw)
      .addReg(SPReg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

  // FIXME: CFI Information for this swap.
}
249
250static void
251createSiFivePreemptibleInterruptFrameEntries(MachineFunction &MF,
252 RISCVMachineFunctionInfo &RVFI) {
253 if (!RVFI.isSiFivePreemptibleInterrupt(MF))
254 return;
255
256 const TargetRegisterClass &RC = RISCV::GPRRegClass;
257 const TargetRegisterInfo &TRI =
258 *MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
259 MachineFrameInfo &MFI = MF.getFrameInfo();
260
261 // Create two frame objects for spilling X8 and X9, which will be done in
262 // `emitSiFiveCLICPreemptibleSaves`. This is in addition to any other stack
263 // objects we might have for X8 and X9, as they might be saved twice.
264 for (int I = 0; I < 2; ++I) {
265 int FI = MFI.CreateStackObject(Size: TRI.getSpillSize(RC), Alignment: TRI.getSpillAlign(RC),
266 isSpillSlot: true);
267 RVFI.pushInterruptCSRFrameIndex(FI);
268 }
269}
270
// For SiFive preemptible interrupts: spills X8/X9 to their dedicated slots,
// copies mcause/mepc into X8/X9 so they survive preemption, then re-enables
// interrupts (mstatus.MIE). Paired with emitSiFiveCLICPreemptibleRestores.
static void emitSiFiveCLICPreemptibleSaves(MachineFunction &MF,
                                           MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.

  // X8 and X9 might be stored into the stack twice, initially into the
  // `interruptCSRFrameIndex` here, and then maybe again into their CSI frame
  // index.
  //
  // This is done instead of telling the register allocator that we need two
  // VRegs to store the value of `mcause` and `mepc` through the instruction,
  // which affects other passes.
  TII->storeRegToStackSlot(MBB, MBBI, RISCV::X8, /* IsKill=*/true,
                           RVFI->getInterruptCSRFrameIndex(0),
                           &RISCV::GPRRegClass, Register(),
                           MachineInstr::FrameSetup);
  TII->storeRegToStackSlot(MBB, MBBI, RISCV::X9, /* IsKill=*/true,
                           RVFI->getInterruptCSRFrameIndex(1),
                           &RISCV::GPRRegClass, Register(),
                           MachineInstr::FrameSetup);

  // Put `mcause` into X8 (s0), and `mepc` into X9 (s1). If either of these are
  // used in the function, then they will appear in `getUnmanagedCSI` and will
  // be saved again.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
      .addReg(RISCV::X8, RegState::Define)
      .addImm(RISCVSysReg::mcause)
      .addReg(RISCV::X0)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
      .addReg(RISCV::X9, RegState::Define)
      .addImm(RISCVSysReg::mepc)
      .addReg(RISCV::X0)
      .setMIFlag(MachineInstr::FrameSetup);

  // Enable interrupts: set mstatus.MIE (bit 3) via csrrsi.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRSI))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mstatus)
      .addImm(8)
      .setMIFlag(MachineInstr::FrameSetup);
}
322
// Inverse of emitSiFiveCLICPreemptibleSaves: disables interrupts, writes the
// preserved mepc/mcause values back from X9/X8, then reloads X8/X9 from their
// dedicated spill slots (in reverse order of the saves).
static void emitSiFiveCLICPreemptibleRestores(MachineFunction &MF,
                                              MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator MBBI,
                                              const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.

  // Disable interrupts: clear mstatus.MIE (bit 3) via csrrci.
  // NOTE(review): these epilogue instructions are flagged FrameSetup rather
  // than FrameDestroy — confirm this is intentional.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRCI))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mstatus)
      .addImm(8)
      .setMIFlag(MachineInstr::FrameSetup);

  // Restore `mepc` from x9 (s1), and `mcause` from x8 (s0). If either were used
  // in the function, they have already been restored once, so now have the
  // value stored in `emitSiFiveCLICPreemptibleSaves`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mepc)
      .addReg(RISCV::X9, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mcause)
      .addReg(RISCV::X8, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

  // X8 and X9 need to be restored to their values on function entry, which we
  // saved onto the stack in `emitSiFiveCLICPreemptibleSaves`.
  TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X9,
                            RVFI->getInterruptCSRFrameIndex(1),
                            &RISCV::GPRRegClass, Register(),
                            RISCV::NoSubRegister, MachineInstr::FrameSetup);
  TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X8,
                            RVFI->getInterruptCSRFrameIndex(0),
                            &RISCV::GPRRegClass, Register(),
                            RISCV::NoSubRegister, MachineInstr::FrameSetup);
}
369
370// Get the ID of the libcall used for spilling and restoring callee saved
371// registers. The ID is representative of the number of registers saved or
372// restored by the libcall, except it is zero-indexed - ID 0 corresponds to a
373// single register.
374static int getLibCallID(const MachineFunction &MF,
375 const std::vector<CalleeSavedInfo> &CSI) {
376 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
377
378 if (CSI.empty() || !RVFI->useSaveRestoreLibCalls(MF))
379 return -1;
380
381 MCRegister MaxReg;
382 for (auto &CS : CSI)
383 // assignCalleeSavedSpillSlots assigns negative frame indexes to
384 // registers which can be saved by libcall.
385 if (CS.getFrameIdx() < 0)
386 MaxReg = std::max(a: MaxReg.id(), b: CS.getReg().id());
387
388 if (!MaxReg)
389 return -1;
390
391 switch (MaxReg.id()) {
392 default:
393 llvm_unreachable("Something has gone wrong!");
394 // clang-format off
395 case /*s11*/ RISCV::X27: return 12;
396 case /*s10*/ RISCV::X26: return 11;
397 case /*s9*/ RISCV::X25: return 10;
398 case /*s8*/ RISCV::X24: return 9;
399 case /*s7*/ RISCV::X23: return 8;
400 case /*s6*/ RISCV::X22: return 7;
401 case /*s5*/ RISCV::X21: return 6;
402 case /*s4*/ RISCV::X20: return 5;
403 case /*s3*/ RISCV::X19: return 4;
404 case /*s2*/ RISCV::X18: return 3;
405 case /*s1*/ RISCV::X9: return 2;
406 case /*s0*/ FPReg: return 1;
407 case /*ra*/ RAReg: return 0;
408 // clang-format on
409 }
410}
411
412// Get the name of the libcall used for spilling callee saved registers.
413// If this function will not use save/restore libcalls, then return a nullptr.
414static const char *
415getSpillLibCallName(const MachineFunction &MF,
416 const std::vector<CalleeSavedInfo> &CSI) {
417 static const char *const SpillLibCalls[] = {
418 "__riscv_save_0",
419 "__riscv_save_1",
420 "__riscv_save_2",
421 "__riscv_save_3",
422 "__riscv_save_4",
423 "__riscv_save_5",
424 "__riscv_save_6",
425 "__riscv_save_7",
426 "__riscv_save_8",
427 "__riscv_save_9",
428 "__riscv_save_10",
429 "__riscv_save_11",
430 "__riscv_save_12"
431 };
432
433 int LibCallID = getLibCallID(MF, CSI);
434 if (LibCallID == -1)
435 return nullptr;
436 return SpillLibCalls[LibCallID];
437}
438
439// Get the name of the libcall used for restoring callee saved registers.
440// If this function will not use save/restore libcalls, then return a nullptr.
441static const char *
442getRestoreLibCallName(const MachineFunction &MF,
443 const std::vector<CalleeSavedInfo> &CSI) {
444 static const char *const RestoreLibCalls[] = {
445 "__riscv_restore_0",
446 "__riscv_restore_1",
447 "__riscv_restore_2",
448 "__riscv_restore_3",
449 "__riscv_restore_4",
450 "__riscv_restore_5",
451 "__riscv_restore_6",
452 "__riscv_restore_7",
453 "__riscv_restore_8",
454 "__riscv_restore_9",
455 "__riscv_restore_10",
456 "__riscv_restore_11",
457 "__riscv_restore_12"
458 };
459
460 int LibCallID = getLibCallID(MF, CSI);
461 if (LibCallID == -1)
462 return nullptr;
463 return RestoreLibCalls[LibCallID];
464}
465
466// Get the max reg of Push/Pop for restoring callee saved registers.
467static unsigned getNumPushPopRegs(const std::vector<CalleeSavedInfo> &CSI) {
468 unsigned NumPushPopRegs = 0;
469 for (auto &CS : CSI) {
470 auto *FII = llvm::find_if(Range: FixedCSRFIMap,
471 P: [&](MCPhysReg P) { return P == CS.getReg(); });
472 if (FII != std::end(arr: FixedCSRFIMap)) {
473 unsigned RegNum = std::distance(first: std::begin(arr: FixedCSRFIMap), last: FII);
474 NumPushPopRegs = std::max(a: NumPushPopRegs, b: RegNum + 1);
475 }
476 }
477 assert(NumPushPopRegs != 12 && "x26 requires x27 to also be pushed");
478 return NumPushPopRegs;
479}
480
481// Return true if the specified function should have a dedicated frame
482// pointer register. This is true if frame pointer elimination is
483// disabled, if it needs dynamic stack realignment, if the function has
484// variable sized allocas, or if the frame address is taken.
485bool RISCVFrameLowering::hasFPImpl(const MachineFunction &MF) const {
486 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
487
488 const MachineFrameInfo &MFI = MF.getFrameInfo();
489 return MF.getTarget().Options.DisableFramePointerElim(MF) ||
490 RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
491 MFI.isFrameAddressTaken();
492}
493
494bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
495 const MachineFrameInfo &MFI = MF.getFrameInfo();
496 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
497
498 // If we do not reserve stack space for outgoing arguments in prologue,
499 // we will adjust the stack pointer before call instruction. After the
500 // adjustment, we can not use SP to access the stack objects for the
501 // arguments. Instead, use BP to access these stack objects.
502 return (MFI.hasVarSizedObjects() ||
503 (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
504 MFI.getMaxCallFrameSize() != 0))) &&
505 TRI->hasStackRealignment(MF);
506}
507
// Determines the size of the frame and maximum call frame size.
void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t FrameSize = MFI.getStackSize();

  // QCI Interrupts use at least 96 bytes of stack space, since
  // QC.C.MIENTER(.NEST) always allocates that much.
  if (RVFI->useQCIInterrupt(MF))
    FrameSize = std::max(FrameSize, QCIInterruptPushAmount);

  // Get the alignment.
  Align StackAlign = getStackAlign();

  // Make sure the frame is aligned.
  FrameSize = alignTo(FrameSize, StackAlign);

  // Update frame info.
  MFI.setStackSize(FrameSize);

  // When using SP or BP to access stack objects, we may require extra padding
  // to ensure the bottom of the RVV stack is correctly aligned within the main
  // stack. We calculate this as the amount required to align the scalar local
  // variable section up to the RVV alignment.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  if (RVFI->getRVVStackSize() && (!hasFP(MF) || TRI->hasStackRealignment(MF))) {
    int ScalarLocalVarSize = FrameSize - RVFI->getCalleeSavedStackSize() -
                             RVFI->getVarArgsSaveSize();
    // Only record a non-zero padding; zero from offsetToAlignment means the
    // section already satisfies the RVV alignment.
    if (auto RVVPadding =
            offsetToAlignment(ScalarLocalVarSize, RVFI->getRVVStackAlign()))
      RVFI->setRVVPadding(RVVPadding);
  }
}
542
543// Returns the stack size including RVV padding (when required), rounded back
544// up to the required stack alignment.
545uint64_t RISCVFrameLowering::getStackSizeWithRVVPadding(
546 const MachineFunction &MF) const {
547 const MachineFrameInfo &MFI = MF.getFrameInfo();
548 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
549 return alignTo(Size: MFI.getStackSize() + RVFI->getRVVPadding(), A: getStackAlign());
550}
551
552static SmallVector<CalleeSavedInfo, 8>
553getUnmanagedCSI(const MachineFunction &MF,
554 const std::vector<CalleeSavedInfo> &CSI) {
555 const MachineFrameInfo &MFI = MF.getFrameInfo();
556 SmallVector<CalleeSavedInfo, 8> NonLibcallCSI;
557
558 for (auto &CS : CSI) {
559 int FI = CS.getFrameIdx();
560 if (FI >= 0 && MFI.getStackID(ObjectIdx: FI) == TargetStackID::Default)
561 NonLibcallCSI.push_back(Elt: CS);
562 }
563
564 return NonLibcallCSI;
565}
566
567static SmallVector<CalleeSavedInfo, 8>
568getRVVCalleeSavedInfo(const MachineFunction &MF,
569 const std::vector<CalleeSavedInfo> &CSI) {
570 const MachineFrameInfo &MFI = MF.getFrameInfo();
571 SmallVector<CalleeSavedInfo, 8> RVVCSI;
572
573 for (auto &CS : CSI) {
574 int FI = CS.getFrameIdx();
575 if (FI >= 0 && MFI.getStackID(ObjectIdx: FI) == TargetStackID::ScalableVector)
576 RVVCSI.push_back(Elt: CS);
577 }
578
579 return RVVCSI;
580}
581
582static SmallVector<CalleeSavedInfo, 8>
583getPushOrLibCallsSavedInfo(const MachineFunction &MF,
584 const std::vector<CalleeSavedInfo> &CSI) {
585 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
586
587 SmallVector<CalleeSavedInfo, 8> PushOrLibCallsCSI;
588 if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
589 return PushOrLibCallsCSI;
590
591 for (const auto &CS : CSI) {
592 if (RVFI->useQCIInterrupt(MF)) {
593 // Some registers are saved by both `QC.C.MIENTER(.NEST)` and
594 // `QC.CM.PUSH(FP)`. In these cases, prioritise the CFI info that points
595 // to the versions saved by `QC.C.MIENTER(.NEST)` which is what FP
596 // unwinding would use.
597 if (llvm::is_contained(Range: llvm::make_first_range(c: FixedCSRFIQCIInterruptMap),
598 Element: CS.getReg()))
599 continue;
600 }
601
602 if (llvm::is_contained(Range: FixedCSRFIMap, Element: CS.getReg()))
603 PushOrLibCallsCSI.push_back(Elt: CS);
604 }
605
606 return PushOrLibCallsCSI;
607}
608
609static SmallVector<CalleeSavedInfo, 8>
610getQCISavedInfo(const MachineFunction &MF,
611 const std::vector<CalleeSavedInfo> &CSI) {
612 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
613
614 SmallVector<CalleeSavedInfo, 8> QCIInterruptCSI;
615 if (!RVFI->useQCIInterrupt(MF))
616 return QCIInterruptCSI;
617
618 for (const auto &CS : CSI) {
619 if (llvm::is_contained(Range: llvm::make_first_range(c: FixedCSRFIQCIInterruptMap),
620 Element: CS.getReg()))
621 QCIInterruptCSI.push_back(Elt: CS);
622 }
623
624 return QCIInterruptCSI;
625}
626
// Allocates \p Amount bytes (a multiple of RVVBytesPerBlock, scaled at
// runtime by VLENB) for the RVV region, probing the newly allocated pages.
// The probing itself is a PROBED_STACKALLOC_RVV pseudo, expanded into a loop
// later by `inlineStackProbe`. When \p EmitCFI is set, the CFA temporarily
// tracks the loop's target register.
void RISCVFrameLowering::allocateAndProbeStackForRVV(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t Amount,
    MachineInstr::MIFlag Flag, bool EmitCFI, bool DynAllocation) const {
  assert(Amount != 0 && "Did not need to adjust stack pointer for RVV.");

  // Emit a variable-length allocation probing loop.

  // Get VLEN in TargetReg
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  Register TargetReg = RISCV::X6;
  // NOTE(review): Amount is truncated to 32 bits of vreg blocks here —
  // presumably always small enough; confirm.
  uint32_t NumOfVReg = Amount / RISCV::RVVBytesPerBlock;
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PseudoReadVLENB), TargetReg)
      .setMIFlag(Flag);
  TII->mulImm(MF, MBB, MBBI, DL, TargetReg, NumOfVReg, Flag);

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(TargetReg, -Amount);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC_RVV))
      .addReg(TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(SPReg);
  }

  // SUB SP, SP, T1
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::SUB), SPReg)
      .addReg(SPReg)
      .addReg(TargetReg)
      .setMIFlag(Flag);

  // If we have a dynamic allocation later we need to probe any residuals.
  if (DynAllocation) {
    // s[d|w] zero, 0(sp)
    BuildMI(MBB, MBBI, DL, TII->get(STI.is64Bit() ? RISCV::SD : RISCV::SW))
        .addReg(RISCV::X0)
        .addReg(SPReg)
        .addImm(0)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}
673
// Appends to \p Expr the DWARF expression bytes that add
// (FixedOffset + ScalableOffset * VLENB) to the value currently on the DWARF
// stack, and mirrors a human-readable form of the computation into
// \p Comment.
static void appendScalableVectorExpression(const TargetRegisterInfo &TRI,
                                           SmallVectorImpl<char> &Expr,
                                           StackOffset Offset,
                                           llvm::raw_string_ostream &Comment) {
  int64_t FixedOffset = Offset.getFixed();
  int64_t ScalableOffset = Offset.getScalable();
  unsigned DwarfVLenB = TRI.getDwarfRegNum(RISCV::VLENB, /*isEH=*/true);
  // Fold the fixed part in first (skipped entirely when zero).
  if (FixedOffset) {
    Expr.push_back(dwarf::DW_OP_consts);
    appendLEB128<LEB128Sign::Signed>(Expr, FixedOffset);
    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
    Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(FixedOffset);
  }

  // Push the scalable multiplier.
  Expr.push_back((uint8_t)dwarf::DW_OP_consts);
  appendLEB128<LEB128Sign::Signed>(Expr, ScalableOffset);

  // Push the runtime value of VLENB (bregx DwarfVLenB with offset 0).
  Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
  appendLEB128<LEB128Sign::Unsigned>(Expr, DwarfVLenB);
  Expr.push_back(0);

  // Multiply and accumulate onto the running expression.
  Expr.push_back((uint8_t)dwarf::DW_OP_mul);
  Expr.push_back((uint8_t)dwarf::DW_OP_plus);

  Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(ScalableOffset)
          << " * vlenb";
}
701
// Builds a DW_CFA_def_cfa_expression CFI instruction defining the CFA as
// (Reg + FixedOffset + ScalableOffset * VLENB), needed whenever the frame
// contains a scalable (RVV) component.
static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
                                               Register Reg,
                                               StackOffset Offset) {
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  // Build up the expression (Reg + FixedOffset + ScalableOffset * VLENB).
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, /*isEH=*/true);
  Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
  Expr.push_back(0);
  if (Reg == SPReg)
    Comment << "sp";
  else
    Comment << printReg(Reg, &TRI);

  appendScalableVectorExpression(TRI, Expr, Offset, Comment);

  // Wrap the expression in the CFA-definition opcode with its length prefix.
  SmallString<64> DefCfaExpr;
  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, Expr.size());
  DefCfaExpr.append(Expr.str());

  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
                                        Comment.str());
}
728
// Builds a DW_CFA_expression CFI instruction recording that \p Reg is saved
// at CFA + (FixedOffset + ScalableOffset * VLENB), for registers spilled into
// the scalable (RVV) region of the frame.
static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
                                           Register Reg, StackOffset Offset) {
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  Comment << printReg(Reg, &TRI) << "  @ cfa";

  // Build up the expression (FixedOffset + ScalableOffset * VLENB).
  appendScalableVectorExpression(TRI, Expr, Offset, Comment);

  // Wrap the expression in DW_CFA_expression with register and length prefix.
  SmallString<64> DefCfaExpr;
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, /*isEH=*/true);
  DefCfaExpr.push_back(dwarf::DW_CFA_expression);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, DwarfReg);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, Expr.size());
  DefCfaExpr.append(Expr.str());

  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
                                        Comment.str());
}
750
// Allocate stack space and probe it if necessary.
//
// \p Offset is the number of bytes to allocate now, while \p RealStackSize is
// the CFA offset reported once the allocation completes (they differ when,
// presumably, part of the frame was allocated earlier — see the CFAAdjust
// computation below). When \p NeedProbe is set, a zero store touches the
// stack at least once every \p ProbeSize bytes so a stack guard page cannot
// be skipped. \p DynAllocation forces a probe of the final SP so later
// dynamic allocations start from probed memory.
void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       MachineFunction &MF, uint64_t Offset,
                                       uint64_t RealStackSize, bool EmitCFI,
                                       bool NeedProbe, uint64_t ProbeSize,
                                       bool DynAllocation,
                                       MachineInstr::MIFlag Flag) const {
  DebugLoc DL;
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  bool IsRV64 = STI.is64Bit();
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);

  // Simply allocate the stack if it's not big enough to require a probe.
  if (!NeedProbe || Offset <= ProbeSize) {
    RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Offset),
                  Flag, getStackAlign());

    if (EmitCFI)
      CFIBuilder.buildDefCFAOffset(RealStackSize);

    // Even a single-page allocation must be probed when a dynamic
    // allocation will follow.
    if (NeedProbe && DynAllocation) {
      // s[d|w] zero, 0(sp)
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);
    }

    return;
  }

  // Unroll the probe loop depending on the number of iterations.
  if (Offset < ProbeSize * 5) {
    // Portion of RealStackSize already accounted for before this call.
    uint64_t CFAAdjust = RealStackSize - Offset;

    uint64_t CurrentOffset = 0;
    while (CurrentOffset + ProbeSize <= Offset) {
      RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
                    StackOffset::getFixed(-ProbeSize), Flag, getStackAlign());
      // s[d|w] zero, 0(sp)
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);

      CurrentOffset += ProbeSize;
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(CurrentOffset + CFAAdjust);
    }

    uint64_t Residual = Offset - CurrentOffset;
    if (Residual) {
      RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
                    StackOffset::getFixed(-Residual), Flag, getStackAlign());
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(RealStackSize);

      // The residual (< ProbeSize) only needs probing if a dynamic
      // allocation follows.
      if (DynAllocation) {
        // s[d|w] zero, 0(sp)
        BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
            .addReg(RISCV::X0)
            .addReg(SPReg)
            .addImm(0)
            .setMIFlags(Flag);
      }
    }

    return;
  }

  // Emit a variable-length allocation probing loop.
  uint64_t RoundedSize = alignDown(Offset, ProbeSize);
  uint64_t Residual = Offset - RoundedSize;

  Register TargetReg = RISCV::X6;
  // SUB TargetReg, SP, RoundedSize
  RI->adjustReg(MBB, MBBI, DL, TargetReg, SPReg,
                StackOffset::getFixed(-RoundedSize), Flag, getStackAlign());

  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(TargetReg, RoundedSize);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC)).addReg(TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(SPReg);
  }

  if (Residual) {
    RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Residual),
                  Flag, getStackAlign());
    if (DynAllocation) {
      // s[d|w] zero, 0(sp)
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);
    }
  }

  if (EmitCFI)
    CFIBuilder.buildDefCFAOffset(Offset);
}
863
864static bool isPush(unsigned Opcode) {
865 switch (Opcode) {
866 case RISCV::CM_PUSH:
867 case RISCV::QC_CM_PUSH:
868 case RISCV::QC_CM_PUSHFP:
869 return true;
870 default:
871 return false;
872 }
873}
874
875static bool isPop(unsigned Opcode) {
876 // There are other pops but these are the only ones introduced during this
877 // pass.
878 switch (Opcode) {
879 case RISCV::CM_POP:
880 case RISCV::QC_CM_POP:
881 return true;
882 default:
883 return false;
884 }
885}
886
887static unsigned getPushOpcode(RISCVMachineFunctionInfo::PushPopKind Kind,
888 bool UpdateFP) {
889 switch (Kind) {
890 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
891 return RISCV::CM_PUSH;
892 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
893 return UpdateFP ? RISCV::QC_CM_PUSHFP : RISCV::QC_CM_PUSH;
894 default:
895 llvm_unreachable("Unhandled PushPopKind");
896 }
897}
898
899static unsigned getPopOpcode(RISCVMachineFunctionInfo::PushPopKind Kind) {
900 // There are other pops but they are introduced later by the Push/Pop
901 // Optimizer.
902 switch (Kind) {
903 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
904 return RISCV::CM_POP;
905 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
906 return RISCV::QC_CM_POP;
907 default:
908 llvm_unreachable("Unhandled PushPopKind");
909 }
910}
911
/// Emit the function prologue into \p MBB (the entry block).
///
/// The emission order is significant: the SiFive CLIC stack swap and shadow
/// call stack preamble come first, then CFI for libcall-managed spills, the
/// first SP adjustment (possibly folded into a `(QC.)CM.PUSH(FP)`), SiFive
/// CLIC CSR saves, CFI for scalar callee saves, FP establishment, the second
/// SP adjustment (when the adjustment is split), RVV stack allocation, and
/// finally stack realignment / base-pointer setup.
void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  MachineBasicBlock::iterator MBBI = MBB.begin();

  Register BPReg = RISCVABI::getBPReg();

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // SiFive CLIC needs to swap `sp` into `sf.mscratchcsw`
  emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);

  // Emit prologue for shadow call stack.
  emitSCSPrologue(MF, MBB, MI: MBBI, DL);

  // We keep track of the first instruction because it might be a
  // `(QC.)CM.PUSH(FP)`, and we may need to adjust the immediate rather than
  // inserting an `addi sp, sp, -N*16`
  auto PossiblePush = MBBI;

  // Skip past all callee-saved register spill instructions.
  while (MBBI != MBB.end() && MBBI->getFlag(Flag: MachineInstr::FrameSetup))
    ++MBBI;

  // Determine the correct frame layout
  determineFrameLayout(MF);

  const auto &CSI = MFI.getCalleeSavedInfo();

  // Skip to before the spills of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  MBBI = std::prev(x: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size() +
                               getUnmanagedCSI(MF, CSI).size());
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  bool NeedsDwarfCFI = needsDwarfCFI(MF);

  // If libcalls are used to spill and restore callee-saved registers, the frame
  // has two sections; the opaque section managed by the libcalls, and the
  // section managed by MachineFrameInfo which can also hold callee saved
  // registers in fixed stack slots, both of which have negative frame indices.
  // This gets even more complicated when incoming arguments are passed via the
  // stack, as these too have negative frame indices. An example is detailed
  // below:
  //
  //  | incoming arg | <- FI[-3]
  //  | libcallspill |
  //  | calleespill  | <- FI[-2]
  //  | calleespill  | <- FI[-1]
  //  | this_frame   | <- FI[0]
  //
  // For negative frame indices, the offset from the frame pointer will differ
  // depending on which of these groups the frame index applies to.
  // The following calculates the correct offset knowing the number of callee
  // saved registers spilt by the two methods.
  if (int LibCallRegs = getLibCallID(MF, CSI: MFI.getCalleeSavedInfo()) + 1) {
    // Calculate the size of the frame managed by the libcall. The stack
    // alignment of these libcalls should be the same as how we set it in
    // getABIStackAlignment.
    unsigned LibCallFrameSize =
        alignTo(Size: (STI.getXLen() / 8) * LibCallRegs, A: getStackAlign());
    RVFI->setLibCallStackSize(LibCallFrameSize);

    if (NeedsDwarfCFI) {
      CFIBuilder.buildDefCFAOffset(Offset: LibCallFrameSize);
      for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
        CFIBuilder.buildOffset(Reg: CS.getReg(),
                               Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  // FIXME (note copied from Lanai): This appears to be overallocating. Needs
  // investigation. Get the number of bytes to allocate from the FrameInfo.
  uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
  uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
  uint64_t RVVStackSize = RVFI->getRVVStackSize();

  // Early exit if there is no need to allocate on the stack
  if (RealStackSize == 0 && !MFI.adjustsStack() && RVVStackSize == 0)
    return;

  // If the stack pointer has been marked as reserved, then produce an error if
  // the frame requires stack allocation
  if (STI.isRegisterReservedByUser(i: SPReg))
    MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
        MF.getFunction(), "Stack pointer required, but has been reserved."});

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
  // Split the SP adjustment to reduce the offsets of callee saved spill.
  if (FirstSPAdjustAmount) {
    StackSize = FirstSPAdjustAmount;
    RealStackSize = FirstSPAdjustAmount;
  }

  if (RVFI->useQCIInterrupt(MF)) {
    // The function starts with `QC.C.MIENTER(.NEST)`, so the `(QC.)CM.PUSH(FP)`
    // could only be the next instruction.
    ++PossiblePush;

    if (NeedsDwarfCFI) {
      // Insert the CFI metadata before where we think the `(QC.)CM.PUSH(FP)`
      // could be. The PUSH will also get its own CFI metadata for its own
      // modifications, which should come after the PUSH.
      CFIInstBuilder PushCFIBuilder(MBB, PossiblePush,
                                    MachineInstr::FrameSetup);
      PushCFIBuilder.buildDefCFAOffset(Offset: QCIInterruptPushAmount);
      for (const CalleeSavedInfo &CS : getQCISavedInfo(MF, CSI))
        PushCFIBuilder.buildOffset(Reg: CS.getReg(),
                                   Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  if (RVFI->isPushable(MF) && PossiblePush != MBB.end() &&
      isPush(Opcode: PossiblePush->getOpcode())) {
    // Use available stack adjustment in push instruction to allocate additional
    // stack space. Align the stack size down to a multiple of 16. This is
    // needed for RVE.
    // FIXME: Can we increase the stack size to a multiple of 16 instead?
    uint64_t StackAdj =
        std::min(a: alignDown(Value: StackSize, Align: 16), b: static_cast<uint64_t>(48));
    PossiblePush->getOperand(i: 1).setImm(StackAdj);
    StackSize -= StackAdj;

    if (NeedsDwarfCFI) {
      CFIBuilder.buildDefCFAOffset(Offset: RealStackSize - StackSize);
      for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
        CFIBuilder.buildOffset(Reg: CS.getReg(),
                               Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  // Allocate space on the stack if necessary.
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
  bool NeedProbe = TLI->hasInlineStackProbe(MF);
  uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign: getStackAlign());
  bool DynAllocation =
      MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
  if (StackSize != 0)
    allocateStack(MBB, MBBI, MF, Offset: StackSize, RealStackSize, EmitCFI: NeedsDwarfCFI,
                  NeedProbe, ProbeSize, DynAllocation,
                  Flag: MachineInstr::FrameSetup);

  // Save SiFive CLIC CSRs into Stack
  emitSiFiveCLICPreemptibleSaves(MF, MBB, MBBI, DL);

  // The frame pointer is callee-saved, and code has been generated for us to
  // save it to the stack. We need to skip over the storing of callee-saved
  // registers as the frame pointer must be modified after it has been saved
  // to the stack, not before.
  // FIXME: assumes exactly one instruction is used to save each callee-saved
  // register.
  std::advance(i&: MBBI, n: getUnmanagedCSI(MF, CSI).size());
  CFIBuilder.setInsertPoint(MBBI);

  // Iterate over list of callee-saved registers and emit .cfi_offset
  // directives.
  if (NeedsDwarfCFI)
    for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI))
      CFIBuilder.buildOffset(Reg: CS.getReg(),
                             Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));

  // Generate new FP.
  if (hasFP(MF)) {
    if (STI.isRegisterReservedByUser(i: FPReg))
      MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
          MF.getFunction(), "Frame pointer required, but has been reserved."});
    // The frame pointer does need to be reserved from register allocation.
    assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");

    // Some stack management variants automatically keep FP updated, so we don't
    // need an instruction to do so.
    if (!RVFI->hasImplicitFPUpdates(MF)) {
      RI->adjustReg(
          MBB, II: MBBI, DL, DestReg: FPReg, SrcReg: SPReg,
          Offset: StackOffset::getFixed(Fixed: RealStackSize - RVFI->getVarArgsSaveSize()),
          Flag: MachineInstr::FrameSetup, RequiredAlign: getStackAlign());
    }

    if (NeedsDwarfCFI)
      CFIBuilder.buildDefCFA(Reg: FPReg, Offset: RVFI->getVarArgsSaveSize());
  }

  uint64_t SecondSPAdjustAmount = 0;
  // Emit the second SP adjustment after saving callee saved registers.
  if (FirstSPAdjustAmount) {
    SecondSPAdjustAmount = getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
    assert(SecondSPAdjustAmount > 0 &&
           "SecondSPAdjustAmount should be greater than zero");

    allocateStack(MBB, MBBI, MF, Offset: SecondSPAdjustAmount,
                  RealStackSize: getStackSizeWithRVVPadding(MF), EmitCFI: NeedsDwarfCFI && !hasFP(MF),
                  NeedProbe, ProbeSize, DynAllocation,
                  Flag: MachineInstr::FrameSetup);
  }

  if (RVVStackSize) {
    if (NeedProbe) {
      allocateAndProbeStackForRVV(MF, MBB, MBBI, DL, Amount: RVVStackSize,
                                  Flag: MachineInstr::FrameSetup,
                                  EmitCFI: NeedsDwarfCFI && !hasFP(MF), DynAllocation);
    } else {
      // We must keep the stack pointer aligned through any intermediate
      // updates.
      RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getScalable(Scalable: -RVVStackSize),
                    Flag: MachineInstr::FrameSetup, RequiredAlign: getStackAlign());
    }

    if (NeedsDwarfCFI && !hasFP(MF)) {
      // Emit .cfi_def_cfa_expression "sp + StackSize + RVVStackSize * vlenb".
      CFIBuilder.insertCFIInst(CFIInst: createDefCFAExpression(
          TRI: *RI, Reg: SPReg,
          Offset: StackOffset::get(Fixed: getStackSizeWithRVVPadding(MF), Scalable: RVVStackSize / 8)));
    }

    std::advance(i&: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size());
    if (NeedsDwarfCFI)
      emitCalleeSavedRVVPrologCFI(MBB, MI: MBBI, HasFP: hasFP(MF));
  }

  if (hasFP(MF)) {
    // Realign Stack
    const RISCVRegisterInfo *RI = STI.getRegisterInfo();
    if (RI->hasStackRealignment(MF)) {
      Align MaxAlignment = MFI.getMaxAlign();

      const RISCVInstrInfo *TII = STI.getInstrInfo();
      // Prefer a single ANDI when the mask fits a 12-bit immediate;
      // otherwise clear the low bits with a SRLI/SLLI pair.
      if (isInt<12>(x: -(int)MaxAlignment.value())) {
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ANDI), DestReg: SPReg)
            .addReg(RegNo: SPReg)
            .addImm(Val: -(int)MaxAlignment.value())
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        unsigned ShiftAmount = Log2(A: MaxAlignment);
        Register VR =
            MF.getRegInfo().createVirtualRegister(RegClass: &RISCV::GPRRegClass);
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::SRLI), DestReg: VR)
            .addReg(RegNo: SPReg)
            .addImm(Val: ShiftAmount)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::SLLI), DestReg: SPReg)
            .addReg(RegNo: VR)
            .addImm(Val: ShiftAmount)
            .setMIFlag(MachineInstr::FrameSetup);
      }
      if (NeedProbe && RVVStackSize == 0) {
        // Do a probe if the align + size allocated just passed the probe size
        // and was not yet probed.
        if (SecondSPAdjustAmount < ProbeSize &&
            SecondSPAdjustAmount + MaxAlignment.value() >= ProbeSize) {
          bool IsRV64 = STI.is64Bit();
          BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
              .addReg(RegNo: RISCV::X0)
              .addReg(RegNo: SPReg)
              .addImm(Val: 0)
              .setMIFlags(MachineInstr::FrameSetup);
        }
      }
      // FP will be used to restore the frame in the epilogue, so we need
      // another base register BP to record SP after re-alignment. SP will
      // track the current stack after allocating variable sized objects.
      if (hasBP(MF)) {
        // move BP, SP
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ADDI), DestReg: BPReg)
            .addReg(RegNo: SPReg)
            .addImm(Val: 0)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  }
}
1193
1194void RISCVFrameLowering::deallocateStack(MachineFunction &MF,
1195 MachineBasicBlock &MBB,
1196 MachineBasicBlock::iterator MBBI,
1197 const DebugLoc &DL,
1198 uint64_t &StackSize,
1199 int64_t CFAOffset) const {
1200 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1201
1202 RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: StackSize),
1203 Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());
1204 StackSize = 0;
1205
1206 if (needsDwarfCFI(MF))
1207 CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
1208 .buildDefCFAOffset(Offset: CFAOffset);
1209}
1210
/// Emit the function epilogue into \p MBB, mirroring emitPrologue in reverse:
/// undo the RVV allocation, the second half of a split SP adjustment, restore
/// SP from FP when its value is not statically known, rewrite the CFI state
/// back to SP-relative, then handle libcall- or push/pop-based CSR restores
/// and the final stack deallocation.
void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Get the insert location for the epilogue. If there were no terminators in
  // the block, get the last instruction.
  MachineBasicBlock::iterator MBBI = MBB.end();
  DebugLoc DL;
  if (!MBB.empty()) {
    MBBI = MBB.getLastNonDebugInstr();
    if (MBBI != MBB.end())
      DL = MBBI->getDebugLoc();

    MBBI = MBB.getFirstTerminator();

    // Skip to before the restores of all callee-saved registers.
    while (MBBI != MBB.begin() &&
           std::prev(x: MBBI)->getFlag(Flag: MachineInstr::FrameDestroy))
      --MBBI;
  }

  const auto &CSI = MFI.getCalleeSavedInfo();

  // Skip to before the restores of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  auto FirstScalarCSRRestoreInsn =
      std::next(x: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size());
  CFIInstBuilder CFIBuilder(MBB, FirstScalarCSRRestoreInsn,
                            MachineInstr::FrameDestroy);
  bool NeedsDwarfCFI = needsDwarfCFI(MF);

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
  uint64_t RealStackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
                                               : getStackSizeWithRVVPadding(MF);
  uint64_t StackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
                                           : getStackSizeWithRVVPadding(MF) -
                                                 RVFI->getReservedSpillsSize();
  uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
  uint64_t RVVStackSize = RVFI->getRVVStackSize();

  bool RestoreSPFromFP = RI->hasStackRealignment(MF) ||
                         MFI.hasVarSizedObjects() || !hasReservedCallFrame(MF);
  if (RVVStackSize) {
    // If RestoreSPFromFP the stack pointer will be restored using the frame
    // pointer value.
    if (!RestoreSPFromFP)
      RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getScalable(Scalable: RVVStackSize),
                    Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());

    if (NeedsDwarfCFI) {
      if (!hasFP(MF))
        CFIBuilder.buildDefCFA(Reg: SPReg, Offset: RealStackSize);
      emitCalleeSavedRVVEpilogCFI(MBB, MI: FirstScalarCSRRestoreInsn);
    }
  }

  if (FirstSPAdjustAmount) {
    uint64_t SecondSPAdjustAmount =
        getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
    assert(SecondSPAdjustAmount > 0 &&
           "SecondSPAdjustAmount should be greater than zero");

    // If RestoreSPFromFP the stack pointer will be restored using the frame
    // pointer value.
    if (!RestoreSPFromFP)
      RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getFixed(Fixed: SecondSPAdjustAmount),
                    Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());

    if (NeedsDwarfCFI && !hasFP(MF))
      CFIBuilder.buildDefCFAOffset(Offset: FirstSPAdjustAmount);
  }

  // Restore the stack pointer using the value of the frame pointer. Only
  // necessary if the stack pointer was modified, meaning the stack size is
  // unknown.
  //
  // In order to make sure the stack point is right through the EH region,
  // we also need to restore stack pointer from the frame pointer if we
  // don't preserve stack space within prologue/epilogue for outgoing variables,
  // normally it's just checking the variable sized object is present or not
  // is enough, but we also don't preserve that at prologue/epilogue when
  // have vector objects in stack.
  if (RestoreSPFromFP) {
    assert(hasFP(MF) && "frame pointer should not have been eliminated");
    RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: FPReg,
                  Offset: StackOffset::getFixed(Fixed: -FPOffset), Flag: MachineInstr::FrameDestroy,
                  RequiredAlign: getStackAlign());
  }

  if (NeedsDwarfCFI && hasFP(MF))
    CFIBuilder.buildDefCFA(Reg: SPReg, Offset: RealStackSize);

  // Skip to after the restores of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  MBBI = std::next(x: FirstScalarCSRRestoreInsn, n: getUnmanagedCSI(MF, CSI).size());
  CFIBuilder.setInsertPoint(MBBI);

  if (getLibCallID(MF, CSI) != -1) {
    // tail __riscv_restore_[0-12] instruction is considered as a terminator,
    // therefore it is unnecessary to place any CFI instructions after it. Just
    // deallocate stack if needed and return.
    if (StackSize != 0)
      deallocateStack(MF, MBB, MBBI, DL, StackSize,
                      CFAOffset: RVFI->getLibCallStackSize());

    // Emit epilogue for shadow call stack.
    emitSCSEpilogue(MF, MBB, MI: MBBI, DL);
    return;
  }

  // Recover callee-saved registers.
  if (NeedsDwarfCFI)
    for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI))
      CFIBuilder.buildRestore(Reg: CS.getReg());

  if (RVFI->isPushable(MF) && MBBI != MBB.end() && isPop(Opcode: MBBI->getOpcode())) {
    // Use available stack adjustment in pop instruction to deallocate stack
    // space. Align the stack size down to a multiple of 16. This is needed for
    // RVE.
    // FIXME: Can we increase the stack size to a multiple of 16 instead?
    uint64_t StackAdj =
        std::min(a: alignDown(Value: StackSize, Align: 16), b: static_cast<uint64_t>(48));
    MBBI->getOperand(i: 1).setImm(StackAdj);
    StackSize -= StackAdj;

    if (StackSize != 0)
      deallocateStack(MF, MBB, MBBI, DL, StackSize,
                      /*stack_adj of cm.pop instr*/ CFAOffset: RealStackSize - StackSize);

    auto NextI = next_nodbg(It: MBBI, End: MBB.end());
    if (NextI == MBB.end() || NextI->getOpcode() != RISCV::PseudoRET) {
      ++MBBI;
      if (NeedsDwarfCFI) {
        CFIBuilder.setInsertPoint(MBBI);

        for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
          CFIBuilder.buildRestore(Reg: CS.getReg());

        // Update CFA Offset. If this is a QCI interrupt function, there will
        // be a leftover offset which is deallocated by `QC.C.MILEAVERET`,
        // otherwise getQCIInterruptStackSize() will be 0.
        CFIBuilder.buildDefCFAOffset(Offset: RVFI->getQCIInterruptStackSize());
      }
    }
  }

  emitSiFiveCLICPreemptibleRestores(MF, MBB, MBBI, DL);

  // Deallocate stack if StackSize isn't a zero yet. If this is a QCI interrupt
  // function, there will be a leftover offset which is deallocated by
  // `QC.C.MILEAVERET`, otherwise getQCIInterruptStackSize() will be 0.
  if (StackSize != 0)
    deallocateStack(MF, MBB, MBBI, DL, StackSize,
                    CFAOffset: RVFI->getQCIInterruptStackSize());

  // Emit epilogue for shadow call stack.
  emitSCSEpilogue(MF, MBB, MI: MBBI, DL);

  // SiFive CLIC needs to swap `sf.mscratchcsw` into `sp`
  emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);
}
1383
/// Resolve frame index \p FI to a base register (returned via \p FrameReg)
/// plus a StackOffset with fixed and/or scalable components. Callee-saved
/// slots are addressed from SP; realigned frames use BP (or SP when no
/// variable-sized objects exist); otherwise the register comes from
/// getFrameRegister. Scalable-vector objects carry their offset in the
/// scalable (vlenb-multiplied) component.
StackOffset
RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                           Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Callee-saved registers should be referenced relative to the stack
  // pointer (positive offset), otherwise use the frame pointer (negative
  // offset).
  const auto &CSI = getUnmanagedCSI(MF, CSI: MFI.getCalleeSavedInfo());
  int MinCSFI = 0;
  int MaxCSFI = -1;
  StackOffset Offset;
  auto StackID = MFI.getStackID(ObjectIdx: FI);

  assert((StackID == TargetStackID::Default ||
          StackID == TargetStackID::ScalableVector) &&
         "Unexpected stack ID for the frame object.");
  if (StackID == TargetStackID::Default) {
    assert(getOffsetOfLocalArea() == 0 && "LocalAreaOffset is not 0!");
    Offset = StackOffset::getFixed(Fixed: MFI.getObjectOffset(ObjectIdx: FI) +
                                       MFI.getOffsetAdjustment());
  } else if (StackID == TargetStackID::ScalableVector) {
    Offset = StackOffset::getScalable(Scalable: MFI.getObjectOffset(ObjectIdx: FI));
  }

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);

  // Unmanaged CSI frame indices form a contiguous range; record its bounds.
  if (CSI.size()) {
    MinCSFI = CSI[0].getFrameIdx();
    MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
  }

  if (FI >= MinCSFI && FI <= MaxCSFI) {
    FrameReg = SPReg;

    if (FirstSPAdjustAmount)
      Offset += StackOffset::getFixed(Fixed: FirstSPAdjustAmount);
    else
      Offset += StackOffset::getFixed(Fixed: getStackSizeWithRVVPadding(MF));
    return Offset;
  }

  if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(ObjectIdx: FI)) {
    // If the stack was realigned, the frame pointer is set in order to allow
    // SP to be restored, so we need another base register to record the stack
    // after realignment.
    // |--------------------------| -- <-- FP
    // | callee-allocated save    | | <----|
    // | area for register varargs| |      |
    // |--------------------------| |      |
    // | callee-saved registers   | |      |
    // |--------------------------| --     |
    // | realignment (the size of | |      |
    // | this area is not counted | |      |
    // | in MFI.getStackSize())   | |      |
    // |--------------------------| --     |-- MFI.getStackSize()
    // | RVV alignment padding    | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize() but   | |      |
    // | counted in               | |      |
    // | RVFI.getRVVStackSize())  | |      |
    // |--------------------------| --     |
    // | RVV objects              | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize())      | |      |
    // |--------------------------| --     |
    // | padding before RVV       | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize() or in | |      |
    // | RVFI.getRVVStackSize())  | |      |
    // |--------------------------| --     |
    // | scalar local variables   | | <----'
    // |--------------------------| -- <-- BP (if var sized objects present)
    // | VarSize objects          | |
    // |--------------------------| -- <-- SP
    if (hasBP(MF)) {
      FrameReg = RISCVABI::getBPReg();
    } else {
      // VarSize objects must be empty in this case!
      assert(!MFI.hasVarSizedObjects());
      FrameReg = SPReg;
    }
  } else {
    FrameReg = RI->getFrameRegister(MF);
  }

  if (FrameReg == FPReg) {
    Offset += StackOffset::getFixed(Fixed: RVFI->getVarArgsSaveSize());
    // When using FP to access scalable vector objects, we need to minus
    // the frame size.
    //
    // |--------------------------| -- <-- FP
    // | callee-allocated save    | |
    // | area for register varargs| |
    // |--------------------------| |
    // | callee-saved registers   | |
    // |--------------------------| | MFI.getStackSize()
    // | scalar local variables   | |
    // |--------------------------| -- (Offset of RVV objects is from here.)
    // | RVV objects              |
    // |--------------------------|
    // | VarSize objects          |
    // |--------------------------| <-- SP
    if (StackID == TargetStackID::ScalableVector) {
      assert(!RI->hasStackRealignment(MF) &&
             "Can't index across variable sized realign");
      // We don't expect any extra RVV alignment padding, as the stack size
      // and RVV object sections should be correct aligned in their own
      // right.
      assert(MFI.getStackSize() == getStackSizeWithRVVPadding(MF) &&
             "Inconsistent stack layout");
      Offset -= StackOffset::getFixed(Fixed: MFI.getStackSize());
    }
    return Offset;
  }

  // This case handles indexing off both SP and BP.
  // If indexing off SP, there must not be any var sized objects
  assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());

  // When using SP to access frame objects, we need to add RVV stack size.
  //
  // |--------------------------| -- <-- FP
  // | callee-allocated save    | | <----|
  // | area for register varargs| |      |
  // |--------------------------| |      |
  // | callee-saved registers   | |      |
  // |--------------------------| --     |
  // | RVV alignment padding    | |      |
  // | (not counted in          | |      |
  // | MFI.getStackSize() but   | |      |
  // | counted in               | |      |
  // | RVFI.getRVVStackSize())  | |      |
  // |--------------------------| --     |
  // | RVV objects              | |      |-- MFI.getStackSize()
  // | (not counted in          | |      |
  // | MFI.getStackSize())      | |      |
  // |--------------------------| --     |
  // | padding before RVV       | |      |
  // | (not counted in          | |      |
  // | MFI.getStackSize())      | |      |
  // |--------------------------| --     |
  // | scalar local variables   | | <----'
  // |--------------------------| -- <-- BP (if var sized objects present)
  // | VarSize objects          | |
  // |--------------------------| -- <-- SP
  //
  // The total amount of padding surrounding RVV objects is described by
  // RVV->getRVVPadding() and it can be zero. It allows us to align the RVV
  // objects to the required alignment.
  if (MFI.getStackID(ObjectIdx: FI) == TargetStackID::Default) {
    if (MFI.isFixedObjectIndex(ObjectIdx: FI)) {
      assert(!RI->hasStackRealignment(MF) &&
             "Can't index across variable sized realign");
      Offset += StackOffset::get(Fixed: getStackSizeWithRVVPadding(MF),
                                 Scalable: RVFI->getRVVStackSize());
    } else {
      Offset += StackOffset::getFixed(Fixed: MFI.getStackSize());
    }
  } else if (MFI.getStackID(ObjectIdx: FI) == TargetStackID::ScalableVector) {
    // Ensure the base of the RVV stack is correctly aligned: add on the
    // alignment padding.
    int ScalarLocalVarSize = MFI.getStackSize() -
                             RVFI->getCalleeSavedStackSize() -
                             RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
    Offset += StackOffset::get(Fixed: ScalarLocalVarSize, Scalable: RVFI->getRVVStackSize());
  }
  return Offset;
}
1555
1556static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI,
1557 const Register &Reg) {
1558 MCRegister BaseReg = TRI.getSubReg(Reg, Idx: RISCV::sub_vrm1_0);
1559 // If it's not a grouped vector register, it doesn't have subregister, so
1560 // the base register is just itself.
1561 if (!BaseReg.isValid())
1562 BaseReg = Reg;
1563 return BaseReg;
1564}
1565
/// Decide which callee-saved registers must be spilled. On top of the generic
/// TargetFrameLowering logic this re-marks RVV register groups so that a
/// grouped register is only saved when warranted by its subregisters, forces
/// RA/FP when a frame pointer is used, BP when a base pointer is needed, and
/// pairs X27 with X26 for cm.push/cm.pop.
void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  // In TargetFrameLowering::determineCalleeSaves, any vector register is marked
  // as saved if any of its subregister is clobbered, this is not correct in
  // vector registers. We only want the vector register to be marked as saved
  // if all of its subregisters are clobbered.
  // For example:
  // Original behavior: If v24 is marked, v24m2, v24m4, v24m8 are also marked.
  // Correct behavior: v24m2 is marked only if v24 and v25 are marked.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned CSReg = CSRegs[i];
    // Only vector registers need special care.
    if (!RISCV::VRRegClass.contains(Reg: getRVVBaseRegister(TRI, Reg: CSReg)))
      continue;

    // Clear the generic verdict; it is recomputed from scratch below.
    SavedRegs.reset(Idx: CSReg);

    auto SubRegs = TRI.subregs(Reg: CSReg);
    // Set the register and all its subregisters.
    if (!MRI.def_empty(RegNo: CSReg) || MRI.getUsedPhysRegsMask().test(Idx: CSReg)) {
      SavedRegs.set(CSReg);
      for (unsigned Reg : SubRegs)
        SavedRegs.set(Reg);
    }

    // Combine to super register if all of its subregisters are marked.
    if (!SubRegs.empty() && llvm::all_of(Range&: SubRegs, P: [&](unsigned Reg) {
          return SavedRegs.test(Idx: Reg);
        }))
      SavedRegs.set(CSReg);
  }

  // Unconditionally spill RA and FP only if the function uses a frame
  // pointer.
  if (hasFP(MF)) {
    SavedRegs.set(RAReg);
    SavedRegs.set(FPReg);
  }
  // Mark BP as used if function has dedicated base pointer.
  if (hasBP(MF))
    SavedRegs.set(RISCVABI::getBPReg());

  // When using cm.push/pop we must save X27 if we save X26.
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->isPushable(MF) && SavedRegs.test(Idx: RISCV::X26))
    SavedRegs.set(RISCV::X27);

  // SiFive Preemptible Interrupt Handlers need additional frame entries
  createSiFivePreemptibleInterruptFrameEntries(MF, RVFI&: *RVFI);
}
1622
// Assign offsets to all scalable-vector (RVV) stack objects and return the
// total size of the RVV stack region together with its required alignment.
// Sizes and offsets in this region are multiples of vscale; offsets are
// negative, growing downward from the top of the region. Callee-saved RVV
// objects are placed before the remaining RVV objects.
std::pair<int64_t, Align>
RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Create a buffer of RVV objects to allocate.
  SmallVector<int, 8> ObjectsToAllocate;
  // Collect live ScalableVector-stack objects in the index range
  // [FIBegin, FIEnd).
  auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
    for (int I = FIBegin, E = FIEnd; I != E; ++I) {
      unsigned StackID = MFI.getStackID(ObjectIdx: I);
      if (StackID != TargetStackID::ScalableVector)
        continue;
      if (MFI.isDeadObjectIndex(ObjectIdx: I))
        continue;

      ObjectsToAllocate.push_back(Elt: I);
    }
  };
  // First push RVV Callee Saved object, then push RVV stack object
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
  if (!RVVCSI.empty())
    pushRVVObjects(RVVCSI[0].getFrameIdx(),
                   RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
  pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());

  // The minimum alignment is 16 bytes.
  Align RVVStackAlign(16);
  const auto &ST = MF.getSubtarget<RISCVSubtarget>();

  if (!ST.hasVInstructions()) {
    assert(ObjectsToAllocate.empty() &&
           "Can't allocate scalable-vector objects without V instructions");
    return std::make_pair(x: 0, y&: RVVStackAlign);
  }

  // Allocate all RVV locals and spills
  int64_t Offset = 0;
  for (int FI : ObjectsToAllocate) {
    // ObjectSize in bytes.
    int64_t ObjectSize = MFI.getObjectSize(ObjectIdx: FI);
    auto ObjectAlign =
        std::max(a: Align(RISCV::RVVBytesPerBlock), b: MFI.getObjectAlign(ObjectIdx: FI));
    // If the data type is the fractional vector type, reserve one vector
    // register for it.
    if (ObjectSize < RISCV::RVVBytesPerBlock)
      ObjectSize = RISCV::RVVBytesPerBlock;
    Offset = alignTo(Size: Offset + ObjectSize, A: ObjectAlign);
    // Offsets are negative: objects grow down from the region's top.
    MFI.setObjectOffset(ObjectIdx: FI, SPOffset: -Offset);
    // Update the maximum alignment of the RVV stack section
    RVVStackAlign = std::max(a: RVVStackAlign, b: ObjectAlign);
  }

  uint64_t StackSize = Offset;

  // Ensure the alignment of the RVV stack. Since we want the most-aligned
  // object right at the bottom (i.e., any padding at the top of the frame),
  // readjust all RVV objects down by the alignment padding.
  // Stack size and offsets are multiples of vscale, stack alignment is in
  // bytes, we can divide stack alignment by minimum vscale to get a maximum
  // stack alignment multiple of vscale.
  auto VScale =
      std::max<uint64_t>(a: ST.getRealMinVLen() / RISCV::RVVBitsPerBlock, b: 1);
  if (auto RVVStackAlignVScale = RVVStackAlign.value() / VScale) {
    if (auto AlignmentPadding =
            offsetToAlignment(Value: StackSize, Alignment: Align(RVVStackAlignVScale))) {
      StackSize += AlignmentPadding;
      for (int FI : ObjectsToAllocate)
        MFI.setObjectOffset(ObjectIdx: FI, SPOffset: MFI.getObjectOffset(ObjectIdx: FI) - AlignmentPadding);
    }
  }

  return std::make_pair(x&: StackSize, y&: RVVStackAlign);
}
1695
1696static unsigned getScavSlotsNumForRVV(MachineFunction &MF) {
1697 // For RVV spill, scalable stack offsets computing requires up to two scratch
1698 // registers
1699 static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
1700
1701 // For RVV spill, non-scalable stack offsets computing requires up to one
1702 // scratch register.
1703 static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
1704
1705 // ADDI instruction's destination register can be used for computing
1706 // offsets. So Scalable stack offsets require up to one scratch register.
1707 static constexpr unsigned ScavSlotsADDIScalableObject = 1;
1708
1709 static constexpr unsigned MaxScavSlotsNumKnown =
1710 std::max(l: {ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
1711 ScavSlotsNumRVVSpillNonScalableObject});
1712
1713 unsigned MaxScavSlotsNum = 0;
1714 if (!MF.getSubtarget<RISCVSubtarget>().hasVInstructions())
1715 return false;
1716 for (const MachineBasicBlock &MBB : MF)
1717 for (const MachineInstr &MI : MBB) {
1718 bool IsRVVSpill = RISCV::isRVVSpill(MI);
1719 for (auto &MO : MI.operands()) {
1720 if (!MO.isFI())
1721 continue;
1722 bool IsScalableVectorID = MF.getFrameInfo().getStackID(ObjectIdx: MO.getIndex()) ==
1723 TargetStackID::ScalableVector;
1724 if (IsRVVSpill) {
1725 MaxScavSlotsNum = std::max(
1726 a: MaxScavSlotsNum, b: IsScalableVectorID
1727 ? ScavSlotsNumRVVSpillScalableObject
1728 : ScavSlotsNumRVVSpillNonScalableObject);
1729 } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
1730 MaxScavSlotsNum =
1731 std::max(a: MaxScavSlotsNum, b: ScavSlotsADDIScalableObject);
1732 }
1733 }
1734 if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
1735 return MaxScavSlotsNumKnown;
1736 }
1737 return MaxScavSlotsNum;
1738}
1739
1740static bool hasRVVFrameObject(const MachineFunction &MF) {
1741 // Originally, the function will scan all the stack objects to check whether
1742 // if there is any scalable vector object on the stack or not. However, it
1743 // causes errors in the register allocator. In issue 53016, it returns false
1744 // before RA because there is no RVV stack objects. After RA, it returns true
1745 // because there are spilling slots for RVV values during RA. It will not
1746 // reserve BP during register allocation and generate BP access in the PEI
1747 // pass due to the inconsistent behavior of the function.
1748 //
1749 // The function is changed to use hasVInstructions() as the return value. It
1750 // is not precise, but it can make the register allocation correct.
1751 //
1752 // FIXME: Find a better way to make the decision or revisit the solution in
1753 // D103622.
1754 //
1755 // Refer to https://github.com/llvm/llvm-project/issues/53016.
1756 return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
1757}
1758
1759static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
1760 const RISCVInstrInfo &TII) {
1761 unsigned FnSize = 0;
1762 for (auto &MBB : MF) {
1763 for (auto &MI : MBB) {
1764 // Far branches over 20-bit offset will be relaxed in branch relaxation
1765 // pass. In the worst case, conditional branches will be relaxed into
1766 // the following instruction sequence. Unconditional branches are
1767 // relaxed in the same way, with the exception that there is no first
1768 // branch instruction.
1769 //
1770 // foo
1771 // bne t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
1772 // sd s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1773 // jump .restore, s11 # 8 bytes
1774 // .rev_cond
1775 // bar
1776 // j .dest_bb # 4 bytes, or 2 bytes with Zca
1777 // .restore:
1778 // ld s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1779 // .dest:
1780 // baz
1781 if (MI.isConditionalBranch())
1782 FnSize += TII.getInstSizeInBytes(MI);
1783 if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
1784 if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZca())
1785 FnSize += 2 + 8 + 2 + 2;
1786 else
1787 FnSize += 4 + 8 + 4 + 4;
1788 continue;
1789 }
1790
1791 FnSize += TII.getInstSizeInBytes(MI);
1792 }
1793 }
1794 return FnSize;
1795}
1796
// Finalize frame details after register allocation: lay out the RVV stack
// region, reserve emergency scavenging spill slots, and record the
// callee-saved stack size in RISCVMachineFunctionInfo.
void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  const RISCVRegisterInfo *RegInfo =
      MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Scavenging slots created below are GPR-sized spill slots.
  const TargetRegisterClass *RC = &RISCV::GPRRegClass;
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Assign offsets to all scalable-vector objects and cache the region's
  // size and alignment for later frame-index resolution.
  int64_t RVVStackSize;
  Align RVVStackAlign;
  std::tie(args&: RVVStackSize, args&: RVVStackAlign) = assignRVVStackObjectOffsets(MF);

  RVFI->setRVVStackSize(RVVStackSize);
  RVFI->setRVVStackAlign(RVVStackAlign);

  if (hasRVVFrameObject(MF)) {
    // Ensure the entire stack is aligned to at least the RVV requirement: some
    // scalable-vector object alignments are not considered by the
    // target-independent code.
    MFI.ensureMaxAlignment(Alignment: RVVStackAlign);
  }

  unsigned ScavSlotsNum = 0;

  // estimateStackSize has been observed to under-estimate the final stack
  // size, so give ourselves wiggle-room by checking for stack size
  // representable an 11-bit signed field rather than 12-bits.
  if (!isInt<11>(x: MFI.estimateStackSize(MF)))
    ScavSlotsNum = 1;

  // Far branches over 20-bit offset require a spill slot for scratch register.
  bool IsLargeFunction = !isInt<20>(x: estimateFunctionSizeInBytes(MF, TII: *TII));
  if (IsLargeFunction)
    ScavSlotsNum = std::max(a: ScavSlotsNum, b: 1u);

  // RVV loads & stores have no capacity to hold the immediate address offsets
  // so we must always reserve an emergency spill slot if the MachineFunction
  // contains any RVV spills.
  ScavSlotsNum = std::max(a: ScavSlotsNum, b: getScavSlotsNumForRVV(MF));

  for (unsigned I = 0; I < ScavSlotsNum; I++) {
    int FI = MFI.CreateSpillStackObject(Size: RegInfo->getSpillSize(RC: *RC),
                                        Alignment: RegInfo->getSpillAlign(RC: *RC));
    RS->addScavengingFrameIndex(FI);

    // Remember the first slot so branch relaxation can reuse it as scratch
    // storage in large functions.
    if (IsLargeFunction && RVFI->getBranchRelaxationScratchFrameIndex() == -1)
      RVFI->setBranchRelaxationScratchFrameIndex(FI);
  }

  // Compute the callee-saved stack size: start from the reserved spill size
  // and add every default-stack (non-fixed, non-RVV) CSR spill slot.
  unsigned Size = RVFI->getReservedSpillsSize();
  for (const auto &Info : MFI.getCalleeSavedInfo()) {
    int FrameIdx = Info.getFrameIdx();
    // Skip fixed objects (negative indexes) and non-default stack IDs.
    if (FrameIdx < 0 || MFI.getStackID(ObjectIdx: FrameIdx) != TargetStackID::Default)
      continue;

    Size += MFI.getObjectSize(ObjectIdx: FrameIdx);
  }
  RVFI->setCalleeSavedStackSize(Size);
}
1857
1858// Not preserve stack space within prologue for outgoing variables when the
1859// function contains variable size objects or there are vector objects accessed
1860// by the frame pointer.
1861// Let eliminateCallFramePseudoInstr preserve stack space for it.
1862bool RISCVFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
1863 return !MF.getFrameInfo().hasVarSizedObjects() &&
1864 !(hasFP(MF) && hasRVVFrameObject(MF));
1865}
1866
// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
// Returns an iterator to the instruction following the erased pseudo.
MachineBasicBlock::iterator RISCVFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MI) const {
  DebugLoc DL = MI->getDebugLoc();

  if (!hasReservedCallFrame(MF)) {
    // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
    // ADJCALLSTACKUP must be converted to instructions manipulating the stack
    // pointer. This is necessary when there is a variable length stack
    // allocation (e.g. alloca), which means it's not possible to allocate
    // space for outgoing arguments from within the function prologue.
    int64_t Amount = MI->getOperand(i: 0).getImm();

    if (Amount != 0) {
      // Ensure the stack remains aligned after adjustment.
      Amount = alignSPAdjust(SPAdj: Amount);

      // ADJCALLSTACKDOWN grows the stack, i.e. decreases SP, so negate.
      if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
        Amount = -Amount;

      const RISCVTargetLowering *TLI =
          MF.getSubtarget<RISCVSubtarget>().getTargetLowering();
      int64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign: getStackAlign());
      if (TLI->hasInlineStackProbe(MF) && -Amount >= ProbeSize) {
        // When stack probing is enabled, the decrement of SP may need to be
        // probed. We can handle both the decrement and the probing in
        // allocateStack.
        bool DynAllocation =
            MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
        allocateStack(MBB, MBBI: MI, MF, Offset: -Amount, RealStackSize: -Amount,
                      EmitCFI: needsDwarfCFI(MF) && !hasFP(MF),
                      /*NeedProbe=*/true, ProbeSize, DynAllocation,
                      Flag: MachineInstr::NoFlags);
      } else {
        // Plain SP adjustment, no probing needed.
        const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
        RI.adjustReg(MBB, II: MI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: Amount),
                     Flag: MachineInstr::NoFlags, RequiredAlign: getStackAlign());
      }
    }
  }

  return MBB.erase(I: MI);
}
1911
// We would like to split the SP adjustment to reduce prologue/epilogue
// as following instructions. In this way, the offset of the callee saved
// register could fit in a single store. Supposed that the first sp adjust
// amount is 2032.
//     add     sp,sp,-2032
//     sw      ra,2028(sp)
//     sw      s0,2024(sp)
//     sw      s1,2020(sp)
//     sw      s3,2012(sp)
//     sw      s4,2008(sp)
//     add     sp,sp,-64
//
// Returns the size of the first SP adjustment, or 0 when the adjustment
// should not be split.
uint64_t
RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  uint64_t StackSize = getStackSizeWithRVVPadding(MF);

  // Disable SplitSPAdjust if save-restore libcall, push/pop or QCI interrupts
  // are used. The callee-saved registers will be pushed by the save-restore
  // libcalls, so we don't have to split the SP adjustment in this case.
  if (RVFI->getReservedSpillsSize())
    return 0;

  // Return the FirstSPAdjustAmount if the StackSize can not fit in a signed
  // 12-bit and there exists a callee-saved register needing to be pushed.
  if (!isInt<12>(x: StackSize) && (CSI.size() > 0)) {
    // FirstSPAdjustAmount is chosen at most as (2048 - StackAlign) because
    // 2048 will cause sp = sp + 2048 in the epilogue to be split into multiple
    // instructions. Offsets smaller than 2048 can fit in a single load/store
    // instruction, and we have to stick with the stack alignment. 2048 has
    // 16-byte alignment. The stack alignment for RV32 and RV64 is 16 and for
    // RV32E it is 4. So (2048 - StackAlign) will satisfy the stack alignment.
    const uint64_t StackAlign = getStackAlign().value();

    // Amount of (2048 - StackAlign) will prevent callee saved and restored
    // instructions be compressed, so try to adjust the amount to the largest
    // offset that stack compression instructions accept when target supports
    // compression instructions.
    if (STI.hasStdExtZca()) {
      // The compression extensions may support the following instructions:
      // riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
      //          c.swsp rs2, offset[7:2] => 2^(6 + 2)
      //          c.flwsp rd, offset[7:2] => 2^(6 + 2)
      //          c.fswsp rs2, offset[7:2] => 2^(6 + 2)
      // riscv64: c.ldsp rd, offset[8:3] => 2^(6 + 3)
      //          c.sdsp rs2, offset[8:3] => 2^(6 + 3)
      //          c.fldsp rd, offset[8:3] => 2^(6 + 3)
      //          c.fsdsp rs2, offset[8:3] => 2^(6 + 3)
      const uint64_t RVCompressLen = STI.getXLen() * 8;
      // Compared with amount (2048 - StackAlign), StackSize needs to
      // satisfy the following conditions to avoid using more instructions
      // to adjust the sp after adjusting the amount, such as
      // StackSize meets the condition (StackSize <= 2048 + RVCompressLen),
      // case1: Amount is 2048 - StackAlign: use addi + addi to adjust sp.
      // case2: Amount is RVCompressLen: use addi + addi to adjust sp.
      auto CanCompress = [&](uint64_t CompressLen) -> bool {
        if (StackSize <= 2047 + CompressLen ||
            (StackSize > 2048 * 2 - StackAlign &&
             StackSize <= 2047 * 2 + CompressLen) ||
            StackSize > 2048 * 3 - StackAlign)
          return true;

        return false;
      };
      // In the epilogue, addi sp, sp, 496 is used to recover the sp and it
      // can be compressed(C.ADDI16SP, offset can be [-512, 496]), but
      // addi sp, sp, 512 can not be compressed. So try to use 496 first.
      const uint64_t ADDI16SPCompressLen = 496;
      if (STI.is64Bit() && CanCompress(ADDI16SPCompressLen))
        return ADDI16SPCompressLen;
      if (CanCompress(RVCompressLen))
        return RVCompressLen;
    }
    return 2048 - StackAlign;
  }
  return 0;
}
1990
// Assign frame indexes (fixed or allocated) to every callee-saved register
// in \p CSI, accounting for QCI interrupt save areas, cm.push regions, and
// save-restore libcall layouts. Returns true to indicate the target has
// handled the assignment itself.
bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  // Preemptible Interrupts have two additional Callee-save Frame Indexes,
  // not tracked by `CSI`.
  if (RVFI->isSiFivePreemptibleInterrupt(MF)) {
    for (int I = 0; I < 2; ++I) {
      int FI = RVFI->getInterruptCSRFrameIndex(Idx: I);
      MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FI, IsCalleeSaved: true);
    }
  }

  // Early exit if no callee saved registers are modified!
  if (CSI.empty())
    return true;

  if (RVFI->useQCIInterrupt(MF)) {
    RVFI->setQCIInterruptStackSize(QCIInterruptPushAmount);
  }

  if (RVFI->isPushable(MF)) {
    // Determine how many GPRs we need to push and save it to RVFI.
    unsigned PushedRegNum = getNumPushPopRegs(CSI);

    // `QC.C.MIENTER(.NEST)` will save `ra` and `s0`, so we should only push if
    // we want to push more than 2 registers. Otherwise, we should push if we
    // want to push more than 0 registers.
    unsigned OnlyPushIfMoreThan = RVFI->useQCIInterrupt(MF) ? 2 : 0;
    if (PushedRegNum > OnlyPushIfMoreThan) {
      RVFI->setRVPushRegs(PushedRegNum);
      RVFI->setRVPushStackSize(alignTo(Value: (STI.getXLen() / 8) * PushedRegNum, Align: 16));
    }
  }

  for (auto &CS : CSI) {
    MCRegister Reg = CS.getReg();
    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
    unsigned Size = RegInfo->getSpillSize(RC: *RC);

    // Registers saved by the QCI interrupt entry get fixed slots at the
    // offsets given by FixedCSRFIQCIInterruptMap.
    if (RVFI->useQCIInterrupt(MF)) {
      const auto *FFI = llvm::find_if(Range: FixedCSRFIQCIInterruptMap, P: [&](auto P) {
        return P.first == CS.getReg();
      });
      if (FFI != std::end(arr: FixedCSRFIQCIInterruptMap)) {
        int64_t Offset = FFI->second * (int64_t)Size;

        int FrameIdx = MFI.CreateFixedSpillStackObject(Size, SPOffset: Offset);
        assert(FrameIdx < 0);
        CS.setFrameIdx(FrameIdx);
        continue;
      }
    }

    // Registers covered by push/pop or the save-restore libcalls get fixed
    // slots at offsets determined by their position in FixedCSRFIMap.
    if (RVFI->useSaveRestoreLibCalls(MF) || RVFI->isPushable(MF)) {
      const auto *FII = llvm::find_if(
          Range: FixedCSRFIMap, P: [&](MCPhysReg P) { return P == CS.getReg(); });
      unsigned RegNum = std::distance(first: std::begin(arr: FixedCSRFIMap), last: FII);

      if (FII != std::end(arr: FixedCSRFIMap)) {
        int64_t Offset;
        if (RVFI->getPushPopKind(MF) ==
            RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp)
          Offset = -int64_t(RVFI->getRVPushRegs() - RegNum) * Size;
        else
          Offset = -int64_t(RegNum + 1) * Size;

        // The QCI save area sits between these slots and the frame base.
        if (RVFI->useQCIInterrupt(MF))
          Offset -= QCIInterruptPushAmount;

        int FrameIdx = MFI.CreateFixedSpillStackObject(Size, SPOffset: Offset);
        assert(FrameIdx < 0);
        CS.setFrameIdx(FrameIdx);
        continue;
      }
    }

    // Not a fixed slot.
    Align Alignment = RegInfo->getSpillAlign(RC: *RC);
    // We may not be able to satisfy the desired alignment specification of
    // the TargetRegisterClass if the stack alignment is smaller. Use the
    // min.
    Alignment = std::min(a: Alignment, b: getStackAlign());
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, isSpillSlot: true);
    MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FrameIdx, IsCalleeSaved: true);
    CS.setFrameIdx(FrameIdx);
    // RVV registers spill into the scalable-vector stack region.
    if (RISCVRegisterInfo::isRVVRegClass(RC))
      MFI.setStackID(ObjectIdx: FrameIdx, ID: TargetStackID::ScalableVector);
  }

  if (RVFI->useQCIInterrupt(MF)) {
    // Allocate a fixed object that covers the entire QCI stack allocation,
    // because there are gaps which are reserved for future use.
    MFI.CreateFixedSpillStackObject(
        Size: QCIInterruptPushAmount, SPOffset: -static_cast<int64_t>(QCIInterruptPushAmount));
  }

  if (RVFI->isPushable(MF)) {
    int64_t QCIOffset = RVFI->useQCIInterrupt(MF) ? QCIInterruptPushAmount : 0;
    // Allocate a fixed object that covers the full push.
    if (int64_t PushSize = RVFI->getRVPushStackSize())
      MFI.CreateFixedSpillStackObject(Size: PushSize, SPOffset: -PushSize - QCIOffset);
  } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
    // Similarly, cover the save-restore libcall's spill area with one object.
    int64_t LibCallFrameSize =
        alignTo(Size: (STI.getXLen() / 8) * LibCallRegs, A: getStackAlign());
    MFI.CreateFixedSpillStackObject(Size: LibCallFrameSize, SPOffset: -LibCallFrameSize);
  }

  return true;
}
2104
// Emit prologue code to save the callee-saved registers in \p CSI: a QCI
// interrupt entry (QC.C.MIENTER[.NEST]), a CM.PUSH, or a __riscv_save
// libcall where applicable, then explicit stores for everything those
// mechanisms do not cover (including RVV CSRs). Returns true to indicate
// the target handled the spilling.
bool RISCVFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->useQCIInterrupt(MF: *MF)) {
    // Emit QC.C.MIENTER(.NEST)
    BuildMI(
        BB&: MBB, I: MI, MIMD: DL,
        MCID: TII.get(Opcode: RVFI->getInterruptStackKind(MF: *MF) ==
                        RISCVMachineFunctionInfo::InterruptStackKind::QCINest
                    ? RISCV::QC_C_MIENTER_NEST
                    : RISCV::QC_C_MIENTER))
        .setMIFlag(MachineInstr::FrameSetup);

    // The registers the entry instruction saves become block live-ins.
    for (auto [Reg, _Offset] : FixedCSRFIQCIInterruptMap)
      MBB.addLiveIn(PhysReg: Reg);
  }

  if (RVFI->isPushable(MF: *MF)) {
    // Emit CM.PUSH with base StackAdj & evaluate Push stack
    unsigned PushedRegNum = RVFI->getRVPushRegs();
    if (PushedRegNum > 0) {
      // Use encoded number to represent registers to spill.
      unsigned Opcode = getPushOpcode(
          Kind: RVFI->getPushPopKind(MF: *MF), UpdateFP: hasFP(MF: *MF) && !RVFI->useQCIInterrupt(MF: *MF));
      unsigned RegEnc = RISCVZC::encodeRegListNumRegs(NumRegs: PushedRegNum);
      MachineInstrBuilder PushBuilder =
          BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode))
              .setMIFlag(MachineInstr::FrameSetup);
      PushBuilder.addImm(Val: RegEnc);
      PushBuilder.addImm(Val: 0);

      // Mark the pushed registers as implicit uses of the push instruction.
      for (unsigned i = 0; i < PushedRegNum; i++)
        PushBuilder.addUse(RegNo: FixedCSRFIMap[i], Flags: RegState::Implicit);
    }
  } else if (const char *SpillLibCall = getSpillLibCallName(MF: *MF, CSI)) {
    // Add spill libcall via non-callee-saved register t0.
    BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::PseudoCALLReg), DestReg: RISCV::X5)
        .addExternalSymbol(FnName: SpillLibCall, TargetFlags: RISCVII::MO_CALL)
        .setMIFlag(MachineInstr::FrameSetup);

    // Add registers spilled in libcall as liveins.
    for (auto &CS : CSI)
      MBB.addLiveIn(PhysReg: CS.getReg());
  }

  // Manually spill values not spilled by libcall & Push/Pop.
  const auto &UnmanagedCSI = getUnmanagedCSI(MF: *MF, CSI);
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI);

  auto storeRegsToStackSlots = [&](decltype(UnmanagedCSI) CSInfo) {
    for (auto &CS : CSInfo) {
      // Insert the spill to the stack frame.
      MCRegister Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      // A register that is already live-in (e.g. saved by a libcall above)
      // must not be marked killed by the store.
      TII.storeRegToStackSlot(MBB, MI, SrcReg: Reg, isKill: !MBB.isLiveIn(Reg),
                              FrameIndex: CS.getFrameIdx(), RC, VReg: Register(),
                              Flags: MachineInstr::FrameSetup);
    }
  };
  storeRegsToStackSlots(UnmanagedCSI);
  storeRegsToStackSlots(RVVCSI);

  return true;
}
2179
2180static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg) {
2181 return RISCV::VRRegClass.contains(Reg: BaseReg) ? 1
2182 : RISCV::VRM2RegClass.contains(Reg: BaseReg) ? 2
2183 : RISCV::VRM4RegClass.contains(Reg: BaseReg) ? 4
2184 : 8;
2185}
2186
// Emit prologue CFI for every callee-saved RVV register: one def-CFA-offset
// directive per VR in each saved register group, with a mixed
// fixed/scalable offset built from the object's RVV-region offset.
void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, bool HasFP) const {
  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();

  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI: MFI.getCalleeSavedInfo());
  if (RVVCSI.empty())
    return;

  uint64_t FixedSize = getStackSizeWithRVVPadding(MF: *MF);
  if (!HasFP) {
    // Without a frame pointer, the CFA is not offset by the scalar local
    // variable area, so subtract it from the fixed part of the offset.
    uint64_t ScalarLocalVarSize =
        MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
        RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
    FixedSize -= ScalarLocalVarSize;
  }

  CFIInstBuilder CFIBuilder(MBB, MI, MachineInstr::FrameSetup);
  for (auto &CS : RVVCSI) {
    // Insert the spill to the stack frame.
    int FI = CS.getFrameIdx();
    MCRegister BaseReg = getRVVBaseRegister(TRI, Reg: CS.getReg());
    unsigned NumRegs = getCalleeSavedRVVNumRegs(BaseReg: CS.getReg());
    // Emit one CFI entry per VR in the group; consecutive VRs are one
    // vscale-unit apart in the RVV region.
    for (unsigned i = 0; i < NumRegs; ++i) {
      CFIBuilder.insertCFIInst(CFIInst: createDefCFAOffset(
          TRI, Reg: BaseReg + i,
          Offset: StackOffset::get(Fixed: -FixedSize, Scalable: MFI.getObjectOffset(ObjectIdx: FI) / 8 + i)));
    }
  }
}
2219
2220void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
2221 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
2222 MachineFunction *MF = MBB.getParent();
2223 const MachineFrameInfo &MFI = MF->getFrameInfo();
2224 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
2225
2226 CFIInstBuilder CFIHelper(MBB, MI, MachineInstr::FrameDestroy);
2227 const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI: MFI.getCalleeSavedInfo());
2228 for (auto &CS : RVVCSI) {
2229 MCRegister BaseReg = getRVVBaseRegister(TRI, Reg: CS.getReg());
2230 unsigned NumRegs = getCalleeSavedRVVNumRegs(BaseReg: CS.getReg());
2231 for (unsigned i = 0; i < NumRegs; ++i)
2232 CFIHelper.buildRestore(Reg: BaseReg + i);
2233 }
2234}
2235
// Emit epilogue code to restore the callee-saved registers in \p CSI.
// Explicit reloads handle RVV CSRs and plain stack slots; registers covered
// by push/pop, the QCI interrupt return, or the __riscv_restore libcalls
// are restored by those mechanisms. Returns true to indicate the target
// handled the restoring.
bool RISCVFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  // Manually restore values not restored by libcall & Push/Pop.
  // Reverse the restore order in epilog.  In addition, the return
  // address will be restored first in the epilogue. It increases
  // the opportunity to avoid the load-to-use data hazard between
  // loading RA and return by RA.  loadRegFromStackSlot can insert
  // multiple instructions.
  const auto &UnmanagedCSI = getUnmanagedCSI(MF: *MF, CSI);
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI);

  auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
    for (auto &CS : CSInfo) {
      MCRegister Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      TII.loadRegFromStackSlot(MBB, MI, DestReg: Reg, FrameIndex: CS.getFrameIdx(), RC, VReg: Register(),
                               SubReg: RISCV::NoSubRegister,
                               Flags: MachineInstr::FrameDestroy);
      assert(MI != MBB.begin() &&
             "loadRegFromStackSlot didn't insert any code!");
    }
  };
  // Reload RVV CSRs before scalar CSRs.
  loadRegFromStackSlot(RVVCSI);
  loadRegFromStackSlot(UnmanagedCSI);

  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->useQCIInterrupt(MF: *MF)) {
    // Don't emit anything here because restoration is handled by
    // QC.C.MILEAVERET which we already inserted to return.
    assert(MI->getOpcode() == RISCV::QC_C_MILEAVERET &&
           "Unexpected QCI Interrupt Return Instruction");
  }

  if (RVFI->isPushable(MF: *MF)) {
    unsigned PushedRegNum = RVFI->getRVPushRegs();
    if (PushedRegNum > 0) {
      unsigned Opcode = getPopOpcode(Kind: RVFI->getPushPopKind(MF: *MF));
      unsigned RegEnc = RISCVZC::encodeRegListNumRegs(NumRegs: PushedRegNum);
      MachineInstrBuilder PopBuilder =
          BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode))
              .setMIFlag(MachineInstr::FrameDestroy);
      // Use encoded number to represent registers to restore.
      PopBuilder.addImm(Val: RegEnc);
      PopBuilder.addImm(Val: 0);

      // Mark the popped registers as implicit defs of the pop instruction.
      // NOTE(review): loop bound re-reads getRVPushRegs(); it equals
      // PushedRegNum captured above.
      for (unsigned i = 0; i < RVFI->getRVPushRegs(); i++)
        PopBuilder.addDef(RegNo: FixedCSRFIMap[i], Flags: RegState::ImplicitDefine);
    }
  } else {
    const char *RestoreLibCall = getRestoreLibCallName(MF: *MF, CSI);
    if (RestoreLibCall) {
      // Add restore libcall via tail call.
      MachineBasicBlock::iterator NewMI =
          BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::PseudoTAIL))
              .addExternalSymbol(FnName: RestoreLibCall, TargetFlags: RISCVII::MO_CALL)
              .setMIFlag(MachineInstr::FrameDestroy);

      // Remove trailing returns, since the terminator is now a tail call to the
      // restore function.
      if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
        NewMI->copyImplicitOps(MF&: *MF, MI: *MI);
        MI->eraseFromParent();
      }
    }
  }
  return true;
}
2313
2314bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
2315 // Keep the conventional code flow when not optimizing.
2316 if (MF.getFunction().hasOptNone())
2317 return false;
2318
2319 return true;
2320}
2321
2322bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
2323 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2324 const MachineFunction *MF = MBB.getParent();
2325 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2326
2327 // Make sure VTYPE and VL are not live-in since we will use vsetvli in the
2328 // prologue to get the VLEN, and that will clobber these registers.
2329 //
2330 // We may do also check the stack contains objects with scalable vector type,
2331 // but this will require iterating over all the stack objects, but this may
2332 // not worth since the situation is rare, we could do further check in future
2333 // if we find it is necessary.
2334 if (STI.preferVsetvliOverReadVLENB() &&
2335 (MBB.isLiveIn(Reg: RISCV::VTYPE) || MBB.isLiveIn(Reg: RISCV::VL)))
2336 return false;
2337
2338 if (!RVFI->useSaveRestoreLibCalls(MF: *MF))
2339 return true;
2340
2341 // Inserting a call to a __riscv_save libcall requires the use of the register
2342 // t0 (X5) to hold the return address. Therefore if this register is already
2343 // used we can't insert the call.
2344
2345 RegScavenger RS;
2346 RS.enterBasicBlock(MBB&: *TmpMBB);
2347 return !RS.isRegUsed(Reg: RISCV::X5);
2348}
2349
2350bool RISCVFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
2351 const MachineFunction *MF = MBB.getParent();
2352 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2353 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2354
2355 // We do not want QC.C.MILEAVERET to be subject to shrink-wrapping - it must
2356 // come in the final block of its function as it both pops and returns.
2357 if (RVFI->useQCIInterrupt(MF: *MF))
2358 return MBB.succ_empty();
2359
2360 if (!RVFI->useSaveRestoreLibCalls(MF: *MF))
2361 return true;
2362
2363 // Using the __riscv_restore libcalls to restore CSRs requires a tail call.
2364 // This means if we still need to continue executing code within this function
2365 // the restore cannot take place in this basic block.
2366
2367 if (MBB.succ_size() > 1)
2368 return false;
2369
2370 MachineBasicBlock *SuccMBB =
2371 MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
2372
2373 // Doing a tail call should be safe if there are no successors, because either
2374 // we have a returning block or the end of the block is unreachable, so the
2375 // restore will be eliminated regardless.
2376 if (!SuccMBB)
2377 return true;
2378
2379 // The successor can only contain a return, since we would effectively be
2380 // replacing the successor with our own tail return at the end of our block.
2381 return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
2382}
2383
2384bool RISCVFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
2385 switch (ID) {
2386 case TargetStackID::Default:
2387 case TargetStackID::ScalableVector:
2388 return true;
2389 case TargetStackID::NoAlloc:
2390 case TargetStackID::SGPRSpill:
2391 case TargetStackID::WasmLocal:
2392 case TargetStackID::ScalablePredicateVector:
2393 return false;
2394 }
2395 llvm_unreachable("Invalid TargetStackID::Value");
2396}
2397
// Stack objects whose size scales with VLEN (RVV vectors) are assigned to the
// dedicated ScalableVector stack region rather than the default stack.
TargetStackID::Value RISCVFrameLowering::getStackIDForScalableVectors() const {
  return TargetStackID::ScalableVector;
}
2401
2402// Synthesize the probe loop.
2403static void emitStackProbeInline(MachineBasicBlock::iterator MBBI, DebugLoc DL,
2404 Register TargetReg, bool IsRVV) {
2405 assert(TargetReg != RISCV::X2 && "New top of stack cannot already be in SP");
2406
2407 MachineBasicBlock &MBB = *MBBI->getParent();
2408 MachineFunction &MF = *MBB.getParent();
2409
2410 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
2411 const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
2412 bool IsRV64 = Subtarget.is64Bit();
2413 Align StackAlign = Subtarget.getFrameLowering()->getStackAlign();
2414 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
2415 uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign);
2416
2417 MachineFunction::iterator MBBInsertPoint = std::next(x: MBB.getIterator());
2418 MachineBasicBlock *LoopTestMBB =
2419 MF.CreateMachineBasicBlock(BB: MBB.getBasicBlock());
2420 MF.insert(MBBI: MBBInsertPoint, MBB: LoopTestMBB);
2421 MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(BB: MBB.getBasicBlock());
2422 MF.insert(MBBI: MBBInsertPoint, MBB: ExitMBB);
2423 MachineInstr::MIFlag Flags = MachineInstr::FrameSetup;
2424 Register ScratchReg = RISCV::X7;
2425
2426 // ScratchReg = ProbeSize
2427 TII->movImm(MBB, MBBI, DL, DstReg: ScratchReg, Val: ProbeSize, Flag: Flags);
2428
2429 // LoopTest:
2430 // SUB SP, SP, ProbeSize
2431 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB), DestReg: SPReg)
2432 .addReg(RegNo: SPReg)
2433 .addReg(RegNo: ScratchReg)
2434 .setMIFlags(Flags);
2435
2436 // s[d|w] zero, 0(sp)
2437 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL,
2438 MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
2439 .addReg(RegNo: RISCV::X0)
2440 .addReg(RegNo: SPReg)
2441 .addImm(Val: 0)
2442 .setMIFlags(Flags);
2443
2444 if (IsRVV) {
2445 // SUB TargetReg, TargetReg, ProbeSize
2446 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB),
2447 DestReg: TargetReg)
2448 .addReg(RegNo: TargetReg)
2449 .addReg(RegNo: ScratchReg)
2450 .setMIFlags(Flags);
2451
2452 // BGE TargetReg, ProbeSize, LoopTest
2453 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::BGE))
2454 .addReg(RegNo: TargetReg)
2455 .addReg(RegNo: ScratchReg)
2456 .addMBB(MBB: LoopTestMBB)
2457 .setMIFlags(Flags);
2458
2459 } else {
2460 // BNE SP, TargetReg, LoopTest
2461 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::BNE))
2462 .addReg(RegNo: SPReg)
2463 .addReg(RegNo: TargetReg)
2464 .addMBB(MBB: LoopTestMBB)
2465 .setMIFlags(Flags);
2466 }
2467
2468 ExitMBB->splice(Where: ExitMBB->end(), Other: &MBB, From: std::next(x: MBBI), To: MBB.end());
2469 ExitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: &MBB);
2470
2471 LoopTestMBB->addSuccessor(Succ: ExitMBB);
2472 LoopTestMBB->addSuccessor(Succ: LoopTestMBB);
2473 MBB.addSuccessor(Succ: LoopTestMBB);
2474 // Update liveins.
2475 fullyRecomputeLiveIns(MBBs: {ExitMBB, LoopTestMBB});
2476}
2477
2478void RISCVFrameLowering::inlineStackProbe(MachineFunction &MF,
2479 MachineBasicBlock &MBB) const {
2480 // Get the instructions that need to be replaced. We emit at most two of
2481 // these. Remember them in order to avoid complications coming from the need
2482 // to traverse the block while potentially creating more blocks.
2483 SmallVector<MachineInstr *, 4> ToReplace;
2484 for (MachineInstr &MI : MBB) {
2485 unsigned Opc = MI.getOpcode();
2486 if (Opc == RISCV::PROBED_STACKALLOC ||
2487 Opc == RISCV::PROBED_STACKALLOC_RVV) {
2488 ToReplace.push_back(Elt: &MI);
2489 }
2490 }
2491
2492 for (MachineInstr *MI : ToReplace) {
2493 if (MI->getOpcode() == RISCV::PROBED_STACKALLOC ||
2494 MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV) {
2495 MachineBasicBlock::iterator MBBI = MI->getIterator();
2496 DebugLoc DL = MBB.findDebugLoc(MBBI);
2497 Register TargetReg = MI->getOperand(i: 0).getReg();
2498 emitStackProbeInline(MBBI, DL, TargetReg,
2499 IsRVV: (MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV));
2500 MBBI->eraseFromParent();
2501 }
2502 }
2503}
2504
// On function entry the CFA coincides with the incoming stack pointer, so the
// initial offset from the CFA register is zero.
int RISCVFrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
  return 0;
}
2508
2509Register
2510RISCVFrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
2511 return RISCV::X2;
2512}
2513