1//===-- RISCVFrameLowering.cpp - RISC-V Frame Information -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of TargetFrameLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVFrameLowering.h"
14#include "MCTargetDesc/RISCVBaseInfo.h"
15#include "RISCVMachineFunctionInfo.h"
16#include "RISCVSubtarget.h"
17#include "llvm/BinaryFormat/Dwarf.h"
18#include "llvm/CodeGen/CFIInstBuilder.h"
19#include "llvm/CodeGen/LivePhysRegs.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/RegisterScavenging.h"
25#include "llvm/IR/DiagnosticInfo.h"
26#include "llvm/MC/MCDwarf.h"
27#include "llvm/Support/LEB128.h"
28
29#include <algorithm>
30
31#define DEBUG_TYPE "riscv-frame"
32
33using namespace llvm;
34
35static Align getABIStackAlignment(RISCVABI::ABI ABI) {
36 if (ABI == RISCVABI::ABI_ILP32E)
37 return Align(4);
38 if (ABI == RISCVABI::ABI_LP64E)
39 return Align(8);
40 return Align(16);
41}
42
// The stack grows downward, the local area starts at offset 0 from the
// incoming SP, and both the stack alignment and the transient stack alignment
// are dictated by the target ABI (see getABIStackAlignment above).
RISCVFrameLowering::RISCVFrameLowering(const RISCVSubtarget &STI)
    : TargetFrameLowering(
          StackGrowsDown, getABIStackAlignment(ABI: STI.getTargetABI()),
          /*LocalAreaOffset=*/0,
          /*TransientStackAlignment=*/getABIStackAlignment(ABI: STI.getTargetABI())),
      STI(STI) {}
49
// The register used to hold the frame pointer (x8, a.k.a. s0).
static constexpr MCPhysReg FPReg = RISCV::X8;

// The register used to hold the stack pointer (x2).
static constexpr MCPhysReg SPReg = RISCV::X2;

// The register used to hold the return address (x1).
static constexpr MCPhysReg RAReg = RISCV::X1;
58
// List of CSRs that are given a fixed location by save/restore libcalls or
// Zcmp/Xqccmp Push/Pop. The order in this table indicates the order the
// registers are saved on the stack. Zcmp and Xqccmp push/pop place them in
// the reverse order on the stack, but this is handled when offsets are
// calculated.
static const MCPhysReg FixedCSRFIMap[] = {
    /*ra*/ RAReg, /*s0*/ FPReg, /*s1*/ RISCV::X9,
    /*s2*/ RISCV::X18, /*s3*/ RISCV::X19, /*s4*/ RISCV::X20,
    /*s5*/ RISCV::X21, /*s6*/ RISCV::X22, /*s7*/ RISCV::X23,
    /*s8*/ RISCV::X24, /*s9*/ RISCV::X25, /*s10*/ RISCV::X26,
    /*s11*/ RISCV::X27};
69
// The number of stack bytes allocated by `QC.C.MIENTER(.NEST)` and popped by
// `QC.C.MILEAVERET`.
static constexpr uint64_t QCIInterruptPushAmount = 96;

// Registers saved by `QC.C.MIENTER(.NEST)` and the fixed slot each occupies
// within its frame. The int8_t values are negative slot indices counted down
// from the top of the interrupt frame (presumably scaled by the register
// size where they are consumed — verify at the use sites).
static const std::pair<MCPhysReg, int8_t> FixedCSRFIQCIInterruptMap[] = {
    /* -1 is a gap for mepc/mnepc */
    {/*fp*/ FPReg, -2},
    /* -3 is a gap for qc.mcause */
    {/*ra*/ RAReg, -4},
    /* -5 is reserved */
    {/*t0*/ RISCV::X5, -6},
    {/*t1*/ RISCV::X6, -7},
    {/*t2*/ RISCV::X7, -8},
    {/*a0*/ RISCV::X10, -9},
    {/*a1*/ RISCV::X11, -10},
    {/*a2*/ RISCV::X12, -11},
    {/*a3*/ RISCV::X13, -12},
    {/*a4*/ RISCV::X14, -13},
    {/*a5*/ RISCV::X15, -14},
    {/*a6*/ RISCV::X16, -15},
    {/*a7*/ RISCV::X17, -16},
    {/*t3*/ RISCV::X28, -17},
    {/*t4*/ RISCV::X29, -18},
    {/*t5*/ RISCV::X30, -19},
    {/*t6*/ RISCV::X31, -20},
    /* -21, -22, -23, -24 are reserved */
};
97
/// Returns true if DWARF CFI instructions ("frame moves") should be emitted.
/// Thin wrapper over MachineFunction::needsFrameMoves that keeps the many
/// call sites in this file terse.
static bool needsDwarfCFI(const MachineFunction &MF) {
  return MF.needsFrameMoves();
}
102
// Emit the shadow-call-stack prologue: save RA either to the hardware shadow
// stack (Zimop sspush, or the compressed c.sspush when Zcmop is available) or
// to the software shadow stack addressed by the SCS pointer register, and for
// the software case also emit the CFI needed to unwind the SCS pointer.
//
// For now we use x3, a.k.a gp, as pointer to shadow call stack.
// User should not use x3 in their asm.
static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  // We check Zimop instead of (Zimop || Zcmop) to determine whether HW shadow
  // stack is available despite the fact that sspush/sspopchk both have a
  // compressed form, because if only Zcmop is available, we would need to
  // reserve X5 due to c.sspopchk only takes X5 and we currently do not support
  // using X5 as the return address register.
  // However, we can still aggressively use c.sspush x1 if zcmop is available.
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute(Kind: "hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Kind: Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo();

  // Do not save RA to the SCS if it's not saved to the regular stack,
  // i.e. RA is not at risk of being overwritten.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          Range&: CSI, P: [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    if (STI.hasStdExtZcmop()) {
      static_assert(RAReg == RISCV::X1, "C.SSPUSH only accepts X1");
      BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: RISCV::PseudoMOP_C_SSPUSH));
    } else {
      BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: RISCV::PseudoMOP_SSPUSH)).addReg(RegNo: RAReg);
    }
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Store return address to shadow call stack
  // addi gp, gp, [4|8]
  // s[w|d] ra, -[4|8](gp)
  BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ADDI))
      .addReg(RegNo: SCSPReg, Flags: RegState::Define)
      .addReg(RegNo: SCSPReg)
      .addImm(Val: SlotSize)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
      .addReg(RegNo: RAReg)
      .addReg(RegNo: SCSPReg)
      .addImm(Val: -SlotSize)
      .setMIFlag(MachineInstr::FrameSetup);

  if (!needsDwarfCFI(MF))
    return;

  // Emit a CFI instruction that causes SlotSize to be subtracted from the value
  // of the shadow stack pointer when unwinding past this frame.
  char DwarfSCSReg = TRI->getDwarfRegNum(Reg: SCSPReg, /*IsEH*/ isEH: true);
  assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X3).");

  // -SlotSize encoded as a single-byte SLEB128 (hence the 0x7f mask; the value
  // fits because SlotSize is 4 or 8).
  char Offset = static_cast<char>(-SlotSize) & 0x7f;
  const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      DwarfSCSReg, // register
      2,           // length
      static_cast<char>(unsigned(dwarf::DW_OP_breg0 + DwarfSCSReg)),
      Offset, // addend (sleb128)
  };

  CFIInstBuilder(MBB, MI, MachineInstr::FrameSetup)
      .buildEscape(Bytes: StringRef(CFIInst, sizeof(CFIInst)));
}
180
// Emit the shadow-call-stack epilogue: either validate RA against the
// hardware shadow stack (sspopchk) or reload RA from the software shadow
// stack and pop it, restoring the SCS pointer's CFI state. Mirrors
// emitSCSPrologue() above.
static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute(Kind: "hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Kind: Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  // See emitSCSPrologue() above.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          Range&: CSI, P: [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: RISCV::PseudoMOP_SSPOPCHK)).addReg(RegNo: RAReg);
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Load return address from shadow call stack
  // l[w|d] ra, -[4|8](gp)
  // addi gp, gp, -[4|8]
  BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::LD : RISCV::LW))
      .addReg(RegNo: RAReg, Flags: RegState::Define)
      .addReg(RegNo: SCSPReg)
      .addImm(Val: -SlotSize)
      .setMIFlag(MachineInstr::FrameDestroy);
  BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ADDI))
      .addReg(RegNo: SCSPReg, Flags: RegState::Define)
      .addReg(RegNo: SCSPReg)
      .addImm(Val: -SlotSize)
      .setMIFlag(MachineInstr::FrameDestroy);
  if (needsDwarfCFI(MF)) {
    // Restore the SCS pointer
    CFIInstBuilder(MBB, MI, MachineInstr::FrameDestroy).buildRestore(Reg: SCSPReg);
  }
}
226
// Insert instruction to swap mscratchsw with sp. Used on entry/exit of SiFive
// CLIC stack-swapping interrupt handlers; the single CSRRW both saves the
// interrupted SP into sf.mscratchcsw and installs the handler stack.
static void emitSiFiveCLICStackSwap(MachineFunction &MF, MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFiveStackSwapInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  assert(STI.hasVendorXSfmclic() && "Stack Swapping Requires XSfmclic");

  // csrrw sp, sf.mscratchcsw, sp
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRW))
      .addReg(RegNo: SPReg, Flags: RegState::Define)
      .addImm(Val: RISCVSysReg::sf_mscratchcsw)
      .addReg(RegNo: SPReg, Flags: RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

  // FIXME: CFI Information for this swap.
}
249
// For SiFive preemptible interrupt handlers, reserve two extra GPR-sized spill
// slots used by emitSiFiveCLICPreemptibleSaves to preserve X8/X9 while they
// temporarily hold mcause/mepc. No-op for other functions.
static void
createSiFivePreemptibleInterruptFrameEntries(MachineFunction &MF,
                                             RISCVMachineFunctionInfo &RVFI) {
  if (!RVFI.isSiFivePreemptibleInterrupt(MF))
    return;

  const TargetRegisterClass &RC = RISCV::GPRRegClass;
  const TargetRegisterInfo &TRI =
      *MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Create two frame objects for spilling X8 and X9, which will be done in
  // `emitSiFiveCLICPreemptibleSaves`. This is in addition to any other stack
  // objects we might have for X8 and X9, as they might be saved twice.
  for (int I = 0; I < 2; ++I) {
    int FI = MFI.CreateStackObject(Size: TRI.getSpillSize(RC), Alignment: TRI.getSpillAlign(RC),
                                   isSpillSlot: true);
    RVFI.pushInterruptCSRFrameIndex(FI);
  }
}
270
// For SiFive preemptible interrupt handlers: spill X8/X9 to their dedicated
// frame slots, stash mcause/mepc into X8/X9 (so a nested interrupt cannot
// clobber them), then re-enable interrupts via mstatus.MIE.
static void emitSiFiveCLICPreemptibleSaves(MachineFunction &MF,
                                           MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.

  // X8 and X9 might be stored into the stack twice, initially into the
  // `interruptCSRFrameIndex` here, and then maybe again into their CSI frame
  // index.
  //
  // This is done instead of telling the register allocator that we need two
  // VRegs to store the value of `mcause` and `mepc` through the instruction,
  // which affects other passes.
  TII->storeRegToStackSlot(MBB, MBBI, SrcReg: RISCV::X8, /* IsKill=*/true,
                           FrameIndex: RVFI->getInterruptCSRFrameIndex(Idx: 0),
                           RC: &RISCV::GPRRegClass, VReg: Register(),
                           Flags: MachineInstr::FrameSetup);
  TII->storeRegToStackSlot(MBB, MBBI, SrcReg: RISCV::X9, /* IsKill=*/true,
                           FrameIndex: RVFI->getInterruptCSRFrameIndex(Idx: 1),
                           RC: &RISCV::GPRRegClass, VReg: Register(),
                           Flags: MachineInstr::FrameSetup);

  // Put `mcause` into X8 (s0), and `mepc` into X9 (s1). If either of these are
  // used in the function, then they will appear in `getUnmanagedCSI` and will
  // be saved again.
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRS))
      .addReg(RegNo: RISCV::X8, Flags: RegState::Define)
      .addImm(Val: RISCVSysReg::mcause)
      .addReg(RegNo: RISCV::X0)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRS))
      .addReg(RegNo: RISCV::X9, Flags: RegState::Define)
      .addImm(Val: RISCVSysReg::mepc)
      .addReg(RegNo: RISCV::X0)
      .setMIFlag(MachineInstr::FrameSetup);

  // Enable interrupts by setting mstatus.MIE (bit 3).
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRSI))
      .addReg(RegNo: RISCV::X0, Flags: RegState::Define)
      .addImm(Val: RISCVSysReg::mstatus)
      .addImm(Val: 8)
      .setMIFlag(MachineInstr::FrameSetup);
}
322
// Inverse of emitSiFiveCLICPreemptibleSaves: disable interrupts, write
// mepc/mcause back from X9/X8, then reload X8/X9 from their dedicated frame
// slots. The reload order is the reverse of the save order.
static void emitSiFiveCLICPreemptibleRestores(MachineFunction &MF,
                                              MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator MBBI,
                                              const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.

  // Disable interrupts by clearing mstatus.MIE (bit 3).
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRCI))
      .addReg(RegNo: RISCV::X0, Flags: RegState::Define)
      .addImm(Val: RISCVSysReg::mstatus)
      .addImm(Val: 8)
      .setMIFlag(MachineInstr::FrameSetup);

  // Restore `mepc` from x9 (s1), and `mcause` from x8 (s0). If either were used
  // in the function, they have already been restored once, so now have the
  // value stored in `emitSiFiveCLICPreemptibleSaves`.
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRW))
      .addReg(RegNo: RISCV::X0, Flags: RegState::Define)
      .addImm(Val: RISCVSysReg::mepc)
      .addReg(RegNo: RISCV::X9, Flags: RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRW))
      .addReg(RegNo: RISCV::X0, Flags: RegState::Define)
      .addImm(Val: RISCVSysReg::mcause)
      .addReg(RegNo: RISCV::X8, Flags: RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

  // X8 and X9 need to be restored to their values on function entry, which we
  // saved onto the stack in `emitSiFiveCLICPreemptibleSaves`.
  TII->loadRegFromStackSlot(MBB, MBBI, DstReg: RISCV::X9,
                            FrameIndex: RVFI->getInterruptCSRFrameIndex(Idx: 1),
                            RC: &RISCV::GPRRegClass, VReg: Register(),
                            SubReg: RISCV::NoSubRegister, Flags: MachineInstr::FrameSetup);
  TII->loadRegFromStackSlot(MBB, MBBI, DstReg: RISCV::X8,
                            FrameIndex: RVFI->getInterruptCSRFrameIndex(Idx: 0),
                            RC: &RISCV::GPRRegClass, VReg: Register(),
                            SubReg: RISCV::NoSubRegister, Flags: MachineInstr::FrameSetup);
}
369
370// Get the ID of the libcall used for spilling and restoring callee saved
371// registers. The ID is representative of the number of registers saved or
372// restored by the libcall, except it is zero-indexed - ID 0 corresponds to a
373// single register.
374static int getLibCallID(const MachineFunction &MF,
375 const std::vector<CalleeSavedInfo> &CSI) {
376 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
377
378 if (CSI.empty() || !RVFI->useSaveRestoreLibCalls(MF))
379 return -1;
380
381 MCRegister MaxReg;
382 for (auto &CS : CSI)
383 // assignCalleeSavedSpillSlots assigns negative frame indexes to
384 // registers which can be saved by libcall.
385 if (CS.getFrameIdx() < 0)
386 MaxReg = std::max(a: MaxReg.id(), b: CS.getReg().id());
387
388 if (!MaxReg)
389 return -1;
390
391 switch (MaxReg.id()) {
392 default:
393 llvm_unreachable("Something has gone wrong!");
394 // clang-format off
395 case /*s11*/ RISCV::X27: return 12;
396 case /*s10*/ RISCV::X26: return 11;
397 case /*s9*/ RISCV::X25: return 10;
398 case /*s8*/ RISCV::X24: return 9;
399 case /*s7*/ RISCV::X23: return 8;
400 case /*s6*/ RISCV::X22: return 7;
401 case /*s5*/ RISCV::X21: return 6;
402 case /*s4*/ RISCV::X20: return 5;
403 case /*s3*/ RISCV::X19: return 4;
404 case /*s2*/ RISCV::X18: return 3;
405 case /*s1*/ RISCV::X9: return 2;
406 case /*s0*/ FPReg: return 1;
407 case /*ra*/ RAReg: return 0;
408 // clang-format on
409 }
410}
411
412// Get the name of the libcall used for spilling callee saved registers.
413// If this function will not use save/restore libcalls, then return a nullptr.
414static const char *
415getSpillLibCallName(const MachineFunction &MF,
416 const std::vector<CalleeSavedInfo> &CSI) {
417 static const char *const SpillLibCalls[] = {
418 "__riscv_save_0",
419 "__riscv_save_1",
420 "__riscv_save_2",
421 "__riscv_save_3",
422 "__riscv_save_4",
423 "__riscv_save_5",
424 "__riscv_save_6",
425 "__riscv_save_7",
426 "__riscv_save_8",
427 "__riscv_save_9",
428 "__riscv_save_10",
429 "__riscv_save_11",
430 "__riscv_save_12"
431 };
432
433 int LibCallID = getLibCallID(MF, CSI);
434 if (LibCallID == -1)
435 return nullptr;
436 return SpillLibCalls[LibCallID];
437}
438
439// Get the name of the libcall used for restoring callee saved registers.
440// If this function will not use save/restore libcalls, then return a nullptr.
441static const char *
442getRestoreLibCallName(const MachineFunction &MF,
443 const std::vector<CalleeSavedInfo> &CSI) {
444 static const char *const RestoreLibCalls[] = {
445 "__riscv_restore_0",
446 "__riscv_restore_1",
447 "__riscv_restore_2",
448 "__riscv_restore_3",
449 "__riscv_restore_4",
450 "__riscv_restore_5",
451 "__riscv_restore_6",
452 "__riscv_restore_7",
453 "__riscv_restore_8",
454 "__riscv_restore_9",
455 "__riscv_restore_10",
456 "__riscv_restore_11",
457 "__riscv_restore_12"
458 };
459
460 int LibCallID = getLibCallID(MF, CSI);
461 if (LibCallID == -1)
462 return nullptr;
463 return RestoreLibCalls[LibCallID];
464}
465
466// Get the max reg of Push/Pop for restoring callee saved registers.
467static unsigned getNumPushPopRegs(const std::vector<CalleeSavedInfo> &CSI) {
468 unsigned NumPushPopRegs = 0;
469 for (auto &CS : CSI) {
470 auto *FII = llvm::find_if(Range: FixedCSRFIMap,
471 P: [&](MCPhysReg P) { return P == CS.getReg(); });
472 if (FII != std::end(arr: FixedCSRFIMap)) {
473 unsigned RegNum = std::distance(first: std::begin(arr: FixedCSRFIMap), last: FII);
474 NumPushPopRegs = std::max(a: NumPushPopRegs, b: RegNum + 1);
475 }
476 }
477 assert(NumPushPopRegs != 12 && "x26 requires x27 to also be pushed");
478 return NumPushPopRegs;
479}
480
// Return true if the specified function should have a dedicated frame
// pointer register. This is true if frame pointer elimination is
// disabled, if it needs dynamic stack realignment, if the function has
// variable sized allocas, or if the frame address is taken.
bool RISCVFrameLowering::hasFPImpl(const MachineFunction &MF) const {
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) ||
      RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
      MFI.isFrameAddressTaken())
    return true;

  // With large callframes around we may need to use FP to access the scavenging
  // emergency spillslot.
  //
  // We calculate the MaxCallFrameSize at the end of isel so this value should
  // be stable for the whole post-isel MIR pipeline.
  //
  // NOTE: The idea of forcing a frame pointer is copied from AArch64, but they
  //       conservatively return true when the call frame size has not been
  //       computed yet. On RISC-V that caused MachineOutliner tests to fail the
  //       MachineVerifier due to outlined functions not computing max call frame
  //       size thus the frame pointer would always be reserved.
  // 2047 is the largest offset reachable by a 12-bit signed immediate.
  if (MFI.isMaxCallFrameSizeComputed() && MFI.getMaxCallFrameSize() > 2047)
    return true;

  return false;
}
510
511bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
512 const MachineFrameInfo &MFI = MF.getFrameInfo();
513 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
514
515 // If we do not reserve stack space for outgoing arguments in prologue,
516 // we will adjust the stack pointer before call instruction. After the
517 // adjustment, we can not use SP to access the stack objects for the
518 // arguments. Instead, use BP to access these stack objects.
519 return (MFI.hasVarSizedObjects() ||
520 (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
521 MFI.getMaxCallFrameSize() != 0))) &&
522 TRI->hasStackRealignment(MF);
523}
524
// Determines the size of the frame and maximum call frame size.
// Rounds the frame size up to the stack alignment (after enforcing the QCI
// interrupt minimum) and records any padding required to align the RVV area.
void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t FrameSize = MFI.getStackSize();

  // QCI Interrupts use at least 96 bytes of stack space
  if (RVFI->useQCIInterrupt(MF))
    FrameSize = std::max(a: FrameSize, b: QCIInterruptPushAmount);

  // Get the alignment.
  Align StackAlign = getStackAlign();

  // Make sure the frame is aligned.
  FrameSize = alignTo(Size: FrameSize, A: StackAlign);

  // Update frame info.
  MFI.setStackSize(FrameSize);

  // When using SP or BP to access stack objects, we may require extra padding
  // to ensure the bottom of the RVV stack is correctly aligned within the main
  // stack. We calculate this as the amount required to align the scalar local
  // variable section up to the RVV alignment.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  if (RVFI->getRVVStackSize() && (!hasFP(MF) || TRI->hasStackRealignment(MF))) {
    int ScalarLocalVarSize = FrameSize - RVFI->getCalleeSavedStackSize() -
                             RVFI->getVarArgsSaveSize();
    if (auto RVVPadding =
            offsetToAlignment(Value: ScalarLocalVarSize, Alignment: RVFI->getRVVStackAlign()))
      RVFI->setRVVPadding(RVVPadding);
  }
}
559
560// Returns the stack size including RVV padding (when required), rounded back
561// up to the required stack alignment.
562uint64_t RISCVFrameLowering::getStackSizeWithRVVPadding(
563 const MachineFunction &MF) const {
564 const MachineFrameInfo &MFI = MF.getFrameInfo();
565 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
566 return alignTo(Size: MFI.getStackSize() + RVFI->getRVVPadding(), A: getStackAlign());
567}
568
569static SmallVector<CalleeSavedInfo, 8>
570getUnmanagedCSI(const MachineFunction &MF,
571 const std::vector<CalleeSavedInfo> &CSI) {
572 const MachineFrameInfo &MFI = MF.getFrameInfo();
573 SmallVector<CalleeSavedInfo, 8> NonLibcallCSI;
574
575 for (auto &CS : CSI) {
576 int FI = CS.getFrameIdx();
577 if (FI >= 0 && MFI.getStackID(ObjectIdx: FI) == TargetStackID::Default)
578 NonLibcallCSI.push_back(Elt: CS);
579 }
580
581 return NonLibcallCSI;
582}
583
584static SmallVector<CalleeSavedInfo, 8>
585getRVVCalleeSavedInfo(const MachineFunction &MF,
586 const std::vector<CalleeSavedInfo> &CSI) {
587 const MachineFrameInfo &MFI = MF.getFrameInfo();
588 SmallVector<CalleeSavedInfo, 8> RVVCSI;
589
590 for (auto &CS : CSI) {
591 int FI = CS.getFrameIdx();
592 if (FI >= 0 && MFI.getStackID(ObjectIdx: FI) == TargetStackID::ScalableVector)
593 RVVCSI.push_back(Elt: CS);
594 }
595
596 return RVVCSI;
597}
598
599static SmallVector<CalleeSavedInfo, 8>
600getPushOrLibCallsSavedInfo(const MachineFunction &MF,
601 const std::vector<CalleeSavedInfo> &CSI) {
602 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
603
604 SmallVector<CalleeSavedInfo, 8> PushOrLibCallsCSI;
605 if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
606 return PushOrLibCallsCSI;
607
608 for (const auto &CS : CSI) {
609 if (RVFI->useQCIInterrupt(MF)) {
610 // Some registers are saved by both `QC.C.MIENTER(.NEST)` and
611 // `QC.CM.PUSH(FP)`. In these cases, prioritise the CFI info that points
612 // to the versions saved by `QC.C.MIENTER(.NEST)` which is what FP
613 // unwinding would use.
614 if (llvm::is_contained(Range: llvm::make_first_range(c: FixedCSRFIQCIInterruptMap),
615 Element: CS.getReg()))
616 continue;
617 }
618
619 if (llvm::is_contained(Range: FixedCSRFIMap, Element: CS.getReg()))
620 PushOrLibCallsCSI.push_back(Elt: CS);
621 }
622
623 return PushOrLibCallsCSI;
624}
625
626static SmallVector<CalleeSavedInfo, 8>
627getQCISavedInfo(const MachineFunction &MF,
628 const std::vector<CalleeSavedInfo> &CSI) {
629 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
630
631 SmallVector<CalleeSavedInfo, 8> QCIInterruptCSI;
632 if (!RVFI->useQCIInterrupt(MF))
633 return QCIInterruptCSI;
634
635 for (const auto &CS : CSI) {
636 if (llvm::is_contained(Range: llvm::make_first_range(c: FixedCSRFIQCIInterruptMap),
637 Element: CS.getReg()))
638 QCIInterruptCSI.push_back(Elt: CS);
639 }
640
641 return QCIInterruptCSI;
642}
643
// Allocate `Amount` bytes (a multiple of the RVV block size, scaled by VLENB
// at runtime) for the RVV stack area, probing each page via the
// PROBED_STACKALLOC_RVV pseudo. Optionally emits CFI that temporarily bases
// the CFA on the scratch register while SP is in motion.
void RISCVFrameLowering::allocateAndProbeStackForRVV(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t Amount,
    MachineInstr::MIFlag Flag, bool EmitCFI, bool DynAllocation) const {
  assert(Amount != 0 && "Did not need to adjust stack pointer for RVV.");

  // Emit a variable-length allocation probing loop.

  // Get VLEN in TargetReg
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  Register TargetReg = RISCV::X6;
  uint32_t NumOfVReg = Amount / RISCV::RVVBytesPerBlock;
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::PseudoReadVLENB), DestReg: TargetReg)
      .setMIFlag(Flag);
  TII->mulImm(MF, MBB, II: MBBI, DL, DestReg: TargetReg, Amt: NumOfVReg, Flag);

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(Reg: TargetReg, Offset: -Amount);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::PROBED_STACKALLOC_RVV))
      .addReg(RegNo: TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(Reg: SPReg);
  }

  // SUB SP, SP, T1
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB), DestReg: SPReg)
      .addReg(RegNo: SPReg)
      .addReg(RegNo: TargetReg)
      .setMIFlag(Flag);

  // If we have a dynamic allocation later we need to probe any residuals.
  if (DynAllocation) {
    // s[d|w] zero, 0(sp) — touch the new stack top.
    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: STI.is64Bit() ? RISCV::SD : RISCV::SW))
        .addReg(RegNo: RISCV::X0)
        .addReg(RegNo: SPReg)
        .addImm(Val: 0)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}
690
// Appends the DWARF expression bytes computing
//   <current top of Expr> + FixedOffset + ScalableOffset * VLENB
// to Expr, and mirrors the arithmetic in human-readable form into Comment.
static void appendScalableVectorExpression(const TargetRegisterInfo &TRI,
                                           SmallVectorImpl<char> &Expr,
                                           StackOffset Offset,
                                           llvm::raw_string_ostream &Comment) {
  int64_t FixedOffset = Offset.getFixed();
  int64_t ScalableOffset = Offset.getScalable();
  unsigned DwarfVLenB = TRI.getDwarfRegNum(Reg: RISCV::VLENB, isEH: true);
  // Add the fixed part only when non-zero: `const FixedOffset; plus`.
  if (FixedOffset) {
    Expr.push_back(Elt: dwarf::DW_OP_consts);
    appendLEB128<LEB128Sign::Signed>(Buffer&: Expr, Value: FixedOffset);
    Expr.push_back(Elt: (uint8_t)dwarf::DW_OP_plus);
    Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(i: FixedOffset);
  }

  // `const ScalableOffset`.
  Expr.push_back(Elt: (uint8_t)dwarf::DW_OP_consts);
  appendLEB128<LEB128Sign::Signed>(Buffer&: Expr, Value: ScalableOffset);

  // `bregx VLENB, 0` — push the runtime value of the VLENB register.
  Expr.push_back(Elt: (uint8_t)dwarf::DW_OP_bregx);
  appendLEB128<LEB128Sign::Unsigned>(Buffer&: Expr, Value: DwarfVLenB);
  Expr.push_back(Elt: 0);

  // `mul; plus` — scale and accumulate.
  Expr.push_back(Elt: (uint8_t)dwarf::DW_OP_mul);
  Expr.push_back(Elt: (uint8_t)dwarf::DW_OP_plus);

  Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(i: ScalableOffset)
          << " * vlenb";
}
718
// Builds a DW_CFA_def_cfa_expression defining the CFA as
// (Reg + FixedOffset + ScalableOffset * VLENB), needed whenever the frame
// contains a scalable (RVV) component that a plain register+offset CFA
// cannot describe.
static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
                                               Register Reg,
                                               StackOffset Offset) {
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  // Build up the expression (Reg + FixedOffset + ScalableOffset * VLENB).
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, isEH: true);
  Expr.push_back(Elt: (uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
  Expr.push_back(Elt: 0);
  if (Reg == SPReg)
    Comment << "sp";
  else
    Comment << printReg(Reg, TRI: &TRI);

  appendScalableVectorExpression(TRI, Expr, Offset, Comment);

  // Wrap the expression in the CFA escape: opcode, ULEB length, body.
  SmallString<64> DefCfaExpr;
  DefCfaExpr.push_back(Elt: dwarf::DW_CFA_def_cfa_expression);
  appendLEB128<LEB128Sign::Unsigned>(Buffer&: DefCfaExpr, Value: Expr.size());
  DefCfaExpr.append(RHS: Expr.str());

  return MCCFIInstruction::createEscape(L: nullptr, Vals: DefCfaExpr.str(), Loc: SMLoc(),
                                        Comment: Comment.str());
}
745
// Builds a DW_CFA_expression recording that `Reg` is saved at
// CFA + FixedOffset + ScalableOffset * VLENB — used for callee saves placed
// in the scalable (RVV) region of the frame.
static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
                                           Register Reg, StackOffset Offset) {
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  Comment << printReg(Reg, TRI: &TRI) << " @ cfa";

  // Build up the expression (FixedOffset + ScalableOffset * VLENB).
  appendScalableVectorExpression(TRI, Expr, Offset, Comment);

  // Wrap in the escape: opcode, ULEB register, ULEB length, body.
  SmallString<64> DefCfaExpr;
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, isEH: true);
  DefCfaExpr.push_back(Elt: dwarf::DW_CFA_expression);
  appendLEB128<LEB128Sign::Unsigned>(Buffer&: DefCfaExpr, Value: DwarfReg);
  appendLEB128<LEB128Sign::Unsigned>(Buffer&: DefCfaExpr, Value: Expr.size());
  DefCfaExpr.append(RHS: Expr.str());

  return MCCFIInstruction::createEscape(L: nullptr, Vals: DefCfaExpr.str(), Loc: SMLoc(),
                                        Comment: Comment.str());
}
767
// Allocate stack space and probe it if necessary.
//
// `Offset` is the number of bytes to allocate here; `RealStackSize` is the
// final CFA offset to report once the allocation completes (it may exceed
// `Offset` when part of the frame was already allocated). When `NeedProbe`
// is set, every `ProbeSize` bytes of new stack is touched; `DynAllocation`
// additionally forces a probe of the final SP for later dynamic allocas.
// Three strategies are used, by increasing size: a single adjustment, an
// unrolled probe sequence, or a runtime probe loop.
void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       MachineFunction &MF, uint64_t Offset,
                                       uint64_t RealStackSize, bool EmitCFI,
                                       bool NeedProbe, uint64_t ProbeSize,
                                       bool DynAllocation,
                                       MachineInstr::MIFlag Flag) const {
  DebugLoc DL;
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  bool IsRV64 = STI.is64Bit();
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);

  // Simply allocate the stack if it's not big enough to require a probe.
  if (!NeedProbe || Offset <= ProbeSize) {
    RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: -Offset),
                  Flag, RequiredAlign: getStackAlign());

    if (EmitCFI)
      CFIBuilder.buildDefCFAOffset(Offset: RealStackSize);

    if (NeedProbe && DynAllocation) {
      // s[d|w] zero, 0(sp)
      BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RegNo: RISCV::X0)
          .addReg(RegNo: SPReg)
          .addImm(Val: 0)
          .setMIFlags(Flag);
    }

    return;
  }

  // Unroll the probe loop depending on the number of iterations.
  // Fewer than five iterations: emit a straight-line adjust+store sequence.
  if (Offset < ProbeSize * 5) {
    // Portion of RealStackSize already allocated before this call; added to
    // each intermediate CFA offset so unwind info stays correct mid-sequence.
    uint64_t CFAAdjust = RealStackSize - Offset;

    uint64_t CurrentOffset = 0;
    while (CurrentOffset + ProbeSize <= Offset) {
      RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getFixed(Fixed: -ProbeSize), Flag, RequiredAlign: getStackAlign());
      // s[d|w] zero, 0(sp)
      BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RegNo: RISCV::X0)
          .addReg(RegNo: SPReg)
          .addImm(Val: 0)
          .setMIFlags(Flag);

      CurrentOffset += ProbeSize;
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(Offset: CurrentOffset + CFAAdjust);
    }

    uint64_t Residual = Offset - CurrentOffset;
    if (Residual) {
      RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getFixed(Fixed: -Residual), Flag, RequiredAlign: getStackAlign());
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(Offset: RealStackSize);

      if (DynAllocation) {
        // s[d|w] zero, 0(sp)
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
            .addReg(RegNo: RISCV::X0)
            .addReg(RegNo: SPReg)
            .addImm(Val: 0)
            .setMIFlags(Flag);
      }
    }

    return;
  }

  // Emit a variable-length allocation probing loop.
  uint64_t RoundedSize = alignDown(Value: Offset, Align: ProbeSize);
  uint64_t Residual = Offset - RoundedSize;

  Register TargetReg = RISCV::X6;
  // SUB TargetReg, SP, RoundedSize
  RI->adjustReg(MBB, II: MBBI, DL, DestReg: TargetReg, SrcReg: SPReg,
                Offset: StackOffset::getFixed(Fixed: -RoundedSize), Flag, RequiredAlign: getStackAlign());

  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(Reg: TargetReg, Offset: RoundedSize);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::PROBED_STACKALLOC)).addReg(RegNo: TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(Reg: SPReg);
  }

  if (Residual) {
    RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: -Residual),
                  Flag, RequiredAlign: getStackAlign());
    if (DynAllocation) {
      // s[d|w] zero, 0(sp)
      BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RegNo: RISCV::X0)
          .addReg(RegNo: SPReg)
          .addImm(Val: 0)
          .setMIFlags(Flag);
    }
  }

  if (EmitCFI)
    CFIBuilder.buildDefCFAOffset(Offset);
}
880
881static bool isPush(unsigned Opcode) {
882 switch (Opcode) {
883 case RISCV::CM_PUSH:
884 case RISCV::QC_CM_PUSH:
885 case RISCV::QC_CM_PUSHFP:
886 return true;
887 default:
888 return false;
889 }
890}
891
892static bool isPop(unsigned Opcode) {
893 // There are other pops but these are the only ones introduced during this
894 // pass.
895 switch (Opcode) {
896 case RISCV::CM_POP:
897 case RISCV::QC_CM_POP:
898 return true;
899 default:
900 return false;
901 }
902}
903
904static unsigned getPushOpcode(RISCVMachineFunctionInfo::PushPopKind Kind,
905 bool UpdateFP) {
906 switch (Kind) {
907 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
908 return RISCV::CM_PUSH;
909 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
910 return UpdateFP ? RISCV::QC_CM_PUSHFP : RISCV::QC_CM_PUSH;
911 default:
912 llvm_unreachable("Unhandled PushPopKind");
913 }
914}
915
916static unsigned getPopOpcode(RISCVMachineFunctionInfo::PushPopKind Kind) {
917 // There are other pops but they are introduced later by the Push/Pop
918 // Optimizer.
919 switch (Kind) {
920 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
921 return RISCV::CM_POP;
922 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
923 return RISCV::QC_CM_POP;
924 default:
925 llvm_unreachable("Unhandled PushPopKind");
926 }
927}
928
// Emit the function prologue. In order this: swaps in the SiFive CLIC stack,
// emits the shadow-call-stack prologue, allocates the main stack frame
// (possibly split into two SP adjustments and/or folded into a compressed
// push), saves SiFive CLIC CSRs, materializes the frame pointer, allocates
// the RVV (scalable-vector) region, and finally realigns SP / sets up the
// base pointer when required. CFI directives are interleaved so unwind info
// stays accurate after every step.
void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  MachineBasicBlock::iterator MBBI = MBB.begin();

  Register BPReg = RISCVABI::getBPReg();

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // SiFive CLIC needs to swap `sp` into `sf.mscratchcsw`
  emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);

  // Emit prologue for shadow call stack.
  emitSCSPrologue(MF, MBB, MI: MBBI, DL);

  // We keep track of the first instruction because it might be a
  // `(QC.)CM.PUSH(FP)`, and we may need to adjust the immediate rather than
  // inserting an `addi sp, sp, -N*16`
  auto PossiblePush = MBBI;

  // Skip past all callee-saved register spill instructions.
  while (MBBI != MBB.end() && MBBI->getFlag(Flag: MachineInstr::FrameSetup))
    ++MBBI;

  // Determine the correct frame layout
  determineFrameLayout(MF);

  const auto &CSI = MFI.getCalleeSavedInfo();

  // Skip to before the spills of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  MBBI = std::prev(x: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size() +
                              getUnmanagedCSI(MF, CSI).size());
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  bool NeedsDwarfCFI = needsDwarfCFI(MF);

  // If libcalls are used to spill and restore callee-saved registers, the frame
  // has two sections; the opaque section managed by the libcalls, and the
  // section managed by MachineFrameInfo which can also hold callee saved
  // registers in fixed stack slots, both of which have negative frame indices.
  // This gets even more complicated when incoming arguments are passed via the
  // stack, as these too have negative frame indices. An example is detailed
  // below:
  //
  //  | incoming arg | <- FI[-3]
  //  | libcallspill |
  //  | calleespill  | <- FI[-2]
  //  | calleespill  | <- FI[-1]
  //  | this_frame   | <- FI[0]
  //
  // For negative frame indices, the offset from the frame pointer will differ
  // depending on which of these groups the frame index applies to.
  // The following calculates the correct offset knowing the number of callee
  // saved registers spilt by the two methods.
  if (int LibCallRegs = getLibCallID(MF, CSI: MFI.getCalleeSavedInfo()) + 1) {
    // Calculate the size of the frame managed by the libcall. The stack
    // alignment of these libcalls should be the same as how we set it in
    // getABIStackAlignment.
    unsigned LibCallFrameSize =
        alignTo(Size: (STI.getXLen() / 8) * LibCallRegs, A: getStackAlign());
    RVFI->setLibCallStackSize(LibCallFrameSize);

    if (NeedsDwarfCFI) {
      CFIBuilder.buildDefCFAOffset(Offset: LibCallFrameSize);
      for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
        CFIBuilder.buildOffset(Reg: CS.getReg(),
                               Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  // FIXME (note copied from Lanai): This appears to be overallocating. Needs
  // investigation. Get the number of bytes to allocate from the FrameInfo.
  uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
  // Portion of the frame not already carved out by reserved spill mechanisms
  // (libcall spills / compressed push).
  uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
  uint64_t RVVStackSize = RVFI->getRVVStackSize();

  // Early exit if there is no need to allocate on the stack
  if (RealStackSize == 0 && !MFI.adjustsStack() && RVVStackSize == 0)
    return;

  // If the stack pointer has been marked as reserved, then produce an error if
  // the frame requires stack allocation
  if (STI.isRegisterReservedByUser(i: SPReg))
    MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
        MF.getFunction(), "Stack pointer required, but has been reserved."});

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
  // Split the SP adjustment to reduce the offsets of callee saved spill.
  if (FirstSPAdjustAmount) {
    StackSize = FirstSPAdjustAmount;
    RealStackSize = FirstSPAdjustAmount;
  }

  if (RVFI->useQCIInterrupt(MF)) {
    // The function starts with `QC.C.MIENTER(.NEST)`, so the `(QC.)CM.PUSH(FP)`
    // could only be the next instruction.
    ++PossiblePush;

    if (NeedsDwarfCFI) {
      // Insert the CFI metadata before where we think the `(QC.)CM.PUSH(FP)`
      // could be. The PUSH will also get its own CFI metadata for its own
      // modifications, which should come after the PUSH.
      CFIInstBuilder PushCFIBuilder(MBB, PossiblePush,
                                    MachineInstr::FrameSetup);
      PushCFIBuilder.buildDefCFAOffset(Offset: QCIInterruptPushAmount);
      for (const CalleeSavedInfo &CS : getQCISavedInfo(MF, CSI))
        PushCFIBuilder.buildOffset(Reg: CS.getReg(),
                                   Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  if (RVFI->isPushable(MF) && PossiblePush != MBB.end() &&
      isPush(Opcode: PossiblePush->getOpcode())) {
    // Use available stack adjustment in push instruction to allocate additional
    // stack space. Align the stack size down to a multiple of 16. This is
    // needed for RVE.
    // FIXME: Can we increase the stack size to a multiple of 16 instead?
    uint64_t StackAdj =
        std::min(a: alignDown(Value: StackSize, Align: 16), b: static_cast<uint64_t>(48));
    PossiblePush->getOperand(i: 1).setImm(StackAdj);
    StackSize -= StackAdj;

    if (NeedsDwarfCFI) {
      CFIBuilder.buildDefCFAOffset(Offset: RealStackSize - StackSize);
      for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
        CFIBuilder.buildOffset(Reg: CS.getReg(),
                               Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  // Allocate space on the stack if necessary.
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
  bool NeedProbe = TLI->hasInlineStackProbe(MF);
  uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign: getStackAlign());
  bool DynAllocation =
      MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
  if (StackSize != 0)
    allocateStack(MBB, MBBI, MF, Offset: StackSize, RealStackSize, EmitCFI: NeedsDwarfCFI,
                  NeedProbe, ProbeSize, DynAllocation,
                  Flag: MachineInstr::FrameSetup);

  // Save SiFive CLIC CSRs into Stack
  emitSiFiveCLICPreemptibleSaves(MF, MBB, MBBI, DL);

  // The frame pointer is callee-saved, and code has been generated for us to
  // save it to the stack. We need to skip over the storing of callee-saved
  // registers as the frame pointer must be modified after it has been saved
  // to the stack, not before.
  // FIXME: assumes exactly one instruction is used to save each callee-saved
  // register.
  std::advance(i&: MBBI, n: getUnmanagedCSI(MF, CSI).size());
  CFIBuilder.setInsertPoint(MBBI);

  // Iterate over list of callee-saved registers and emit .cfi_offset
  // directives.
  if (NeedsDwarfCFI)
    for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI))
      CFIBuilder.buildOffset(Reg: CS.getReg(),
                             Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));

  // Generate new FP.
  if (hasFP(MF)) {
    if (STI.isRegisterReservedByUser(i: FPReg))
      MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
          MF.getFunction(), "Frame pointer required, but has been reserved."});
    // The frame pointer does need to be reserved from register allocation.
    assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");

    // Some stack management variants automatically keep FP updated, so we don't
    // need an instruction to do so.
    if (!RVFI->hasImplicitFPUpdates(MF)) {
      RI->adjustReg(
          MBB, II: MBBI, DL, DestReg: FPReg, SrcReg: SPReg,
          Offset: StackOffset::getFixed(Fixed: RealStackSize - RVFI->getVarArgsSaveSize()),
          Flag: MachineInstr::FrameSetup, RequiredAlign: getStackAlign());
    }

    if (NeedsDwarfCFI)
      CFIBuilder.buildDefCFA(Reg: FPReg, Offset: RVFI->getVarArgsSaveSize());
  }

  uint64_t SecondSPAdjustAmount = 0;
  // Emit the second SP adjustment after saving callee saved registers.
  if (FirstSPAdjustAmount) {
    SecondSPAdjustAmount = getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
    assert(SecondSPAdjustAmount > 0 &&
           "SecondSPAdjustAmount should be greater than zero");

    allocateStack(MBB, MBBI, MF, Offset: SecondSPAdjustAmount,
                  RealStackSize: getStackSizeWithRVVPadding(MF), EmitCFI: NeedsDwarfCFI && !hasFP(MF),
                  NeedProbe, ProbeSize, DynAllocation,
                  Flag: MachineInstr::FrameSetup);
  }

  if (RVVStackSize) {
    if (NeedProbe) {
      allocateAndProbeStackForRVV(MF, MBB, MBBI, DL, Amount: RVVStackSize,
                                  Flag: MachineInstr::FrameSetup,
                                  EmitCFI: NeedsDwarfCFI && !hasFP(MF), DynAllocation);
    } else {
      // We must keep the stack pointer aligned through any intermediate
      // updates.
      RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getScalable(Scalable: -RVVStackSize),
                    Flag: MachineInstr::FrameSetup, RequiredAlign: getStackAlign());
    }

    if (NeedsDwarfCFI && !hasFP(MF)) {
      // Emit .cfi_def_cfa_expression "sp + StackSize + RVVStackSize * vlenb".
      CFIBuilder.insertCFIInst(CFIInst: createDefCFAExpression(
          TRI: *RI, Reg: SPReg,
          Offset: StackOffset::get(Fixed: getStackSizeWithRVVPadding(MF), Scalable: RVVStackSize / 8)));
    }

    std::advance(i&: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size());
    if (NeedsDwarfCFI)
      emitCalleeSavedRVVPrologCFI(MBB, MI: MBBI, HasFP: hasFP(MF));
  }

  if (hasFP(MF)) {
    // Realign Stack
    const RISCVRegisterInfo *RI = STI.getRegisterInfo();
    if (RI->hasStackRealignment(MF)) {
      Align MaxAlignment = MFI.getMaxAlign();

      const RISCVInstrInfo *TII = STI.getInstrInfo();
      // Realign with a single ANDI when the mask fits a 12-bit immediate;
      // otherwise clear the low bits with an SRLI/SLLI shift pair.
      if (isInt<12>(x: -(int)MaxAlignment.value())) {
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ANDI), DestReg: SPReg)
            .addReg(RegNo: SPReg)
            .addImm(Val: -(int)MaxAlignment.value())
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        unsigned ShiftAmount = Log2(A: MaxAlignment);
        Register VR =
            MF.getRegInfo().createVirtualRegister(RegClass: &RISCV::GPRRegClass);
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::SRLI), DestReg: VR)
            .addReg(RegNo: SPReg)
            .addImm(Val: ShiftAmount)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::SLLI), DestReg: SPReg)
            .addReg(RegNo: VR)
            .addImm(Val: ShiftAmount)
            .setMIFlag(MachineInstr::FrameSetup);
      }
      if (NeedProbe && RVVStackSize == 0) {
        // Do a probe if the align + size allocated just passed the probe size
        // and was not yet probed.
        if (SecondSPAdjustAmount < ProbeSize &&
            SecondSPAdjustAmount + MaxAlignment.value() >= ProbeSize) {
          bool IsRV64 = STI.is64Bit();
          BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
              .addReg(RegNo: RISCV::X0)
              .addReg(RegNo: SPReg)
              .addImm(Val: 0)
              .setMIFlags(MachineInstr::FrameSetup);
        }
      }
      // FP will be used to restore the frame in the epilogue, so we need
      // another base register BP to record SP after re-alignment. SP will
      // track the current stack after allocating variable sized objects.
      if (hasBP(MF)) {
        // move BP, SP
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ADDI), DestReg: BPReg)
            .addReg(RegNo: SPReg)
            .addImm(Val: 0)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  }
}
1210
1211void RISCVFrameLowering::deallocateStack(MachineFunction &MF,
1212 MachineBasicBlock &MBB,
1213 MachineBasicBlock::iterator MBBI,
1214 const DebugLoc &DL,
1215 uint64_t &StackSize,
1216 int64_t CFAOffset) const {
1217 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1218
1219 RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: StackSize),
1220 Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());
1221 StackSize = 0;
1222
1223 if (needsDwarfCFI(MF))
1224 CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
1225 .buildDefCFAOffset(Offset: CFAOffset);
1226}
1227
// Emit the function epilogue, undoing the prologue in reverse: deallocate
// the RVV region, undo the second SP adjustment, restore SP from FP when the
// frame size is not statically known, then deallocate the remaining frame
// (possibly via a compressed pop or a restore libcall) while keeping CFI in
// sync. The callee-saved restore instructions themselves were inserted
// earlier and are only skipped over here.
void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Get the insert location for the epilogue. If there were no terminators in
  // the block, get the last instruction.
  MachineBasicBlock::iterator MBBI = MBB.end();
  DebugLoc DL;
  if (!MBB.empty()) {
    MBBI = MBB.getLastNonDebugInstr();
    if (MBBI != MBB.end())
      DL = MBBI->getDebugLoc();

    MBBI = MBB.getFirstTerminator();

    // Skip to before the restores of all callee-saved registers.
    while (MBBI != MBB.begin() &&
           std::prev(x: MBBI)->getFlag(Flag: MachineInstr::FrameDestroy))
      --MBBI;
  }

  const auto &CSI = MFI.getCalleeSavedInfo();

  // Skip to before the restores of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  auto FirstScalarCSRRestoreInsn =
      std::next(x: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size());
  CFIInstBuilder CFIBuilder(MBB, FirstScalarCSRRestoreInsn,
                            MachineInstr::FrameDestroy);
  bool NeedsDwarfCFI = needsDwarfCFI(MF);

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
  uint64_t RealStackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
                                               : getStackSizeWithRVVPadding(MF);
  uint64_t StackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
                                           : getStackSizeWithRVVPadding(MF) -
                                                 RVFI->getReservedSpillsSize();
  uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
  uint64_t RVVStackSize = RVFI->getRVVStackSize();

  bool RestoreSPFromFP = RI->hasStackRealignment(MF) ||
                         MFI.hasVarSizedObjects() || !hasReservedCallFrame(MF);
  if (RVVStackSize) {
    // If RestoreSPFromFP the stack pointer will be restored using the frame
    // pointer value.
    if (!RestoreSPFromFP)
      RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getScalable(Scalable: RVVStackSize),
                    Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());

    if (NeedsDwarfCFI) {
      if (!hasFP(MF))
        CFIBuilder.buildDefCFA(Reg: SPReg, Offset: RealStackSize);
      emitCalleeSavedRVVEpilogCFI(MBB, MI: FirstScalarCSRRestoreInsn);
    }
  }

  // Undo the second SP adjustment made in the prologue, before the scalar
  // callee-saved restores that assume the smaller offset.
  if (FirstSPAdjustAmount) {
    uint64_t SecondSPAdjustAmount =
        getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
    assert(SecondSPAdjustAmount > 0 &&
           "SecondSPAdjustAmount should be greater than zero");

    // If RestoreSPFromFP the stack pointer will be restored using the frame
    // pointer value.
    if (!RestoreSPFromFP)
      RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getFixed(Fixed: SecondSPAdjustAmount),
                    Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());

    if (NeedsDwarfCFI && !hasFP(MF))
      CFIBuilder.buildDefCFAOffset(Offset: FirstSPAdjustAmount);
  }

  // Restore the stack pointer using the value of the frame pointer. Only
  // necessary if the stack pointer was modified, meaning the stack size is
  // unknown.
  //
  // In order to make sure the stack point is right through the EH region,
  // we also need to restore stack pointer from the frame pointer if we
  // don't preserve stack space within prologue/epilogue for outgoing variables,
  // normally it's just checking the variable sized object is present or not
  // is enough, but we also don't preserve that at prologue/epilogue when
  // have vector objects in stack.
  if (RestoreSPFromFP) {
    assert(hasFP(MF) && "frame pointer should not have been eliminated");
    RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: FPReg,
                  Offset: StackOffset::getFixed(Fixed: -FPOffset), Flag: MachineInstr::FrameDestroy,
                  RequiredAlign: getStackAlign());
  }

  if (NeedsDwarfCFI && hasFP(MF))
    CFIBuilder.buildDefCFA(Reg: SPReg, Offset: RealStackSize);

  // Skip to after the restores of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  MBBI = std::next(x: FirstScalarCSRRestoreInsn, n: getUnmanagedCSI(MF, CSI).size());
  CFIBuilder.setInsertPoint(MBBI);

  if (getLibCallID(MF, CSI) != -1) {
    // tail __riscv_restore_[0-12] instruction is considered as a terminator,
    // therefore it is unnecessary to place any CFI instructions after it. Just
    // deallocate stack if needed and return.
    if (StackSize != 0)
      deallocateStack(MF, MBB, MBBI, DL, StackSize,
                      CFAOffset: RVFI->getLibCallStackSize());

    // Emit epilogue for shadow call stack.
    emitSCSEpilogue(MF, MBB, MI: MBBI, DL);
    return;
  }

  // Recover callee-saved registers.
  if (NeedsDwarfCFI)
    for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI))
      CFIBuilder.buildRestore(Reg: CS.getReg());

  if (RVFI->isPushable(MF) && MBBI != MBB.end() && isPop(Opcode: MBBI->getOpcode())) {
    // Use available stack adjustment in pop instruction to deallocate stack
    // space. Align the stack size down to a multiple of 16. This is needed for
    // RVE.
    // FIXME: Can we increase the stack size to a multiple of 16 instead?
    uint64_t StackAdj =
        std::min(a: alignDown(Value: StackSize, Align: 16), b: static_cast<uint64_t>(48));
    MBBI->getOperand(i: 1).setImm(StackAdj);
    StackSize -= StackAdj;

    if (StackSize != 0)
      deallocateStack(MF, MBB, MBBI, DL, StackSize,
                      /*stack_adj of cm.pop instr*/ CFAOffset: RealStackSize - StackSize);

    auto NextI = next_nodbg(It: MBBI, End: MBB.end());
    if (NextI == MBB.end() || NextI->getOpcode() != RISCV::PseudoRET) {
      ++MBBI;
      if (NeedsDwarfCFI) {
        CFIBuilder.setInsertPoint(MBBI);

        for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
          CFIBuilder.buildRestore(Reg: CS.getReg());

        // Update CFA Offset. If this is a QCI interrupt function, there will
        // be a leftover offset which is deallocated by `QC.C.MILEAVERET`,
        // otherwise getQCIInterruptStackSize() will be 0.
        CFIBuilder.buildDefCFAOffset(Offset: RVFI->getQCIInterruptStackSize());
      }
    }
  }

  emitSiFiveCLICPreemptibleRestores(MF, MBB, MBBI, DL);

  // Deallocate stack if StackSize isn't a zero yet. If this is a QCI interrupt
  // function, there will be a leftover offset which is deallocated by
  // `QC.C.MILEAVERET`, otherwise getQCIInterruptStackSize() will be 0.
  if (StackSize != 0)
    deallocateStack(MF, MBB, MBBI, DL, StackSize,
                    CFAOffset: RVFI->getQCIInterruptStackSize());

  // Emit epilogue for shadow call stack.
  emitSCSEpilogue(MF, MBB, MI: MBBI, DL);

  // SiFive CLIC needs to swap `sf.mscratchcsw` into `sp`
  emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);
}
1400
// Resolve a frame index to a (base register, offset) pair. The base register
// is chosen among SP, FP, and BP depending on whether the object is a
// callee-saved spill slot, whether the stack is realigned, and whether the
// object lives in the scalable-vector (RVV) region; the diagrams below show
// the layout each case assumes.
StackOffset
RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                           Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Callee-saved registers should be referenced relative to the stack
  // pointer (positive offset), otherwise use the frame pointer (negative
  // offset).
  const auto &CSI = getUnmanagedCSI(MF, CSI: MFI.getCalleeSavedInfo());
  int MinCSFI = 0;
  int MaxCSFI = -1;
  StackOffset Offset;
  auto StackID = MFI.getStackID(ObjectIdx: FI);

  assert((StackID == TargetStackID::Default ||
          StackID == TargetStackID::ScalableVector) &&
         "Unexpected stack ID for the frame object.");
  if (StackID == TargetStackID::Default) {
    assert(getOffsetOfLocalArea() == 0 && "LocalAreaOffset is not 0!");
    Offset = StackOffset::getFixed(Fixed: MFI.getObjectOffset(ObjectIdx: FI) +
                                   MFI.getOffsetAdjustment());
  } else if (StackID == TargetStackID::ScalableVector) {
    Offset = StackOffset::getScalable(Scalable: MFI.getObjectOffset(ObjectIdx: FI));
  }

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);

  // NOTE(review): the unmanaged CSI slots are assumed to occupy a contiguous
  // frame-index range bracketed by the first and last entries.
  if (CSI.size()) {
    MinCSFI = CSI[0].getFrameIdx();
    MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
  }

  if (FI >= MinCSFI && FI <= MaxCSFI) {
    FrameReg = SPReg;

    if (FirstSPAdjustAmount)
      Offset += StackOffset::getFixed(Fixed: FirstSPAdjustAmount);
    else
      Offset += StackOffset::getFixed(Fixed: getStackSizeWithRVVPadding(MF));
    return Offset;
  }

  if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(ObjectIdx: FI)) {
    // If the stack was realigned, the frame pointer is set in order to allow
    // SP to be restored, so we need another base register to record the stack
    // after realignment.
    // |--------------------------| -- <-- FP
    // | callee-allocated save    | | <----|
    // | area for register varargs| |      |
    // |--------------------------| |      |
    // | callee-saved registers   | |      |
    // |--------------------------| --     |
    // | realignment (the size of | |      |
    // | this area is not counted | |      |
    // | in MFI.getStackSize())   | |      |
    // |--------------------------| --     |-- MFI.getStackSize()
    // | RVV alignment padding    | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize() but   | |      |
    // | counted in               | |      |
    // | RVFI.getRVVStackSize())  | |      |
    // |--------------------------| --     |
    // | RVV objects              | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize())      | |      |
    // |--------------------------| --     |
    // | padding before RVV       | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize() or in | |      |
    // | RVFI.getRVVStackSize())  | |      |
    // |--------------------------| --     |
    // | scalar local variables   | | <----'
    // |--------------------------| -- <-- BP (if var sized objects present)
    // | VarSize objects          | |
    // |--------------------------| -- <-- SP
    if (hasBP(MF)) {
      FrameReg = RISCVABI::getBPReg();
    } else {
      // VarSize objects must be empty in this case!
      assert(!MFI.hasVarSizedObjects());
      FrameReg = SPReg;
    }
  } else {
    FrameReg = RI->getFrameRegister(MF);
  }

  if (FrameReg == FPReg) {
    Offset += StackOffset::getFixed(Fixed: RVFI->getVarArgsSaveSize());
    // When using FP to access scalable vector objects, we need to minus
    // the frame size.
    //
    // |--------------------------| -- <-- FP
    // | callee-allocated save    | |
    // | area for register varargs| |
    // |--------------------------| |
    // | callee-saved registers   | |
    // |--------------------------| | MFI.getStackSize()
    // | scalar local variables   | |
    // |--------------------------| -- (Offset of RVV objects is from here.)
    // | RVV objects              |
    // |--------------------------|
    // | VarSize objects          |
    // |--------------------------| <-- SP
    if (StackID == TargetStackID::ScalableVector) {
      assert(!RI->hasStackRealignment(MF) &&
             "Can't index across variable sized realign");
      // We don't expect any extra RVV alignment padding, as the stack size
      // and RVV object sections should be correct aligned in their own
      // right.
      assert(MFI.getStackSize() == getStackSizeWithRVVPadding(MF) &&
             "Inconsistent stack layout");
      Offset -= StackOffset::getFixed(Fixed: MFI.getStackSize());
    }
    return Offset;
  }

  // This case handles indexing off both SP and BP.
  // If indexing off SP, there must not be any var sized objects
  assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());

  // When using SP to access frame objects, we need to add RVV stack size.
  //
  // |--------------------------| -- <-- FP
  // | callee-allocated save    | | <----|
  // | area for register varargs| |      |
  // |--------------------------| |      |
  // | callee-saved registers   | |      |
  // |--------------------------| --     |
  // | RVV alignment padding    | |      |
  // | (not counted in          | |      |
  // | MFI.getStackSize() but   | |      |
  // | counted in               | |      |
  // | RVFI.getRVVStackSize())  | |      |
  // |--------------------------| --     |
  // | RVV objects              | |      |-- MFI.getStackSize()
  // | (not counted in          | |      |
  // | MFI.getStackSize())      | |      |
  // |--------------------------| --     |
  // | padding before RVV       | |      |
  // | (not counted in          | |      |
  // | MFI.getStackSize())      | |      |
  // |--------------------------| --     |
  // | scalar local variables   | | <----'
  // |--------------------------| -- <-- BP (if var sized objects present)
  // | VarSize objects          | |
  // |--------------------------| -- <-- SP
  //
  // The total amount of padding surrounding RVV objects is described by
  // RVV->getRVVPadding() and it can be zero. It allows us to align the RVV
  // objects to the required alignment.
  if (MFI.getStackID(ObjectIdx: FI) == TargetStackID::Default) {
    if (MFI.isFixedObjectIndex(ObjectIdx: FI)) {
      assert(!RI->hasStackRealignment(MF) &&
             "Can't index across variable sized realign");
      Offset += StackOffset::get(Fixed: getStackSizeWithRVVPadding(MF),
                                 Scalable: RVFI->getRVVStackSize());
    } else {
      Offset += StackOffset::getFixed(Fixed: MFI.getStackSize());
    }
  } else if (MFI.getStackID(ObjectIdx: FI) == TargetStackID::ScalableVector) {
    // Ensure the base of the RVV stack is correctly aligned: add on the
    // alignment padding.
    int ScalarLocalVarSize = MFI.getStackSize() -
                             RVFI->getCalleeSavedStackSize() -
                             RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
    Offset += StackOffset::get(Fixed: ScalarLocalVarSize, Scalable: RVFI->getRVVStackSize());
  }
  return Offset;
}
1572
1573static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI,
1574 const Register &Reg) {
1575 MCRegister BaseReg = TRI.getSubReg(Reg, Idx: RISCV::sub_vrm1_0);
1576 // If it's not a grouped vector register, it doesn't have subregister, so
1577 // the base register is just itself.
1578 if (!BaseReg.isValid())
1579 BaseReg = Reg;
1580 return BaseReg;
1581}
1582
// Decide which callee-saved registers must be spilled. On top of the generic
// computation this (a) corrects the marking of grouped RVV registers, which
// must only be saved when every member of the group is clobbered, (b) forces
// RA/FP when a frame pointer is used and BP when a base pointer is used,
// (c) pairs X27 with X26 for cm.push/cm.pop, and (d) adds the SiFive
// preemptible-interrupt frame entries.
void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  // In TargetFrameLowering::determineCalleeSaves, any vector register is marked
  // as saved if any of its subregister is clobbered, this is not correct in
  // vector registers. We only want the vector register to be marked as saved
  // if all of its subregisters are clobbered.
  // For example:
  // Original behavior: If v24 is marked, v24m2, v24m4, v24m8 are also marked.
  // Correct behavior: v24m2 is marked only if v24 and v25 are marked.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned CSReg = CSRegs[i];
    // Only vector registers need special care.
    if (!RISCV::VRRegClass.contains(Reg: getRVVBaseRegister(TRI, Reg: CSReg)))
      continue;

    // Discard the generic (any-subreg-clobbered) verdict and recompute below.
    SavedRegs.reset(Idx: CSReg);

    auto SubRegs = TRI.subregs(Reg: CSReg);
    // Set the register and all its subregisters.
    if (!MRI.def_empty(RegNo: CSReg) || MRI.getUsedPhysRegsMask().test(Idx: CSReg)) {
      SavedRegs.set(CSReg);
      for (unsigned Reg : SubRegs)
        SavedRegs.set(Reg);
    }

    // Combine to super register if all of its subregisters are marked.
    if (!SubRegs.empty() && llvm::all_of(Range&: SubRegs, P: [&](unsigned Reg) {
          return SavedRegs.test(Idx: Reg);
        }))
      SavedRegs.set(CSReg);
  }

  // Unconditionally spill RA and FP only if the function uses a frame
  // pointer.
  if (hasFP(MF)) {
    SavedRegs.set(RAReg);
    SavedRegs.set(FPReg);
  }
  // Mark BP as used if function has dedicated base pointer.
  if (hasBP(MF))
    SavedRegs.set(RISCVABI::getBPReg());

  // When using cm.push/pop we must save X27 if we save X26.
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->isPushable(MF) && SavedRegs.test(Idx: RISCV::X26))
    SavedRegs.set(RISCV::X27);

  // SiFive Preemptible Interrupt Handlers need additional frame entries
  createSiFivePreemptibleInterruptFrameEntries(MF, RVFI&: *RVFI);
}
1639
// Assign offsets to all scalable-vector (RVV) stack objects, growing
// downwards within the RVV region. Returns the total RVV stack size
// (a multiple of vscale) and the alignment required for the region.
std::pair<int64_t, Align>
RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Create a buffer of RVV objects to allocate.
  SmallVector<int, 8> ObjectsToAllocate;
  auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
    for (int I = FIBegin, E = FIEnd; I != E; ++I) {
      unsigned StackID = MFI.getStackID(ObjectIdx: I);
      if (StackID != TargetStackID::ScalableVector)
        continue;
      if (MFI.isDeadObjectIndex(ObjectIdx: I))
        continue;

      ObjectsToAllocate.push_back(Elt: I);
    }
  };
  // First push RVV Callee Saved object, then push RVV stack object
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
  if (!RVVCSI.empty())
    pushRVVObjects(RVVCSI[0].getFrameIdx(),
                   RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
  // NOTE(review): the subtraction assumes the RVV CSI objects occupy the last
  // RVVCSI.size() frame indices, so they are excluded from this second scan —
  // confirm against assignCalleeSavedSpillSlots' creation order.
  pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());

  // The minimum alignment is 16 bytes.
  Align RVVStackAlign(16);
  const auto &ST = MF.getSubtarget<RISCVSubtarget>();

  if (!ST.hasVInstructions()) {
    assert(ObjectsToAllocate.empty() &&
           "Can't allocate scalable-vector objects without V instructions");
    return std::make_pair(x: 0, y&: RVVStackAlign);
  }

  // Allocate all RVV locals and spills
  int64_t Offset = 0;
  for (int FI : ObjectsToAllocate) {
    // ObjectSize in bytes.
    int64_t ObjectSize = MFI.getObjectSize(ObjectIdx: FI);
    auto ObjectAlign =
        std::max(a: Align(RISCV::RVVBytesPerBlock), b: MFI.getObjectAlign(ObjectIdx: FI));
    // If the data type is the fractional vector type, reserve one vector
    // register for it.
    if (ObjectSize < RISCV::RVVBytesPerBlock)
      ObjectSize = RISCV::RVVBytesPerBlock;
    // Objects are addressed at negative offsets from the RVV region base.
    Offset = alignTo(Size: Offset + ObjectSize, A: ObjectAlign);
    MFI.setObjectOffset(ObjectIdx: FI, SPOffset: -Offset);
    // Update the maximum alignment of the RVV stack section
    RVVStackAlign = std::max(a: RVVStackAlign, b: ObjectAlign);
  }

  uint64_t StackSize = Offset;

  // Ensure the alignment of the RVV stack. Since we want the most-aligned
  // object right at the bottom (i.e., any padding at the top of the frame),
  // readjust all RVV objects down by the alignment padding.
  // Stack size and offsets are multiples of vscale, stack alignment is in
  // bytes, we can divide stack alignment by minimum vscale to get a maximum
  // stack alignment multiple of vscale.
  auto VScale =
      std::max<uint64_t>(a: ST.getRealMinVLen() / RISCV::RVVBitsPerBlock, b: 1);
  if (auto RVVStackAlignVScale = RVVStackAlign.value() / VScale) {
    if (auto AlignmentPadding =
            offsetToAlignment(Value: StackSize, Alignment: Align(RVVStackAlignVScale))) {
      StackSize += AlignmentPadding;
      for (int FI : ObjectsToAllocate)
        MFI.setObjectOffset(ObjectIdx: FI, SPOffset: MFI.getObjectOffset(ObjectIdx: FI) - AlignmentPadding);
    }
  }

  return std::make_pair(x&: StackSize, y&: RVVStackAlign);
}
1712
1713static unsigned getScavSlotsNumForRVV(MachineFunction &MF) {
1714 // For RVV spill, scalable stack offsets computing requires up to two scratch
1715 // registers
1716 static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
1717
1718 // For RVV spill, non-scalable stack offsets computing requires up to one
1719 // scratch register.
1720 static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
1721
1722 // ADDI instruction's destination register can be used for computing
1723 // offsets. So Scalable stack offsets require up to one scratch register.
1724 static constexpr unsigned ScavSlotsADDIScalableObject = 1;
1725
1726 static constexpr unsigned MaxScavSlotsNumKnown =
1727 std::max(l: {ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
1728 ScavSlotsNumRVVSpillNonScalableObject});
1729
1730 unsigned MaxScavSlotsNum = 0;
1731 if (!MF.getSubtarget<RISCVSubtarget>().hasVInstructions())
1732 return false;
1733 for (const MachineBasicBlock &MBB : MF)
1734 for (const MachineInstr &MI : MBB) {
1735 bool IsRVVSpill = RISCV::isRVVSpill(MI);
1736 for (auto &MO : MI.operands()) {
1737 if (!MO.isFI())
1738 continue;
1739 bool IsScalableVectorID = MF.getFrameInfo().getStackID(ObjectIdx: MO.getIndex()) ==
1740 TargetStackID::ScalableVector;
1741 if (IsRVVSpill) {
1742 MaxScavSlotsNum = std::max(
1743 a: MaxScavSlotsNum, b: IsScalableVectorID
1744 ? ScavSlotsNumRVVSpillScalableObject
1745 : ScavSlotsNumRVVSpillNonScalableObject);
1746 } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
1747 MaxScavSlotsNum =
1748 std::max(a: MaxScavSlotsNum, b: ScavSlotsADDIScalableObject);
1749 }
1750 }
1751 if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
1752 return MaxScavSlotsNumKnown;
1753 }
1754 return MaxScavSlotsNum;
1755}
1756
1757static bool hasRVVFrameObject(const MachineFunction &MF) {
1758 // Originally, the function will scan all the stack objects to check whether
1759 // if there is any scalable vector object on the stack or not. However, it
1760 // causes errors in the register allocator. In issue 53016, it returns false
1761 // before RA because there is no RVV stack objects. After RA, it returns true
1762 // because there are spilling slots for RVV values during RA. It will not
1763 // reserve BP during register allocation and generate BP access in the PEI
1764 // pass due to the inconsistent behavior of the function.
1765 //
1766 // The function is changed to use hasVInstructions() as the return value. It
1767 // is not precise, but it can make the register allocation correct.
1768 //
1769 // FIXME: Find a better way to make the decision or revisit the solution in
1770 // D103622.
1771 //
1772 // Refer to https://github.com/llvm/llvm-project/issues/53016.
1773 return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
1774}
1775
1776static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
1777 const RISCVInstrInfo &TII) {
1778 unsigned FnSize = 0;
1779 for (auto &MBB : MF) {
1780 for (auto &MI : MBB) {
1781 // Far branches over 20-bit offset will be relaxed in branch relaxation
1782 // pass. In the worst case, conditional branches will be relaxed into
1783 // the following instruction sequence. Unconditional branches are
1784 // relaxed in the same way, with the exception that there is no first
1785 // branch instruction.
1786 //
1787 // foo
1788 // bne t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
1789 // sd s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1790 // jump .restore, s11 # 8 bytes
1791 // .rev_cond
1792 // bar
1793 // j .dest_bb # 4 bytes, or 2 bytes with Zca
1794 // .restore:
1795 // ld s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1796 // .dest:
1797 // baz
1798 if (MI.isConditionalBranch())
1799 FnSize += TII.getInstSizeInBytes(MI);
1800 if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
1801 if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZca())
1802 FnSize += 2 + 8 + 2 + 2;
1803 else
1804 FnSize += 4 + 8 + 4 + 4;
1805 continue;
1806 }
1807
1808 FnSize += TII.getInstSizeInBytes(MI);
1809 }
1810 }
1811 return FnSize;
1812}
1813
// Final frame bookkeeping before offsets are frozen: lay out the RVV stack
// region, reserve emergency scavenging slots, and record the callee-saved
// stack size in function info.
void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  const RISCVRegisterInfo *RegInfo =
      MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterClass *RC = &RISCV::GPRRegClass;
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Assign offsets to all scalable-vector objects and cache the results.
  int64_t RVVStackSize;
  Align RVVStackAlign;
  std::tie(args&: RVVStackSize, args&: RVVStackAlign) = assignRVVStackObjectOffsets(MF);

  RVFI->setRVVStackSize(RVVStackSize);
  RVFI->setRVVStackAlign(RVVStackAlign);

  if (hasRVVFrameObject(MF)) {
    // Ensure the entire stack is aligned to at least the RVV requirement: some
    // scalable-vector object alignments are not considered by the
    // target-independent code.
    MFI.ensureMaxAlignment(Alignment: RVVStackAlign);
  }

  unsigned ScavSlotsNum = 0;

  // estimateStackSize has been observed to under-estimate the final stack
  // size, so give ourselves wiggle-room by checking for stack size
  // representable an 11-bit signed field rather than 12-bits.
  if (!isInt<11>(x: MFI.estimateStackSize(MF)))
    ScavSlotsNum = 1;

  // Far branches over 20-bit offset require a spill slot for scratch register.
  bool IsLargeFunction = !isInt<20>(x: estimateFunctionSizeInBytes(MF, TII: *TII));
  if (IsLargeFunction)
    ScavSlotsNum = std::max(a: ScavSlotsNum, b: 1u);

  // RVV loads & stores have no capacity to hold the immediate address offsets
  // so we must always reserve an emergency spill slot if the MachineFunction
  // contains any RVV spills.
  ScavSlotsNum = std::max(a: ScavSlotsNum, b: getScavSlotsNumForRVV(MF));

  // Create GPR-sized spill slots for the register scavenger; for large
  // functions, also remember one slot for branch relaxation scratch use.
  for (unsigned I = 0; I < ScavSlotsNum; I++) {
    int FI = MFI.CreateSpillStackObject(Size: RegInfo->getSpillSize(RC: *RC),
                                        Alignment: RegInfo->getSpillAlign(RC: *RC));
    RS->addScavengingFrameIndex(FI);

    if (IsLargeFunction && RVFI->getBranchRelaxationScratchFrameIndex() == -1)
      RVFI->setBranchRelaxationScratchFrameIndex(FI);
  }

  // Accumulate the default-stack callee-saved spill sizes (on top of any
  // reserved spill area); negative frame indices and non-default stack IDs
  // (e.g. RVV) are excluded.
  unsigned Size = RVFI->getReservedSpillsSize();
  for (const auto &Info : MFI.getCalleeSavedInfo()) {
    int FrameIdx = Info.getFrameIdx();
    if (FrameIdx < 0 || MFI.getStackID(ObjectIdx: FrameIdx) != TargetStackID::Default)
      continue;

    Size += MFI.getObjectSize(ObjectIdx: FrameIdx);
  }
  RVFI->setCalleeSavedStackSize(Size);
}
1874
1875// Not preserve stack space within prologue for outgoing variables when the
1876// function contains variable size objects or there are vector objects accessed
1877// by the frame pointer.
1878// Let eliminateCallFramePseudoInstr preserve stack space for it.
1879bool RISCVFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
1880 return !MF.getFrameInfo().hasVarSizedObjects() &&
1881 !(hasFP(MF) && hasRVVFrameObject(MF));
1882}
1883
// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
MachineBasicBlock::iterator RISCVFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MI) const {
  DebugLoc DL = MI->getDebugLoc();

  if (!hasReservedCallFrame(MF)) {
    // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
    // ADJCALLSTACKUP must be converted to instructions manipulating the stack
    // pointer. This is necessary when there is a variable length stack
    // allocation (e.g. alloca), which means it's not possible to allocate
    // space for outgoing arguments from within the function prologue.
    int64_t Amount = MI->getOperand(i: 0).getImm();

    if (Amount != 0) {
      // Ensure the stack remains aligned after adjustment.
      Amount = alignSPAdjust(SPAdj: Amount);

      // ADJCALLSTACKDOWN grows the stack, so the SP adjustment is negative.
      if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
        Amount = -Amount;

      const RISCVTargetLowering *TLI =
          MF.getSubtarget<RISCVSubtarget>().getTargetLowering();
      int64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign: getStackAlign());
      if (TLI->hasInlineStackProbe(MF) && -Amount >= ProbeSize) {
        // When stack probing is enabled, the decrement of SP may need to be
        // probed. We can handle both the decrement and the probing in
        // allocateStack.
        bool DynAllocation =
            MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
        // Note: -Amount is positive here (allocation size in bytes).
        allocateStack(MBB, MBBI: MI, MF, Offset: -Amount, RealStackSize: -Amount,
                      EmitCFI: needsDwarfCFI(MF) && !hasFP(MF),
                      /*NeedProbe=*/true, ProbeSize, DynAllocation,
                      Flag: MachineInstr::NoFlags);
      } else {
        // Plain SP adjustment; no probing required.
        const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
        RI.adjustReg(MBB, II: MI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: Amount),
                     Flag: MachineInstr::NoFlags, RequiredAlign: getStackAlign());
      }
    }
  }

  // Remove the pseudo instruction and return the iterator past it.
  return MBB.erase(I: MI);
}
1928
// We would like to split the SP adjustment to reduce prologue/epilogue
// as following instructions. In this way, the offset of the callee saved
// register could fit in a single store. Supposed that the first sp adjust
// amount is 2032.
//     add     sp,sp,-2032
//     sw      ra,2028(sp)
//     sw      s0,2024(sp)
//     sw      s1,2020(sp)
//     sw      s3,2012(sp)
//     sw      s4,2008(sp)
//     add     sp,sp,-64
// Returns 0 when no split is performed.
uint64_t
RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  uint64_t StackSize = getStackSizeWithRVVPadding(MF);

  // Disable SplitSPAdjust if save-restore libcall, push/pop or QCI interrupts
  // are used. The callee-saved registers will be pushed by the save-restore
  // libcalls, so we don't have to split the SP adjustment in this case.
  if (RVFI->getReservedSpillsSize())
    return 0;

  // Return the FirstSPAdjustAmount if the StackSize can not fit in a signed
  // 12-bit and there exists a callee-saved register needing to be pushed.
  if (!isInt<12>(x: StackSize) && (CSI.size() > 0)) {
    // FirstSPAdjustAmount is chosen at most as (2048 - StackAlign) because
    // 2048 will cause sp = sp + 2048 in the epilogue to be split into multiple
    // instructions. Offsets smaller than 2048 can fit in a single load/store
    // instruction, and we have to stick with the stack alignment. 2048 has
    // 16-byte alignment. The stack alignment for RV32 and RV64 is 16 and for
    // RV32E it is 4. So (2048 - StackAlign) will satisfy the stack alignment.
    const uint64_t StackAlign = getStackAlign().value();

    // Amount of (2048 - StackAlign) will prevent callee saved and restored
    // instructions be compressed, so try to adjust the amount to the largest
    // offset that stack compression instructions accept when target supports
    // compression instructions.
    if (STI.hasStdExtZca()) {
      // The compression extensions may support the following instructions:
      // riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
      //          c.swsp rs2, offset[7:2] => 2^(6 + 2)
      //          c.flwsp rd, offset[7:2] => 2^(6 + 2)
      //          c.fswsp rs2, offset[7:2] => 2^(6 + 2)
      // riscv64: c.ldsp rd, offset[8:3] => 2^(6 + 3)
      //          c.sdsp rs2, offset[8:3] => 2^(6 + 3)
      //          c.fldsp rd, offset[8:3] => 2^(6 + 3)
      //          c.fsdsp rs2, offset[8:3] => 2^(6 + 3)
      const uint64_t RVCompressLen = STI.getXLen() * 8;
      // Compared with amount (2048 - StackAlign), StackSize needs to
      // satisfy the following conditions to avoid using more instructions
      // to adjust the sp after adjusting the amount, such as
      // StackSize meets the condition (StackSize <= 2048 + RVCompressLen),
      // case1: Amount is 2048 - StackAlign: use addi + addi to adjust sp.
      // case2: Amount is RVCompressLen: use addi + addi to adjust sp.
      // The ranges below identify stack sizes where picking CompressLen as
      // the first adjustment does not increase the total number of SP
      // adjustment instructions versus (2048 - StackAlign).
      auto CanCompress = [&](uint64_t CompressLen) -> bool {
        if (StackSize <= 2047 + CompressLen ||
            (StackSize > 2048 * 2 - StackAlign &&
             StackSize <= 2047 * 2 + CompressLen) ||
            StackSize > 2048 * 3 - StackAlign)
          return true;

        return false;
      };
      // In the epilogue, addi sp, sp, 496 is used to recover the sp and it
      // can be compressed(C.ADDI16SP, offset can be [-512, 496]), but
      // addi sp, sp, 512 can not be compressed. So try to use 496 first.
      const uint64_t ADDI16SPCompressLen = 496;
      if (STI.is64Bit() && CanCompress(ADDI16SPCompressLen))
        return ADDI16SPCompressLen;
      if (CanCompress(RVCompressLen))
        return RVCompressLen;
    }
    return 2048 - StackAlign;
  }
  return 0;
}
2007
// Assign stack slots to callee-saved registers. Registers covered by QCI
// interrupt entry, cm.push or the save/restore libcalls get fixed slots at
// the offsets those mechanisms use; everything else gets a regular spill
// slot (with RVV registers placed on the scalable-vector stack).
bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  // Preemptible Interrupts have two additional Callee-save Frame Indexes,
  // not tracked by `CSI`.
  if (RVFI->isSiFivePreemptibleInterrupt(MF)) {
    for (int I = 0; I < 2; ++I) {
      int FI = RVFI->getInterruptCSRFrameIndex(Idx: I);
      MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FI, IsCalleeSaved: true);
    }
  }

  // Early exit if no callee saved registers are modified!
  if (CSI.empty())
    return true;

  if (RVFI->useQCIInterrupt(MF)) {
    RVFI->setQCIInterruptStackSize(QCIInterruptPushAmount);
  }

  if (RVFI->isPushable(MF)) {
    // Determine how many GPRs we need to push and save it to RVFI.
    unsigned PushedRegNum = getNumPushPopRegs(CSI);

    // `QC.C.MIENTER(.NEST)` will save `ra` and `s0`, so we should only push if
    // we want to push more than 2 registers. Otherwise, we should push if we
    // want to push more than 0 registers.
    unsigned OnlyPushIfMoreThan = RVFI->useQCIInterrupt(MF) ? 2 : 0;
    if (PushedRegNum > OnlyPushIfMoreThan) {
      RVFI->setRVPushRegs(PushedRegNum);
      RVFI->setRVPushStackSize(alignTo(Value: (STI.getXLen() / 8) * PushedRegNum, Align: 16));
    }
  }

  for (auto &CS : CSI) {
    MCRegister Reg = CS.getReg();
    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
    unsigned Size = RegInfo->getSpillSize(RC: *RC);

    // Registers saved by QC.C.MIENTER live at fixed offsets defined by
    // FixedCSRFIQCIInterruptMap.
    if (RVFI->useQCIInterrupt(MF)) {
      const auto *FFI = llvm::find_if(Range: FixedCSRFIQCIInterruptMap, P: [&](auto P) {
        return P.first == CS.getReg();
      });
      if (FFI != std::end(arr: FixedCSRFIQCIInterruptMap)) {
        int64_t Offset = FFI->second * (int64_t)Size;

        int FrameIdx = MFI.CreateFixedSpillStackObject(Size, SPOffset: Offset);
        assert(FrameIdx < 0);
        CS.setFrameIdx(FrameIdx);
        continue;
      }
    }

    // Registers saved by cm.push / the save libcall live at fixed negative
    // offsets determined by their position in FixedCSRFIMap.
    if (RVFI->useSaveRestoreLibCalls(MF) || RVFI->isPushable(MF)) {
      const auto *FII = llvm::find_if(
          Range: FixedCSRFIMap, P: [&](MCPhysReg P) { return P == CS.getReg(); });
      unsigned RegNum = std::distance(first: std::begin(arr: FixedCSRFIMap), last: FII);

      if (FII != std::end(arr: FixedCSRFIMap)) {
        int64_t Offset;
        if (RVFI->getPushPopKind(MF) ==
            RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp)
          Offset = -int64_t(RVFI->getRVPushRegs() - RegNum) * Size;
        else
          Offset = -int64_t(RegNum + 1) * Size;

        // The QCI entry area sits below SP first; push slots go beneath it.
        if (RVFI->useQCIInterrupt(MF))
          Offset -= QCIInterruptPushAmount;

        int FrameIdx = MFI.CreateFixedSpillStackObject(Size, SPOffset: Offset);
        assert(FrameIdx < 0);
        CS.setFrameIdx(FrameIdx);
        continue;
      }
    }

    // Not a fixed slot.
    Align Alignment = RegInfo->getSpillAlign(RC: *RC);
    // We may not be able to satisfy the desired alignment specification of
    // the TargetRegisterClass if the stack alignment is smaller. Use the
    // min.
    Alignment = std::min(a: Alignment, b: getStackAlign());
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, isSpillSlot: true);
    MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FrameIdx, IsCalleeSaved: true);
    CS.setFrameIdx(FrameIdx);
    if (RISCVRegisterInfo::isRVVRegClass(RC))
      MFI.setStackID(ObjectIdx: FrameIdx, ID: TargetStackID::ScalableVector);
  }

  if (RVFI->useQCIInterrupt(MF)) {
    // Allocate a fixed object that covers the entire QCI stack allocation,
    // because there are gaps which are reserved for future use.
    MFI.CreateFixedSpillStackObject(
        Size: QCIInterruptPushAmount, SPOffset: -static_cast<int64_t>(QCIInterruptPushAmount));
  }

  if (RVFI->isPushable(MF)) {
    int64_t QCIOffset = RVFI->useQCIInterrupt(MF) ? QCIInterruptPushAmount : 0;
    // Allocate a fixed object that covers the full push.
    if (int64_t PushSize = RVFI->getRVPushStackSize())
      MFI.CreateFixedSpillStackObject(Size: PushSize, SPOffset: -PushSize - QCIOffset);
  } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
    // Likewise, cover the whole save/restore libcall area with one object.
    int64_t LibCallFrameSize =
        alignTo(Size: (STI.getXLen() / 8) * LibCallRegs, A: getStackAlign());
    MFI.CreateFixedSpillStackObject(Size: LibCallFrameSize, SPOffset: -LibCallFrameSize);
  }

  return true;
}
2121
// Emit the callee-saved spills in the prologue: QCI entry instruction,
// cm.push or __riscv_save libcall for the fixed-slot registers, then plain
// stores for everything else (including RVV registers).
bool RISCVFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->useQCIInterrupt(MF: *MF)) {
    // Emit QC.C.MIENTER(.NEST)
    BuildMI(
        BB&: MBB, I: MI, MIMD: DL,
        MCID: TII.get(Opcode: RVFI->getInterruptStackKind(MF: *MF) ==
                        RISCVMachineFunctionInfo::InterruptStackKind::QCINest
                    ? RISCV::QC_C_MIENTER_NEST
                    : RISCV::QC_C_MIENTER))
        .setMIFlag(MachineInstr::FrameSetup);

    // The registers QC.C.MIENTER saves must be live-in to the block.
    for (auto [Reg, _Offset] : FixedCSRFIQCIInterruptMap)
      MBB.addLiveIn(PhysReg: Reg);
  }

  if (RVFI->isPushable(MF: *MF)) {
    // Emit CM.PUSH with base StackAdj & evaluate Push stack
    unsigned PushedRegNum = RVFI->getRVPushRegs();
    if (PushedRegNum > 0) {
      // Use encoded number to represent registers to spill.
      unsigned Opcode = getPushOpcode(
          Kind: RVFI->getPushPopKind(MF: *MF), UpdateFP: hasFP(MF: *MF) && !RVFI->useQCIInterrupt(MF: *MF));
      unsigned RegEnc = RISCVZC::encodeRegListNumRegs(NumRegs: PushedRegNum);
      MachineInstrBuilder PushBuilder =
          BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode))
              .setMIFlag(MachineInstr::FrameSetup);
      PushBuilder.addImm(Val: RegEnc);
      PushBuilder.addImm(Val: 0);

      // Record the pushed registers as implicit uses of the push.
      for (unsigned i = 0; i < PushedRegNum; i++)
        PushBuilder.addUse(RegNo: FixedCSRFIMap[i], Flags: RegState::Implicit);
    }
  } else if (const char *SpillLibCall = getSpillLibCallName(MF: *MF, CSI)) {
    // Add spill libcall via non-callee-saved register t0.
    MachineInstrBuilder NewMI =
        BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::PseudoCALLReg), DestReg: RISCV::X5)
            .addExternalSymbol(FnName: SpillLibCall, TargetFlags: RISCVII::MO_CALL)
            .setMIFlag(MachineInstr::FrameSetup)
            .addUse(RegNo: RISCV::X2, Flags: RegState::Implicit)
            .addDef(RegNo: RISCV::X2, Flags: RegState::ImplicitDefine);

    // Add registers spilled as implicit used.
    for (auto &CS : CSI)
      NewMI.addUse(RegNo: CS.getReg(), Flags: RegState::Implicit);
  }

  // Manually spill values not spilled by libcall & Push/Pop.
  const auto &UnmanagedCSI = getUnmanagedCSI(MF: *MF, CSI);
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI);

  auto storeRegsToStackSlots = [&](decltype(UnmanagedCSI) CSInfo) {
    for (auto &CS : CSInfo) {
      // Insert the spill to the stack frame.
      MCRegister Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      // A register that is live-in (e.g. an argument register) must not be
      // marked killed by the spill.
      TII.storeRegToStackSlot(MBB, MI, SrcReg: Reg, isKill: !MBB.isLiveIn(Reg),
                              FrameIndex: CS.getFrameIdx(), RC, VReg: Register(),
                              Flags: MachineInstr::FrameSetup);
    }
  };
  storeRegsToStackSlots(UnmanagedCSI);
  storeRegsToStackSlots(RVVCSI);

  return true;
}
2199
2200static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg) {
2201 return RISCV::VRRegClass.contains(Reg: BaseReg) ? 1
2202 : RISCV::VRM2RegClass.contains(Reg: BaseReg) ? 2
2203 : RISCV::VRM4RegClass.contains(Reg: BaseReg) ? 4
2204 : 8;
2205}
2206
// Emit CFI directives in the prologue describing where each callee-saved
// RVV register was spilled, one directive per VR register in the group.
void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, bool HasFP) const {
  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();

  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI: MFI.getCalleeSavedInfo());
  if (RVVCSI.empty())
    return;

  uint64_t FixedSize = getStackSizeWithRVVPadding(MF: *MF);
  if (!HasFP) {
    // Without a frame pointer, offsets are relative to SP at this point,
    // which has not yet been adjusted by the scalar local-variable area;
    // subtract that portion from the fixed part of the offset.
    uint64_t ScalarLocalVarSize =
        MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
        RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
    FixedSize -= ScalarLocalVarSize;
  }

  CFIInstBuilder CFIBuilder(MBB, MI, MachineInstr::FrameSetup);
  for (auto &CS : RVVCSI) {
    // Insert the spill to the stack frame.
    int FI = CS.getFrameIdx();
    MCRegister BaseReg = getRVVBaseRegister(TRI, Reg: CS.getReg());
    unsigned NumRegs = getCalleeSavedRVVNumRegs(BaseReg: CS.getReg());
    for (unsigned i = 0; i < NumRegs; ++i) {
      // NOTE(review): the scalable offset divides the object offset by 8 —
      // presumably converting vscale-byte units to vlenb multiples, with +i
      // stepping one VR register per directive; confirm against
      // createDefCFAOffset.
      CFIBuilder.insertCFIInst(CFIInst: createDefCFAOffset(
          TRI, Reg: BaseReg + i,
          Offset: StackOffset::get(Fixed: -FixedSize, Scalable: MFI.getObjectOffset(ObjectIdx: FI) / 8 + i)));
    }
  }
}
2239
2240void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
2241 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
2242 MachineFunction *MF = MBB.getParent();
2243 const MachineFrameInfo &MFI = MF->getFrameInfo();
2244 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
2245
2246 CFIInstBuilder CFIHelper(MBB, MI, MachineInstr::FrameDestroy);
2247 const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI: MFI.getCalleeSavedInfo());
2248 for (auto &CS : RVVCSI) {
2249 MCRegister BaseReg = getRVVBaseRegister(TRI, Reg: CS.getReg());
2250 unsigned NumRegs = getCalleeSavedRVVNumRegs(BaseReg: CS.getReg());
2251 for (unsigned i = 0; i < NumRegs; ++i)
2252 CFIHelper.buildRestore(Reg: BaseReg + i);
2253 }
2254}
2255
2256bool RISCVFrameLowering::restoreCalleeSavedRegisters(
2257 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2258 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2259 if (CSI.empty())
2260 return true;
2261
2262 MachineFunction *MF = MBB.getParent();
2263 const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
2264 DebugLoc DL;
2265 if (MI != MBB.end() && !MI->isDebugInstr())
2266 DL = MI->getDebugLoc();
2267
2268 // Manually restore values not restored by libcall & Push/Pop.
2269 // Reverse the restore order in epilog. In addition, the return
2270 // address will be restored first in the epilogue. It increases
2271 // the opportunity to avoid the load-to-use data hazard between
2272 // loading RA and return by RA. loadRegFromStackSlot can insert
2273 // multiple instructions.
2274 const auto &UnmanagedCSI = getUnmanagedCSI(MF: *MF, CSI);
2275 const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI);
2276
2277 auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
2278 for (auto &CS : CSInfo) {
2279 MCRegister Reg = CS.getReg();
2280 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
2281 TII.loadRegFromStackSlot(MBB, MI, DestReg: Reg, FrameIndex: CS.getFrameIdx(), RC, VReg: Register(),
2282 SubReg: RISCV::NoSubRegister,
2283 Flags: MachineInstr::FrameDestroy);
2284 assert(MI != MBB.begin() &&
2285 "loadRegFromStackSlot didn't insert any code!");
2286 }
2287 };
2288 loadRegFromStackSlot(RVVCSI);
2289 loadRegFromStackSlot(UnmanagedCSI);
2290
2291 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2292 if (RVFI->useQCIInterrupt(MF: *MF)) {
2293 // Don't emit anything here because restoration is handled by
2294 // QC.C.MILEAVERET which we already inserted to return.
2295 assert(MI->getOpcode() == RISCV::QC_C_MILEAVERET &&
2296 "Unexpected QCI Interrupt Return Instruction");
2297 }
2298
2299 if (RVFI->isPushable(MF: *MF)) {
2300 unsigned PushedRegNum = RVFI->getRVPushRegs();
2301 if (PushedRegNum > 0) {
2302 unsigned Opcode = getPopOpcode(Kind: RVFI->getPushPopKind(MF: *MF));
2303 unsigned RegEnc = RISCVZC::encodeRegListNumRegs(NumRegs: PushedRegNum);
2304 MachineInstrBuilder PopBuilder =
2305 BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode))
2306 .setMIFlag(MachineInstr::FrameDestroy);
2307 // Use encoded number to represent registers to restore.
2308 PopBuilder.addImm(Val: RegEnc);
2309 PopBuilder.addImm(Val: 0);
2310
2311 for (unsigned i = 0; i < RVFI->getRVPushRegs(); i++)
2312 PopBuilder.addDef(RegNo: FixedCSRFIMap[i], Flags: RegState::ImplicitDefine);
2313 }
2314 } else if (const char *RestoreLibCall = getRestoreLibCallName(MF: *MF, CSI)) {
2315 // Add restore libcall via tail call.
2316 MachineInstrBuilder NewMI =
2317 BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::PseudoTAIL))
2318 .addExternalSymbol(FnName: RestoreLibCall, TargetFlags: RISCVII::MO_CALL)
2319 .setMIFlag(MachineInstr::FrameDestroy)
2320 .addDef(RegNo: RISCV::X2, Flags: RegState::ImplicitDefine);
2321
2322 // Add registers restored as implicit defined.
2323 for (auto &CS : CSI)
2324 NewMI.addDef(RegNo: CS.getReg(), Flags: RegState::ImplicitDefine);
2325
2326 // Remove trailing returns, since the terminator is now a tail call to the
2327 // restore function.
2328 if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
2329 NewMI.getInstr()->copyImplicitOps(MF&: *MF, MI: *MI);
2330 MI->eraseFromParent();
2331 }
2332 }
2333 return true;
2334}
2335
2336bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
2337 // Keep the conventional code flow when not optimizing.
2338 if (MF.getFunction().hasOptNone())
2339 return false;
2340
2341 return true;
2342}
2343
2344bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
2345 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2346 const MachineFunction *MF = MBB.getParent();
2347 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2348
2349 // Make sure VTYPE and VL are not live-in since we will use vsetvli in the
2350 // prologue to get the VLEN, and that will clobber these registers.
2351 //
2352 // We may do also check the stack contains objects with scalable vector type,
2353 // but this will require iterating over all the stack objects, but this may
2354 // not worth since the situation is rare, we could do further check in future
2355 // if we find it is necessary.
2356 if (STI.preferVsetvliOverReadVLENB() &&
2357 (MBB.isLiveIn(Reg: RISCV::VTYPE) || MBB.isLiveIn(Reg: RISCV::VL)))
2358 return false;
2359
2360 if (!RVFI->useSaveRestoreLibCalls(MF: *MF))
2361 return true;
2362
2363 // Inserting a call to a __riscv_save libcall requires the use of the register
2364 // t0 (X5) to hold the return address. Therefore if this register is already
2365 // used we can't insert the call.
2366
2367 RegScavenger RS;
2368 RS.enterBasicBlock(MBB&: *TmpMBB);
2369 return !RS.isRegUsed(Reg: RISCV::X5);
2370}
2371
2372bool RISCVFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
2373 const MachineFunction *MF = MBB.getParent();
2374 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2375 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2376
2377 // We do not want QC.C.MILEAVERET to be subject to shrink-wrapping - it must
2378 // come in the final block of its function as it both pops and returns.
2379 if (RVFI->useQCIInterrupt(MF: *MF))
2380 return MBB.succ_empty();
2381
2382 if (!RVFI->useSaveRestoreLibCalls(MF: *MF))
2383 return true;
2384
2385 // Using the __riscv_restore libcalls to restore CSRs requires a tail call.
2386 // This means if we still need to continue executing code within this function
2387 // the restore cannot take place in this basic block.
2388
2389 if (MBB.succ_size() > 1)
2390 return false;
2391
2392 MachineBasicBlock *SuccMBB =
2393 MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
2394
2395 // Doing a tail call should be safe if there are no successors, because either
2396 // we have a returning block or the end of the block is unreachable, so the
2397 // restore will be eliminated regardless.
2398 if (!SuccMBB)
2399 return true;
2400
2401 // The successor can only contain a return, since we would effectively be
2402 // replacing the successor with our own tail return at the end of our block.
2403 return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
2404}
2405
// Report which stack IDs this target can allocate frame objects in: the
// default (fixed-size) stack and the dedicated region for scalable (RVV)
// vectors. The remaining IDs belong to other targets or special uses.
// The switch is deliberately exhaustive (no default) so the compiler flags
// any newly added TargetStackID value.
bool RISCVFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::ScalableVector:
    return true;
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
  case TargetStackID::WasmLocal:
  case TargetStackID::ScalablePredicateVector:
    return false;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}
2419
// Scalable (RVV) vector frame objects are placed in the separate
// ScalableVector stack region rather than on the default stack.
TargetStackID::Value RISCVFrameLowering::getStackIDForScalableVectors() const {
  return TargetStackID::ScalableVector;
}
2423
// Synthesize the probe loop: repeatedly decrement SP by ProbeSize and store
// zero at the new stack top, touching each ProbeSize-sized chunk of the
// allocation.
//
// TargetReg's meaning depends on IsRVV:
//   - !IsRVV: the loop runs until SP equals TargetReg (the new top of stack).
//   - IsRVV: TargetReg counts the remaining bytes; the loop runs while at
//     least ProbeSize bytes remain, decrementing both SP and TargetReg.
//
// The block at MBBI is split: everything after MBBI moves into a fresh
// ExitMBB, and a LoopTestMBB containing the probe loop is inserted between
// the two. Live-ins of the new blocks are recomputed at the end.
//
// NOTE(review): X7 is used as a scratch register and clobbered here — the
// caller is presumably responsible for ensuring it is free; confirm.
static void emitStackProbeInline(MachineBasicBlock::iterator MBBI, DebugLoc DL,
                                 Register TargetReg, bool IsRVV) {
  assert(TargetReg != RISCV::X2 && "New top of stack cannot already be in SP");

  MachineBasicBlock &MBB = *MBBI->getParent();
  MachineFunction &MF = *MBB.getParent();

  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
  bool IsRV64 = Subtarget.is64Bit();
  Align StackAlign = Subtarget.getFrameLowering()->getStackAlign();
  const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
  uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign);

  // Create the loop block and the exit block right after MBB in layout order.
  MachineFunction::iterator MBBInsertPoint = std::next(x: MBB.getIterator());
  MachineBasicBlock *LoopTestMBB =
      MF.CreateMachineBasicBlock(BB: MBB.getBasicBlock());
  MF.insert(MBBI: MBBInsertPoint, MBB: LoopTestMBB);
  MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(BB: MBB.getBasicBlock());
  MF.insert(MBBI: MBBInsertPoint, MBB: ExitMBB);
  MachineInstr::MIFlag Flags = MachineInstr::FrameSetup;
  Register ScratchReg = RISCV::X7;

  // ScratchReg = ProbeSize
  // Materialized once in MBB, before the loop.
  TII->movImm(MBB, MBBI, DL, DstReg: ScratchReg, Val: ProbeSize, Flag: Flags);

  // LoopTest:
  // SUB SP, SP, ProbeSize
  BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB), DestReg: SPReg)
      .addReg(RegNo: SPReg)
      .addReg(RegNo: ScratchReg)
      .setMIFlags(Flags);

  // s[d|w] zero, 0(sp)
  // The probe itself: store zero at the new stack top.
  BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL,
          MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
      .addReg(RegNo: RISCV::X0)
      .addReg(RegNo: SPReg)
      .addImm(Val: 0)
      .setMIFlags(Flags);

  if (IsRVV) {
    // SUB TargetReg, TargetReg, ProbeSize
    // TargetReg tracks the remaining allocation size in the RVV case.
    BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB),
            DestReg: TargetReg)
        .addReg(RegNo: TargetReg)
        .addReg(RegNo: ScratchReg)
        .setMIFlags(Flags);

    // BGE TargetReg, ProbeSize, LoopTest
    // Keep looping while at least ProbeSize bytes remain to be probed.
    BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::BGE))
        .addReg(RegNo: TargetReg)
        .addReg(RegNo: ScratchReg)
        .addMBB(MBB: LoopTestMBB)
        .setMIFlags(Flags);

  } else {
    // BNE SP, TargetReg, LoopTest
    // Keep looping until SP has reached the requested new top of stack.
    BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::BNE))
        .addReg(RegNo: SPReg)
        .addReg(RegNo: TargetReg)
        .addMBB(MBB: LoopTestMBB)
        .setMIFlags(Flags);
  }

  // Move the rest of MBB (after MBBI) into ExitMBB and re-point MBB's
  // successors (and any PHIs in them) at ExitMBB.
  ExitMBB->splice(Where: ExitMBB->end(), Other: &MBB, From: std::next(x: MBBI), To: MBB.end());
  ExitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: &MBB);

  // Wire up the CFG: MBB -> LoopTest -> {LoopTest, Exit}.
  LoopTestMBB->addSuccessor(Succ: ExitMBB);
  LoopTestMBB->addSuccessor(Succ: LoopTestMBB);
  MBB.addSuccessor(Succ: LoopTestMBB);
  // Update liveins.
  fullyRecomputeLiveIns(MBBs: {ExitMBB, LoopTestMBB});
}
2499
2500void RISCVFrameLowering::inlineStackProbe(MachineFunction &MF,
2501 MachineBasicBlock &MBB) const {
2502 // Get the instructions that need to be replaced. We emit at most two of
2503 // these. Remember them in order to avoid complications coming from the need
2504 // to traverse the block while potentially creating more blocks.
2505 SmallVector<MachineInstr *, 4> ToReplace;
2506 for (MachineInstr &MI : MBB) {
2507 unsigned Opc = MI.getOpcode();
2508 if (Opc == RISCV::PROBED_STACKALLOC ||
2509 Opc == RISCV::PROBED_STACKALLOC_RVV) {
2510 ToReplace.push_back(Elt: &MI);
2511 }
2512 }
2513
2514 for (MachineInstr *MI : ToReplace) {
2515 if (MI->getOpcode() == RISCV::PROBED_STACKALLOC ||
2516 MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV) {
2517 MachineBasicBlock::iterator MBBI = MI->getIterator();
2518 DebugLoc DL = MBB.findDebugLoc(MBBI);
2519 Register TargetReg = MI->getOperand(i: 0).getReg();
2520 emitStackProbeInline(MBBI, DL, TargetReg,
2521 IsRVV: (MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV));
2522 MBBI->eraseFromParent();
2523 }
2524 }
2525}
2526
// The CFA coincides with the value of the stack pointer on function entry,
// so the initial offset from the CFA register is zero.
int RISCVFrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
  return 0;
}
2530
2531Register
2532RISCVFrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
2533 return RISCV::X2;
2534}
2535