1//===-- RISCVFrameLowering.cpp - RISC-V Frame Information -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of TargetFrameLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVFrameLowering.h"
14#include "MCTargetDesc/RISCVBaseInfo.h"
15#include "RISCVMachineFunctionInfo.h"
16#include "RISCVSubtarget.h"
17#include "llvm/BinaryFormat/Dwarf.h"
18#include "llvm/CodeGen/CFIInstBuilder.h"
19#include "llvm/CodeGen/LivePhysRegs.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/RegisterScavenging.h"
25#include "llvm/IR/DiagnosticInfo.h"
26#include "llvm/MC/MCDwarf.h"
27#include "llvm/Support/LEB128.h"
28
29#include <algorithm>
30
31#define DEBUG_TYPE "riscv-frame"
32
33using namespace llvm;
34
35static Align getABIStackAlignment(RISCVABI::ABI ABI) {
36 if (ABI == RISCVABI::ABI_ILP32E)
37 return Align(4);
38 if (ABI == RISCVABI::ABI_LP64E)
39 return Align(8);
40 return Align(16);
41}
42
// The RISC-V stack grows downwards. Both the stack alignment and the
// transient stack alignment come from the target ABI, and the local area
// starts directly at the stack pointer (LocalAreaOffset == 0).
RISCVFrameLowering::RISCVFrameLowering(const RISCVSubtarget &STI)
    : TargetFrameLowering(
          StackGrowsDown, getABIStackAlignment(STI.getTargetABI()),
          /*LocalAreaOffset=*/0,
          /*TransientStackAlignment=*/getABIStackAlignment(STI.getTargetABI())),
      STI(STI) {}
49
// The register used to hold the frame pointer.
static constexpr MCPhysReg FPReg = RISCV::X8;

// The register used to hold the stack pointer.
static constexpr MCPhysReg SPReg = RISCV::X2;

// The register used to hold the return address.
static constexpr MCPhysReg RAReg = RISCV::X1;

// List of CSRs that are given a fixed location by save/restore libcalls or
// Zcmp/Xqccmp Push/Pop. The order in this table indicates the order the
// registers are saved on the stack. Zcmp and Xqccmp use the reverse order of
// save/restore on the stack, but this is handled when offsets are calculated.
static const MCPhysReg FixedCSRFIMap[] = {
    /*ra*/ RAReg, /*s0*/ FPReg, /*s1*/ RISCV::X9,
    /*s2*/ RISCV::X18, /*s3*/ RISCV::X19, /*s4*/ RISCV::X20,
    /*s5*/ RISCV::X21, /*s6*/ RISCV::X22, /*s7*/ RISCV::X23,
    /*s8*/ RISCV::X24, /*s9*/ RISCV::X25, /*s10*/ RISCV::X26,
    /*s11*/ RISCV::X27};

// The number of stack bytes allocated by `QC.C.MIENTER(.NEST)` and popped by
// `QC.C.MILEAVERET`.
static constexpr uint64_t QCIInterruptPushAmount = 96;

// Registers saved by `QC.C.MIENTER(.NEST)` and the fixed (negative) slot
// index each occupies in that instruction's stack frame; the entries marked
// as gaps/reserved below are slots the hardware uses for CSRs.
static const std::pair<MCPhysReg, int8_t> FixedCSRFIQCIInterruptMap[] = {
    /* -1 is a gap for mepc/mnepc */
    {/*fp*/ FPReg, -2},
    /* -3 is a gap for qc.mcause */
    {/*ra*/ RAReg, -4},
    /* -5 is reserved */
    {/*t0*/ RISCV::X5, -6},
    {/*t1*/ RISCV::X6, -7},
    {/*t2*/ RISCV::X7, -8},
    {/*a0*/ RISCV::X10, -9},
    {/*a1*/ RISCV::X11, -10},
    {/*a2*/ RISCV::X12, -11},
    {/*a3*/ RISCV::X13, -12},
    {/*a4*/ RISCV::X14, -13},
    {/*a5*/ RISCV::X15, -14},
    {/*a6*/ RISCV::X16, -15},
    {/*a7*/ RISCV::X17, -16},
    {/*t3*/ RISCV::X28, -17},
    {/*t4*/ RISCV::X29, -18},
    {/*t5*/ RISCV::X30, -19},
    {/*t6*/ RISCV::X31, -20},
    /* -21, -22, -23, -24 are reserved */
};
97
98/// Returns true if DWARF CFI instructions ("frame moves") should be emitted.
99static bool needsDwarfCFI(const MachineFunction &MF) {
100 return MF.needsFrameMoves();
101}
102
// For now we use x3, a.k.a gp, as pointer to shadow call stack.
// User should not use x3 in their asm.
//
// Emits the shadow-call-stack prologue: pushes RA onto either the hardware
// shadow stack (sspush) or the software, gp-indexed shadow stack — but only
// when RA is actually spilled to the regular stack.
static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  // We check Zimop instead of (Zimop || Zcmop) to determine whether HW shadow
  // stack is available despite the fact that sspush/sspopchk both have a
  // compressed form, because if only Zcmop is available, we would need to
  // reserve X5 due to c.sspopchk only takes X5 and we currently do not support
  // using X5 as the return address register.
  // However, we can still aggressively use c.sspush x1 if zcmop is available.
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo();

  // Do not save RA to the SCS if it's not saved to the regular stack,
  // i.e. RA is not at risk of being overwritten.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    // Prefer the compressed push when Zcmop is available; it implicitly
    // operates on X1.
    if (STI.hasStdExtZcmop()) {
      static_assert(RAReg == RISCV::X1, "C.SSPUSH only accepts X1");
      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_C_SSPUSH));
    } else {
      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPUSH)).addReg(RAReg);
    }
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Store return address to shadow call stack
  // addi    gp, gp, [4|8]
  // s[w|d]  ra, -[4|8](gp)
  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
      .addReg(SCSPReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(SlotSize)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
      .addReg(RAReg)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
      .setMIFlag(MachineInstr::FrameSetup);

  if (!needsDwarfCFI(MF))
    return;

  // Emit a CFI instruction that causes SlotSize to be subtracted from the value
  // of the shadow stack pointer when unwinding past this frame.
  char DwarfSCSReg = TRI->getDwarfRegNum(SCSPReg, /*IsEH*/ true);
  assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X3).");

  // Hand-encoded DW_CFA_val_expression; the addend is a single-byte sleb128,
  // hence the 0x7f mask (SlotSize is 4 or 8, so -SlotSize fits in one byte).
  char Offset = static_cast<char>(-SlotSize) & 0x7f;
  const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      DwarfSCSReg, // register
      2,           // length
      static_cast<char>(unsigned(dwarf::DW_OP_breg0 + DwarfSCSReg)),
      Offset, // addend (sleb128)
  };

  CFIInstBuilder(MBB, MI, MachineInstr::FrameSetup)
      .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
}
180
// Emits the shadow-call-stack epilogue, mirroring emitSCSPrologue(): pops RA
// via the hardware shadow stack (sspopchk) or reloads it from the software,
// gp-indexed shadow stack. No-op unless RA was saved to the regular stack.
static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  // See emitSCSPrologue() for why Zimop (not Zcmop) gates the HW shadow stack.
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  // See emitSCSPrologue() above.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPOPCHK)).addReg(RAReg);
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Load return address from shadow call stack
  // l[w|d]  ra, -[4|8](gp)
  // addi    gp, gp, -[4|8]
  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::LD : RISCV::LW))
      .addReg(RAReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
      .setMIFlag(MachineInstr::FrameDestroy);
  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
      .addReg(SCSPReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
      .setMIFlag(MachineInstr::FrameDestroy);
  if (needsDwarfCFI(MF)) {
    // Restore the SCS pointer
    CFIInstBuilder(MBB, MI, MachineInstr::FrameDestroy).buildRestore(SCSPReg);
  }
}
226
// Insert instruction to swap mscratchsw with sp
//
// For SiFive CLIC stack-swap interrupts: exchanges SP with the
// sf.mscratchcsw CSR via a single csrrw, switching to the interrupt stack.
static void emitSiFiveCLICStackSwap(MachineFunction &MF, MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFiveStackSwapInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  assert(STI.hasVendorXSfmclic() && "Stack Swapping Requires XSfmclic");

  // csrrw sp, sf.mscratchcsw, sp
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(SPReg, RegState::Define)
      .addImm(RISCVSysReg::sf_mscratchcsw)
      .addReg(SPReg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

  // FIXME: CFI Information for this swap.
}
249
// For preemptible SiFive CLIC interrupt handlers: create the two scratch
// stack slots used to preserve X8/X9 across the mcause/mepc save sequence
// (see emitSiFiveCLICPreemptibleSaves). No-op for other functions.
static void
createSiFivePreemptibleInterruptFrameEntries(MachineFunction &MF,
                                             RISCVMachineFunctionInfo &RVFI) {
  if (!RVFI.isSiFivePreemptibleInterrupt(MF))
    return;

  const TargetRegisterClass &RC = RISCV::GPRRegClass;
  const TargetRegisterInfo &TRI =
      *MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Create two frame objects for spilling X8 and X9, which will be done in
  // `emitSiFiveCLICPreemptibleSaves`. This is in addition to any other stack
  // objects we might have for X8 and X9, as they might be saved twice.
  for (int I = 0; I < 2; ++I) {
    int FI = MFI.CreateStackObject(TRI.getSpillSize(RC), TRI.getSpillAlign(RC),
                                   /*isSpillSlot=*/true);
    RVFI.pushInterruptCSRFrameIndex(FI);
  }
}
270
// Preemptible SiFive CLIC prologue: spill X8/X9 to dedicated scratch slots,
// copy mcause/mepc into X8/X9 so they survive preemption, then re-enable
// interrupts (mstatus.MIE).
static void emitSiFiveCLICPreemptibleSaves(MachineFunction &MF,
                                           MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.

  // X8 and X9 might be stored into the stack twice, initially into the
  // `interruptCSRFrameIndex` here, and then maybe again into their CSI frame
  // index.
  //
  // This is done instead of telling the register allocator that we need two
  // VRegs to store the value of `mcause` and `mepc` through the instruction,
  // which affects other passes.
  TII->storeRegToStackSlot(MBB, MBBI, RISCV::X8, /* IsKill=*/true,
                           RVFI->getInterruptCSRFrameIndex(0),
                           &RISCV::GPRRegClass, Register(),
                           MachineInstr::FrameSetup);
  TII->storeRegToStackSlot(MBB, MBBI, RISCV::X9, /* IsKill=*/true,
                           RVFI->getInterruptCSRFrameIndex(1),
                           &RISCV::GPRRegClass, Register(),
                           MachineInstr::FrameSetup);

  // Put `mcause` into X8 (s0), and `mepc` into X9 (s1). If either of these are
  // used in the function, then they will appear in `getUnmanagedCSI` and will
  // be saved again.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
      .addReg(RISCV::X8, RegState::Define)
      .addImm(RISCVSysReg::mcause)
      .addReg(RISCV::X0)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
      .addReg(RISCV::X9, RegState::Define)
      .addImm(RISCVSysReg::mepc)
      .addReg(RISCV::X0)
      .setMIFlag(MachineInstr::FrameSetup);

  // Enable interrupts.
  // csrrsi zero, mstatus, 8 -- bit 3 is mstatus.MIE.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRSI))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mstatus)
      .addImm(8)
      .setMIFlag(MachineInstr::FrameSetup);
}
322
// Preemptible SiFive CLIC epilogue, inverse of emitSiFiveCLICPreemptibleSaves:
// disable interrupts, write X8/X9 back into mcause/mepc, then reload X8/X9
// from their scratch slots.
static void emitSiFiveCLICPreemptibleRestores(MachineFunction &MF,
                                              MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator MBBI,
                                              const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.
  // NOTE(review): these epilogue instructions carry FrameSetup (not
  // FrameDestroy) flags — confirm that is intentional.

  // Disable interrupts.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRCI))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mstatus)
      .addImm(8)
      .setMIFlag(MachineInstr::FrameSetup);

  // Restore `mepc` from x9 (s1), and `mcause` from x8 (s0). If either were used
  // in the function, they have already been restored once, so now have the
  // value stored in `emitSiFiveCLICPreemptibleSaves`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mepc)
      .addReg(RISCV::X9, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mcause)
      .addReg(RISCV::X8, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

  // X8 and X9 need to be restored to their values on function entry, which we
  // saved onto the stack in `emitSiFiveCLICPreemptibleSaves`.
  TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X9,
                            RVFI->getInterruptCSRFrameIndex(1),
                            &RISCV::GPRRegClass, Register(),
                            RISCV::NoSubRegister, MachineInstr::FrameSetup);
  TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X8,
                            RVFI->getInterruptCSRFrameIndex(0),
                            &RISCV::GPRRegClass, Register(),
                            RISCV::NoSubRegister, MachineInstr::FrameSetup);
}
369
// Get the ID of the libcall used for spilling and restoring callee saved
// registers. The ID is representative of the number of registers saved or
// restored by the libcall, except it is zero-indexed - ID 0 corresponds to a
// single register.
//
// Returns -1 when save/restore libcalls are not used for this function.
static int getLibCallID(const MachineFunction &MF,
                        const std::vector<CalleeSavedInfo> &CSI) {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (CSI.empty() || !RVFI->useSaveRestoreLibCalls(MF))
    return -1;

  // The highest-numbered libcall-saved register determines which libcall
  // entry point is required.
  MCRegister MaxReg;
  for (auto &CS : CSI)
    // assignCalleeSavedSpillSlots assigns negative frame indexes to
    // registers which can be saved by libcall.
    if (CS.getFrameIdx() < 0)
      MaxReg = std::max(MaxReg.id(), CS.getReg().id());

  if (!MaxReg)
    return -1;

  switch (MaxReg.id()) {
  default:
    llvm_unreachable("Something has gone wrong!");
    // clang-format off
  case /*s11*/ RISCV::X27: return 12;
  case /*s10*/ RISCV::X26: return 11;
  case /*s9*/  RISCV::X25: return 10;
  case /*s8*/  RISCV::X24: return 9;
  case /*s7*/  RISCV::X23: return 8;
  case /*s6*/  RISCV::X22: return 7;
  case /*s5*/  RISCV::X21: return 6;
  case /*s4*/  RISCV::X20: return 5;
  case /*s3*/  RISCV::X19: return 4;
  case /*s2*/  RISCV::X18: return 3;
  case /*s1*/  RISCV::X9:  return 2;
  case /*s0*/  FPReg:      return 1;
  case /*ra*/  RAReg:      return 0;
  // clang-format on
  }
}
411
412// Get the name of the libcall used for spilling callee saved registers.
413// If this function will not use save/restore libcalls, then return a nullptr.
414static const char *
415getSpillLibCallName(const MachineFunction &MF,
416 const std::vector<CalleeSavedInfo> &CSI) {
417 static const char *const SpillLibCalls[] = {
418 "__riscv_save_0",
419 "__riscv_save_1",
420 "__riscv_save_2",
421 "__riscv_save_3",
422 "__riscv_save_4",
423 "__riscv_save_5",
424 "__riscv_save_6",
425 "__riscv_save_7",
426 "__riscv_save_8",
427 "__riscv_save_9",
428 "__riscv_save_10",
429 "__riscv_save_11",
430 "__riscv_save_12"
431 };
432
433 int LibCallID = getLibCallID(MF, CSI);
434 if (LibCallID == -1)
435 return nullptr;
436 return SpillLibCalls[LibCallID];
437}
438
439// Get the name of the libcall used for restoring callee saved registers.
440// If this function will not use save/restore libcalls, then return a nullptr.
441static const char *
442getRestoreLibCallName(const MachineFunction &MF,
443 const std::vector<CalleeSavedInfo> &CSI) {
444 static const char *const RestoreLibCalls[] = {
445 "__riscv_restore_0",
446 "__riscv_restore_1",
447 "__riscv_restore_2",
448 "__riscv_restore_3",
449 "__riscv_restore_4",
450 "__riscv_restore_5",
451 "__riscv_restore_6",
452 "__riscv_restore_7",
453 "__riscv_restore_8",
454 "__riscv_restore_9",
455 "__riscv_restore_10",
456 "__riscv_restore_11",
457 "__riscv_restore_12"
458 };
459
460 int LibCallID = getLibCallID(MF, CSI);
461 if (LibCallID == -1)
462 return nullptr;
463 return RestoreLibCalls[LibCallID];
464}
465
466// Get the max reg of Push/Pop for restoring callee saved registers.
467static unsigned getNumPushPopRegs(const std::vector<CalleeSavedInfo> &CSI) {
468 unsigned NumPushPopRegs = 0;
469 for (auto &CS : CSI) {
470 auto *FII = llvm::find_if(Range: FixedCSRFIMap,
471 P: [&](MCPhysReg P) { return P == CS.getReg(); });
472 if (FII != std::end(arr: FixedCSRFIMap)) {
473 unsigned RegNum = std::distance(first: std::begin(arr: FixedCSRFIMap), last: FII);
474 NumPushPopRegs = std::max(a: NumPushPopRegs, b: RegNum + 1);
475 }
476 }
477 assert(NumPushPopRegs != 12 && "x26 requires x27 to also be pushed");
478 return NumPushPopRegs;
479}
480
// Return true if the specified function should have a dedicated frame
// pointer register. This is true if frame pointer elimination is
// disabled, if it needs dynamic stack realignment, if the function has
// variable sized allocas, or if the frame address is taken.
bool RISCVFrameLowering::hasFPImpl(const MachineFunction &MF) const {
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) ||
      RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
      MFI.isFrameAddressTaken())
    return true;

  // With large callframes around we may need to use FP to access the scavenging
  // emergency spillslot.
  //
  // We calculate the MaxCallFrameSize at the end of isel so this value should
  // be stable for the whole post-isel MIR pipeline.
  //
  // NOTE: The idea of forcing a frame pointer is copied from AArch64, but they
  //       conservatively return true when the call frame size had not been
  //       computed yet. On RISC-V that caused MachineOutliner tests to fail the
  //       MachineVerifier due to outlined functions not computing max call
  //       frame size thus the frame pointer would always be reserved.
  // 2047 is the largest offset reachable by a 12-bit signed immediate.
  if (MFI.isMaxCallFrameSizeComputed() && MFI.getMaxCallFrameSize() > 2047)
    return true;

  return false;
}
510
// Returns true when a base pointer is required: the function needs stack
// realignment AND SP cannot be used to reach stack objects (variable-sized
// objects, or SP is adjusted around calls).
bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // If we do not reserve stack space for outgoing arguments in prologue,
  // we will adjust the stack pointer before call instruction. After the
  // adjustment, we can not use SP to access the stack objects for the
  // arguments. Instead, use BP to access these stack objects.
  return (MFI.hasVarSizedObjects() ||
          (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
                                         MFI.getMaxCallFrameSize() != 0))) &&
         TRI->hasStackRealignment(MF);
}
524
// Determines the size of the frame and maximum call frame size.
void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t FrameSize = MFI.getStackSize();

  // QCI Interrupts use at least 96 bytes of stack space
  // (QCIInterruptPushAmount, pushed by QC.C.MIENTER).
  if (RVFI->useQCIInterrupt(MF))
    FrameSize = std::max(FrameSize, QCIInterruptPushAmount);

  // Get the alignment.
  Align StackAlign = getStackAlign();

  // Make sure the frame is aligned.
  FrameSize = alignTo(FrameSize, StackAlign);

  // Update frame info.
  MFI.setStackSize(FrameSize);

  // When using SP or BP to access stack objects, we may require extra padding
  // to ensure the bottom of the RVV stack is correctly aligned within the main
  // stack. We calculate this as the amount required to align the scalar local
  // variable section up to the RVV alignment.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  if (RVFI->getRVVStackSize() && (!hasFP(MF) || TRI->hasStackRealignment(MF))) {
    int ScalarLocalVarSize = FrameSize - RVFI->getCalleeSavedStackSize() -
                             RVFI->getVarArgsSaveSize();
    if (auto RVVPadding =
            offsetToAlignment(ScalarLocalVarSize, RVFI->getRVVStackAlign()))
      RVFI->setRVVPadding(RVVPadding);
  }
}
559
560// Returns the stack size including RVV padding (when required), rounded back
561// up to the required stack alignment.
562uint64_t RISCVFrameLowering::getStackSizeWithRVVPadding(
563 const MachineFunction &MF) const {
564 const MachineFrameInfo &MFI = MF.getFrameInfo();
565 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
566 return alignTo(Size: MFI.getStackSize() + RVFI->getRVVPadding(), A: getStackAlign());
567}
568
569static SmallVector<CalleeSavedInfo, 8>
570getUnmanagedCSI(const MachineFunction &MF,
571 const std::vector<CalleeSavedInfo> &CSI) {
572 const MachineFrameInfo &MFI = MF.getFrameInfo();
573 SmallVector<CalleeSavedInfo, 8> NonLibcallCSI;
574
575 for (auto &CS : CSI) {
576 int FI = CS.getFrameIdx();
577 if (FI >= 0 && MFI.getStackID(ObjectIdx: FI) == TargetStackID::Default)
578 NonLibcallCSI.push_back(Elt: CS);
579 }
580
581 return NonLibcallCSI;
582}
583
584static SmallVector<CalleeSavedInfo, 8>
585getRVVCalleeSavedInfo(const MachineFunction &MF,
586 const std::vector<CalleeSavedInfo> &CSI) {
587 const MachineFrameInfo &MFI = MF.getFrameInfo();
588 SmallVector<CalleeSavedInfo, 8> RVVCSI;
589
590 for (auto &CS : CSI) {
591 int FI = CS.getFrameIdx();
592 if (FI >= 0 && MFI.getStackID(ObjectIdx: FI) == TargetStackID::ScalableVector)
593 RVVCSI.push_back(Elt: CS);
594 }
595
596 return RVVCSI;
597}
598
// Collect the callee-saved registers handled by save/restore libcalls or by
// Zcmp/Xqccmp push/pop, i.e. the CSI entries whose register appears in
// FixedCSRFIMap. Empty unless this function uses libcalls or push/pop.
static SmallVector<CalleeSavedInfo, 8>
getPushOrLibCallsSavedInfo(const MachineFunction &MF,
                           const std::vector<CalleeSavedInfo> &CSI) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  SmallVector<CalleeSavedInfo, 8> PushOrLibCallsCSI;
  if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
    return PushOrLibCallsCSI;

  for (const auto &CS : CSI) {
    if (RVFI->useQCIInterrupt(MF)) {
      // Some registers are saved by both `QC.C.MIENTER(.NEST)` and
      // `QC.CM.PUSH(FP)`. In these cases, prioritise the CFI info that points
      // to the versions saved by `QC.C.MIENTER(.NEST)` which is what FP
      // unwinding would use.
      if (llvm::is_contained(llvm::make_first_range(FixedCSRFIQCIInterruptMap),
                             CS.getReg()))
        continue;
    }

    if (llvm::is_contained(FixedCSRFIMap, CS.getReg()))
      PushOrLibCallsCSI.push_back(CS);
  }

  return PushOrLibCallsCSI;
}
625
626static SmallVector<CalleeSavedInfo, 8>
627getQCISavedInfo(const MachineFunction &MF,
628 const std::vector<CalleeSavedInfo> &CSI) {
629 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
630
631 SmallVector<CalleeSavedInfo, 8> QCIInterruptCSI;
632 if (!RVFI->useQCIInterrupt(MF))
633 return QCIInterruptCSI;
634
635 for (const auto &CS : CSI) {
636 if (llvm::is_contained(Range: llvm::make_first_range(c: FixedCSRFIQCIInterruptMap),
637 Element: CS.getReg()))
638 QCIInterruptCSI.push_back(Elt: CS);
639 }
640
641 return QCIInterruptCSI;
642}
643
// Allocate `Amount` bytes of scalable (RVV) stack with stack probing: compute
// the byte count from VLENB at runtime, emit a probing-loop pseudo, then
// subtract the result from SP.
void RISCVFrameLowering::allocateAndProbeStackForRVV(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t Amount,
    MachineInstr::MIFlag Flag, bool EmitCFI, bool DynAllocation) const {
  assert(Amount != 0 && "Did not need to adjust stack pointer for RVV.");

  // Emit a variable-length allocation probing loop.

  // Get VLEN in TargetReg
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  Register TargetReg = RISCV::X6;
  // Amount is expressed in RVV blocks; multiply VLENB by the block count to
  // get the runtime byte size.
  uint32_t NumOfVReg = Amount / RISCV::RVVBytesPerBlock;
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PseudoReadVLENB), TargetReg)
      .setMIFlag(Flag);
  TII->mulImm(MF, MBB, MBBI, DL, TargetReg, NumOfVReg, Flag);

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(TargetReg, -Amount);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC_RVV))
      .addReg(TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(SPReg);
  }

  // SUB SP, SP, T1
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::SUB), SPReg)
      .addReg(SPReg)
      .addReg(TargetReg)
      .setMIFlag(Flag);

  // If we have a dynamic allocation later we need to probe any residuals.
  if (DynAllocation) {
    // s[d|w] zero, 0(sp) -- touch the new SP to probe the final page.
    BuildMI(MBB, MBBI, DL, TII->get(STI.is64Bit() ? RISCV::SD : RISCV::SW))
        .addReg(RISCV::X0)
        .addReg(SPReg)
        .addImm(0)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}
690
// Append a DWARF expression computing (... + FixedOffset + ScalableOffset *
// VLENB) on top of the value the caller has already pushed on the expression
// stack, mirroring the computation into `Comment` for readability.
static void appendScalableVectorExpression(const TargetRegisterInfo &TRI,
                                           SmallVectorImpl<char> &Expr,
                                           StackOffset Offset,
                                           llvm::raw_string_ostream &Comment) {
  int64_t FixedOffset = Offset.getFixed();
  int64_t ScalableOffset = Offset.getScalable();
  unsigned DwarfVLenB = TRI.getDwarfRegNum(RISCV::VLENB, /*isEH=*/true);
  // Fixed part first; omitted entirely when zero.
  if (FixedOffset) {
    Expr.push_back(dwarf::DW_OP_consts);
    appendLEB128<LEB128Sign::Signed>(Expr, FixedOffset);
    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
    Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(FixedOffset);
  }

  // Push ScalableOffset, then VLENB (bregx with a zero offset), multiply
  // them, and add the product to the running value.
  Expr.push_back((uint8_t)dwarf::DW_OP_consts);
  appendLEB128<LEB128Sign::Signed>(Expr, ScalableOffset);

  Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
  appendLEB128<LEB128Sign::Unsigned>(Expr, DwarfVLenB);
  Expr.push_back(0);

  Expr.push_back((uint8_t)dwarf::DW_OP_mul);
  Expr.push_back((uint8_t)dwarf::DW_OP_plus);

  Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(ScalableOffset)
          << " * vlenb";
}
718
// Create a DW_CFA_def_cfa_expression CFI escape describing
// CFA = Reg + FixedOffset + ScalableOffset * VLENB, used for frames that
// contain scalable (RVV) allocations.
static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
                                               Register Reg,
                                               StackOffset Offset) {
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  // Build up the expression (Reg + FixedOffset + ScalableOffset * VLENB).
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, /*isEH=*/true);
  Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
  Expr.push_back(0);
  if (Reg == SPReg)
    Comment << "sp";
  else
    Comment << printReg(Reg, &TRI);

  appendScalableVectorExpression(TRI, Expr, Offset, Comment);

  // Wrap the expression with the DW_CFA_def_cfa_expression opcode and its
  // ULEB128 length prefix.
  SmallString<64> DefCfaExpr;
  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, Expr.size());
  DefCfaExpr.append(Expr.str());

  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
                                        Comment.str());
}
745
// Create a DW_CFA_expression CFI escape recording that callee-saved register
// `Reg` is saved at CFA + FixedOffset + ScalableOffset * VLENB (an RVV spill
// slot).
static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
                                           Register Reg, StackOffset Offset) {
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  Comment << printReg(Reg, &TRI) << " @ cfa";

  // Build up the expression (FixedOffset + ScalableOffset * VLENB).
  appendScalableVectorExpression(TRI, Expr, Offset, Comment);

  // Wrap the expression with the DW_CFA_expression opcode, the target
  // register, and the ULEB128 length prefix.
  SmallString<64> DefCfaExpr;
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, /*isEH=*/true);
  DefCfaExpr.push_back(dwarf::DW_CFA_expression);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, DwarfReg);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, Expr.size());
  DefCfaExpr.append(Expr.str());

  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
                                        Comment.str());
}
767
// Allocate stack space and probe it if necessary.
//
// Three strategies, chosen by size: a single SP adjustment (no probing
// needed, or the allocation fits in one probe interval); an unrolled
// probe-per-interval sequence (fewer than 5 intervals); or a probing loop
// pseudo expanded later by `inlineStackProbe`.
void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       MachineFunction &MF, uint64_t Offset,
                                       uint64_t RealStackSize, bool EmitCFI,
                                       bool NeedProbe, uint64_t ProbeSize,
                                       bool DynAllocation,
                                       MachineInstr::MIFlag Flag) const {
  DebugLoc DL;
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  bool IsRV64 = STI.is64Bit();
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);

  // Simply allocate the stack if it's not big enough to require a probe.
  if (!NeedProbe || Offset <= ProbeSize) {
    RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Offset),
                  Flag, getStackAlign());

    if (EmitCFI)
      CFIBuilder.buildDefCFAOffset(RealStackSize);

    if (NeedProbe && DynAllocation) {
      // s[d|w] zero, 0(sp) -- probe the newly allocated page.
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);
    }

    return;
  }

  // Unroll the probe loop depending on the number of iterations.
  if (Offset < ProbeSize * 5) {
    // CFA offsets are reported relative to RealStackSize; CFAAdjust accounts
    // for the part of the frame (RealStackSize - Offset) allocated elsewhere.
    uint64_t CFAAdjust = RealStackSize - Offset;

    uint64_t CurrentOffset = 0;
    while (CurrentOffset + ProbeSize <= Offset) {
      RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
                    StackOffset::getFixed(-ProbeSize), Flag, getStackAlign());
      // s[d|w] zero, 0(sp)
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);

      CurrentOffset += ProbeSize;
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(CurrentOffset + CFAAdjust);
    }

    uint64_t Residual = Offset - CurrentOffset;
    if (Residual) {
      RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
                    StackOffset::getFixed(-Residual), Flag, getStackAlign());
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(RealStackSize);

      if (DynAllocation) {
        // s[d|w] zero, 0(sp) -- probe the residual allocation too.
        BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
            .addReg(RISCV::X0)
            .addReg(SPReg)
            .addImm(0)
            .setMIFlags(Flag);
      }
    }

    return;
  }

  // Emit a variable-length allocation probing loop.
  uint64_t RoundedSize = alignDown(Offset, ProbeSize);
  uint64_t Residual = Offset - RoundedSize;

  Register TargetReg = RISCV::X6;
  // SUB TargetReg, SP, RoundedSize
  RI->adjustReg(MBB, MBBI, DL, TargetReg, SPReg,
                StackOffset::getFixed(-RoundedSize), Flag, getStackAlign());

  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(TargetReg, RoundedSize);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC)).addReg(TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(SPReg);
  }

  if (Residual) {
    RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Residual),
                  Flag, getStackAlign());
    if (DynAllocation) {
      // s[d|w] zero, 0(sp) -- probe residuals ahead of dynamic allocation.
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);
    }
  }

  if (EmitCFI)
    CFIBuilder.buildDefCFAOffset(Offset);
}
880
881static bool isPush(unsigned Opcode) {
882 switch (Opcode) {
883 case RISCV::CM_PUSH:
884 case RISCV::QC_CM_PUSH:
885 case RISCV::QC_CM_PUSHFP:
886 return true;
887 default:
888 return false;
889 }
890}
891
892static bool isPop(unsigned Opcode) {
893 // There are other pops but these are the only ones introduced during this
894 // pass.
895 switch (Opcode) {
896 case RISCV::CM_POP:
897 case RISCV::QC_CM_POP:
898 return true;
899 default:
900 return false;
901 }
902}
903
904static unsigned getPushOpcode(RISCVMachineFunctionInfo::PushPopKind Kind,
905 bool UpdateFP) {
906 switch (Kind) {
907 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
908 return RISCV::CM_PUSH;
909 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
910 return UpdateFP ? RISCV::QC_CM_PUSHFP : RISCV::QC_CM_PUSH;
911 default:
912 llvm_unreachable("Unhandled PushPopKind");
913 }
914}
915
916static unsigned getPopOpcode(RISCVMachineFunctionInfo::PushPopKind Kind) {
917 // There are other pops but they are introduced later by the Push/Pop
918 // Optimizer.
919 switch (Kind) {
920 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
921 return RISCV::CM_POP;
922 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
923 return RISCV::QC_CM_POP;
924 default:
925 llvm_unreachable("Unhandled PushPopKind");
926 }
927}
928
/// Emit the function prologue.
///
/// On entry, the callee-saved register spills have already been emitted at
/// the top of \p MBB (flagged FrameSetup); this routine positions its insert
/// point among them, then in order: handles the SiFive CLIC stack swap and
/// shadow-call-stack prologue, folds stack allocation into a
/// `(QC.)CM.PUSH(FP)` or libcall spill when present, allocates the scalar
/// stack (optionally split into two SP adjustments, optionally probed), sets
/// up the frame pointer, allocates the scalable-vector (RVV) area, and
/// finally realigns SP and copies it into BP when required. DWARF CFI
/// directives are interleaved throughout when needsDwarfCFI(MF) is true, so
/// the unwind state is accurate after every SP/FP modification.
void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  MachineBasicBlock::iterator MBBI = MBB.begin();

  Register BPReg = RISCVABI::getBPReg();

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // SiFive CLIC needs to swap `sp` into `sf.mscratchcsw`
  emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);

  // Emit prologue for shadow call stack.
  emitSCSPrologue(MF, MBB, MI: MBBI, DL);

  // We keep track of the first instruction because it might be a
  // `(QC.)CM.PUSH(FP)`, and we may need to adjust the immediate rather than
  // inserting an `addi sp, sp, -N*16`
  auto PossiblePush = MBBI;

  // Skip past all callee-saved register spill instructions.
  while (MBBI != MBB.end() && MBBI->getFlag(Flag: MachineInstr::FrameSetup))
    ++MBBI;

  // Determine the correct frame layout
  determineFrameLayout(MF);

  const auto &CSI = MFI.getCalleeSavedInfo();

  // Skip to before the spills of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  MBBI = std::prev(x: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size() +
                             getUnmanagedCSI(MF, CSI).size());
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  bool NeedsDwarfCFI = needsDwarfCFI(MF);

  // If libcalls are used to spill and restore callee-saved registers, the frame
  // has two sections; the opaque section managed by the libcalls, and the
  // section managed by MachineFrameInfo which can also hold callee saved
  // registers in fixed stack slots, both of which have negative frame indices.
  // This gets even more complicated when incoming arguments are passed via the
  // stack, as these too have negative frame indices. An example is detailed
  // below:
  //
  //  | incoming arg | <- FI[-3]
  //  | libcallspill |
  //  | calleespill  | <- FI[-2]
  //  | calleespill  | <- FI[-1]
  //  | this_frame   | <- FI[0]
  //
  // For negative frame indices, the offset from the frame pointer will differ
  // depending on which of these groups the frame index applies to.
  // The following calculates the correct offset knowing the number of callee
  // saved registers spilt by the two methods.
  if (int LibCallRegs = getLibCallID(MF, CSI: MFI.getCalleeSavedInfo()) + 1) {
    // Calculate the size of the frame managed by the libcall. The stack
    // alignment of these libcalls should be the same as how we set it in
    // getABIStackAlignment.
    unsigned LibCallFrameSize =
        alignTo(Size: (STI.getXLen() / 8) * LibCallRegs, A: getStackAlign());
    RVFI->setLibCallStackSize(LibCallFrameSize);

    if (NeedsDwarfCFI) {
      CFIBuilder.buildDefCFAOffset(Offset: LibCallFrameSize);
      for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
        CFIBuilder.buildOffset(Reg: CS.getReg(),
                               Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  // FIXME (note copied from Lanai): This appears to be overallocating. Needs
  // investigation. Get the number of bytes to allocate from the FrameInfo.
  uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
  uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
  uint64_t RVVStackSize = RVFI->getRVVStackSize();

  // Early exit if there is no need to allocate on the stack
  if (RealStackSize == 0 && !MFI.adjustsStack() && RVVStackSize == 0)
    return;

  // If the stack pointer has been marked as reserved, then produce an error if
  // the frame requires stack allocation
  if (STI.isRegisterReservedByUser(i: SPReg))
    MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
        MF.getFunction(), "Stack pointer required, but has been reserved."});

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
  // Split the SP adjustment to reduce the offsets of callee saved spill.
  if (FirstSPAdjustAmount) {
    StackSize = FirstSPAdjustAmount;
    RealStackSize = FirstSPAdjustAmount;
  }

  if (RVFI->useQCIInterrupt(MF)) {
    // The function starts with `QC.C.MIENTER(.NEST)`, so the `(QC.)CM.PUSH(FP)`
    // could only be the next instruction.
    ++PossiblePush;

    if (NeedsDwarfCFI) {
      // Insert the CFI metadata before where we think the `(QC.)CM.PUSH(FP)`
      // could be. The PUSH will also get its own CFI metadata for its own
      // modifications, which should come after the PUSH.
      CFIInstBuilder PushCFIBuilder(MBB, PossiblePush,
                                    MachineInstr::FrameSetup);
      PushCFIBuilder.buildDefCFAOffset(Offset: QCIInterruptPushAmount);
      for (const CalleeSavedInfo &CS : getQCISavedInfo(MF, CSI))
        PushCFIBuilder.buildOffset(Reg: CS.getReg(),
                                   Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  if (RVFI->isPushable(MF) && PossiblePush != MBB.end() &&
      isPush(Opcode: PossiblePush->getOpcode())) {
    // Use available stack adjustment in push instruction to allocate additional
    // stack space. Align the stack size down to a multiple of 16. This is
    // needed for RVE.
    // FIXME: Can we increase the stack size to a multiple of 16 instead?
    uint64_t StackAdj =
        std::min(a: alignDown(Value: StackSize, Align: 16), b: static_cast<uint64_t>(48));
    PossiblePush->getOperand(i: 1).setImm(StackAdj);
    StackSize -= StackAdj;

    if (NeedsDwarfCFI) {
      CFIBuilder.buildDefCFAOffset(Offset: RealStackSize - StackSize);
      for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
        CFIBuilder.buildOffset(Reg: CS.getReg(),
                               Offset: MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx()));
    }
  }

  // Allocate space on the stack if necessary.
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
  bool NeedProbe = TLI->hasInlineStackProbe(MF);
  uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign: getStackAlign());
  bool DynAllocation =
      MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
  if (StackSize != 0)
    allocateStack(MBB, MBBI, MF, Offset: StackSize, RealStackSize, EmitCFI: NeedsDwarfCFI,
                  NeedProbe, ProbeSize, DynAllocation,
                  Flag: MachineInstr::FrameSetup);

  // Save SiFive CLIC CSRs into Stack
  emitSiFiveCLICPreemptibleSaves(MF, MBB, MBBI, DL);

  // The frame pointer is callee-saved, and code has been generated for us to
  // save it to the stack. We need to skip over the storing of callee-saved
  // registers as the frame pointer must be modified after it has been saved
  // to the stack, not before.
  // FIXME: assumes exactly one instruction is used to save each callee-saved
  // register.
  std::advance(i&: MBBI, n: getUnmanagedCSI(MF, CSI).size());
  CFIBuilder.setInsertPoint(MBBI);

  // Iterate over list of callee-saved registers and emit .cfi_offset
  // directives.
  if (NeedsDwarfCFI) {
    for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI)) {
      MCRegister Reg = CS.getReg();
      int64_t Offset = MFI.getObjectOffset(ObjectIdx: CS.getFrameIdx());
      // Emit CFI for both sub-registers. The even register is at the base
      // offset and odd at base+4.
      if (RISCV::GPRPairRegClass.contains(Reg)) {
        MCRegister EvenReg = RI->getSubReg(Reg, Idx: RISCV::sub_gpr_even);
        MCRegister OddReg = RI->getSubReg(Reg, Idx: RISCV::sub_gpr_odd);
        CFIBuilder.buildOffset(Reg: EvenReg, Offset);
        CFIBuilder.buildOffset(Reg: OddReg, Offset: Offset + 4);
      } else {
        CFIBuilder.buildOffset(Reg, Offset);
      }
    }
  }

  // Generate new FP.
  if (hasFP(MF)) {
    if (STI.isRegisterReservedByUser(i: FPReg))
      MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
          MF.getFunction(), "Frame pointer required, but has been reserved."});
    // The frame pointer does need to be reserved from register allocation.
    assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");

    // Some stack management variants automatically keep FP updated, so we don't
    // need an instruction to do so.
    if (!RVFI->hasImplicitFPUpdates(MF)) {
      // FP points just below the varargs save area, i.e. at the incoming SP.
      RI->adjustReg(
          MBB, II: MBBI, DL, DestReg: FPReg, SrcReg: SPReg,
          Offset: StackOffset::getFixed(Fixed: RealStackSize - RVFI->getVarArgsSaveSize()),
          Flag: MachineInstr::FrameSetup, RequiredAlign: getStackAlign());
    }

    if (NeedsDwarfCFI)
      CFIBuilder.buildDefCFA(Reg: FPReg, Offset: RVFI->getVarArgsSaveSize());
  }

  uint64_t SecondSPAdjustAmount = 0;
  // Emit the second SP adjustment after saving callee saved registers.
  if (FirstSPAdjustAmount) {
    SecondSPAdjustAmount = getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
    assert(SecondSPAdjustAmount > 0 &&
           "SecondSPAdjustAmount should be greater than zero");

    // When FP is in use, the CFA tracks FP, so no CFA update is needed here.
    allocateStack(MBB, MBBI, MF, Offset: SecondSPAdjustAmount,
                  RealStackSize: getStackSizeWithRVVPadding(MF), EmitCFI: NeedsDwarfCFI && !hasFP(MF),
                  NeedProbe, ProbeSize, DynAllocation,
                  Flag: MachineInstr::FrameSetup);
  }

  if (RVVStackSize) {
    if (NeedProbe) {
      allocateAndProbeStackForRVV(MF, MBB, MBBI, DL, Amount: RVVStackSize,
                                  Flag: MachineInstr::FrameSetup,
                                  EmitCFI: NeedsDwarfCFI && !hasFP(MF), DynAllocation);
    } else {
      // We must keep the stack pointer aligned through any intermediate
      // updates.
      RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getScalable(Scalable: -RVVStackSize),
                    Flag: MachineInstr::FrameSetup, RequiredAlign: getStackAlign());
    }

    if (NeedsDwarfCFI && !hasFP(MF)) {
      // Emit .cfi_def_cfa_expression "sp + StackSize + RVVStackSize * vlenb".
      CFIBuilder.insertCFIInst(CFIInst: createDefCFAExpression(
          TRI: *RI, Reg: SPReg,
          Offset: StackOffset::get(Fixed: getStackSizeWithRVVPadding(MF), Scalable: RVVStackSize / 8)));
    }

    std::advance(i&: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size());
    if (NeedsDwarfCFI)
      emitCalleeSavedRVVPrologCFI(MBB, MI: MBBI, HasFP: hasFP(MF));
  }

  if (hasFP(MF)) {
    // Realign Stack
    const RISCVRegisterInfo *RI = STI.getRegisterInfo();
    if (RI->hasStackRealignment(MF)) {
      Align MaxAlignment = MFI.getMaxAlign();

      const RISCVInstrInfo *TII = STI.getInstrInfo();
      if (isInt<12>(x: -(int)MaxAlignment.value())) {
        // Alignment mask fits an ANDI immediate: sp &= -MaxAlignment.
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ANDI), DestReg: SPReg)
            .addReg(RegNo: SPReg)
            .addImm(Val: -(int)MaxAlignment.value())
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        // Otherwise clear the low bits with a shift-right/shift-left pair.
        unsigned ShiftAmount = Log2(A: MaxAlignment);
        Register VR =
            MF.getRegInfo().createVirtualRegister(RegClass: &RISCV::GPRRegClass);
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::SRLI), DestReg: VR)
            .addReg(RegNo: SPReg)
            .addImm(Val: ShiftAmount)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::SLLI), DestReg: SPReg)
            .addReg(RegNo: VR)
            .addImm(Val: ShiftAmount)
            .setMIFlag(MachineInstr::FrameSetup);
      }
      if (NeedProbe && RVVStackSize == 0) {
        // Do a probe if the align + size allocated just passed the probe size
        // and was not yet probed.
        if (SecondSPAdjustAmount < ProbeSize &&
            SecondSPAdjustAmount + MaxAlignment.value() >= ProbeSize) {
          bool IsRV64 = STI.is64Bit();
          BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
              .addReg(RegNo: RISCV::X0)
              .addReg(RegNo: SPReg)
              .addImm(Val: 0)
              .setMIFlags(MachineInstr::FrameSetup);
        }
      }
      // FP will be used to restore the frame in the epilogue, so we need
      // another base register BP to record SP after re-alignment. SP will
      // track the current stack after allocating variable sized objects.
      if (hasBP(MF)) {
        // move BP, SP
        BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RISCV::ADDI), DestReg: BPReg)
            .addReg(RegNo: SPReg)
            .addImm(Val: 0)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  }
}
1222
1223void RISCVFrameLowering::deallocateStack(MachineFunction &MF,
1224 MachineBasicBlock &MBB,
1225 MachineBasicBlock::iterator MBBI,
1226 const DebugLoc &DL,
1227 uint64_t &StackSize,
1228 int64_t CFAOffset) const {
1229 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1230
1231 RI->adjustReg(MBB, II: MBBI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: StackSize),
1232 Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());
1233 StackSize = 0;
1234
1235 if (needsDwarfCFI(MF))
1236 CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
1237 .buildDefCFAOffset(Offset: CFAOffset);
1238}
1239
/// Emit the function epilogue, undoing emitPrologue's work in reverse order:
/// deallocate the RVV (scalable vector) area, undo the split SP adjustment,
/// restore SP from FP when the stack size is not statically known, emit CFI
/// restore directives for callee-saved registers, fold deallocation into a
/// pop/libcall restore when one is present, then deallocate any remaining
/// fixed stack and undo the shadow-call-stack and SiFive CLIC prologue steps.
void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Get the insert location for the epilogue. If there were no terminators in
  // the block, get the last instruction.
  MachineBasicBlock::iterator MBBI = MBB.end();
  DebugLoc DL;
  if (!MBB.empty()) {
    MBBI = MBB.getLastNonDebugInstr();
    if (MBBI != MBB.end())
      DL = MBBI->getDebugLoc();

    MBBI = MBB.getFirstTerminator();

    // Skip to before the restores of all callee-saved registers.
    while (MBBI != MBB.begin() &&
           std::prev(x: MBBI)->getFlag(Flag: MachineInstr::FrameDestroy))
      --MBBI;
  }

  const auto &CSI = MFI.getCalleeSavedInfo();

  // Skip to before the restores of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  auto FirstScalarCSRRestoreInsn =
      std::next(x: MBBI, n: getRVVCalleeSavedInfo(MF, CSI).size());
  CFIInstBuilder CFIBuilder(MBB, FirstScalarCSRRestoreInsn,
                            MachineInstr::FrameDestroy);
  bool NeedsDwarfCFI = needsDwarfCFI(MF);

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
  // Mirror emitPrologue: with a split adjustment, only the first chunk is
  // live between the CSR restores and the final deallocation.
  uint64_t RealStackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
                                               : getStackSizeWithRVVPadding(MF);
  uint64_t StackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
                                           : getStackSizeWithRVVPadding(MF) -
                                                 RVFI->getReservedSpillsSize();
  uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
  uint64_t RVVStackSize = RVFI->getRVVStackSize();

  bool RestoreSPFromFP = RI->hasStackRealignment(MF) ||
                         MFI.hasVarSizedObjects() || !hasReservedCallFrame(MF);
  if (RVVStackSize) {
    // If RestoreSPFromFP the stack pointer will be restored using the frame
    // pointer value.
    if (!RestoreSPFromFP)
      RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getScalable(Scalable: RVVStackSize),
                    Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());

    if (NeedsDwarfCFI) {
      if (!hasFP(MF))
        CFIBuilder.buildDefCFA(Reg: SPReg, Offset: RealStackSize);
      emitCalleeSavedRVVEpilogCFI(MBB, MI: FirstScalarCSRRestoreInsn);
    }
  }

  if (FirstSPAdjustAmount) {
    uint64_t SecondSPAdjustAmount =
        getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
    assert(SecondSPAdjustAmount > 0 &&
           "SecondSPAdjustAmount should be greater than zero");

    // If RestoreSPFromFP the stack pointer will be restored using the frame
    // pointer value.
    if (!RestoreSPFromFP)
      RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: SPReg,
                    Offset: StackOffset::getFixed(Fixed: SecondSPAdjustAmount),
                    Flag: MachineInstr::FrameDestroy, RequiredAlign: getStackAlign());

    if (NeedsDwarfCFI && !hasFP(MF))
      CFIBuilder.buildDefCFAOffset(Offset: FirstSPAdjustAmount);
  }

  // Restore the stack pointer using the value of the frame pointer. Only
  // necessary if the stack pointer was modified, meaning the stack size is
  // unknown.
  //
  // In order to make sure the stack point is right through the EH region,
  // we also need to restore stack pointer from the frame pointer if we
  // don't preserve stack space within prologue/epilogue for outgoing variables,
  // normally it's just checking the variable sized object is present or not
  // is enough, but we also don't preserve that at prologue/epilogue when
  // have vector objects in stack.
  if (RestoreSPFromFP) {
    assert(hasFP(MF) && "frame pointer should not have been eliminated");
    RI->adjustReg(MBB, II: FirstScalarCSRRestoreInsn, DL, DestReg: SPReg, SrcReg: FPReg,
                  Offset: StackOffset::getFixed(Fixed: -FPOffset), Flag: MachineInstr::FrameDestroy,
                  RequiredAlign: getStackAlign());
  }

  if (NeedsDwarfCFI && hasFP(MF))
    CFIBuilder.buildDefCFA(Reg: SPReg, Offset: RealStackSize);

  // Skip to after the restores of scalar callee-saved registers
  // FIXME: assumes exactly one instruction is used to restore each
  // callee-saved register.
  MBBI = std::next(x: FirstScalarCSRRestoreInsn, n: getUnmanagedCSI(MF, CSI).size());
  CFIBuilder.setInsertPoint(MBBI);

  if (getLibCallID(MF, CSI) != -1) {
    // tail __riscv_restore_[0-12] instruction is considered as a terminator,
    // therefore it is unnecessary to place any CFI instructions after it. Just
    // deallocate stack if needed and return.
    if (StackSize != 0)
      deallocateStack(MF, MBB, MBBI, DL, StackSize,
                      CFAOffset: RVFI->getLibCallStackSize());

    // Emit epilogue for shadow call stack.
    emitSCSEpilogue(MF, MBB, MI: MBBI, DL);
    return;
  }

  // Recover callee-saved registers.
  if (NeedsDwarfCFI) {
    for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI)) {
      MCRegister Reg = CS.getReg();
      // Emit CFI for both sub-registers.
      if (RISCV::GPRPairRegClass.contains(Reg)) {
        MCRegister EvenReg = RI->getSubReg(Reg, Idx: RISCV::sub_gpr_even);
        MCRegister OddReg = RI->getSubReg(Reg, Idx: RISCV::sub_gpr_odd);
        CFIBuilder.buildRestore(Reg: EvenReg);
        CFIBuilder.buildRestore(Reg: OddReg);
      } else {
        CFIBuilder.buildRestore(Reg);
      }
    }
  }

  if (RVFI->isPushable(MF) && MBBI != MBB.end() && isPop(Opcode: MBBI->getOpcode())) {
    // Use available stack adjustment in pop instruction to deallocate stack
    // space. Align the stack size down to a multiple of 16. This is needed for
    // RVE.
    // FIXME: Can we increase the stack size to a multiple of 16 instead?
    uint64_t StackAdj =
        std::min(a: alignDown(Value: StackSize, Align: 16), b: static_cast<uint64_t>(48));
    MBBI->getOperand(i: 1).setImm(StackAdj);
    StackSize -= StackAdj;

    if (StackSize != 0)
      deallocateStack(MF, MBB, MBBI, DL, StackSize,
                      /*stack_adj of cm.pop instr*/ CFAOffset: RealStackSize - StackSize);

    auto NextI = next_nodbg(It: MBBI, End: MBB.end());
    if (NextI == MBB.end() || NextI->getOpcode() != RISCV::PseudoRET) {
      // The pop is not immediately followed by a return, so the CFI state
      // after the pop must be described explicitly.
      ++MBBI;
      if (NeedsDwarfCFI) {
        CFIBuilder.setInsertPoint(MBBI);

        for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
          CFIBuilder.buildRestore(Reg: CS.getReg());

        // Update CFA Offset. If this is a QCI interrupt function, there will
        // be a leftover offset which is deallocated by `QC.C.MILEAVERET`,
        // otherwise getQCIInterruptStackSize() will be 0.
        CFIBuilder.buildDefCFAOffset(Offset: RVFI->getQCIInterruptStackSize());
      }
    }
  }

  emitSiFiveCLICPreemptibleRestores(MF, MBB, MBBI, DL);

  // Deallocate stack if StackSize isn't a zero yet. If this is a QCI interrupt
  // function, there will be a leftover offset which is deallocated by
  // `QC.C.MILEAVERET`, otherwise getQCIInterruptStackSize() will be 0.
  if (StackSize != 0)
    deallocateStack(MF, MBB, MBBI, DL, StackSize,
                    CFAOffset: RVFI->getQCIInterruptStackSize());

  // Emit epilogue for shadow call stack.
  emitSCSEpilogue(MF, MBB, MI: MBBI, DL);

  // SiFive CLIC needs to swap `sf.mscratchcsw` into `sp`
  emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);
}
1423
/// Resolve frame index \p FI to a base register plus offset. \p FrameReg is
/// set to SP, BP, or the frame register reported by RegisterInfo, and the
/// returned StackOffset is relative to it. Scalable-vector stack objects
/// (TargetStackID::ScalableVector) produce a scalable component measured in
/// vlenb units; all other objects produce a fixed byte offset.
StackOffset
RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                           Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Callee-saved registers should be referenced relative to the stack
  // pointer (positive offset), otherwise use the frame pointer (negative
  // offset).
  const auto &CSI = getUnmanagedCSI(MF, CSI: MFI.getCalleeSavedInfo());
  int MinCSFI = 0;
  int MaxCSFI = -1;
  StackOffset Offset;
  auto StackID = MFI.getStackID(ObjectIdx: FI);

  assert((StackID == TargetStackID::Default ||
          StackID == TargetStackID::ScalableVector) &&
         "Unexpected stack ID for the frame object.");
  if (StackID == TargetStackID::Default) {
    assert(getOffsetOfLocalArea() == 0 && "LocalAreaOffset is not 0!");
    Offset = StackOffset::getFixed(Fixed: MFI.getObjectOffset(ObjectIdx: FI) +
                                          MFI.getOffsetAdjustment());
  } else if (StackID == TargetStackID::ScalableVector) {
    Offset = StackOffset::getScalable(Scalable: MFI.getObjectOffset(ObjectIdx: FI));
  }

  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);

  if (CSI.size()) {
    MinCSFI = CSI[0].getFrameIdx();
    MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
  }

  // Callee-saved slots: always addressed from SP. With a split SP adjustment,
  // only the first chunk has been applied when CSRs are accessed.
  if (FI >= MinCSFI && FI <= MaxCSFI) {
    FrameReg = SPReg;

    if (FirstSPAdjustAmount)
      Offset += StackOffset::getFixed(Fixed: FirstSPAdjustAmount);
    else
      Offset += StackOffset::getFixed(Fixed: getStackSizeWithRVVPadding(MF));
    return Offset;
  }

  if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(ObjectIdx: FI)) {
    // If the stack was realigned, the frame pointer is set in order to allow
    // SP to be restored, so we need another base register to record the stack
    // after realignment.
    // |--------------------------| -- <-- FP
    // | callee-allocated save    | | <----|
    // | area for register varargs| |      |
    // |--------------------------| |      |
    // | callee-saved registers   | |      |
    // |--------------------------| --     |
    // | realignment (the size of | |      |
    // | this area is not counted | |      |
    // | in MFI.getStackSize())   | |      |
    // |--------------------------| --     |-- MFI.getStackSize()
    // | RVV alignment padding    | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize() but   | |      |
    // | counted in               | |      |
    // | RVFI.getRVVStackSize())  | |      |
    // |--------------------------| --     |
    // | RVV objects              | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize())      | |      |
    // |--------------------------| --     |
    // | padding before RVV       | |      |
    // | (not counted in          | |      |
    // | MFI.getStackSize() or in | |      |
    // | RVFI.getRVVStackSize())  | |      |
    // |--------------------------| --     |
    // | scalar local variables   | | <----'
    // |--------------------------| -- <-- BP (if var sized objects present)
    // | VarSize objects          | |
    // |--------------------------| -- <-- SP
    if (hasBP(MF)) {
      FrameReg = RISCVABI::getBPReg();
    } else {
      // VarSize objects must be empty in this case!
      assert(!MFI.hasVarSizedObjects());
      FrameReg = SPReg;
    }
  } else {
    FrameReg = RI->getFrameRegister(MF);
  }

  if (FrameReg == FPReg) {
    Offset += StackOffset::getFixed(Fixed: RVFI->getVarArgsSaveSize());
    // When using FP to access scalable vector objects, we need to minus
    // the frame size.
    //
    // |--------------------------| -- <-- FP
    // | callee-allocated save    | |
    // | area for register varargs| |
    // |--------------------------| |
    // | callee-saved registers   | |
    // |--------------------------| | MFI.getStackSize()
    // | scalar local variables   | |
    // |--------------------------| -- (Offset of RVV objects is from here.)
    // | RVV objects              |
    // |--------------------------|
    // | VarSize objects          |
    // |--------------------------| <-- SP
    if (StackID == TargetStackID::ScalableVector) {
      assert(!RI->hasStackRealignment(MF) &&
             "Can't index across variable sized realign");
      // We don't expect any extra RVV alignment padding, as the stack size
      // and RVV object sections should be correct aligned in their own
      // right.
      assert(MFI.getStackSize() == getStackSizeWithRVVPadding(MF) &&
             "Inconsistent stack layout");
      Offset -= StackOffset::getFixed(Fixed: MFI.getStackSize());
    }
    return Offset;
  }

  // This case handles indexing off both SP and BP.
  // If indexing off SP, there must not be any var sized objects
  assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());

  // When using SP to access frame objects, we need to add RVV stack size.
  //
  // |--------------------------| -- <-- FP
  // | callee-allocated save    | | <----|
  // | area for register varargs| |      |
  // |--------------------------| |      |
  // | callee-saved registers   | |      |
  // |--------------------------| --     |
  // | RVV alignment padding    | |      |
  // | (not counted in          | |      |
  // | MFI.getStackSize() but   | |      |
  // | counted in               | |      |
  // | RVFI.getRVVStackSize())  | |      |
  // |--------------------------| --     |
  // | RVV objects              | |      |-- MFI.getStackSize()
  // | (not counted in          | |      |
  // | MFI.getStackSize())      | |      |
  // |--------------------------| --     |
  // | padding before RVV       | |      |
  // | (not counted in          | |      |
  // | MFI.getStackSize())      | |      |
  // |--------------------------| --     |
  // | scalar local variables   | | <----'
  // |--------------------------| -- <-- BP (if var sized objects present)
  // | VarSize objects          | |
  // |--------------------------| -- <-- SP
  //
  // The total amount of padding surrounding RVV objects is described by
  // RVV->getRVVPadding() and it can be zero. It allows us to align the RVV
  // objects to the required alignment.
  if (MFI.getStackID(ObjectIdx: FI) == TargetStackID::Default) {
    if (MFI.isFixedObjectIndex(ObjectIdx: FI)) {
      assert(!RI->hasStackRealignment(MF) &&
             "Can't index across variable sized realign");
      Offset += StackOffset::get(Fixed: getStackSizeWithRVVPadding(MF),
                                 Scalable: RVFI->getRVVStackSize());
    } else {
      Offset += StackOffset::getFixed(Fixed: MFI.getStackSize());
    }
  } else if (MFI.getStackID(ObjectIdx: FI) == TargetStackID::ScalableVector) {
    // Ensure the base of the RVV stack is correctly aligned: add on the
    // alignment padding.
    int ScalarLocalVarSize = MFI.getStackSize() -
                             RVFI->getCalleeSavedStackSize() -
                             RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
    Offset += StackOffset::get(Fixed: ScalarLocalVarSize, Scalable: RVFI->getRVVStackSize());
  }
  return Offset;
}
1595
1596static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI,
1597 const Register &Reg) {
1598 MCRegister BaseReg = TRI.getSubReg(Reg, Idx: RISCV::sub_vrm1_0);
1599 // If it's not a grouped vector register, it doesn't have subregister, so
1600 // the base register is just itself.
1601 if (!BaseReg.isValid())
1602 BaseReg = Reg;
1603 return BaseReg;
1604}
1605
// Compute the set of callee-saved registers that must be spilled for MF.
// Starts from the generic TargetFrameLowering computation, then applies
// RISC-V specific adjustments: vector register groups are only saved when
// fully clobbered, FP/RA/BP are forced live when needed, Zcmp push pairing
// and Zilsd GPR-pair coalescing are applied, and SiFive preemptible
// interrupt frame entries are created.
void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  // In TargetFrameLowering::determineCalleeSaves, any vector register is marked
  // as saved if any of its subregister is clobbered, this is not correct in
  // vector registers. We only want the vector register to be marked as saved
  // if all of its subregisters are clobbered.
  // For example:
  // Original behavior: If v24 is marked, v24m2, v24m4, v24m8 are also marked.
  // Correct behavior: v24m2 is marked only if v24 and v25 are marked.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned CSReg = CSRegs[i];
    // Only vector registers need special care.
    if (!RISCV::VRRegClass.contains(Reg: getRVVBaseRegister(TRI, Reg: CSReg)))
      continue;

    // Undo the conservative marking done by the generic code; re-mark below
    // only if this register is itself defined or recorded as used.
    SavedRegs.reset(Idx: CSReg);

    auto SubRegs = TRI.subregs(Reg: CSReg);
    // Set the register and all its subregisters.
    if (!MRI.def_empty(RegNo: CSReg) || MRI.getUsedPhysRegsMask().test(Idx: CSReg)) {
      SavedRegs.set(CSReg);
      for (unsigned Reg : SubRegs)
        SavedRegs.set(Reg);
    }

  }

  // Unconditionally spill RA and FP only if the function uses a frame
  // pointer.
  if (hasFP(MF)) {
    SavedRegs.set(RAReg);
    SavedRegs.set(FPReg);
  }
  // Mark BP as used if function has dedicated base pointer.
  if (hasBP(MF))
    SavedRegs.set(RISCVABI::getBPReg());

  // When using cm.push/pop we must save X27 if we save X26.
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->isPushable(MF) && SavedRegs.test(Idx: RISCV::X26))
    SavedRegs.set(RISCV::X27);

  // For Zilsd on RV32, append GPRPair registers to the CSR list. This prevents
  // the need to create register sets for each abi which is a lot more complex.
  // Don't use Zilsd for callee-saved coalescing if the required alignment
  // exceeds the stack alignment.
  bool UseZilsd = !STI.is64Bit() && STI.hasStdExtZilsd() &&
                  STI.getZilsdAlign() <= getStackAlign();
  if (UseZilsd) {
    SmallVector<MCPhysReg, 32> NewCSRs;
    SmallSet<MCPhysReg, 16> CSRSet;
    for (unsigned i = 0; CSRegs[i]; ++i) {
      NewCSRs.push_back(Elt: CSRegs[i]);
      CSRSet.insert(V: CSRegs[i]);
    }

    // Append GPRPair registers for pairs where both sub-registers are in CSR
    // list. Iterate through all GPRPairs and check if both sub-regs are CSRs.
    for (MCPhysReg Pair : RISCV::GPRPairRegClass) {
      MCPhysReg EvenReg = TRI.getSubReg(Reg: Pair, Idx: RISCV::sub_gpr_even);
      MCPhysReg OddReg = TRI.getSubReg(Reg: Pair, Idx: RISCV::sub_gpr_odd);
      if (CSRSet.contains(V: EvenReg) && CSRSet.contains(V: OddReg))
        NewCSRs.push_back(Elt: Pair);
    }

    // Install the extended list and refresh the cached pointer so the
    // super-register combining loop below sees the appended pairs.
    MRI.setCalleeSavedRegs(NewCSRs);
    CSRegs = MRI.getCalleeSavedRegs();
  }

  // Check if all subregisters are marked for saving. If so, set the super
  // register bit. For GPRPair, only check sub_gpr_even and sub_gpr_odd, not
  // aliases like X8_W or X8_H which are not set in SavedRegs.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned CSReg = CSRegs[i];
    bool CombineToSuperReg;
    if (RISCV::GPRPairRegClass.contains(Reg: CSReg)) {
      MCPhysReg EvenReg = TRI.getSubReg(Reg: CSReg, Idx: RISCV::sub_gpr_even);
      MCPhysReg OddReg = TRI.getSubReg(Reg: CSReg, Idx: RISCV::sub_gpr_odd);
      CombineToSuperReg = SavedRegs.test(Idx: EvenReg) && SavedRegs.test(Idx: OddReg);
      // If s0(x8) is used as FP we can't generate load/store pair because it
      // breaks the frame chain.
      if (hasFP(MF) && CSReg == RISCV::X8_X9)
        CombineToSuperReg = false;
    } else {
      auto SubRegs = TRI.subregs(Reg: CSReg);
      CombineToSuperReg =
          !SubRegs.empty() && llvm::all_of(Range&: SubRegs, P: [&](unsigned Reg) {
            return SavedRegs.test(Idx: Reg);
          });
    }

    if (CombineToSuperReg)
      SavedRegs.set(CSReg);
  }

  // SiFive Preemptible Interrupt Handlers need additional frame entries
  createSiFivePreemptibleInterruptFrameEntries(MF, RVFI&: *RVFI);
}
1710
// Lay out every scalable-vector (RVV) stack object for MF. Offsets are
// assigned negative (objects grow downwards) and — per the comment below —
// expressed as multiples of vscale. Returns {total RVV stack size, required
// alignment of the RVV stack section}.
std::pair<int64_t, Align>
RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Create a buffer of RVV objects to allocate.
  SmallVector<int, 8> ObjectsToAllocate;
  // Collect every live ScalableVector-stack object in [FIBegin, FIEnd).
  auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
    for (int I = FIBegin, E = FIEnd; I != E; ++I) {
      unsigned StackID = MFI.getStackID(ObjectIdx: I);
      if (StackID != TargetStackID::ScalableVector)
        continue;
      if (MFI.isDeadObjectIndex(ObjectIdx: I))
        continue;

      ObjectsToAllocate.push_back(Elt: I);
    }
  };
  // First push RVV Callee Saved object, then push RVV stack object
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
  if (!RVVCSI.empty())
    pushRVVObjects(RVVCSI[0].getFrameIdx(),
                   RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
  // Remaining (non-callee-saved) RVV objects occupy the lower frame indices.
  pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());

  // The minimum alignment is 16 bytes.
  Align RVVStackAlign(16);
  const auto &ST = MF.getSubtarget<RISCVSubtarget>();

  if (!ST.hasVInstructions()) {
    assert(ObjectsToAllocate.empty() &&
           "Can't allocate scalable-vector objects without V instructions");
    return std::make_pair(x: 0, y&: RVVStackAlign);
  }

  // Allocate all RVV locals and spills
  int64_t Offset = 0;
  for (int FI : ObjectsToAllocate) {
    // ObjectSize in bytes.
    int64_t ObjectSize = MFI.getObjectSize(ObjectIdx: FI);
    // Every object is at least one full vector register block, and at least
    // block-aligned.
    auto ObjectAlign =
        std::max(a: Align(RISCV::RVVBytesPerBlock), b: MFI.getObjectAlign(ObjectIdx: FI));
    // If the data type is the fractional vector type, reserve one vector
    // register for it.
    if (ObjectSize < RISCV::RVVBytesPerBlock)
      ObjectSize = RISCV::RVVBytesPerBlock;
    // Advance past this object, rounding so its (negative) offset honors the
    // alignment.
    Offset = alignTo(Size: Offset + ObjectSize, A: ObjectAlign);
    MFI.setObjectOffset(ObjectIdx: FI, SPOffset: -Offset);
    // Update the maximum alignment of the RVV stack section
    RVVStackAlign = std::max(a: RVVStackAlign, b: ObjectAlign);
  }

  uint64_t StackSize = Offset;

  // Ensure the alignment of the RVV stack. Since we want the most-aligned
  // object right at the bottom (i.e., any padding at the top of the frame),
  // readjust all RVV objects down by the alignment padding.
  // Stack size and offsets are multiples of vscale, stack alignment is in
  // bytes, we can divide stack alignment by minimum vscale to get a maximum
  // stack alignment multiple of vscale.
  auto VScale =
      std::max<uint64_t>(a: ST.getRealMinVLen() / RISCV::RVVBitsPerBlock, b: 1);
  if (auto RVVStackAlignVScale = RVVStackAlign.value() / VScale) {
    if (auto AlignmentPadding =
            offsetToAlignment(Value: StackSize, Alignment: Align(RVVStackAlignVScale))) {
      StackSize += AlignmentPadding;
      for (int FI : ObjectsToAllocate)
        MFI.setObjectOffset(ObjectIdx: FI, SPOffset: MFI.getObjectOffset(ObjectIdx: FI) - AlignmentPadding);
    }
  }

  return std::make_pair(x&: StackSize, y&: RVVStackAlign);
}
1783
1784static unsigned getScavSlotsNumForRVV(MachineFunction &MF) {
1785 // For RVV spill, scalable stack offsets computing requires up to two scratch
1786 // registers
1787 static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
1788
1789 // For RVV spill, non-scalable stack offsets computing requires up to one
1790 // scratch register.
1791 static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
1792
1793 // ADDI instruction's destination register can be used for computing
1794 // offsets. So Scalable stack offsets require up to one scratch register.
1795 static constexpr unsigned ScavSlotsADDIScalableObject = 1;
1796
1797 static constexpr unsigned MaxScavSlotsNumKnown =
1798 std::max(l: {ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
1799 ScavSlotsNumRVVSpillNonScalableObject});
1800
1801 unsigned MaxScavSlotsNum = 0;
1802 if (!MF.getSubtarget<RISCVSubtarget>().hasVInstructions())
1803 return false;
1804 for (const MachineBasicBlock &MBB : MF)
1805 for (const MachineInstr &MI : MBB) {
1806 bool IsRVVSpill = RISCV::isRVVSpill(MI);
1807 for (auto &MO : MI.operands()) {
1808 if (!MO.isFI())
1809 continue;
1810 bool IsScalableVectorID = MF.getFrameInfo().getStackID(ObjectIdx: MO.getIndex()) ==
1811 TargetStackID::ScalableVector;
1812 if (IsRVVSpill) {
1813 MaxScavSlotsNum = std::max(
1814 a: MaxScavSlotsNum, b: IsScalableVectorID
1815 ? ScavSlotsNumRVVSpillScalableObject
1816 : ScavSlotsNumRVVSpillNonScalableObject);
1817 } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
1818 MaxScavSlotsNum =
1819 std::max(a: MaxScavSlotsNum, b: ScavSlotsADDIScalableObject);
1820 }
1821 }
1822 if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
1823 return MaxScavSlotsNumKnown;
1824 }
1825 return MaxScavSlotsNum;
1826}
1827
1828static bool hasRVVFrameObject(const MachineFunction &MF) {
1829 // Originally, the function will scan all the stack objects to check whether
1830 // if there is any scalable vector object on the stack or not. However, it
1831 // causes errors in the register allocator. In issue 53016, it returns false
1832 // before RA because there is no RVV stack objects. After RA, it returns true
1833 // because there are spilling slots for RVV values during RA. It will not
1834 // reserve BP during register allocation and generate BP access in the PEI
1835 // pass due to the inconsistent behavior of the function.
1836 //
1837 // The function is changed to use hasVInstructions() as the return value. It
1838 // is not precise, but it can make the register allocation correct.
1839 //
1840 // FIXME: Find a better way to make the decision or revisit the solution in
1841 // D103622.
1842 //
1843 // Refer to https://github.com/llvm/llvm-project/issues/53016.
1844 return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
1845}
1846
1847static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
1848 const RISCVInstrInfo &TII) {
1849 unsigned FnSize = 0;
1850 for (auto &MBB : MF) {
1851 for (auto &MI : MBB) {
1852 // Far branches over 20-bit offset will be relaxed in branch relaxation
1853 // pass. In the worst case, conditional branches will be relaxed into
1854 // the following instruction sequence. Unconditional branches are
1855 // relaxed in the same way, with the exception that there is no first
1856 // branch instruction.
1857 //
1858 // foo
1859 // bne t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
1860 // sd s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1861 // jump .restore, s11 # 8 bytes
1862 // .rev_cond
1863 // bar
1864 // j .dest_bb # 4 bytes, or 2 bytes with Zca
1865 // .restore:
1866 // ld s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1867 // .dest:
1868 // baz
1869 if (MI.isConditionalBranch())
1870 FnSize += TII.getInstSizeInBytes(MI);
1871 if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
1872 if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZca())
1873 FnSize += 2 + 8 + 2 + 2;
1874 else
1875 FnSize += 4 + 8 + 4 + 4;
1876 continue;
1877 }
1878
1879 FnSize += TII.getInstSizeInBytes(MI);
1880 }
1881 }
1882 return FnSize;
1883}
1884
// Finalize the RVV stack layout, reserve emergency scavenging spill slots,
// and record the callee-saved stack size, all before frame finalization.
void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  const RISCVRegisterInfo *RegInfo =
      MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterClass *RC = &RISCV::GPRRegClass;
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Lay out all scalable-vector stack objects and cache the results for
  // later frame-lowering queries.
  int64_t RVVStackSize;
  Align RVVStackAlign;
  std::tie(args&: RVVStackSize, args&: RVVStackAlign) = assignRVVStackObjectOffsets(MF);

  RVFI->setRVVStackSize(RVVStackSize);
  RVFI->setRVVStackAlign(RVVStackAlign);

  if (hasRVVFrameObject(MF)) {
    // Ensure the entire stack is aligned to at least the RVV requirement: some
    // scalable-vector object alignments are not considered by the
    // target-independent code.
    MFI.ensureMaxAlignment(Alignment: RVVStackAlign);
  }

  unsigned ScavSlotsNum = 0;

  // estimateStackSize has been observed to under-estimate the final stack
  // size, so give ourselves wiggle-room by checking for stack size
  // representable in an 11-bit signed field rather than 12-bits.
  if (!isInt<11>(x: MFI.estimateStackSize(MF)))
    ScavSlotsNum = 1;

  // Far branches over 20-bit offset require a spill slot for scratch register.
  bool IsLargeFunction = !isInt<20>(x: estimateFunctionSizeInBytes(MF, TII: *TII));
  if (IsLargeFunction)
    ScavSlotsNum = std::max(a: ScavSlotsNum, b: 1u);

  // RVV loads & stores have no capacity to hold the immediate address offsets
  // so we must always reserve an emergency spill slot if the MachineFunction
  // contains any RVV spills.
  ScavSlotsNum = std::max(a: ScavSlotsNum, b: getScavSlotsNumForRVV(MF));

  // Create GPR-sized scavenging spill slots; remember one of them as the
  // branch relaxation scratch slot for large functions.
  for (unsigned I = 0; I < ScavSlotsNum; I++) {
    int FI = MFI.CreateSpillStackObject(Size: RegInfo->getSpillSize(RC: *RC),
                                        Alignment: RegInfo->getSpillAlign(RC: *RC));
    RS->addScavengingFrameIndex(FI);

    if (IsLargeFunction && RVFI->getBranchRelaxationScratchFrameIndex() == -1)
      RVFI->setBranchRelaxationScratchFrameIndex(FI);
  }

  // Callee-saved size = reserved spill regions (libcall/push) plus every
  // non-fixed, default-stack callee-saved object created for this function.
  unsigned Size = RVFI->getReservedSpillsSize();
  for (const auto &Info : MFI.getCalleeSavedInfo()) {
    int FrameIdx = Info.getFrameIdx();
    // Skip fixed (negative index) slots and objects on other stacks (RVV).
    if (FrameIdx < 0 || MFI.getStackID(ObjectIdx: FrameIdx) != TargetStackID::Default)
      continue;

    Size += MFI.getObjectSize(ObjectIdx: FrameIdx);
  }
  RVFI->setCalleeSavedStackSize(Size);
}
1945
1946// Not preserve stack space within prologue for outgoing variables when the
1947// function contains variable size objects or there are vector objects accessed
1948// by the frame pointer.
1949// Let eliminateCallFramePseudoInstr preserve stack space for it.
1950bool RISCVFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
1951 return !MF.getFrameInfo().hasVarSizedObjects() &&
1952 !(hasFP(MF) && hasRVVFrameObject(MF));
1953}
1954
// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions, turning
// them into explicit SP adjustments when no call frame was reserved in the
// prologue. Returns the iterator following the erased pseudo.
MachineBasicBlock::iterator RISCVFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MI) const {
  DebugLoc DL = MI->getDebugLoc();

  if (!hasReservedCallFrame(MF)) {
    // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
    // ADJCALLSTACKUP must be converted to instructions manipulating the stack
    // pointer. This is necessary when there is a variable length stack
    // allocation (e.g. alloca), which means it's not possible to allocate
    // space for outgoing arguments from within the function prologue.
    int64_t Amount = MI->getOperand(i: 0).getImm();

    if (Amount != 0) {
      // Ensure the stack remains aligned after adjustment.
      Amount = alignSPAdjust(SPAdj: Amount);

      // ADJCALLSTACKDOWN grows the stack, i.e. decrements SP.
      if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
        Amount = -Amount;

      const RISCVTargetLowering *TLI =
          MF.getSubtarget<RISCVSubtarget>().getTargetLowering();
      int64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign: getStackAlign());
      if (TLI->hasInlineStackProbe(MF) && -Amount >= ProbeSize) {
        // When stack probing is enabled, the decrement of SP may need to be
        // probed. We can handle both the decrement and the probing in
        // allocateStack.
        bool DynAllocation =
            MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
        allocateStack(MBB, MBBI: MI, MF, Offset: -Amount, RealStackSize: -Amount,
                      EmitCFI: needsDwarfCFI(MF) && !hasFP(MF),
                      /*NeedProbe=*/true, ProbeSize, DynAllocation,
                      Flag: MachineInstr::NoFlags);
      } else {
        // Plain SP adjustment; adjustReg materializes out-of-range
        // immediates as needed.
        const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
        RI.adjustReg(MBB, II: MI, DL, DestReg: SPReg, SrcReg: SPReg, Offset: StackOffset::getFixed(Fixed: Amount),
                     Flag: MachineInstr::NoFlags, RequiredAlign: getStackAlign());
      }
    }
  }

  return MBB.erase(I: MI);
}
1999
// We would like to split the SP adjustment to reduce prologue/epilogue
// as following instructions. In this way, the offset of the callee saved
// register could fit in a single store. Supposed that the first sp adjust
// amount is 2032.
//     add     sp,sp,-2032
//     sw      ra,2028(sp)
//     sw      s0,2024(sp)
//     sw      s1,2020(sp)
//     sw      s3,2012(sp)
//     sw      s4,2008(sp)
//     add     sp,sp,-64
//
// Returns the size of the first SP adjustment, or 0 when the adjustment
// should not be split.
uint64_t
RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  uint64_t StackSize = getStackSizeWithRVVPadding(MF);

  // Disable SplitSPAdjust if save-restore libcall, push/pop or QCI interrupts
  // are used. The callee-saved registers will be pushed by the save-restore
  // libcalls, so we don't have to split the SP adjustment in this case.
  if (RVFI->getReservedSpillsSize())
    return 0;

  // Return the FirstSPAdjustAmount if the StackSize can not fit in a signed
  // 12-bit and there exists a callee-saved register needing to be pushed.
  if (!isInt<12>(x: StackSize) && (CSI.size() > 0)) {
    // FirstSPAdjustAmount is chosen at most as (2048 - StackAlign) because
    // 2048 will cause sp = sp + 2048 in the epilogue to be split into multiple
    // instructions. Offsets smaller than 2048 can fit in a single load/store
    // instruction, and we have to stick with the stack alignment. 2048 has
    // 16-byte alignment. The stack alignment for RV32 and RV64 is 16 and for
    // RV32E it is 4. So (2048 - StackAlign) will satisfy the stack alignment.
    const uint64_t StackAlign = getStackAlign().value();

    // Amount of (2048 - StackAlign) will prevent callee saved and restored
    // instructions be compressed, so try to adjust the amount to the largest
    // offset that stack compression instructions accept when target supports
    // compression instructions.
    if (STI.hasStdExtZca()) {
      // The compression extensions may support the following instructions:
      // riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
      //          c.swsp rs2, offset[7:2] => 2^(6 + 2)
      //          c.flwsp rd, offset[7:2] => 2^(6 + 2)
      //          c.fswsp rs2, offset[7:2] => 2^(6 + 2)
      // riscv64: c.ldsp rd, offset[8:3] => 2^(6 + 3)
      //          c.sdsp rs2, offset[8:3] => 2^(6 + 3)
      //          c.fldsp rd, offset[8:3] => 2^(6 + 3)
      //          c.fsdsp rs2, offset[8:3] => 2^(6 + 3)
      const uint64_t RVCompressLen = STI.getXLen() * 8;
      // Compared with amount (2048 - StackAlign), StackSize needs to
      // satisfy the following conditions to avoid using more instructions
      // to adjust the sp after adjusting the amount, such as
      // StackSize meets the condition (StackSize <= 2048 + RVCompressLen),
      // case1: Amount is 2048 - StackAlign: use addi + addi to adjust sp.
      // case2: Amount is RVCompressLen: use addi + addi to adjust sp.
      auto CanCompress = [&](uint64_t CompressLen) -> bool {
        if (StackSize <= 2047 + CompressLen ||
            (StackSize > 2048 * 2 - StackAlign &&
             StackSize <= 2047 * 2 + CompressLen) ||
            StackSize > 2048 * 3 - StackAlign)
          return true;

        return false;
      };
      // In the epilogue, addi sp, sp, 496 is used to recover the sp and it
      // can be compressed(C.ADDI16SP, offset can be [-512, 496]), but
      // addi sp, sp, 512 can not be compressed. So try to use 496 first.
      const uint64_t ADDI16SPCompressLen = 496;
      if (STI.is64Bit() && CanCompress(ADDI16SPCompressLen))
        return ADDI16SPCompressLen;
      if (CanCompress(RVCompressLen))
        return RVCompressLen;
    }
    // Default: largest first adjustment that keeps stack alignment and
    // still lets the epilogue restore SP with a single addi.
    return 2048 - StackAlign;
  }
  return 0;
}
2078
// Assign a stack slot to every callee-saved register in CSI. Registers
// covered by QCI interrupt entry, cm.push, or the save-restore libcalls get
// fixed-offset slots matching the hardware/libcall layout; Zilsd GPR pairs
// and all remaining registers get ordinary spill objects. Always returns
// true (slot assignment fully handled here).
bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  // Preemptible Interrupts have two additional Callee-save Frame Indexes,
  // not tracked by `CSI`.
  if (RVFI->isSiFivePreemptibleInterrupt(MF)) {
    for (int I = 0; I < 2; ++I) {
      int FI = RVFI->getInterruptCSRFrameIndex(Idx: I);
      MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FI, IsCalleeSaved: true);
    }
  }

  // Early exit if no callee saved registers are modified!
  if (CSI.empty())
    return true;

  if (RVFI->useQCIInterrupt(MF)) {
    RVFI->setQCIInterruptStackSize(QCIInterruptPushAmount);
  }

  if (RVFI->isPushable(MF)) {
    // Determine how many GPRs we need to push and save it to RVFI.
    unsigned PushedRegNum = getNumPushPopRegs(CSI);

    // `QC.C.MIENTER(.NEST)` will save `ra` and `s0`, so we should only push if
    // we want to push more than 2 registers. Otherwise, we should push if we
    // want to push more than 0 registers.
    unsigned OnlyPushIfMoreThan = RVFI->useQCIInterrupt(MF) ? 2 : 0;
    if (PushedRegNum > OnlyPushIfMoreThan) {
      RVFI->setRVPushRegs(PushedRegNum);
      RVFI->setRVPushStackSize(alignTo(Value: (STI.getXLen() / 8) * PushedRegNum, Align: 16));
    }
  }

  for (auto &CS : CSI) {
    MCRegister Reg = CS.getReg();
    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
    unsigned Size = RegInfo->getSpillSize(RC: *RC);

    // Registers stored by QC.C.MIENTER live at fixed offsets taken from
    // FixedCSRFIQCIInterruptMap; give them matching fixed spill objects.
    if (RVFI->useQCIInterrupt(MF)) {
      const auto *FFI = llvm::find_if(Range: FixedCSRFIQCIInterruptMap, P: [&](auto P) {
        return P.first == CS.getReg();
      });
      if (FFI != std::end(arr: FixedCSRFIQCIInterruptMap)) {
        int64_t Offset = FFI->second * (int64_t)Size;

        int FrameIdx = MFI.CreateFixedSpillStackObject(Size, SPOffset: Offset);
        assert(FrameIdx < 0);
        CS.setFrameIdx(FrameIdx);
        continue;
      }
    }

    // Registers covered by cm.push or the save-restore libcalls also occupy
    // fixed offsets; the offset formula depends on the push/pop flavor.
    if (RVFI->useSaveRestoreLibCalls(MF) || RVFI->isPushable(MF)) {
      const auto *FII = llvm::find_if(
          Range: FixedCSRFIMap, P: [&](MCPhysReg P) { return P == CS.getReg(); });
      unsigned RegNum = std::distance(first: std::begin(arr: FixedCSRFIMap), last: FII);

      if (FII != std::end(arr: FixedCSRFIMap)) {
        int64_t Offset;
        if (RVFI->getPushPopKind(MF) ==
            RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp)
          Offset = -int64_t(RVFI->getRVPushRegs() - RegNum) * Size;
        else
          Offset = -int64_t(RegNum + 1) * Size;

        // The QCI frame region sits between these slots and the incoming SP.
        if (RVFI->useQCIInterrupt(MF))
          Offset -= QCIInterruptPushAmount;

        int FrameIdx = MFI.CreateFixedSpillStackObject(Size, SPOffset: Offset);
        assert(FrameIdx < 0);
        CS.setFrameIdx(FrameIdx);
        continue;
      }
    }

    // For GPRPair registers, use 8-byte slots with required alignment by zilsd.
    if (!STI.is64Bit() && STI.hasStdExtZilsd() &&
        RISCV::GPRPairRegClass.contains(Reg)) {
      Align PairAlign = STI.getZilsdAlign();
      int FrameIdx = MFI.CreateStackObject(Size: 8, Alignment: PairAlign, isSpillSlot: true);
      MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FrameIdx, IsCalleeSaved: true);
      CS.setFrameIdx(FrameIdx);
      continue;
    }

    // Not a fixed slot.
    Align Alignment = RegInfo->getSpillAlign(RC: *RC);
    // We may not be able to satisfy the desired alignment specification of
    // the TargetRegisterClass if the stack alignment is smaller. Use the
    // min.
    Alignment = std::min(a: Alignment, b: getStackAlign());
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, isSpillSlot: true);
    MFI.setIsCalleeSavedObjectIndex(ObjectIdx: FrameIdx, IsCalleeSaved: true);
    CS.setFrameIdx(FrameIdx);
    // Scalable-vector registers are spilled on the separate RVV stack.
    if (RISCVRegisterInfo::isRVVRegClass(RC))
      MFI.setStackID(ObjectIdx: FrameIdx, ID: TargetStackID::ScalableVector);
  }

  if (RVFI->useQCIInterrupt(MF)) {
    // Allocate a fixed object that covers the entire QCI stack allocation,
    // because there are gaps which are reserved for future use.
    MFI.CreateFixedSpillStackObject(
        Size: QCIInterruptPushAmount, SPOffset: -static_cast<int64_t>(QCIInterruptPushAmount));
  }

  if (RVFI->isPushable(MF)) {
    int64_t QCIOffset = RVFI->useQCIInterrupt(MF) ? QCIInterruptPushAmount : 0;
    // Allocate a fixed object that covers the full push.
    if (int64_t PushSize = RVFI->getRVPushStackSize())
      MFI.CreateFixedSpillStackObject(Size: PushSize, SPOffset: -PushSize - QCIOffset);
  } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
    // Likewise cover the whole save-restore libcall spill region.
    int64_t LibCallFrameSize =
        alignTo(Size: (STI.getXLen() / 8) * LibCallRegs, A: getStackAlign());
    MFI.CreateFixedSpillStackObject(Size: LibCallFrameSize, SPOffset: -LibCallFrameSize);
  }

  return true;
}
2202
// Spill the callee-saved registers at function entry. Registers covered by
// QCI interrupt entry, cm.push, or the save-restore libcalls are saved by
// those mechanisms; everything else is stored to its stack slot explicitly.
// Always returns true (spilling fully handled here).
bool RISCVFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->useQCIInterrupt(MF: *MF)) {
    // Emit QC.C.MIENTER(.NEST)
    BuildMI(
        BB&: MBB, I: MI, MIMD: DL,
        MCID: TII.get(Opcode: RVFI->getInterruptStackKind(MF: *MF) ==
                        RISCVMachineFunctionInfo::InterruptStackKind::QCINest
                    ? RISCV::QC_C_MIENTER_NEST
                    : RISCV::QC_C_MIENTER))
        .setMIFlag(MachineInstr::FrameSetup);

    // The registers saved by QC.C.MIENTER must be live into this block.
    for (auto [Reg, _Offset] : FixedCSRFIQCIInterruptMap)
      MBB.addLiveIn(PhysReg: Reg);
  }

  if (RVFI->isPushable(MF: *MF)) {
    // Emit CM.PUSH with base StackAdj & evaluate Push stack
    unsigned PushedRegNum = RVFI->getRVPushRegs();
    if (PushedRegNum > 0) {
      // Use encoded number to represent registers to spill.
      unsigned Opcode = getPushOpcode(
          Kind: RVFI->getPushPopKind(MF: *MF), UpdateFP: hasFP(MF: *MF) && !RVFI->useQCIInterrupt(MF: *MF));
      unsigned RegEnc = RISCVZC::encodeRegListNumRegs(NumRegs: PushedRegNum);
      MachineInstrBuilder PushBuilder =
          BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode))
              .setMIFlag(MachineInstr::FrameSetup);
      PushBuilder.addImm(Val: RegEnc);
      PushBuilder.addImm(Val: 0);

      // Mark the pushed registers as implicit uses of the push instruction.
      for (unsigned i = 0; i < PushedRegNum; i++)
        PushBuilder.addUse(RegNo: FixedCSRFIMap[i], Flags: RegState::Implicit);
    }
  } else if (const char *SpillLibCall = getSpillLibCallName(MF: *MF, CSI)) {
    // Add spill libcall via non-callee-saved register t0.
    MachineInstrBuilder NewMI =
        BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::PseudoCALLReg), DestReg: RISCV::X5)
            .addExternalSymbol(FnName: SpillLibCall, TargetFlags: RISCVII::MO_CALL)
            .setMIFlag(MachineInstr::FrameSetup)
            .addUse(RegNo: RISCV::X2, Flags: RegState::Implicit)
            .addDef(RegNo: RISCV::X2, Flags: RegState::ImplicitDefine);

    // Add registers spilled as implicit used.
    for (auto &CS : CSI)
      NewMI.addUse(RegNo: CS.getReg(), Flags: RegState::Implicit);
  }

  // Manually spill values not spilled by libcall & Push/Pop.
  const auto &UnmanagedCSI = getUnmanagedCSI(MF: *MF, CSI);
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI);

  auto storeRegsToStackSlots = [&](decltype(UnmanagedCSI) CSInfo) {
    for (auto &CS : CSInfo) {
      // Insert the spill to the stack frame.
      MCRegister Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      // A register that is live into the block must not be marked killed.
      TII.storeRegToStackSlot(MBB, MI, SrcReg: Reg, isKill: !MBB.isLiveIn(Reg),
                              FrameIndex: CS.getFrameIdx(), RC, VReg: Register(),
                              Flags: MachineInstr::FrameSetup);
    }
  };
  storeRegsToStackSlots(UnmanagedCSI);
  storeRegsToStackSlots(RVVCSI);

  return true;
}
2280
2281static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg) {
2282 return RISCV::VRRegClass.contains(Reg: BaseReg) ? 1
2283 : RISCV::VRM2RegClass.contains(Reg: BaseReg) ? 2
2284 : RISCV::VRM4RegClass.contains(Reg: BaseReg) ? 4
2285 : 8;
2286}
2287
// Emit prologue CFI describing where each callee-saved RVV register was
// spilled: one CFA-offset entry per physical vector register covered by a
// (possibly grouped) saved register.
void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, bool HasFP) const {
  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();

  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI: MFI.getCalleeSavedInfo());
  if (RVVCSI.empty())
    return;

  uint64_t FixedSize = getStackSizeWithRVVPadding(MF: *MF);
  if (!HasFP) {
    // Without FP, exclude the scalar local-variable area (stack size minus
    // callee-saved and vararg regions, plus RVV padding) from the fixed part
    // of the offset.
    uint64_t ScalarLocalVarSize =
        MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
        RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
    FixedSize -= ScalarLocalVarSize;
  }

  CFIInstBuilder CFIBuilder(MBB, MI, MachineInstr::FrameSetup);
  for (auto &CS : RVVCSI) {
    // Insert the spill to the stack frame.
    int FI = CS.getFrameIdx();
    // A grouped register covers NumRegs consecutive VRs starting at BaseReg;
    // emit one CFI instruction per covered VR.
    MCRegister BaseReg = getRVVBaseRegister(TRI, Reg: CS.getReg());
    unsigned NumRegs = getCalleeSavedRVVNumRegs(BaseReg: CS.getReg());
    for (unsigned i = 0; i < NumRegs; ++i) {
      // NOTE(review): the scalable component is the object offset divided by
      // 8 — presumably converting to the unit expected by createDefCFAOffset;
      // confirm against that helper.
      CFIBuilder.insertCFIInst(CFIInst: createDefCFAOffset(
          TRI, Reg: BaseReg + i,
          Offset: StackOffset::get(Fixed: -FixedSize, Scalable: MFI.getObjectOffset(ObjectIdx: FI) / 8 + i)));
    }
  }
}
2320
2321void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
2322 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
2323 MachineFunction *MF = MBB.getParent();
2324 const MachineFrameInfo &MFI = MF->getFrameInfo();
2325 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
2326
2327 CFIInstBuilder CFIHelper(MBB, MI, MachineInstr::FrameDestroy);
2328 const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI: MFI.getCalleeSavedInfo());
2329 for (auto &CS : RVVCSI) {
2330 MCRegister BaseReg = getRVVBaseRegister(TRI, Reg: CS.getReg());
2331 unsigned NumRegs = getCalleeSavedRVVNumRegs(BaseReg: CS.getReg());
2332 for (unsigned i = 0; i < NumRegs; ++i)
2333 CFIHelper.buildRestore(Reg: BaseReg + i);
2334 }
2335}
2336
/// Restore callee-saved registers in the epilogue, inserting before \p MI.
///
/// Three layouts are handled:
///  - explicitly spilled CSRs (RVV and "unmanaged" scalar CSRs) are reloaded
///    from their frame indexes,
///  - Zcmp/Xqccmp push/pop frames get a single pop instruction,
///  - save/restore-libcall frames get a tail call to __riscv_restore_N.
/// QCI interrupt frames emit nothing here: QC.C.MILEAVERET (already inserted
/// as the return) performs the restoration itself.
///
/// Returns true to tell generic code the target has fully handled the restore.
bool RISCVFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  // Take the debug location from the insertion point when it is a real
  // instruction; otherwise leave it empty.
  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  // Manually restore values not restored by libcall & Push/Pop.
  // Reverse the restore order in epilog. In addition, the return
  // address will be restored first in the epilogue. It increases
  // the opportunity to avoid the load-to-use data hazard between
  // loading RA and return by RA. loadRegFromStackSlot can insert
  // multiple instructions.
  const auto &UnmanagedCSI = getUnmanagedCSI(MF: *MF, CSI);
  const auto &RVVCSI = getRVVCalleeSavedInfo(MF: *MF, CSI);

  // Reload each register in CSInfo from its spill slot, inserting before MI.
  auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
    for (auto &CS : CSInfo) {
      MCRegister Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      TII.loadRegFromStackSlot(MBB, MI, DestReg: Reg, FrameIndex: CS.getFrameIdx(), RC, VReg: Register(),
                               SubReg: RISCV::NoSubRegister,
                               Flags: MachineInstr::FrameDestroy);
      assert(MI != MBB.begin() &&
             "loadRegFromStackSlot didn't insert any code!");
    }
  };
  // RVV reloads are emitted first so they end up before the scalar reloads
  // (each call inserts immediately before MI).
  loadRegFromStackSlot(RVVCSI);
  loadRegFromStackSlot(UnmanagedCSI);

  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  if (RVFI->useQCIInterrupt(MF: *MF)) {
    // Don't emit anything here because restoration is handled by
    // QC.C.MILEAVERET which we already inserted to return.
    assert(MI->getOpcode() == RISCV::QC_C_MILEAVERET &&
           "Unexpected QCI Interrupt Return Instruction");
  }

  if (RVFI->isPushable(MF: *MF)) {
    unsigned PushedRegNum = RVFI->getRVPushRegs();
    if (PushedRegNum > 0) {
      unsigned Opcode = getPopOpcode(Kind: RVFI->getPushPopKind(MF: *MF));
      unsigned RegEnc = RISCVZC::encodeRegListNumRegs(NumRegs: PushedRegNum);
      MachineInstrBuilder PopBuilder =
          BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode))
              .setMIFlag(MachineInstr::FrameDestroy);
      // Use encoded number to represent registers to restore.
      PopBuilder.addImm(Val: RegEnc);
      // Second immediate is zero here; presumably the extra stack adjustment
      // handled elsewhere — TODO confirm against the pseudo's definition.
      PopBuilder.addImm(Val: 0);

      // Mark every popped register as implicitly defined by the pop.
      for (unsigned i = 0; i < RVFI->getRVPushRegs(); i++)
        PopBuilder.addDef(RegNo: FixedCSRFIMap[i], Flags: RegState::ImplicitDefine);
    }
  } else if (const char *RestoreLibCall = getRestoreLibCallName(MF: *MF, CSI)) {
    // Add restore libcall via tail call.
    MachineInstrBuilder NewMI =
        BuildMI(BB&: MBB, I: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::PseudoTAIL))
            .addExternalSymbol(FnName: RestoreLibCall, TargetFlags: RISCVII::MO_CALL)
            .setMIFlag(MachineInstr::FrameDestroy)
            .addDef(RegNo: RISCV::X2, Flags: RegState::ImplicitDefine);

    // Add registers restored as implicit defined.
    for (auto &CS : CSI)
      NewMI.addDef(RegNo: CS.getReg(), Flags: RegState::ImplicitDefine);

    // Remove trailing returns, since the terminator is now a tail call to the
    // restore function.
    if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
      NewMI.getInstr()->copyImplicitOps(MF&: *MF, MI: *MI);
      MI->eraseFromParent();
    }
  }
  return true;
}
2416
2417bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
2418 // Keep the conventional code flow when not optimizing.
2419 if (MF.getFunction().hasOptNone())
2420 return false;
2421
2422 return true;
2423}
2424
2425bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
2426 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2427 const MachineFunction *MF = MBB.getParent();
2428 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2429
2430 // Make sure VTYPE and VL are not live-in since we will use vsetvli in the
2431 // prologue to get the VLEN, and that will clobber these registers.
2432 //
2433 // We may do also check the stack contains objects with scalable vector type,
2434 // but this will require iterating over all the stack objects, but this may
2435 // not worth since the situation is rare, we could do further check in future
2436 // if we find it is necessary.
2437 if (STI.preferVsetvliOverReadVLENB() &&
2438 (MBB.isLiveIn(Reg: RISCV::VTYPE) || MBB.isLiveIn(Reg: RISCV::VL)))
2439 return false;
2440
2441 if (!RVFI->useSaveRestoreLibCalls(MF: *MF))
2442 return true;
2443
2444 // Inserting a call to a __riscv_save libcall requires the use of the register
2445 // t0 (X5) to hold the return address. Therefore if this register is already
2446 // used we can't insert the call.
2447
2448 RegScavenger RS;
2449 RS.enterBasicBlock(MBB&: *TmpMBB);
2450 return !RS.isRegUsed(Reg: RISCV::X5);
2451}
2452
2453bool RISCVFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
2454 const MachineFunction *MF = MBB.getParent();
2455 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2456 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2457
2458 // We do not want QC.C.MILEAVERET to be subject to shrink-wrapping - it must
2459 // come in the final block of its function as it both pops and returns.
2460 if (RVFI->useQCIInterrupt(MF: *MF))
2461 return MBB.succ_empty();
2462
2463 if (!RVFI->useSaveRestoreLibCalls(MF: *MF))
2464 return true;
2465
2466 // Using the __riscv_restore libcalls to restore CSRs requires a tail call.
2467 // This means if we still need to continue executing code within this function
2468 // the restore cannot take place in this basic block.
2469
2470 if (MBB.succ_size() > 1)
2471 return false;
2472
2473 MachineBasicBlock *SuccMBB =
2474 MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
2475
2476 // Doing a tail call should be safe if there are no successors, because either
2477 // we have a returning block or the end of the block is unreachable, so the
2478 // restore will be eliminated regardless.
2479 if (!SuccMBB)
2480 return true;
2481
2482 // The successor can only contain a return, since we would effectively be
2483 // replacing the successor with our own tail return at the end of our block.
2484 return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
2485}
2486
2487bool RISCVFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
2488 switch (ID) {
2489 case TargetStackID::Default:
2490 case TargetStackID::ScalableVector:
2491 return true;
2492 case TargetStackID::NoAlloc:
2493 case TargetStackID::SGPRSpill:
2494 case TargetStackID::WasmLocal:
2495 case TargetStackID::ScalablePredicateVector:
2496 return false;
2497 }
2498 llvm_unreachable("Invalid TargetStackID::Value");
2499}
2500
// Scalable (RVV) stack objects live in the dedicated ScalableVector region
// rather than the default stack area.
TargetStackID::Value RISCVFrameLowering::getStackIDForScalableVectors() const {
  return TargetStackID::ScalableVector;
}
2504
// Synthesize the probe loop.
//
// The block containing MBBI is split: everything after MBBI is spliced into a
// new ExitMBB, and a new LoopTestMBB is inserted between them. The loop
// repeatedly subtracts ProbeSize from SP and stores zero at 0(sp).
// Termination differs by mode:
//  - !IsRVV: TargetReg holds the final SP value; loop while SP != TargetReg.
//  - IsRVV:  TargetReg counts the remaining bytes to probe; it is decremented
//            by ProbeSize each iteration and the loop continues while
//            TargetReg >= ProbeSize.
// X7 (t2) is clobbered: it holds the ProbeSize constant for the loop.
static void emitStackProbeInline(MachineBasicBlock::iterator MBBI, DebugLoc DL,
                                 Register TargetReg, bool IsRVV) {
  assert(TargetReg != RISCV::X2 && "New top of stack cannot already be in SP");

  MachineBasicBlock &MBB = *MBBI->getParent();
  MachineFunction &MF = *MBB.getParent();

  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
  bool IsRV64 = Subtarget.is64Bit();
  Align StackAlign = Subtarget.getFrameLowering()->getStackAlign();
  const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
  uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign);

  // Create the loop and exit blocks immediately after MBB.
  MachineFunction::iterator MBBInsertPoint = std::next(x: MBB.getIterator());
  MachineBasicBlock *LoopTestMBB =
      MF.CreateMachineBasicBlock(BB: MBB.getBasicBlock());
  MF.insert(MBBI: MBBInsertPoint, MBB: LoopTestMBB);
  MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(BB: MBB.getBasicBlock());
  MF.insert(MBBI: MBBInsertPoint, MBB: ExitMBB);
  MachineInstr::MIFlag Flags = MachineInstr::FrameSetup;
  Register ScratchReg = RISCV::X7;

  // ScratchReg = ProbeSize
  TII->movImm(MBB, MBBI, DL, DstReg: ScratchReg, Val: ProbeSize, Flag: Flags);

  // LoopTest:
  // SUB SP, SP, ProbeSize
  BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB), DestReg: SPReg)
      .addReg(RegNo: SPReg)
      .addReg(RegNo: ScratchReg)
      .setMIFlags(Flags);

  // s[d|w] zero, 0(sp)  — the actual probe store touching the guard page.
  BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL,
          MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
      .addReg(RegNo: RISCV::X0)
      .addReg(RegNo: SPReg)
      .addImm(Val: 0)
      .setMIFlags(Flags);

  if (IsRVV) {
    // SUB TargetReg, TargetReg, ProbeSize — consume this chunk of the
    // remaining (runtime-computed) allocation size.
    BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB),
            DestReg: TargetReg)
        .addReg(RegNo: TargetReg)
        .addReg(RegNo: ScratchReg)
        .setMIFlags(Flags);

    // BGE TargetReg, ProbeSize, LoopTest — keep probing while at least one
    // more full chunk remains.
    BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::BGE))
        .addReg(RegNo: TargetReg)
        .addReg(RegNo: ScratchReg)
        .addMBB(MBB: LoopTestMBB)
        .setMIFlags(Flags);

  } else {
    // BNE SP, TargetReg, LoopTest — keep probing until SP reaches the
    // precomputed final stack pointer.
    BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::BNE))
        .addReg(RegNo: SPReg)
        .addReg(RegNo: TargetReg)
        .addMBB(MBB: LoopTestMBB)
        .setMIFlags(Flags);
  }

  // Move the rest of MBB (everything after the probe pseudo) into ExitMBB and
  // rewire the CFG: MBB -> LoopTestMBB -> {LoopTestMBB, ExitMBB}.
  ExitMBB->splice(Where: ExitMBB->end(), Other: &MBB, From: std::next(x: MBBI), To: MBB.end());
  ExitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: &MBB);

  LoopTestMBB->addSuccessor(Succ: ExitMBB);
  LoopTestMBB->addSuccessor(Succ: LoopTestMBB);
  MBB.addSuccessor(Succ: LoopTestMBB);
  // Update liveins.
  fullyRecomputeLiveIns(MBBs: {ExitMBB, LoopTestMBB});
}
2580
2581void RISCVFrameLowering::inlineStackProbe(MachineFunction &MF,
2582 MachineBasicBlock &MBB) const {
2583 // Get the instructions that need to be replaced. We emit at most two of
2584 // these. Remember them in order to avoid complications coming from the need
2585 // to traverse the block while potentially creating more blocks.
2586 SmallVector<MachineInstr *, 4> ToReplace;
2587 for (MachineInstr &MI : MBB) {
2588 unsigned Opc = MI.getOpcode();
2589 if (Opc == RISCV::PROBED_STACKALLOC ||
2590 Opc == RISCV::PROBED_STACKALLOC_RVV) {
2591 ToReplace.push_back(Elt: &MI);
2592 }
2593 }
2594
2595 for (MachineInstr *MI : ToReplace) {
2596 if (MI->getOpcode() == RISCV::PROBED_STACKALLOC ||
2597 MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV) {
2598 MachineBasicBlock::iterator MBBI = MI->getIterator();
2599 DebugLoc DL = MBB.findDebugLoc(MBBI);
2600 Register TargetReg = MI->getOperand(i: 0).getReg();
2601 emitStackProbeInline(MBBI, DL, TargetReg,
2602 IsRVV: (MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV));
2603 MBBI->eraseFromParent();
2604 }
2605 }
2606}
2607
// On function entry the CFA coincides with the initial CFA register value,
// i.e. at offset 0.
int RISCVFrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
  return 0;
}
2611
2612Register
2613RISCVFrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
2614 return RISCV::X2;
2615}
2616