//===-- FixupStatepointCallerSaved.cpp - Fixup caller saved registers ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// A statepoint's deopt parameters contain values that are meaningful to the
/// runtime and must still be readable at the moment the call returns, i.e.
/// they are "late read" by the runtime. There is no way to describe such a
/// late read to the register allocator, so it may assign one of these values
/// to a register that is clobbered by the call. This pass forces such
/// registers to be spilled before the statepoint and rewrites the
/// corresponding statepoint operands to refer to the added spill slots.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FixupStatepointCallerSaved.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "fixup-statepoint-caller-saved"
STATISTIC(NumSpilledRegisters, "Number of spilled registers");
STATISTIC(NumSpillSlotsAllocated, "Number of spill slots allocated");
STATISTIC(NumSpillSlotsExtended, "Number of spill slots extended");

static cl::opt<bool> FixupSCSExtendSlotSize(
    "fixup-scs-extend-slot-size", cl::Hidden, cl::init(false),
    cl::desc("Allow spilling into a spill slot larger than the register size"));

static cl::opt<bool> PassGCPtrInCSR(
    "fixup-allow-gcptr-in-csr", cl::Hidden, cl::init(false),
    cl::desc("Allow passing GC Pointer arguments in callee saved registers"));

static cl::opt<bool> EnableCopyProp(
    "fixup-scs-enable-copy-propagation", cl::Hidden, cl::init(true),
    cl::desc("Enable simple copy propagation during register reloading"));

// This is a purely debugging option.
// It may be handy for investigating statepoint spilling issues.
static cl::opt<unsigned> MaxStatepointsWithRegs(
    "fixup-max-csr-statepoints", cl::Hidden,
    cl::desc("Max number of statepoints allowed to pass GC Ptrs in registers"));

namespace {

struct FixupStatepointCallerSavedImpl {
  bool run(MachineFunction &MF);
};

class FixupStatepointCallerSavedLegacy : public MachineFunctionPass {
public:
  static char ID;

  FixupStatepointCallerSavedLegacy() : MachineFunctionPass(ID) {
    initializeFixupStatepointCallerSavedLegacyPass(
        *PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return "Fixup Statepoint Caller Saved";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // End anonymous namespace.

char FixupStatepointCallerSavedLegacy::ID = 0;
char &llvm::FixupStatepointCallerSavedID = FixupStatepointCallerSavedLegacy::ID;

INITIALIZE_PASS_BEGIN(FixupStatepointCallerSavedLegacy, DEBUG_TYPE,
                      "Fixup Statepoint Caller Saved", false, false)
INITIALIZE_PASS_END(FixupStatepointCallerSavedLegacy, DEBUG_TYPE,
                    "Fixup Statepoint Caller Saved", false, false)

// Utility function to get the size of a register in bytes.
static unsigned getRegisterSize(const TargetRegisterInfo &TRI, Register Reg) {
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
  return TRI.getSpillSize(*RC);
}

// Try to eliminate a redundant copy into the register we are about to spill,
// i.e. try to change:
//   X = COPY Y
//   SPILL X
// into
//   SPILL Y
// If there are no uses of X between the COPY and the STATEPOINT, the COPY
// itself can be eliminated.
// Reg    - register we are about to spill.
// RI     - on entry points to the statepoint; on successful copy propagation
//          it is set to the new spill point.
// IsKill - set to true if the returned register is killed at the spill point
//          (it has no further uses).
// Returns either the found copy source register or the original one.
static Register performCopyPropagation(Register Reg,
                                       MachineBasicBlock::iterator &RI,
                                       bool &IsKill, const TargetInstrInfo &TII,
                                       const TargetRegisterInfo &TRI) {
  // First check if the statepoint itself uses Reg in non-meta operands.
  int Idx = RI->findRegisterUseOperandIdx(Reg, &TRI, false);
  if (Idx >= 0 && (unsigned)Idx < StatepointOpers(&*RI).getNumDeoptArgsIdx()) {
    IsKill = false;
    return Reg;
  }

  if (!EnableCopyProp)
    return Reg;

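  // Walk backwards from the statepoint looking for the instruction that
  // defines Reg, remembering the closest intermediate use of Reg (if any).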
  MachineBasicBlock *MBB = RI->getParent();
  MachineBasicBlock::reverse_iterator E = MBB->rend();
  MachineInstr *Def = nullptr, *Use = nullptr;
  for (auto It = ++(RI.getReverse()); It != E; ++It) {
    if (It->readsRegister(Reg, &TRI) && !Use)
      Use = &*It;
    if (It->modifiesRegister(Reg, &TRI)) {
      Def = &*It;
      break;
    }
  }

  if (!Def)
    return Reg;

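  // Only propagate through a trivial copy whose source has the same spill
  // size as Reg; otherwise keep spilling Reg itself.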
  auto DestSrc = TII.isCopyInstr(*Def);
  if (!DestSrc || DestSrc->Destination->getReg() != Reg)
    return Reg;

  Register SrcReg = DestSrc->Source->getReg();

  if (getRegisterSize(TRI, Reg) != getRegisterSize(TRI, SrcReg))
    return Reg;

  LLVM_DEBUG(dbgs() << "spillRegisters: perform copy propagation "
                    << printReg(Reg, &TRI) << " -> " << printReg(SrcReg, &TRI)
                    << "\n");

  // Insert the spill immediately after Def.
  RI = ++MachineBasicBlock::iterator(Def);
  IsKill = DestSrc->Source->isKill();

  if (!Use) {
    // There are no uses of the original register between the COPY and the
    // STATEPOINT. There can't be any after the STATEPOINT, so we can
    // eliminate Def.
    LLVM_DEBUG(dbgs() << "spillRegisters: removing dead copy " << *Def);
    Def->eraseFromParent();
  } else if (IsKill) {
    // The COPY stays in place and the spill is inserted *after* it, so the
    // COPY is no longer a kill of its source.
    const_cast<MachineOperand *>(DestSrc->Source)->setIsKill(false);
  }

  return SrcReg;
}

namespace {
// Pair {Register, FrameIndex}.
using RegSlotPair = std::pair<Register, int>;

// Keeps track of the reloads that were inserted in each MBB.
class RegReloadCache {
  using ReloadSet = SmallSet<RegSlotPair, 8>;
  DenseMap<const MachineBasicBlock *, ReloadSet> Reloads;

public:
  RegReloadCache() = default;

  // Record a reload of Reg from FI in block MBB, if not already present.
  // Return true if the reload was newly recorded.
  bool tryRecordReload(Register Reg, int FI, const MachineBasicBlock *MBB) {
    RegSlotPair RSP(Reg, FI);
    return Reloads[MBB].insert(RSP).second;
  }
};

// Cache of frame indexes used while rewriting one statepoint so that they can
// be reused when processing the next statepoint instruction.
// Two strategies are supported. One preserves the size of each spill slot;
// the other (FixupSCSExtendSlotSize) extends spill slots so they can be shared
// across register sizes, which reduces the number of slots and the total frame
// size, at the cost of an implicit any-extend when the value is reloaded.
class FrameIndexesCache {
private:
  struct FrameIndexesPerSize {
    // Frame indexes used while processing previous statepoints.
    SmallVector<int, 8> Slots;
    // Index of the first not-yet-used frame index in Slots.
    unsigned Index = 0;
  };
  MachineFrameInfo &MFI;
  const TargetRegisterInfo &TRI;
  // Maps a size to the list of frame indexes of that size. In
  // FixupSCSExtendSlotSize mode the key 0 is used to keep all frame indexes.
  // If the required spill slot is larger than the cached one, the cached slot
  // is grown.
  DenseMap<unsigned, FrameIndexesPerSize> Cache;

  // Keeps track of slots reserved for the shared landing pad processing.
  // Initialized from GlobalIndices for the current EHPad.
  SmallSet<int, 8> ReservedSlots;

  // A landing pad can be the destination of several statepoints. Every
  // register defined by those statepoints must be spilled to the same stack
  // slot; this map records that assignment.
  DenseMap<const MachineBasicBlock *, SmallVector<RegSlotPair, 8>>
      GlobalIndices;

  FrameIndexesPerSize &getCacheBucket(unsigned Size) {
    // In FixupSCSExtendSlotSize mode the bucket with index 0 is used
    // for all sizes.
    return Cache[FixupSCSExtendSlotSize ? 0 : Size];
  }

public:
  FrameIndexesCache(MachineFrameInfo &MFI, const TargetRegisterInfo &TRI)
      : MFI(MFI), TRI(TRI) {}
  // Reset the current state of used frame indexes. After invocation of
  // this function all frame indexes are available for allocation, with
  // the exception of slots reserved for landing pad processing (if any).
  void reset(const MachineBasicBlock *EHPad) {
    for (auto &It : Cache)
      It.second.Index = 0;

    ReservedSlots.clear();
    if (EHPad)
      if (auto It = GlobalIndices.find(EHPad); It != GlobalIndices.end())
        ReservedSlots.insert_range(llvm::make_second_range(It->second));
  }

  // Get frame index to spill the register.
  int getFrameIndex(Register Reg, MachineBasicBlock *EHPad) {
    // Check if a slot for Reg is already reserved at EHPad.
    auto It = GlobalIndices.find(EHPad);
    if (It != GlobalIndices.end()) {
      auto &Vec = It->second;
      auto Idx = llvm::find_if(
          Vec, [Reg](RegSlotPair &RSP) { return Reg == RSP.first; });
      if (Idx != Vec.end()) {
        int FI = Idx->second;
        LLVM_DEBUG(dbgs() << "Found global FI " << FI << " for register "
                          << printReg(Reg, &TRI) << " at "
                          << printMBBReference(*EHPad) << "\n");
        assert(ReservedSlots.count(FI) && "using unreserved slot");
        return FI;
      }
    }

    unsigned Size = getRegisterSize(TRI, Reg);
    FrameIndexesPerSize &Line = getCacheBucket(Size);
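    // Try to reuse a slot allocated for a previous statepoint, skipping any
    // slot that is reserved for the current landing pad.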
    while (Line.Index < Line.Slots.size()) {
      int FI = Line.Slots[Line.Index++];
      if (ReservedSlots.count(FI))
        continue;
      // If all sizes share one bucket, the cached slot may be smaller than
      // required and has to be extended.
      if (MFI.getObjectSize(FI) < Size) {
        MFI.setObjectSize(FI, Size);
        MFI.setObjectAlignment(FI, Align(Size));
        NumSpillSlotsExtended++;
      }
      return FI;
    }
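    // No reusable slot of a suitable size was found; allocate a new one.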
    int FI = MFI.CreateSpillStackObject(Size, Align(Size));
    NumSpillSlotsAllocated++;
    Line.Slots.push_back(FI);
    ++Line.Index;

    // Remember the {Reg, FI} assignment for EHPad.
    if (EHPad) {
      GlobalIndices[EHPad].push_back(std::make_pair(Reg, FI));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FI << " for spilling reg "
                        << printReg(Reg, &TRI) << " at landing pad "
                        << printMBBReference(*EHPad) << "\n");
    }

    return FI;
  }

  // Sort all registers to spill in descending order of size. In
  // FixupSCSExtendSlotSize mode this minimizes the total frame size.
  // In non-FixupSCSExtendSlotSize mode this step is skipped.
  void sortRegisters(SmallVectorImpl<Register> &Regs) {
    if (!FixupSCSExtendSlotSize)
      return;
    llvm::sort(Regs, [&](Register &A, Register &B) {
      return getRegisterSize(TRI, A) > getRegisterSize(TRI, B);
    });
  }
};

// Describes the state of the statepoint instruction currently being processed.
class StatepointState {
private:
  // The statepoint instruction.
  MachineInstr &MI;
  MachineFunction &MF;
  // If non-null the statepoint is an invoke, and this points to its landing
  // pad.
  MachineBasicBlock *EHPad;
  const TargetRegisterInfo &TRI;
  const TargetInstrInfo &TII;
  MachineFrameInfo &MFI;
  // Mask of callee saved registers.
  const uint32_t *Mask;
  // Cache of frame indexes used while processing previous statepoints.
  FrameIndexesCache &CacheFI;
  bool AllowGCPtrInCSR;
  // Indexes of operands holding physical registers that require spilling.
  SmallVector<unsigned, 8> OpsToSpill;
  // Set of registers to spill.
  SmallVector<Register, 8> RegsToSpill;
  // Set of registers to reload after the statepoint.
  SmallVector<Register, 8> RegsToReload;
  // Maps a register to its frame slot index.
  DenseMap<Register, int> RegToSlotIdx;

public:
  StatepointState(MachineInstr &MI, const uint32_t *Mask,
                  FrameIndexesCache &CacheFI, bool AllowGCPtrInCSR)
      : MI(MI), MF(*MI.getMF()), TRI(*MF.getSubtarget().getRegisterInfo()),
        TII(*MF.getSubtarget().getInstrInfo()), MFI(MF.getFrameInfo()),
        Mask(Mask), CacheFI(CacheFI), AllowGCPtrInCSR(AllowGCPtrInCSR) {

    // Find the statepoint's landing pad, if any.
    EHPad = nullptr;
    MachineBasicBlock *MBB = MI.getParent();
    // An invoke statepoint must be the last statepoint in its block.
    bool Last = std::none_of(++MI.getIterator(), MBB->end().getInstrIterator(),
                             [](MachineInstr &I) {
                               return I.getOpcode() == TargetOpcode::STATEPOINT;
                             });

    if (!Last)
      return;

    auto IsEHPad = [](MachineBasicBlock *B) { return B->isEHPad(); };

    assert(llvm::count_if(MBB->successors(), IsEHPad) < 2 && "multiple EHPads");

    auto It = llvm::find_if(MBB->successors(), IsEHPad);
    if (It != MBB->succ_end())
      EHPad = *It;
  }

  MachineBasicBlock *getEHPad() const { return EHPad; }

  // Return true if the register is callee saved.
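  // Mask is the call-preserved register mask: bit Reg.id() is set when the
  // register's value is preserved across the call.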
  bool isCalleeSaved(Register Reg) {
    return (Mask[Reg.id() / 32] >> (Reg.id() % 32)) & 1;
  }

  // Iterate over the statepoint's variable operands to find caller saved
  // registers and remember which operands and registers need spilling.
  // Returns true if any caller saved registers were found.
  bool findRegistersToSpill() {
    SmallSet<Register, 8> GCRegs;
    // All GC pointer operands assigned to registers produce a new value.
    // Since they're tied to their defs, it is enough to collect def registers.
    for (const auto &Def : MI.defs())
      GCRegs.insert(Def.getReg());

    SmallSet<Register, 8> VisitedRegs;
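    // Walk the statepoint operands, starting at its first variable operand.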
    for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                  EndIdx = MI.getNumOperands();
         Idx < EndIdx; ++Idx) {
      MachineOperand &MO = MI.getOperand(Idx);
      if (!MO.isReg() || MO.isImplicit() || MO.isUndef())
        continue;
      Register Reg = MO.getReg();
      assert(Reg.isPhysical() && "Only physical regs are expected");

      if (isCalleeSaved(Reg) && (AllowGCPtrInCSR || !GCRegs.contains(Reg)))
        continue;

      LLVM_DEBUG(dbgs() << "Will spill " << printReg(Reg, &TRI) << " at index "
                        << Idx << "\n");

      if (VisitedRegs.insert(Reg).second)
        RegsToSpill.push_back(Reg);
      OpsToSpill.push_back(Idx);
    }
    CacheFI.sortRegisters(RegsToSpill);
    return !RegsToSpill.empty();
  }

  // Spill all caller saved registers right before the statepoint instruction.
  // Remember the frame index where each register is spilled.
  void spillRegisters() {
    for (Register Reg : RegsToSpill) {
      int FI = CacheFI.getFrameIndex(Reg, EHPad);

      NumSpilledRegisters++;
      RegToSlotIdx[Reg] = FI;

      LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, &TRI) << " to FI " << FI
                        << "\n");

      // Perform trivial copy propagation.
      bool IsKill = true;
      MachineBasicBlock::iterator InsertBefore(MI);
      Reg = performCopyPropagation(Reg, InsertBefore, IsKill, TII, TRI);
      const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);

      LLVM_DEBUG(dbgs() << "Insert spill before " << *InsertBefore);
      TII.storeRegToStackSlot(*MI.getParent(), InsertBefore, Reg, IsKill, FI,
                              RC, &TRI, Register());
    }
  }

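  // Insert a reload of Reg from its spill slot before It in MBB; if It is the
  // block's end iterator, the reload is placed after the last instruction.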
  void insertReloadBefore(Register Reg, MachineBasicBlock::iterator It,
                          MachineBasicBlock *MBB) {
    const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
    int FI = RegToSlotIdx[Reg];
    if (It != MBB->end()) {
      TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI, Register());
      return;
    }

    // To insert the reload at the end of MBB, insert it before the last
    // instruction and then swap them.
    assert(!MBB->empty() && "Empty block");
    --It;
    TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI, Register());
    MachineInstr *Reload = It->getPrevNode();
    int Dummy = 0;
    (void)Dummy;
    assert(TII.isLoadFromStackSlot(*Reload, Dummy) == Reg);
    assert(Dummy == FI);
    MBB->remove(Reload);
    MBB->insertAfter(It, Reload);
  }

  // Insert reloads of (relocated) registers spilled in statepoint.
  void insertReloads(MachineInstr *NewStatepoint, RegReloadCache &RC) {
    MachineBasicBlock *MBB = NewStatepoint->getParent();
    auto InsertPoint = std::next(NewStatepoint->getIterator());

    for (auto Reg : RegsToReload) {
      insertReloadBefore(Reg, InsertPoint, MBB);
      LLVM_DEBUG(dbgs() << "Reloading " << printReg(Reg, &TRI) << " from FI "
                        << RegToSlotIdx[Reg] << " after statepoint\n");

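      // For an invoke statepoint the value must also be reloaded on the
      // exceptional path, but only once per (register, slot) pair per
      // landing pad.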
      if (EHPad && RC.tryRecordReload(Reg, RegToSlotIdx[Reg], EHPad)) {
        auto EHPadInsertPoint =
            EHPad->SkipPHIsLabelsAndDebug(EHPad->begin(), Reg);
        insertReloadBefore(Reg, EHPadInsertPoint, EHPad);
        LLVM_DEBUG(dbgs() << "...also reload at EHPad "
                          << printMBBReference(*EHPad) << "\n");
      }
    }
  }

  // Rewrite the statepoint machine instruction, replacing caller saved
  // register operands with indirect memory locations (frame indexes).
  MachineInstr *rewriteStatepoint() {
    MachineInstr *NewMI =
        MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(),
                              /*NoImplicit=*/true);
    MachineInstrBuilder MIB(MF, NewMI);

    unsigned NumOps = MI.getNumOperands();

    // New indices for the remaining defs.
    SmallVector<unsigned, 8> NewIndices;
    unsigned NumDefs = MI.getNumDefs();
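    // Decide what to do with each def: either keep it as a def on the new
    // statepoint (when GC pointers may stay in callee saved registers) or
    // drop it and reload the value from its spill slot after the call.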
    for (unsigned I = 0; I < NumDefs; ++I) {
      MachineOperand &DefMO = MI.getOperand(I);
      assert(DefMO.isReg() && DefMO.isDef() && "Expected Reg Def operand");
      Register Reg = DefMO.getReg();
      assert(DefMO.isTied() && "Def is expected to be tied");
      // We skipped undef uses and did not spill them, so we should not
      // proceed with defs here.
      if (MI.getOperand(MI.findTiedOperandIdx(I)).isUndef()) {
        if (AllowGCPtrInCSR) {
          NewIndices.push_back(NewMI->getNumOperands());
          MIB.addReg(Reg, RegState::Define);
        }
        continue;
      }
      if (!AllowGCPtrInCSR) {
        assert(is_contained(RegsToSpill, Reg));
        RegsToReload.push_back(Reg);
      } else {
        if (isCalleeSaved(Reg)) {
          NewIndices.push_back(NewMI->getNumOperands());
          MIB.addReg(Reg, RegState::Define);
        } else {
          NewIndices.push_back(NumOps);
          RegsToReload.push_back(Reg);
        }
      }
    }

    // Add End marker.
    OpsToSpill.push_back(MI.getNumOperands());
    unsigned CurOpIdx = 0;

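    // Copy the remaining operands. Operands recorded in OpsToSpill are
    // replaced by an indirect memory reference: IndirectMemRefOp, the access
    // size, the frame index and a zero offset.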
    for (unsigned I = NumDefs; I < MI.getNumOperands(); ++I) {
      MachineOperand &MO = MI.getOperand(I);
      if (I == OpsToSpill[CurOpIdx]) {
        assert(MO.isReg() && "Should be register");
        assert(MO.getReg().isPhysical() && "Should be physical register");
        int FI = RegToSlotIdx[MO.getReg()];
        MIB.addImm(StackMaps::IndirectMemRefOp);
        MIB.addImm(getRegisterSize(TRI, MO.getReg()));
        MIB.addFrameIndex(FI);
        MIB.addImm(0);
        ++CurOpIdx;
      } else {
        MIB.add(MO);
        unsigned OldDef;
        if (AllowGCPtrInCSR && MI.isRegTiedToDefOperand(I, &OldDef)) {
          assert(OldDef < NumDefs);
          assert(NewIndices[OldDef] < NumOps);
          MIB->tieOperands(NewIndices[OldDef], MIB->getNumOperands() - 1);
        }
      }
    }
    assert(CurOpIdx == (OpsToSpill.size() - 1) && "Not all operands processed");
    // Add mem operands.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (auto It : RegToSlotIdx) {
      Register R = It.first;
      int FrameIndex = It.second;
      auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
      if (is_contained(RegsToReload, R))
        Flags |= MachineMemOperand::MOStore;
      auto *MMO =
          MF.getMachineMemOperand(PtrInfo, Flags, getRegisterSize(TRI, R),
                                  MFI.getObjectAlign(FrameIndex));
      NewMI->addMemOperand(MF, MMO);
    }

    // Insert the new statepoint and erase the old one.
    MI.getParent()->insert(MI, NewMI);

    LLVM_DEBUG(dbgs() << "rewritten statepoint to: " << *NewMI << "\n");
    MI.eraseFromParent();
    return NewMI;
  }
};

class StatepointProcessor {
private:
  MachineFunction &MF;
  const TargetRegisterInfo &TRI;
  FrameIndexesCache CacheFI;
  RegReloadCache ReloadCache;

public:
  StatepointProcessor(MachineFunction &MF)
      : MF(MF), TRI(*MF.getSubtarget().getRegisterInfo()),
        CacheFI(MF.getFrameInfo(), TRI) {}

  bool process(MachineInstr &MI, bool AllowGCPtrInCSR) {
    StatepointOpers SO(&MI);
    uint64_t Flags = SO.getFlags();
    // Do nothing for DeoptLiveIn statepoints; they support all registers.
    if (Flags & (uint64_t)StatepointFlags::DeoptLiveIn)
      return false;
    LLVM_DEBUG(dbgs() << "\nMBB " << MI.getParent()->getNumber() << " "
                      << MI.getParent()->getName() << " : process statepoint "
                      << MI);
    CallingConv::ID CC = SO.getCallingConv();
    const uint32_t *Mask = TRI.getCallPreservedMask(MF, CC);
    StatepointState SS(MI, Mask, CacheFI, AllowGCPtrInCSR);
    CacheFI.reset(SS.getEHPad());

    if (!SS.findRegistersToSpill())
      return false;

    SS.spillRegisters();
    auto *NewStatepoint = SS.rewriteStatepoint();
    SS.insertReloads(NewStatepoint, ReloadCache);
    return true;
  }
};
} // namespace

bool FixupStatepointCallerSavedImpl::run(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  if (!F.hasGC())
    return false;

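  // Collect all statepoints first: rewriting erases the original instruction
  // and inserts a new one, so we must not do it while iterating the blocks.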
  SmallVector<MachineInstr *, 16> Statepoints;
  for (MachineBasicBlock &BB : MF)
    for (MachineInstr &I : BB)
      if (I.getOpcode() == TargetOpcode::STATEPOINT)
        Statepoints.push_back(&I);

  if (Statepoints.empty())
    return false;

  bool Changed = false;
  StatepointProcessor SPP(MF);
  unsigned NumStatepoints = 0;
  bool AllowGCPtrInCSR = PassGCPtrInCSR;
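  // Once the debug limit is reached, stop allowing GC pointers in callee
  // saved registers for the remaining statepoints.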
  for (MachineInstr *I : Statepoints) {
    ++NumStatepoints;
    if (MaxStatepointsWithRegs.getNumOccurrences() &&
        NumStatepoints >= MaxStatepointsWithRegs)
      AllowGCPtrInCSR = false;
    Changed |= SPP.process(*I, AllowGCPtrInCSR);
  }
  return Changed;
}

bool FixupStatepointCallerSavedLegacy::runOnMachineFunction(
    MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  return FixupStatepointCallerSavedImpl().run(MF);
}

PreservedAnalyses
FixupStatepointCallerSavedPass::run(MachineFunction &MF,
                                    MachineFunctionAnalysisManager &MFAM) {

  if (!FixupStatepointCallerSavedImpl().run(MF))
    return PreservedAnalyses::all();

  auto PA = getMachineFunctionPassPreservedAnalyses();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}