//===- RegisterScavenging.cpp - Machine register scavenging ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the machine register scavenger. It can provide
/// information, such as unused registers, at any point in a machine basic
/// block. It also provides a mechanism to make registers available by evicting
/// them to spill slots.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>

44using namespace llvm;
45
46#define DEBUG_TYPE "reg-scavenging"
47
48STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
49
50void RegScavenger::setRegUsed(Register Reg, LaneBitmask LaneMask) {
51 LiveUnits.addRegMasked(Reg, Mask: LaneMask);
52}
53
54void RegScavenger::init(MachineBasicBlock &MBB) {
55 MachineFunction &MF = *MBB.getParent();
56 TII = MF.getSubtarget().getInstrInfo();
57 TRI = MF.getSubtarget().getRegisterInfo();
58 MRI = &MF.getRegInfo();
59 LiveUnits.init(TRI: *TRI);
60
61 this->MBB = &MBB;
62
63 for (ScavengedInfo &SI : Scavenged) {
64 SI.Reg = 0;
65 SI.Restore = nullptr;
66 }
67}
68
69void RegScavenger::enterBasicBlock(MachineBasicBlock &MBB) {
70 init(MBB);
71 LiveUnits.addLiveIns(MBB);
72 MBBI = MBB.begin();
73}
74
75void RegScavenger::enterBasicBlockEnd(MachineBasicBlock &MBB) {
76 init(MBB);
77 LiveUnits.addLiveOuts(MBB);
78 MBBI = MBB.end();
79}
80
81void RegScavenger::backward() {
82 const MachineInstr &MI = *--MBBI;
83 LiveUnits.stepBackward(MI);
84
85 // Expire scavenge spill frameindex uses.
86 for (ScavengedInfo &I : Scavenged) {
87 if (I.Restore == &MI) {
88 I.Reg = 0;
89 I.Restore = nullptr;
90 }
91 }
92}
93
94bool RegScavenger::isRegUsed(Register Reg, bool includeReserved) const {
95 if (isReserved(Reg))
96 return includeReserved;
97 return !LiveUnits.available(Reg);
98}
99
100Register RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
101 for (Register Reg : *RC) {
102 if (!isRegUsed(Reg)) {
103 LLVM_DEBUG(dbgs() << "Scavenger found unused reg: " << printReg(Reg, TRI)
104 << "\n");
105 return Reg;
106 }
107 }
108 return 0;
109}
110
111BitVector RegScavenger::getRegsAvailable(const TargetRegisterClass *RC) {
112 BitVector Mask(TRI->getNumRegs());
113 for (Register Reg : *RC)
114 if (!isRegUsed(Reg))
115 Mask.set(Reg.id());
116 return Mask;
117}
118
119/// Given the bitvector \p Available of free register units at position
120/// \p From. Search backwards to find a register that is part of \p
121/// Candidates and not used/clobbered until the point \p To. If there is
122/// multiple candidates continue searching and pick the one that is not used/
123/// clobbered for the longest time.
124/// Returns the register and the earliest position we know it to be free or
125/// the position MBB.end() if no register is available.
126static std::pair<MCPhysReg, MachineBasicBlock::iterator>
127findSurvivorBackwards(const MachineRegisterInfo &MRI,
128 MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
129 const LiveRegUnits &LiveOut, ArrayRef<MCPhysReg> AllocationOrder,
130 bool RestoreAfter) {
131 bool FoundTo = false;
132 MCPhysReg Survivor = 0;
133 MachineBasicBlock::iterator Pos;
134 MachineBasicBlock &MBB = *From->getParent();
135 unsigned InstrLimit = 25;
136 unsigned InstrCountDown = InstrLimit;
137 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
138 LiveRegUnits Used(TRI);
139
140 assert(From->getParent() == To->getParent() &&
141 "Target instruction is in other than current basic block, use "
142 "enterBasicBlockEnd first");
143
144 for (MachineBasicBlock::iterator I = From;; --I) {
145 const MachineInstr &MI = *I;
146
147 Used.accumulate(MI);
148
149 if (I == To) {
150 // See if one of the registers in RC wasn't used so far.
151 for (MCPhysReg Reg : AllocationOrder) {
152 if (!MRI.isReserved(PhysReg: Reg) && Used.available(Reg) &&
153 LiveOut.available(Reg))
154 return std::make_pair(x&: Reg, y: MBB.end());
155 }
156 // Otherwise we will continue up to InstrLimit instructions to find
157 // the register which is not defined/used for the longest time.
158 FoundTo = true;
159 Pos = To;
160 // Note: It was fine so far to start our search at From, however now that
161 // we have to spill, and can only place the restore after From then
162 // add the regs used/defed by std::next(From) to the set.
163 if (RestoreAfter)
164 Used.accumulate(MI: *std::next(x: From));
165 }
166 if (FoundTo) {
167 // Don't search to FrameSetup instructions if we were searching from
168 // Non-FrameSetup instructions. Otherwise, the spill position may point
169 // before FrameSetup instructions.
170 if (!From->getFlag(Flag: MachineInstr::FrameSetup) &&
171 MI.getFlag(Flag: MachineInstr::FrameSetup))
172 break;
173
174 if (Survivor == 0 || !Used.available(Reg: Survivor)) {
175 MCPhysReg AvilableReg = 0;
176 for (MCPhysReg Reg : AllocationOrder) {
177 if (!MRI.isReserved(PhysReg: Reg) && Used.available(Reg)) {
178 AvilableReg = Reg;
179 break;
180 }
181 }
182 if (AvilableReg == 0)
183 break;
184 Survivor = AvilableReg;
185 }
186 if (--InstrCountDown == 0)
187 break;
188
189 // Keep searching when we find a vreg since the spilled register will
190 // be usefull for this other vreg as well later.
191 bool FoundVReg = false;
192 for (const MachineOperand &MO : MI.operands()) {
193 if (MO.isReg() && MO.getReg().isVirtual()) {
194 FoundVReg = true;
195 break;
196 }
197 }
198 if (FoundVReg) {
199 InstrCountDown = InstrLimit;
200 Pos = I;
201 }
202 if (I == MBB.begin())
203 break;
204 }
205 assert(I != MBB.begin() && "Did not find target instruction while "
206 "iterating backwards");
207 }
208
209 return std::make_pair(x&: Survivor, y&: Pos);
210}
211
212static unsigned getFrameIndexOperandNum(MachineInstr &MI) {
213 unsigned i = 0;
214 while (!MI.getOperand(i).isFI()) {
215 ++i;
216 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
217 }
218 return i;
219}
220
221RegScavenger::ScavengedInfo &
222RegScavenger::spill(Register Reg, const TargetRegisterClass &RC, int SPAdj,
223 MachineBasicBlock::iterator Before,
224 MachineBasicBlock::iterator &UseMI) {
225 // Find an available scavenging slot with size and alignment matching
226 // the requirements of the class RC.
227 const MachineFunction &MF = *Before->getMF();
228 const MachineFrameInfo &MFI = MF.getFrameInfo();
229 unsigned NeedSize = TRI->getSpillSize(RC);
230 Align NeedAlign = TRI->getSpillAlign(RC);
231
232 unsigned SI = Scavenged.size(), Diff = std::numeric_limits<unsigned>::max();
233 int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
234 for (unsigned I = 0; I < Scavenged.size(); ++I) {
235 if (Scavenged[I].Reg != 0)
236 continue;
237 // Verify that this slot is valid for this register.
238 int FI = Scavenged[I].FrameIndex;
239 if (FI < FIB || FI >= FIE)
240 continue;
241 unsigned S = MFI.getObjectSize(ObjectIdx: FI);
242 Align A = MFI.getObjectAlign(ObjectIdx: FI);
243 if (NeedSize > S || NeedAlign > A)
244 continue;
245 // Avoid wasting slots with large size and/or large alignment. Pick one
246 // that is the best fit for this register class (in street metric).
247 // Picking a larger slot than necessary could happen if a slot for a
248 // larger register is reserved before a slot for a smaller one. When
249 // trying to spill a smaller register, the large slot would be found
250 // first, thus making it impossible to spill the larger register later.
251 unsigned D = (S - NeedSize) + (A.value() - NeedAlign.value());
252 if (D < Diff) {
253 SI = I;
254 Diff = D;
255 }
256 }
257
258 if (SI == Scavenged.size()) {
259 // We need to scavenge a register but have no spill slot, the target
260 // must know how to do it (if not, we'll assert below).
261 Scavenged.push_back(Elt: ScavengedInfo(FIE));
262 }
263
264 // Avoid infinite regress
265 Scavenged[SI].Reg = Reg;
266
267 // If the target knows how to save/restore the register, let it do so;
268 // otherwise, use the emergency stack spill slot.
269 if (!TRI->saveScavengerRegister(MBB&: *MBB, I: Before, UseMI, RC: &RC, Reg)) {
270 // Spill the scavenged register before \p Before.
271 int FI = Scavenged[SI].FrameIndex;
272 if (FI < FIB || FI >= FIE) {
273 report_fatal_error(reason: Twine("Error while trying to spill ") +
274 TRI->getName(RegNo: Reg) + " from class " +
275 TRI->getRegClassName(Class: &RC) +
276 ": Cannot scavenge register without an emergency "
277 "spill slot!");
278 }
279 TII->storeRegToStackSlot(MBB&: *MBB, MI: Before, SrcReg: Reg, isKill: true, FrameIndex: FI, RC: &RC, VReg: Register());
280 MachineBasicBlock::iterator II = std::prev(x: Before);
281
282 unsigned FIOperandNum = getFrameIndexOperandNum(MI&: *II);
283 TRI->eliminateFrameIndex(MI: II, SPAdj, FIOperandNum, RS: this);
284
285 // Restore the scavenged register before its use (or first terminator).
286 TII->loadRegFromStackSlot(MBB&: *MBB, MI: UseMI, DestReg: Reg, FrameIndex: FI, RC: &RC, VReg: Register());
287 II = std::prev(x: UseMI);
288
289 FIOperandNum = getFrameIndexOperandNum(MI&: *II);
290 TRI->eliminateFrameIndex(MI: II, SPAdj, FIOperandNum, RS: this);
291 }
292 return Scavenged[SI];
293}
294
295Register RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
296 MachineBasicBlock::iterator To,
297 bool RestoreAfter, int SPAdj,
298 bool AllowSpill) {
299 const MachineBasicBlock &MBB = *To->getParent();
300 const MachineFunction &MF = *MBB.getParent();
301
302 // Find the register whose use is furthest away.
303 ArrayRef<MCPhysReg> AllocationOrder = RC.getRawAllocationOrder(MF);
304 std::pair<MCPhysReg, MachineBasicBlock::iterator> P = findSurvivorBackwards(
305 MRI: *MRI, From: std::prev(x: MBBI), To, LiveOut: LiveUnits, AllocationOrder, RestoreAfter);
306 MCPhysReg Reg = P.first;
307 MachineBasicBlock::iterator SpillBefore = P.second;
308 // Found an available register?
309 if (Reg != 0 && SpillBefore == MBB.end()) {
310 LLVM_DEBUG(dbgs() << "Scavenged free register: " << printReg(Reg, TRI)
311 << '\n');
312 return Reg;
313 }
314
315 if (!AllowSpill)
316 return 0;
317
318 assert(Reg != 0 && "No register left to scavenge!");
319
320 MachineBasicBlock::iterator ReloadBefore =
321 RestoreAfter ? std::next(x: MBBI) : MBBI;
322 if (ReloadBefore != MBB.end())
323 LLVM_DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n');
324 ScavengedInfo &Scavenged = spill(Reg, RC, SPAdj, Before: SpillBefore, UseMI&: ReloadBefore);
325 Scavenged.Restore = &*std::prev(x: SpillBefore);
326 LiveUnits.removeReg(Reg);
327 LLVM_DEBUG(dbgs() << "Scavenged register with spill: " << printReg(Reg, TRI)
328 << " until " << *SpillBefore);
329 return Reg;
330}
331
332/// Allocate a register for the virtual register \p VReg. The last use of
333/// \p VReg is around the current position of the register scavenger \p RS.
334/// \p ReserveAfter controls whether the scavenged register needs to be reserved
335/// after the current instruction, otherwise it will only be reserved before the
336/// current instruction.
337static Register scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
338 Register VReg, bool ReserveAfter) {
339 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
340#ifndef NDEBUG
341 // Verify that all definitions and uses are in the same basic block.
342 const MachineBasicBlock *CommonMBB = nullptr;
343 // Real definition for the reg, re-definitions are not considered.
344 const MachineInstr *RealDef = nullptr;
345 for (MachineOperand &MO : MRI.reg_nodbg_operands(VReg)) {
346 MachineBasicBlock *MBB = MO.getParent()->getParent();
347 if (CommonMBB == nullptr)
348 CommonMBB = MBB;
349 assert(MBB == CommonMBB && "All defs+uses must be in the same basic block");
350 if (MO.isDef()) {
351 const MachineInstr &MI = *MO.getParent();
352 if (!MI.readsRegister(VReg, &TRI)) {
353 assert((!RealDef || RealDef == &MI) &&
354 "Can have at most one definition which is not a redefinition");
355 RealDef = &MI;
356 }
357 }
358 }
359 assert(RealDef != nullptr && "Must have at least 1 Def");
360#endif
361
362 // We should only have one definition of the register. However to accommodate
363 // the requirements of two address code we also allow definitions in
364 // subsequent instructions provided they also read the register. That way
365 // we get a single contiguous lifetime.
366 //
367 // Definitions in MRI.def_begin() are unordered, search for the first.
368 MachineRegisterInfo::def_iterator FirstDef = llvm::find_if(
369 Range: MRI.def_operands(Reg: VReg), P: [VReg, &TRI](const MachineOperand &MO) {
370 return !MO.getParent()->readsRegister(Reg: VReg, TRI: &TRI);
371 });
372 assert(FirstDef != MRI.def_end() &&
373 "Must have one definition that does not redefine vreg");
374 MachineInstr &DefMI = *FirstDef->getParent();
375
376 // The register scavenger will report a free register inserting an emergency
377 // spill/reload if necessary.
378 int SPAdj = 0;
379 const TargetRegisterClass &RC = *MRI.getRegClass(Reg: VReg);
380 Register SReg = RS.scavengeRegisterBackwards(RC, To: DefMI.getIterator(),
381 RestoreAfter: ReserveAfter, SPAdj);
382 MRI.replaceRegWith(FromReg: VReg, ToReg: SReg);
383 ++NumScavengedRegs;
384 return SReg;
385}
386
387/// Allocate (scavenge) vregs inside a single basic block.
388/// Returns true if the target spill callback created new vregs and a 2nd pass
389/// is necessary.
390static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
391 RegScavenger &RS,
392 MachineBasicBlock &MBB) {
393 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
394 RS.enterBasicBlockEnd(MBB);
395
396 unsigned InitialNumVirtRegs = MRI.getNumVirtRegs();
397 bool NextInstructionReadsVReg = false;
398 for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin(); ) {
399 // Move RegScavenger to the position between *std::prev(I) and *I.
400 RS.backward(I);
401 --I;
402
403 // Look for unassigned vregs in the uses of *std::next(I).
404 if (NextInstructionReadsVReg) {
405 MachineBasicBlock::iterator N = std::next(x: I);
406 const MachineInstr &NMI = *N;
407 for (const MachineOperand &MO : NMI.operands()) {
408 if (!MO.isReg())
409 continue;
410 Register Reg = MO.getReg();
411 // We only care about virtual registers and ignore virtual registers
412 // created by the target callbacks in the process (those will be handled
413 // in a scavenging round).
414 if (!Reg.isVirtual() || Reg.virtRegIndex() >= InitialNumVirtRegs)
415 continue;
416 if (!MO.readsReg())
417 continue;
418
419 Register SReg = scavengeVReg(MRI, RS, VReg: Reg, ReserveAfter: true);
420 N->addRegisterKilled(IncomingReg: SReg, RegInfo: &TRI, AddIfNotFound: false);
421 RS.setRegUsed(Reg: SReg);
422 }
423 }
424
425 // Look for unassigned vregs in the defs of *I.
426 NextInstructionReadsVReg = false;
427 const MachineInstr &MI = *I;
428 for (const MachineOperand &MO : MI.operands()) {
429 if (!MO.isReg())
430 continue;
431 Register Reg = MO.getReg();
432 // Only vregs, no newly created vregs (see above).
433 if (!Reg.isVirtual() || Reg.virtRegIndex() >= InitialNumVirtRegs)
434 continue;
435 // We have to look at all operands anyway so we can precalculate here
436 // whether there is a reading operand. This allows use to skip the use
437 // step in the next iteration if there was none.
438 assert(!MO.isInternalRead() && "Cannot assign inside bundles");
439 assert((!MO.isUndef() || MO.isDef()) && "Cannot handle undef uses");
440 if (MO.readsReg()) {
441 NextInstructionReadsVReg = true;
442 }
443 if (MO.isDef()) {
444 Register SReg = scavengeVReg(MRI, RS, VReg: Reg, ReserveAfter: false);
445 I->addRegisterDead(Reg: SReg, RegInfo: &TRI, AddIfNotFound: false);
446 }
447 }
448 }
449#ifndef NDEBUG
450 for (const MachineOperand &MO : MBB.front().operands()) {
451 if (!MO.isReg() || !MO.getReg().isVirtual())
452 continue;
453 assert(!MO.isInternalRead() && "Cannot assign inside bundles");
454 assert((!MO.isUndef() || MO.isDef()) && "Cannot handle undef uses");
455 assert(!MO.readsReg() && "Vreg use in first instruction not allowed");
456 }
457#endif
458
459 return MRI.getNumVirtRegs() != InitialNumVirtRegs;
460}
461
462void llvm::scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS) {
463 // FIXME: Iterating over the instruction stream is unnecessary. We can simply
464 // iterate over the vreg use list, which at this point only contains machine
465 // operands for which eliminateFrameIndex need a new scratch reg.
466 MachineRegisterInfo &MRI = MF.getRegInfo();
467 // Shortcut.
468 if (MRI.getNumVirtRegs() == 0) {
469 MF.getProperties().setNoVRegs();
470 return;
471 }
472
473 // Run through the instructions and find any virtual registers.
474 for (MachineBasicBlock &MBB : MF) {
475 if (MBB.empty())
476 continue;
477
478 bool Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
479 if (Again) {
480 LLVM_DEBUG(dbgs() << "Warning: Required two scavenging passes for block "
481 << MBB.getName() << '\n');
482 Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
483 // The target required a 2nd run (because it created new vregs while
484 // spilling). Refuse to do another pass to keep compiletime in check.
485 if (Again)
486 report_fatal_error(reason: "Incomplete scavenging after 2nd pass");
487 }
488 }
489
490 MRI.clearVirtRegs();
491 MF.getProperties().setNoVRegs();
492}
493
494namespace {
495
496/// This class runs register scavenging independ of the PrologEpilogInserter.
497/// This is used in for testing.
498class ScavengerTest : public MachineFunctionPass {
499public:
500 static char ID;
501
502 ScavengerTest() : MachineFunctionPass(ID) {}
503
504 bool runOnMachineFunction(MachineFunction &MF) override {
505 const TargetSubtargetInfo &STI = MF.getSubtarget();
506 const TargetFrameLowering &TFL = *STI.getFrameLowering();
507
508 RegScavenger RS;
509 // Let's hope that calling those outside of PrologEpilogueInserter works
510 // well enough to initialize the scavenger with some emergency spillslots
511 // for the target.
512 BitVector SavedRegs;
513 TFL.determineCalleeSaves(MF, SavedRegs, RS: &RS);
514 TFL.processFunctionBeforeFrameFinalized(MF, RS: &RS);
515
516 // Let's scavenge the current function
517 scavengeFrameVirtualRegs(MF, RS);
518 return true;
519 }
520};
521
522} // end anonymous namespace
523
524char ScavengerTest::ID;
525
526INITIALIZE_PASS(ScavengerTest, "scavenger-test",
527 "Scavenge virtual registers inside basic blocks", false, false)
528