//===- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveDebugVariables.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpillSlots, "Number of spill slots allocated");
STATISTIC(NumIdCopies, "Number of identity moves eliminated after rewriting");

//===----------------------------------------------------------------------===//
// VirtRegMap implementation
//===----------------------------------------------------------------------===//

char VirtRegMapWrapperLegacy::ID = 0;

INITIALIZE_PASS(VirtRegMapWrapperLegacy, "virtregmap", "Virtual Register Map",
                false, true)

void VirtRegMap::init(MachineFunction &mf) {
  MRI = &mf.getRegInfo();
  TII = mf.getSubtarget().getInstrInfo();
  TRI = mf.getSubtarget().getRegisterInfo();
  MF = &mf;

  Virt2PhysMap.clear();
  Virt2StackSlotMap.clear();
  Virt2SplitMap.clear();
  Virt2ShapeMap.clear();

  grow();
}

void VirtRegMap::grow() {
  unsigned NumRegs = MF->getRegInfo().getNumVirtRegs();
  Virt2PhysMap.resize(NumRegs);
  Virt2StackSlotMap.resize(NumRegs);
  Virt2SplitMap.resize(NumRegs);
}

void VirtRegMap::assignVirt2Phys(Register virtReg, MCRegister physReg) {
  assert(virtReg.isVirtual() && physReg.isPhysical());
  assert(!Virt2PhysMap[virtReg] &&
         "attempt to assign physical register to already mapped "
         "virtual register");
  assert(!getRegInfo().isReserved(physReg) &&
         "Attempt to map virtReg to a reserved physReg");
  Virt2PhysMap[virtReg] = physReg;
}

unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) {
  unsigned Size = TRI->getSpillSize(*RC);
  Align Alignment = TRI->getSpillAlign(*RC);
  // Set preferred alignment if we are still able to realign the stack
  auto &ST = MF->getSubtarget();
  Align CurrentAlign = ST.getFrameLowering()->getStackAlign();
  if (Alignment > CurrentAlign && !ST.getRegisterInfo()->canRealignStack(*MF)) {
    Alignment = CurrentAlign;
  }
  int SS = MF->getFrameInfo().CreateSpillStackObject(Size, Alignment);
  ++NumSpillSlots;
  return SS;
}

bool VirtRegMap::hasPreferredPhys(Register VirtReg) const {
  Register Hint = MRI->getSimpleHint(VirtReg);
  if (!Hint.isValid())
    return false;
  if (Hint.isVirtual())
    Hint = getPhys(Hint);
  return Register(getPhys(VirtReg)) == Hint;
}

bool VirtRegMap::hasKnownPreference(Register VirtReg) const {
  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(VirtReg);
  if (Hint.second.isPhysical())
    return true;
  if (Hint.second.isVirtual())
    return hasPhys(Hint.second);
  return false;
}

int VirtRegMap::assignVirt2StackSlot(Register virtReg) {
  assert(virtReg.isVirtual());
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF->getRegInfo().getRegClass(virtReg);
  return Virt2StackSlotMap[virtReg] = createSpillSlot(RC);
}

void VirtRegMap::assignVirt2StackSlot(Register virtReg, int SS) {
  assert(virtReg.isVirtual());
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  assert((SS >= 0 ||
          (SS >= MF->getFrameInfo().getObjectIndexBegin())) &&
         "illegal fixed frame index");
  Virt2StackSlotMap[virtReg] = SS;
}

void VirtRegMap::print(raw_ostream &OS, const Module*) const {
  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    if (Virt2PhysMap[Reg]) {
      OS << '[' << printReg(Reg, TRI) << " -> "
         << printReg(Virt2PhysMap[Reg], TRI) << "] "
         << TRI->getRegClassName(MRI->getRegClass(Reg)) << "\n";
    }
  }

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    if (Virt2StackSlotMap[Reg] != VirtRegMap::NO_STACK_SLOT) {
      OS << '[' << printReg(Reg, TRI) << " -> fi#" << Virt2StackSlotMap[Reg]
         << "] " << TRI->getRegClassName(MRI->getRegClass(Reg)) << "\n";
    }
  }
  OS << '\n';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void VirtRegMap::dump() const {
  print(dbgs());
}
#endif

AnalysisKey VirtRegMapAnalysis::Key;

PreservedAnalyses
VirtRegMapPrinterPass::run(MachineFunction &MF,
                           MachineFunctionAnalysisManager &MFAM) {
  OS << MFAM.getResult<VirtRegMapAnalysis>(MF);
  return PreservedAnalyses::all();
}

VirtRegMap VirtRegMapAnalysis::run(MachineFunction &MF,
                                   MachineFunctionAnalysisManager &MAM) {
  VirtRegMap VRM;
  VRM.init(MF);
  return VRM;
}

//===----------------------------------------------------------------------===//
// VirtRegRewriter
//===----------------------------------------------------------------------===//
//
// The VirtRegRewriter is the last of the register allocator passes.
// It rewrites virtual registers to physical registers as specified in the
// VirtRegMap analysis. It also updates live-in information on basic blocks
// according to LiveIntervals.
//
namespace {

class VirtRegRewriter {
  MachineFunction *MF = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  SlotIndexes *Indexes = nullptr;
  LiveIntervals *LIS = nullptr;
  LiveRegMatrix *LRM = nullptr;
  VirtRegMap *VRM = nullptr;
  LiveDebugVariables *DebugVars = nullptr;
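  // Physical registers touched while rewriting; their stale regunit live
  // ranges are removed from LiveIntervals once rewriting is done.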
  DenseSet<Register> RewriteRegs;
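  // True for the final rewriter run: virtual registers are cleared from MRI
  // and the deferred DBG_VALUE instructions are emitted.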
  bool ClearVirtRegs;

  void rewrite();
  void addMBBLiveIns();
  bool readsUndefSubreg(const MachineOperand &MO) const;
  void addLiveInsForSubRanges(const LiveInterval &LI, MCRegister PhysReg) const;
  void handleIdentityCopy(MachineInstr &MI);
  void expandCopyBundle(MachineInstr &MI) const;
  bool subRegLiveThrough(const MachineInstr &MI, MCRegister SuperPhysReg) const;
  LaneBitmask liveOutUndefPhiLanesForUndefSubregDef(
      const LiveInterval &LI, const MachineBasicBlock &MBB, unsigned SubReg,
      MCRegister PhysReg, const MachineInstr &MI) const;

public:
  VirtRegRewriter(bool ClearVirtRegs, SlotIndexes *Indexes, LiveIntervals *LIS,
                  LiveRegMatrix *LRM, VirtRegMap *VRM,
                  LiveDebugVariables *DebugVars)
      : Indexes(Indexes), LIS(LIS), LRM(LRM), VRM(VRM), DebugVars(DebugVars),
        ClearVirtRegs(ClearVirtRegs) {}

  bool run(MachineFunction &);
};

class VirtRegRewriterLegacy : public MachineFunctionPass {
public:
  static char ID;
  bool ClearVirtRegs;
  VirtRegRewriterLegacy(bool ClearVirtRegs = true)
      : MachineFunctionPass(ID), ClearVirtRegs(ClearVirtRegs) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  MachineFunctionProperties getSetProperties() const override {
    if (ClearVirtRegs) {
      return MachineFunctionProperties().setNoVRegs();
    }

    return MachineFunctionProperties();
  }
};

} // end anonymous namespace

char VirtRegRewriterLegacy::ID = 0;

char &llvm::VirtRegRewriterID = VirtRegRewriterLegacy::ID;

INITIALIZE_PASS_BEGIN(VirtRegRewriterLegacy, "virtregrewriter",
                      "Virtual Register Rewriter", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariablesWrapperLegacy)
INITIALIZE_PASS_DEPENDENCY(LiveRegMatrixWrapperLegacy)
INITIALIZE_PASS_DEPENDENCY(LiveStacksWrapperLegacy)
INITIALIZE_PASS_DEPENDENCY(VirtRegMapWrapperLegacy)
INITIALIZE_PASS_END(VirtRegRewriterLegacy, "virtregrewriter",
                    "Virtual Register Rewriter", false, false)

void VirtRegRewriterLegacy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<LiveIntervalsWrapperPass>();
  AU.addPreserved<LiveIntervalsWrapperPass>();
  AU.addRequired<SlotIndexesWrapperPass>();
  AU.addPreserved<SlotIndexesWrapperPass>();
  AU.addRequired<LiveDebugVariablesWrapperLegacy>();
  AU.addRequired<LiveStacksWrapperLegacy>();
  AU.addPreserved<LiveStacksWrapperLegacy>();
  AU.addRequired<VirtRegMapWrapperLegacy>();
  AU.addRequired<LiveRegMatrixWrapperLegacy>();

  if (!ClearVirtRegs)
    AU.addPreserved<LiveDebugVariablesWrapperLegacy>();

  MachineFunctionPass::getAnalysisUsage(AU);
}

bool VirtRegRewriterLegacy::runOnMachineFunction(MachineFunction &MF) {
  VirtRegMap &VRM = getAnalysis<VirtRegMapWrapperLegacy>().getVRM();
  LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
  LiveRegMatrix &LRM = getAnalysis<LiveRegMatrixWrapperLegacy>().getLRM();
  SlotIndexes &Indexes = getAnalysis<SlotIndexesWrapperPass>().getSI();
  LiveDebugVariables &DebugVars =
      getAnalysis<LiveDebugVariablesWrapperLegacy>().getLDV();

  VirtRegRewriter R(ClearVirtRegs, &Indexes, &LIS, &LRM, &VRM, &DebugVars);
  return R.run(MF);
}

PreservedAnalyses
VirtRegRewriterPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  VirtRegMap &VRM = MFAM.getResult<VirtRegMapAnalysis>(MF);
  LiveIntervals &LIS = MFAM.getResult<LiveIntervalsAnalysis>(MF);
  LiveRegMatrix &LRM = MFAM.getResult<LiveRegMatrixAnalysis>(MF);
  SlotIndexes &Indexes = MFAM.getResult<SlotIndexesAnalysis>(MF);
  LiveDebugVariables &DebugVars =
      MFAM.getResult<LiveDebugVariablesAnalysis>(MF);

  VirtRegRewriter R(ClearVirtRegs, &Indexes, &LIS, &LRM, &VRM, &DebugVars);
  if (!R.run(MF))
    return PreservedAnalyses::all();

  auto PA = getMachineFunctionPassPreservedAnalyses();
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<LiveIntervalsAnalysis>();
  PA.preserve<SlotIndexesAnalysis>();
  PA.preserve<LiveStacksAnalysis>();
  // LiveDebugVariables is preserved by default, so clear it
  // if this VRegRewriter is the last one in the pipeline.
  if (ClearVirtRegs)
    PA.abandon<LiveDebugVariablesAnalysis>();
  return PA;
}

bool VirtRegRewriter::run(MachineFunction &fn) {
  MF = &fn;
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  MRI = &MF->getRegInfo();

  LLVM_DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
                    << "********** Function: " << MF->getName() << '\n');
  LLVM_DEBUG(VRM->dump());

  // Add kill flags while we still have virtual registers.
  LIS->addKillFlags(VRM);

  // Live-in lists on basic blocks are required for physregs.
  addMBBLiveIns();

  // Rewrite virtual registers.
  rewrite();

  if (ClearVirtRegs) {
    // Write out new DBG_VALUE instructions.

    // We only do this if ClearVirtRegs is specified since this should be the
    // final run of the pass and we don't want to emit them multiple times.
    DebugVars->emitDebugValues(VRM);

    // All machine operands and other references to virtual registers have been
    // replaced. Remove the virtual registers and release all the transient data.
    VRM->clearAllVirt();
    MRI->clearVirtRegs();
  }

  return true;
}

void VirtRegRewriter::addLiveInsForSubRanges(const LiveInterval &LI,
                                             MCRegister PhysReg) const {
  assert(!LI.empty());
  assert(LI.hasSubRanges());

  using SubRangeIteratorPair =
      std::pair<const LiveInterval::SubRange *, LiveInterval::const_iterator>;

  SmallVector<SubRangeIteratorPair, 4> SubRanges;
  SlotIndex First;
  SlotIndex Last;
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    SubRanges.push_back(std::make_pair(&SR, SR.begin()));
    if (!First.isValid() || SR.segments.front().start < First)
      First = SR.segments.front().start;
    if (!Last.isValid() || SR.segments.back().end > Last)
      Last = SR.segments.back().end;
  }

  // Check all mbb start positions between First and Last while
  // simultaneously advancing an iterator for each subrange.
  for (SlotIndexes::MBBIndexIterator MBBI = Indexes->getMBBLowerBound(First);
       MBBI != Indexes->MBBIndexEnd() && MBBI->first <= Last; ++MBBI) {
    SlotIndex MBBBegin = MBBI->first;
    // Advance all subrange iterators so that their end position is just
    // behind MBBBegin (or the iterator is at the end).
    LaneBitmask LaneMask;
    for (auto &RangeIterPair : SubRanges) {
      const LiveInterval::SubRange *SR = RangeIterPair.first;
      LiveInterval::const_iterator &SRI = RangeIterPair.second;
      while (SRI != SR->end() && SRI->end <= MBBBegin)
        ++SRI;
      if (SRI == SR->end())
        continue;
      if (SRI->start <= MBBBegin)
        LaneMask |= SR->LaneMask;
    }
    if (LaneMask.none())
      continue;
    MachineBasicBlock *MBB = MBBI->second;
    MBB->addLiveIn(PhysReg, LaneMask);
  }
}

// Compute MBB live-in lists from virtual register live ranges and their
// assignments.
void VirtRegRewriter::addMBBLiveIns() {
  for (unsigned Idx = 0, IdxE = MRI->getNumVirtRegs(); Idx != IdxE; ++Idx) {
    Register VirtReg = Register::index2VirtReg(Idx);
    if (MRI->reg_nodbg_empty(VirtReg))
      continue;
    LiveInterval &LI = LIS->getInterval(VirtReg);
    if (LI.empty() || LIS->intervalIsInOneMBB(LI))
      continue;
    // This is a virtual register that is live across basic blocks. Its
    // assigned PhysReg must be marked as live-in to those blocks.
    MCRegister PhysReg = VRM->getPhys(VirtReg);
    if (!PhysReg) {
      // There may be no physical register assigned if only some register
      // classes were already allocated.
      assert(!ClearVirtRegs && "Unmapped virtual register");
      continue;
    }

    if (LI.hasSubRanges()) {
      addLiveInsForSubRanges(LI, PhysReg);
    } else {
      // Go over MBB begin positions and see if we have segments covering them.
      // The following works because segments and the MBBIndex list are both
      // sorted by slot indexes.
      SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin();
      for (const auto &Seg : LI) {
        I = Indexes->getMBBLowerBound(I, Seg.start);
        for (; I != Indexes->MBBIndexEnd() && I->first < Seg.end; ++I) {
          MachineBasicBlock *MBB = I->second;
          MBB->addLiveIn(PhysReg);
        }
      }
    }
  }

  // Sort and unique MBB LiveIns as we've not checked if SubReg/PhysReg were in
  // each MBB's LiveIns set before calling addLiveIn on them.
  for (MachineBasicBlock &MBB : *MF)
    MBB.sortUniqueLiveIns();
}

/// Returns true if the given machine operand \p MO only reads undefined lanes.
/// The function only works for use operands with a subregister set.
bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
  // Shortcut if the operand is already marked undef.
  if (MO.isUndef())
    return true;

  Register Reg = MO.getReg();
  const LiveInterval &LI = LIS->getInterval(Reg);
  const MachineInstr &MI = *MO.getParent();
  SlotIndex BaseIndex = LIS->getInstructionIndex(MI);
  // This code is only meant to handle reading undefined subregisters which
  // we couldn't properly detect before.
  assert(LI.liveAt(BaseIndex) &&
         "Reads of completely dead register should be marked undef already");
  unsigned SubRegIdx = MO.getSubReg();
  assert(SubRegIdx != 0 && LI.hasSubRanges());
  LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
  // See if any of the relevant subregister live ranges is live at this point.
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((SR.LaneMask & UseMask).any() && SR.liveAt(BaseIndex))
      return false;
  }
  return true;
}

void VirtRegRewriter::handleIdentityCopy(MachineInstr &MI) {
  if (!MI.isIdentityCopy())
    return;
  LLVM_DEBUG(dbgs() << "Identity copy: " << MI);
  ++NumIdCopies;

  Register DstReg = MI.getOperand(0).getReg();

  // We may have deferred allocation of the virtual register, and the rewrite
  // regs code doesn't handle the liveness update.
  if (DstReg.isVirtual())
    return;

  RewriteRegs.insert(DstReg);

  // Copies like:
  //    %r0 = COPY undef %r0
  //    %al = COPY %al, implicit-def %eax
  // give us additional liveness information: The target (super-)register
  // must not be valid before this point. Replace the COPY with a KILL
  // instruction to maintain this information.
  if (MI.getOperand(1).isUndef() || MI.getNumOperands() > 2) {
    MI.setDesc(TII->get(TargetOpcode::KILL));
    LLVM_DEBUG(dbgs() << " replace by: " << MI);
    return;
  }

  if (Indexes)
    Indexes->removeSingleMachineInstrFromMaps(MI);
  MI.eraseFromBundle();
  LLVM_DEBUG(dbgs() << " deleted.\n");
}

/// The liverange splitting logic sometimes produces bundles of copies when
/// subregisters are involved. Expand these into a sequence of copy instructions
/// after processing the last in the bundle. Does not update LiveIntervals
/// which we shouldn't need for this instruction anymore.
void VirtRegRewriter::expandCopyBundle(MachineInstr &MI) const {
  if (!MI.isCopy() && !MI.isKill())
    return;

  if (MI.isBundledWithPred() && !MI.isBundledWithSucc()) {
    SmallVector<MachineInstr *, 2> MIs({&MI});

    // Only do this when the complete bundle is made out of COPYs and KILLs.
    MachineBasicBlock &MBB = *MI.getParent();
    for (MachineBasicBlock::reverse_instr_iterator I =
             std::next(MI.getReverseIterator()), E = MBB.instr_rend();
         I != E && I->isBundledWithSucc(); ++I) {
      if (!I->isCopy() && !I->isKill())
        return;
      MIs.push_back(&*I);
    }
    MachineInstr *FirstMI = MIs.back();

    auto anyRegsAlias = [](const MachineInstr *Dst,
                           ArrayRef<MachineInstr *> Srcs,
                           const TargetRegisterInfo *TRI) {
      for (const MachineInstr *Src : Srcs)
        if (Src != Dst)
          if (TRI->regsOverlap(Dst->getOperand(0).getReg(),
                               Src->getOperand(1).getReg()))
            return true;
      return false;
    };

    // If any of the destination registers in the bundle of copies alias any of
    // the source registers, try to schedule the instructions to avoid any
    // clobbering.
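    // Each pass below peels off copies whose destination does not alias any
    // still-unscheduled source and moves them to the scheduled tail of MIs;
    // if a pass makes no progress, the remaining copies form a cycle.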
    for (int E = MIs.size(), PrevE = E; E > 1; PrevE = E) {
      for (int I = E; I--; )
        if (!anyRegsAlias(MIs[I], ArrayRef(MIs).take_front(E), TRI)) {
          if (I + 1 != E)
            std::swap(MIs[I], MIs[E - 1]);
          --E;
        }
      if (PrevE == E) {
        MF->getFunction().getContext().emitError(
            "register rewriting failed: cycle in copy bundle");
        break;
      }
    }

    MachineInstr *BundleStart = FirstMI;
    for (MachineInstr *BundledMI : llvm::reverse(MIs)) {
      // If instruction is in the middle of the bundle, move it before the
      // bundle starts, otherwise, just unbundle it. When we get to the last
      // instruction, the bundle will have been completely undone.
      if (BundledMI != BundleStart) {
        BundledMI->removeFromBundle();
        MBB.insert(BundleStart, BundledMI);
      } else if (BundledMI->isBundledWithSucc()) {
        BundledMI->unbundleFromSucc();
        BundleStart = &*std::next(BundledMI->getIterator());
      }

      if (Indexes && BundledMI != FirstMI)
        Indexes->insertMachineInstrInMaps(*BundledMI);
    }
  }
}

/// Check whether (part of) \p SuperPhysReg is live through \p MI.
/// \pre \p MI defines a subregister of a virtual register that
/// has been assigned to \p SuperPhysReg.
bool VirtRegRewriter::subRegLiveThrough(const MachineInstr &MI,
                                        MCRegister SuperPhysReg) const {
  SlotIndex MIIndex = LIS->getInstructionIndex(MI);
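  // BeforeMIUses is the slot just before MI (the base index); AfterMIDefs is
  // the slot just after MI's defs take effect (the boundary index).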
  SlotIndex BeforeMIUses = MIIndex.getBaseIndex();
  SlotIndex AfterMIDefs = MIIndex.getBoundaryIndex();
  for (MCRegUnit Unit : TRI->regunits(SuperPhysReg)) {
    const LiveRange &UnitRange = LIS->getRegUnit(Unit);
    // If the regunit is live both before and after MI,
    // we assume it is live through.
    // Generally speaking, this is not true, because something like
    // "RU = op RU" would match that description.
    // However, we know that we are trying to assess whether
    // a def of a virtual reg, vreg, is live at the same time of RU.
    // If we are in the "RU = op RU" situation, that means that vreg
    // is defined at the same time as RU (i.e., "vreg, RU = op RU").
    // Thus, vreg and RU interfere and vreg cannot be assigned to
    // SuperPhysReg. Therefore, this situation cannot happen.
    if (UnitRange.liveAt(AfterMIDefs) && UnitRange.liveAt(BeforeMIUses))
      return true;
  }
  return false;
}

/// Compute a lanemask for undef lanes which need to be preserved out of the
/// defining block for a register assignment for a subregister def. \p PhysReg
/// is assigned to \p LI, which is the main range.
LaneBitmask VirtRegRewriter::liveOutUndefPhiLanesForUndefSubregDef(
    const LiveInterval &LI, const MachineBasicBlock &MBB, unsigned SubReg,
    MCRegister PhysReg, const MachineInstr &MI) const {
  LaneBitmask UndefMask = ~TRI->getSubRegIndexLaneMask(SubReg);
  LaneBitmask LiveOutUndefLanes;

  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    // Figure out which lanes are undef live into a successor.
    LaneBitmask NeedImpDefLanes = UndefMask & SR.LaneMask;
    if (NeedImpDefLanes.any() && !LIS->isLiveOutOfMBB(SR, &MBB)) {
      for (const MachineBasicBlock *Succ : MBB.successors()) {
        if (LIS->isLiveInToMBB(SR, Succ))
          LiveOutUndefLanes |= NeedImpDefLanes;
      }
    }
  }

  SlotIndex MIIndex = LIS->getInstructionIndex(MI);
  SlotIndex BeforeMIUses = MIIndex.getBaseIndex();
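  // Lanes of PhysReg that already interfere with another assignment at MI must
  // not receive extra implicit defs, so mask them out.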
  LaneBitmask InterferingLanes =
      LRM->checkInterferenceLanes(BeforeMIUses, MIIndex.getRegSlot(), PhysReg);
  LiveOutUndefLanes &= ~InterferingLanes;

  LLVM_DEBUG(if (LiveOutUndefLanes.any()) {
    dbgs() << "Need live out undef defs for " << printReg(PhysReg)
           << LiveOutUndefLanes << " from " << printMBBReference(MBB) << '\n';
  });

  return LiveOutUndefLanes;
}

void VirtRegRewriter::rewrite() {
  bool NoSubRegLiveness = !MRI->subRegLivenessEnabled();
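  // Implicit super-register operands collected while rewriting an instruction;
  // they are attached to it once all of its operands have been rewritten.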
  SmallVector<Register, 8> SuperDeads;
  SmallVector<Register, 8> SuperDefs;
  SmallVector<Register, 8> SuperKills;

  for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
       MBBI != MBBE; ++MBBI) {
    LLVM_DEBUG(MBBI->print(dbgs(), Indexes));
    for (MachineInstr &MI : llvm::make_early_inc_range(MBBI->instrs())) {
      for (MachineOperand &MO : MI.operands()) {
        // Make sure MRI knows about registers clobbered by regmasks.
        if (MO.isRegMask())
          MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());

        if (!MO.isReg() || !MO.getReg().isVirtual())
          continue;
        Register VirtReg = MO.getReg();
        MCRegister PhysReg = VRM->getPhys(VirtReg);
        if (!PhysReg)
          continue;

        assert(Register(PhysReg).isPhysical());

        RewriteRegs.insert(PhysReg);
        assert(!MRI->isReserved(PhysReg) && "Reserved register assignment");

        // Preserve semantics of sub-register operands.
        unsigned SubReg = MO.getSubReg();
        if (SubReg != 0) {
          if (NoSubRegLiveness || !MRI->shouldTrackSubRegLiveness(VirtReg)) {
            // A virtual register kill refers to the whole register, so we may
            // have to add implicit killed operands for the super-register. A
            // partial redef always kills and redefines the super-register.
            if ((MO.readsReg() && (MO.isDef() || MO.isKill())) ||
                (MO.isDef() && subRegLiveThrough(MI, PhysReg)))
              SuperKills.push_back(PhysReg);

            if (MO.isDef()) {
              // Also add implicit defs for the super-register.
              if (MO.isDead())
                SuperDeads.push_back(PhysReg);
              else
                SuperDefs.push_back(PhysReg);
            }
          } else {
            if (MO.isUse()) {
              if (readsUndefSubreg(MO))
                // We need to add an <undef> flag if the subregister is
                // completely undefined (and we are not adding super-register
                // defs).
                MO.setIsUndef(true);
            } else if (!MO.isDead()) {
              assert(MO.isDef());
              if (MO.isUndef()) {
                const LiveInterval &LI = LIS->getInterval(VirtReg);

                LaneBitmask LiveOutUndefLanes =
                    liveOutUndefPhiLanesForUndefSubregDef(LI, *MBBI, SubReg,
                                                          PhysReg, MI);
                if (LiveOutUndefLanes.any()) {
                  SmallVector<unsigned, 16> CoveringIndexes;

                  // TODO: Just use one super register def if none of the lanes
                  // are needed?
                  if (!TRI->getCoveringSubRegIndexes(MRI->getRegClass(VirtReg),
                                                     LiveOutUndefLanes,
                                                     CoveringIndexes))
                    llvm_unreachable(
                        "cannot represent required subregister defs");

                  // Try to represent the minimum needed live out def as a
                  // sequence of subregister defs.
                  //
                  // FIXME: It would be better if we could directly represent
                  // liveness with a lanemask instead of spamming operands.
                  for (unsigned SubIdx : CoveringIndexes)
                    SuperDefs.push_back(TRI->getSubReg(PhysReg, SubIdx));
                }
              }
            }
          }

          // The def undef and def internal flags only make sense for
          // sub-register defs, and we are substituting a full physreg. An
          // implicit killed operand from the SuperKills list will represent the
          // partial read of the super-register.
          if (MO.isDef()) {
            MO.setIsUndef(false);
            MO.setIsInternalRead(false);
          }

          // PhysReg operands cannot have subregister indexes.
          PhysReg = TRI->getSubReg(PhysReg, SubReg);
          assert(PhysReg.isValid() && "Invalid SubReg for physical register");
          MO.setSubReg(0);
        }
        // Rewrite. Note we could have used MachineOperand::substPhysReg(), but
        // we need the inlining here.
        MO.setReg(PhysReg);
        MO.setIsRenamable(true);
      }

      // Add any missing super-register kills after rewriting the whole
      // instruction.
      while (!SuperKills.empty())
        MI.addRegisterKilled(SuperKills.pop_back_val(), TRI, true);

      while (!SuperDeads.empty())
        MI.addRegisterDead(SuperDeads.pop_back_val(), TRI, true);

      while (!SuperDefs.empty())
        MI.addRegisterDefined(SuperDefs.pop_back_val(), TRI);

      LLVM_DEBUG(dbgs() << "> " << MI);

      expandCopyBundle(MI);

      // We can remove identity copies right now.
      handleIdentityCopy(MI);
    }
  }

  if (LIS) {
    // Don't bother maintaining accurate LiveIntervals for registers which were
    // already allocated.
    for (Register PhysReg : RewriteRegs) {
      for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
        LIS->removeRegUnit(Unit);
      }
    }
  }

  RewriteRegs.clear();
}

void VirtRegRewriterPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)>) const {
  OS << "virt-reg-rewriter";
  if (!ClearVirtRegs)
    OS << "<no-clear-vregs>";
}

FunctionPass *llvm::createVirtRegRewriter(bool ClearVirtRegs) {
  return new VirtRegRewriterLegacy(ClearVirtRegs);
}