1//===- SplitKit.cpp - Toolkit for splitting live ranges -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the SplitAnalysis class as well as mutator functions for
10// live range splitting.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SplitKit.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/CodeGen/LiveRangeEdit.h"
18#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
19#include "llvm/CodeGen/MachineDominators.h"
20#include "llvm/CodeGen/MachineInstr.h"
21#include "llvm/CodeGen/MachineInstrBuilder.h"
22#include "llvm/CodeGen/MachineLoopInfo.h"
23#include "llvm/CodeGen/MachineOperand.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/TargetInstrInfo.h"
26#include "llvm/CodeGen/TargetOpcodes.h"
27#include "llvm/CodeGen/TargetRegisterInfo.h"
28#include "llvm/CodeGen/TargetSubtargetInfo.h"
29#include "llvm/CodeGen/VirtRegMap.h"
30#include "llvm/Config/llvm-config.h"
31#include "llvm/IR/DebugLoc.h"
32#include "llvm/Support/Allocator.h"
33#include "llvm/Support/BlockFrequency.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/ErrorHandling.h"
36#include "llvm/Support/raw_ostream.h"
37#include <algorithm>
38#include <cassert>
39#include <iterator>
40#include <limits>
41#include <tuple>
42
43using namespace llvm;
44
45#define DEBUG_TYPE "regalloc"
46
// Toggle for the loop-induction-variable heuristic computed at the end of
// SplitAnalysis::calcLiveBlockInfo (sets LooksLikeLoopIV).
static cl::opt<bool>
    EnableLoopIVHeuristic("enable-split-loopiv-heuristic",
                          cl::desc("Enable loop iv regalloc heuristic"),
                          cl::init(true));

// -stats counters, reported under DEBUG_TYPE "regalloc".
STATISTIC(NumFinished, "Number of splits finished");
STATISTIC(NumSimple,   "Number of splits that were simple");
STATISTIC(NumCopies,   "Number of copies inserted for splitting");
STATISTIC(NumRemats,   "Number of rematerialized defs for splitting");
56
57//===----------------------------------------------------------------------===//
58// Last Insert Point Analysis
59//===----------------------------------------------------------------------===//
60
// Reserve one cached insert-point pair per basic block (BBNum entries);
// the pairs are filled in lazily by computeLastInsertPoint.
InsertPointAnalysis::InsertPointAnalysis(const LiveIntervals &lis,
                                         unsigned BBNum)
    : LIS(lis), LastInsertPoint(BBNum) {}
64
/// Compute the last legal point to insert a split copy for CurLI in MBB.
/// The per-block pair cached in LastInsertPoint holds:
///   first  - the CurLI-independent last insert point (first terminator, or
///            the block end if there is no terminator);
///   second - when the block has exceptional successors, the index of the
///            call / INLINEASM_BR that may transfer control to them.
/// The returned index depends on whether CurLI is live into one of the
/// exceptional successors.
SlotIndex
InsertPointAnalysis::computeLastInsertPoint(const LiveInterval &CurLI,
                                            const MachineBasicBlock &MBB) {
  unsigned Num = MBB.getNumber();
  std::pair<SlotIndex, SlotIndex> &LIP = LastInsertPoint[Num];
  SlotIndex MBBEnd = LIS.getMBBEndIdx(&MBB);

  // Successors reached on an exceptional path: landing pads and
  // inlineasm_br indirect targets.
  SmallVector<const MachineBasicBlock *, 1> ExceptionalSuccessors;
  bool EHPadSuccessor = false;
  for (const MachineBasicBlock *SMBB : MBB.successors()) {
    if (SMBB->isEHPad()) {
      ExceptionalSuccessors.push_back(SMBB);
      EHPadSuccessor = true;
    } else if (SMBB->isInlineAsmBrIndirectTarget())
      ExceptionalSuccessors.push_back(SMBB);
  }

  // Compute insert points on the first call. The pair is independent of the
  // current live interval.
  if (!LIP.first.isValid()) {
    MachineBasicBlock::const_iterator FirstTerm = MBB.getFirstTerminator();
    if (FirstTerm == MBB.end())
      LIP.first = MBBEnd;
    else
      LIP.first = LIS.getInstructionIndex(*FirstTerm);

    // If there is a landing pad or inlineasm_br successor, also find the
    // instruction. If there is no such instruction, we don't need to do
    // anything special. We assume there cannot be multiple instructions that
    // are Calls with EHPad successors or INLINEASM_BR in a block. Further, we
    // assume that if there are any, they will be after any other call
    // instructions in the block.
    if (ExceptionalSuccessors.empty())
      return LIP.first;
    // Scan backwards so we find the last such instruction in the block.
    for (const MachineInstr &MI : llvm::reverse(MBB)) {
      if ((EHPadSuccessor && MI.isCall()) ||
          MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        LIP.second = LIS.getInstructionIndex(MI);
        break;
      }
    }
  }

  // If CurLI is live into a landing pad successor, move the last insert point
  // back to the call that may throw.
  if (!LIP.second)
    return LIP.first;

  // Not live into any exceptional successor: the terminator bound applies.
  if (none_of(ExceptionalSuccessors, [&](const MachineBasicBlock *EHPad) {
        return LIS.isLiveInToMBB(CurLI, EHPad);
      }))
    return LIP.first;

  // Find the value leaving MBB.
  const VNInfo *VNI = CurLI.getVNInfoBefore(MBBEnd);
  if (!VNI)
    return LIP.first;

  // The def of statepoint instruction is a gc relocation and it should be alive
  // in landing pad. So we cannot split interval after statepoint instruction.
  if (SlotIndex::isSameInstr(VNI->def, LIP.second))
    if (auto *I = LIS.getInstructionFromIndex(LIP.second))
      if (I->getOpcode() == TargetOpcode::STATEPOINT)
        return LIP.second;

  // If the value leaving MBB was defined after the call in MBB, it can't
  // really be live-in to the landing pad. This can happen if the landing pad
  // has a PHI, and this register is undef on the exceptional edge.
  if (!SlotIndex::isEarlierInstr(VNI->def, LIP.second) && VNI->def < MBBEnd)
    return LIP.first;

  // Value is properly live-in to the landing pad.
  // Only allow inserts before the call.
  return LIP.second;
}
140
141MachineBasicBlock::iterator
142InsertPointAnalysis::getLastInsertPointIter(const LiveInterval &CurLI,
143 MachineBasicBlock &MBB) {
144 SlotIndex LIP = getLastInsertPoint(CurLI, MBB);
145 if (LIP == LIS.getMBBEndIdx(mbb: &MBB))
146 return MBB.end();
147 return LIS.getInstructionFromIndex(index: LIP);
148}
149
150//===----------------------------------------------------------------------===//
151// Split Analysis
152//===----------------------------------------------------------------------===//
153
// SplitAnalysis only caches references to the analyses it consumes; all
// per-interval state is (re)computed by analyze().
SplitAnalysis::SplitAnalysis(const VirtRegMap &vrm, const LiveIntervals &lis,
                             const MachineLoopInfo &mli)
    : MF(vrm.getMachineFunction()), VRM(vrm), LIS(lis), Loops(mli),
      TII(*MF.getSubtarget().getInstrInfo()), IPA(lis, MF.getNumBlockIDs()) {}
158
/// Reset all per-interval state so a new interval can be analyzed.
void SplitAnalysis::clear() {
  UseSlots.clear();
  UseBlocks.clear();
  ThroughBlocks.clear();
  CurLI = nullptr;
}
165
/// analyzeUses - Count instructions, basic blocks, and loops using CurLI.
/// Populates UseSlots (sorted, one entry per instruction) and, via
/// calcLiveBlockInfo, UseBlocks/ThroughBlocks.
void SplitAnalysis::analyzeUses() {
  assert(UseSlots.empty() && "Call clear first");

  // First get all the defs from the interval values. This provides the correct
  // slots for early clobbers.
  for (const VNInfo *VNI : CurLI->valnos)
    if (!VNI->isPHIDef() && !VNI->isUnused())
      UseSlots.push_back(VNI->def);

  // Get use slots from the use-def chain.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineOperand &MO : MRI.use_nodbg_operands(CurLI->reg()))
    if (!MO.isUndef())
      UseSlots.push_back(LIS.getInstructionIndex(*MO.getParent()).getRegSlot());

  array_pod_sort(UseSlots.begin(), UseSlots.end());

  // Remove duplicates, keeping the smaller slot for each instruction.
  // That is what we want for early clobbers.
  UseSlots.erase(llvm::unique(UseSlots, SlotIndex::isSameInstr),
                 UseSlots.end());

  // Compute per-live block info.
  calcLiveBlockInfo();

  LLVM_DEBUG(dbgs() << "Analyze counted " << UseSlots.size() << " instrs in "
                    << UseBlocks.size() << " blocks, through "
                    << NumThroughBlocks << " blocks.\n");
}
196
/// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks
/// where CurLI is live. Walks CurLI's segments and UseSlots in lockstep:
/// blocks with uses get a BlockInfo entry in UseBlocks (possibly two entries
/// when the range has a gap inside the block), blocks that are merely
/// live-through are recorded in ThroughBlocks.
void SplitAnalysis::calcLiveBlockInfo() {
  ThroughBlocks.resize(MF.getNumBlockIDs());
  NumThroughBlocks = NumGapBlocks = 0;
  if (CurLI->empty())
    return;

  LiveInterval::const_iterator LVI = CurLI->begin();
  LiveInterval::const_iterator LVE = CurLI->end();

  SmallVectorImpl<SlotIndex>::const_iterator UseI, UseE;
  UseI = UseSlots.begin();
  UseE = UseSlots.end();

  // Loop over basic blocks where CurLI is live.
  MachineFunction::iterator MFI =
      LIS.getMBBFromIndex(LVI->start)->getIterator();
  while (true) {
    BlockInfo BI;
    BI.MBB = &*MFI;
    SlotIndex Start, Stop;
    std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);

    // If the block contains no uses, the range must be live through. At one
    // point, RegisterCoalescer could create dangling ranges that ended
    // mid-block.
    if (UseI == UseE || *UseI >= Stop) {
      ++NumThroughBlocks;
      ThroughBlocks.set(BI.MBB->getNumber());
      // The range shouldn't end mid-block if there are no uses. This shouldn't
      // happen.
      assert(LVI->end >= Stop && "range ends mid block with no uses");
    } else {
      // This block has uses. Find the first and last uses in the block.
      BI.FirstInstr = *UseI;
      assert(BI.FirstInstr >= Start);
      // Advance UseI past every use inside this block.
      do ++UseI;
      while (UseI != UseE && *UseI < Stop);
      BI.LastInstr = UseI[-1];
      assert(BI.LastInstr < Stop);

      // LVI is the first live segment overlapping MBB.
      BI.LiveIn = LVI->start <= Start;

      // When not live in, the first use should be a def.
      if (!BI.LiveIn) {
        assert(LVI->start == LVI->valno->def && "Dangling Segment start");
        assert(LVI->start == BI.FirstInstr && "First instr should be a def");
        BI.FirstDef = BI.FirstInstr;
      }

      // Look for gaps in the live range.
      BI.LiveOut = true;
      while (LVI->end < Stop) {
        SlotIndex LastStop = LVI->end;
        if (++LVI == LVE || LVI->start >= Stop) {
          BI.LiveOut = false;
          BI.LastInstr = LastStop;
          break;
        }

        if (LastStop < LVI->start) {
          // There is a gap in the live range. Create duplicate entries for the
          // live-in snippet and the live-out snippet.
          ++NumGapBlocks;

          // Push the Live-in part.
          BI.LiveOut = false;
          UseBlocks.push_back(BI);
          UseBlocks.back().LastInstr = LastStop;

          // Set up BI for the live-out part.
          BI.LiveIn = false;
          BI.LiveOut = true;
          BI.FirstInstr = BI.FirstDef = LVI->start;
        }

        // A Segment that starts in the middle of the block must be a def.
        assert(LVI->start == LVI->valno->def && "Dangling Segment start");
        if (!BI.FirstDef)
          BI.FirstDef = LVI->start;
      }

      UseBlocks.push_back(BI);

      // LVI is now at LVE or LVI->end >= Stop.
      if (LVI == LVE)
        break;
    }

    // Live segment ends exactly at Stop. Move to the next segment.
    if (LVI->end == Stop && ++LVI == LVE)
      break;

    // Pick the next basic block.
    if (LVI->start < Stop)
      ++MFI;
    else
      MFI = LIS.getMBBFromIndex(LVI->start)->getIterator();
  }

  // Heuristic: two use blocks, one of which is a loop latch with a def while
  // being live-through, looks like a loop induction variable.
  LooksLikeLoopIV = EnableLoopIVHeuristic && UseBlocks.size() == 2 &&
                    any_of(UseBlocks, [this](BlockInfo &BI) {
                      MachineLoop *L = Loops.getLoopFor(BI.MBB);
                      return BI.LiveIn && BI.LiveOut && BI.FirstDef && L &&
                             L->isLoopLatch(BI.MBB);
                    });

  assert(getNumLiveBlocks() == countLiveBlocks(CurLI) && "Bad block count");
}
308
/// Count the blocks where cli is live, independently of the cached
/// UseBlocks/ThroughBlocks data. Used as a cross-check by the assert at the
/// end of calcLiveBlockInfo.
unsigned SplitAnalysis::countLiveBlocks(const LiveInterval *cli) const {
  if (cli->empty())
    return 0;
  // NOTE(review): the const_cast appears to exist only to obtain non-const
  // iterators for advanceTo; the interval is not modified here.
  LiveInterval *li = const_cast<LiveInterval*>(cli);
  LiveInterval::iterator LVI = li->begin();
  LiveInterval::iterator LVE = li->end();
  unsigned Count = 0;

  // Loop over basic blocks where li is live.
  MachineFunction::const_iterator MFI =
      LIS.getMBBFromIndex(LVI->start)->getIterator();
  SlotIndex Stop = LIS.getMBBEndIdx(&*MFI);
  while (true) {
    ++Count;
    // Skip to the first segment ending at or after the current block end.
    LVI = li->advanceTo(LVI, Stop);
    if (LVI == LVE)
      return Count;
    // Advance to the next block that overlaps the segment.
    do {
      ++MFI;
      Stop = LIS.getMBBEndIdx(&*MFI);
    } while (Stop <= LVI->start);
  }
}
332
333bool SplitAnalysis::isOriginalEndpoint(SlotIndex Idx) const {
334 Register OrigReg = VRM.getOriginal(VirtReg: CurLI->reg());
335 const LiveInterval &Orig = LIS.getInterval(Reg: OrigReg);
336 assert(!Orig.empty() && "Splitting empty interval?");
337 LiveInterval::const_iterator I = Orig.find(Pos: Idx);
338
339 // Range containing Idx should begin at Idx.
340 if (I != Orig.end() && I->start <= Idx)
341 return I->start == Idx;
342
343 // Range does not contain Idx, previous must end at Idx.
344 return I != Orig.begin() && (--I)->end == Idx;
345}
346
/// analyze - Set CurLI to the interval being split and recompute all
/// per-interval use and block information.
void SplitAnalysis::analyze(const LiveInterval *li) {
  clear();
  CurLI = li;
  analyzeUses();
}
352
353//===----------------------------------------------------------------------===//
354// Split Editor
355//===----------------------------------------------------------------------===//
356
/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
/// Caches the target/register info from the function being edited; RegAssign
/// (the SlotIndex -> open-interval-index map) is backed by Allocator.
SplitEditor::SplitEditor(SplitAnalysis &SA, LiveIntervals &LIS, VirtRegMap &VRM,
                         MachineDominatorTree &MDT,
                         MachineBlockFrequencyInfo &MBFI, VirtRegAuxInfo &VRAI)
    : SA(SA), LIS(LIS), VRM(VRM), MRI(VRM.getMachineFunction().getRegInfo()),
      MDT(MDT), TII(*VRM.getMachineFunction().getSubtarget().getInstrInfo()),
      TRI(*VRM.getMachineFunction().getSubtarget().getRegisterInfo()),
      MBFI(MBFI), VRAI(VRAI), RegAssign(Allocator) {}
365
/// Prepare the editor to split along the ranges in LRE under spill mode SM.
/// Clears all per-edit state (assignment map, value map, open interval).
void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
  Edit = &LRE;
  SpillMode = SM;
  OpenIdx = 0;
  RegAssign.clear();
  Values.clear();

  // Reset the LiveIntervalCalc instances needed for this spill mode.
  // LICalc[0] serves the complement interval; a nonzero spill mode also
  // needs LICalc[1] for the other intervals.
  LICalc[0].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
                  &LIS.getVNInfoAllocator());
  if (SpillMode)
    LICalc[1].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
                    &LIS.getVNInfoAllocator());
}
380
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the RegAssign map as a list of [start;stop):index entries.
LLVM_DUMP_METHOD void SplitEditor::dump() const {
  if (RegAssign.empty()) {
    dbgs() << " empty\n";
    return;
  }

  for (RegAssignMap::const_iterator I = RegAssign.begin(); I.valid(); ++I)
    dbgs() << " [" << I.start() << ';' << I.stop() << "):" << I.value();
  dbgs() << '\n';
}
#endif
393
/// Find a subrange corresponding to the exact lane mask @p LM in the live
/// interval @p LI. The interval @p LI is assumed to contain such a subrange.
/// This function is used to find corresponding subranges between the
/// original interval and the new intervals.
/// Templated so the const and non-const overloads below share one body.
template <typename T> auto &getSubrangeImpl(LaneBitmask LM, T &LI) {
  for (auto &S : LI.subranges())
    if (S.LaneMask == LM)
      return S;
  llvm_unreachable("SubRange for this mask not found");
}
404
/// Non-const overload: exact-mask subrange lookup (see getSubrangeImpl).
LiveInterval::SubRange &getSubRangeForMaskExact(LaneBitmask LM,
                                                LiveInterval &LI) {
  return getSubrangeImpl(LM, LI);
}
409
/// Const overload: exact-mask subrange lookup (see getSubrangeImpl).
const LiveInterval::SubRange &getSubRangeForMaskExact(LaneBitmask LM,
                                                      const LiveInterval &LI) {
  return getSubrangeImpl(LM, LI);
}
414
415/// Find a subrange corresponding to the lane mask @p LM, or a superset of it,
416/// in the live interval @p LI. The interval @p LI is assumed to contain such
417/// a subrange. This function is used to find corresponding subranges between
418/// the original interval and the new intervals.
419const LiveInterval::SubRange &getSubRangeForMask(LaneBitmask LM,
420 const LiveInterval &LI) {
421 for (const LiveInterval::SubRange &S : LI.subranges())
422 if ((S.LaneMask & LM) == LM)
423 return S;
424 llvm_unreachable("SubRange for this mask not found");
425}
426
/// Create a dead def for VNI in LI, propagating the def into the affected
/// subranges when LI has any. @p Original distinguishes defs copied over from
/// the parent interval from freshly created ones (remat / inserted copies).
void SplitEditor::addDeadDef(LiveInterval &LI, VNInfo *VNI, bool Original) {
  // Without subranges the main range is all there is to update.
  if (!LI.hasSubRanges()) {
    LI.createDeadDef(VNI);
    return;
  }

  SlotIndex Def = VNI->def;
  if (Original) {
    // If we are transferring a def from the original interval, make sure
    // to only update the subranges for which the original subranges had
    // a def at this location.
    for (LiveInterval::SubRange &S : LI.subranges()) {
      auto &PS = getSubRangeForMask(S.LaneMask, Edit->getParent());
      VNInfo *PV = PS.getVNInfoAt(Def);
      if (PV != nullptr && PV->def == Def)
        S.createDeadDef(Def, LIS.getVNInfoAllocator());
    }
  } else {
    // This is a new def: either from rematerialization, or from an inserted
    // copy. Since rematerialization can regenerate a definition of a sub-
    // register, we need to check which subranges need to be updated.
    const MachineInstr *DefMI = LIS.getInstructionFromIndex(Def);
    assert(DefMI != nullptr);
    // Accumulate the lanes written by DefMI's defs of LI's register.
    LaneBitmask LM;
    for (const MachineOperand &DefOp : DefMI->defs()) {
      Register R = DefOp.getReg();
      if (R != LI.reg())
        continue;
      if (unsigned SR = DefOp.getSubReg())
        LM |= TRI.getSubRegIndexLaneMask(SR);
      else {
        // A full-register def covers every lane; no need to look further.
        LM = MRI.getMaxLaneMaskForVReg(R);
        break;
      }
    }
    for (LiveInterval::SubRange &S : LI.subranges())
      if ((S.LaneMask & LM).any())
        S.createDeadDef(Def, LIS.getVNInfoAllocator());
  }
}
467
/// Define a new value in interval RegIdx at Idx, mapped from ParentVNI.
/// The Values map tracks whether the (RegIdx, ParentVNI) pair is a "simple"
/// mapping (single def, liveness deferred) or a "complex" one whose liveness
/// must be recomputed later. Returns the new VNInfo.
VNInfo *SplitEditor::defValue(unsigned RegIdx,
                              const VNInfo *ParentVNI,
                              SlotIndex Idx,
                              bool Original) {
  assert(ParentVNI && "Mapping NULL value");
  assert(Idx.isValid() && "Invalid SlotIndex");
  assert(Edit->getParent().getVNInfoAt(Idx) == ParentVNI && "Bad Parent VNI");
  LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));

  // Create a new value.
  VNInfo *VNI = LI->getNextValue(Idx, LIS.getVNInfoAllocator());

  // Intervals with subranges can never use the simple-mapping shortcut.
  bool Force = LI->hasSubRanges();
  ValueForcePair FP(Force ? nullptr : VNI, Force);
  // Use insert for lookup, so we can add missing values with a second lookup.
  std::pair<ValueMap::iterator, bool> InsP =
      Values.insert(std::make_pair(std::make_pair(RegIdx, ParentVNI->id), FP));

  // This was the first time (RegIdx, ParentVNI) was mapped, and it is not
  // forced. Keep it as a simple def without any liveness.
  if (!Force && InsP.second)
    return VNI;

  // If the previous value was a simple mapping, add liveness for it now.
  if (VNInfo *OldVNI = InsP.first->second.getPointer()) {
    addDeadDef(*LI, OldVNI, Original);

    // No longer a simple mapping. Switch to a complex mapping. If the
    // interval has subranges, make it a forced mapping.
    InsP.first->second = ValueForcePair(nullptr, Force);
  }

  // This is a complex mapping, add liveness for VNI
  addDeadDef(*LI, VNI, Original);
  return VNI;
}
504
/// Mark the (RegIdx, ParentVNI) mapping as needing liveness recomputation,
/// demoting a simple mapping to a forced complex one if necessary.
void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo &ParentVNI) {
  ValueForcePair &VFP = Values[std::make_pair(RegIdx, ParentVNI.id)];
  VNInfo *VNI = VFP.getPointer();

  // ParentVNI was either unmapped or already complex mapped. Either way, just
  // set the force bit.
  if (!VNI) {
    VFP.setInt(true);
    return;
  }

  // This was previously a single mapping. Make sure the old def is represented
  // by a trivial live range.
  addDeadDef(LIS.getInterval(Edit->get(RegIdx)), VNI, false);

  // Mark as complex mapped, forced.
  VFP = ValueForcePair(nullptr, true);
}
523
/// Emit one subregister copy FromReg:SubIdx -> ToReg:SubIdx before
/// InsertBefore. The first copy of a sequence (Def invalid on entry) gets an
/// undef def and its own slot index; subsequent copies are bundled with their
/// predecessor and marked internal-read, sharing the first copy's index.
/// Returns the (possibly newly assigned) Def index.
SlotIndex SplitEditor::buildSingleSubRegCopy(
    Register FromReg, Register ToReg, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, unsigned SubIdx,
    LiveInterval &DestLI, bool Late, SlotIndex Def, const MCInstrDesc &Desc) {
  bool FirstCopy = !Def.isValid();
  MachineInstr *CopyMI =
      BuildMI(MBB, InsertBefore, DebugLoc(), Desc)
          .addReg(ToReg,
                  RegState::Define | getUndefRegState(FirstCopy) |
                      getInternalReadRegState(!FirstCopy),
                  SubIdx)
          .addReg(FromReg, {}, SubIdx);

  // Tag the copy so later passes can recognize it as a live-range split copy.
  CopyMI->setFlag(MachineInstr::LRSplit);
  SlotIndexes &Indexes = *LIS.getSlotIndexes();
  if (FirstCopy) {
    Def = Indexes.insertMachineInstrInMaps(*CopyMI, Late).getRegSlot();
  } else {
    CopyMI->bundleWithPred();
  }
  return Def;
}
546
/// Insert a copy of the lanes in LaneMask from FromReg to ToReg before
/// InsertBefore. Uses a single full copy when the mask covers the whole
/// register; otherwise emits one subregister copy per covering subreg index
/// and refines DestLI's subranges with the new def. Returns the def index.
SlotIndex SplitEditor::buildCopy(Register FromReg, Register ToReg,
    LaneBitmask LaneMask, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, bool Late, unsigned RegIdx) {
  const MCInstrDesc &Desc =
      TII.get(TII.getLiveRangeSplitOpcode(FromReg, *MBB.getParent()));
  SlotIndexes &Indexes = *LIS.getSlotIndexes();
  if (LaneMask.all() || LaneMask == MRI.getMaxLaneMaskForVReg(FromReg)) {
    // The full vreg is copied.
    MachineInstr *CopyMI =
        BuildMI(MBB, InsertBefore, DebugLoc(), Desc, ToReg).addReg(FromReg);
    CopyMI->setFlag(MachineInstr::LRSplit);
    return Indexes.insertMachineInstrInMaps(*CopyMI, Late).getRegSlot();
  }

  // Only a subset of lanes needs to be copied. The following is a simple
  // heuristic to construct a sequence of COPYs. We could add a target
  // specific callback if this turns out to be suboptimal.
  LiveInterval &DestLI = LIS.getInterval(Edit->get(RegIdx));

  // First pass: Try to find a perfectly matching subregister index. If none
  // exists find the one covering the most lanemask bits.
  const TargetRegisterClass *RC = MRI.getRegClass(FromReg);
  assert(RC == MRI.getRegClass(ToReg) && "Should have same reg class");

  SmallVector<unsigned, 8> SubIndexes;

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (!TRI.getCoveringSubRegIndexes(RC, LaneMask, SubIndexes))
    report_fatal_error("Impossible to implement partial COPY");

  // Emit one copy per covering index; all copies share the first one's Def.
  SlotIndex Def;
  for (unsigned BestIdx : SubIndexes) {
    Def = buildSingleSubRegCopy(FromReg, ToReg, MBB, InsertBefore, BestIdx,
                                DestLI, Late, Def, Desc);
  }

  // Record the new def in every subrange the copied lanes touch.
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  DestLI.refineSubRanges(
      Allocator, LaneMask,
      [Def, &Allocator](LiveInterval::SubRange &SR) {
        SR.createDeadDef(Def, Allocator);
      },
      Indexes, TRI);

  return Def;
}
593
/// Return true if rematerializing DefMI at UseIdx would constrain the
/// register class more tightly than the use site already does, which would
/// prevent the post-split class inflation done in recomputeRegClass.
bool SplitEditor::rematWillIncreaseRestriction(const MachineInstr *DefMI,
                                               MachineBasicBlock &MBB,
                                               SlotIndex UseIdx) const {
  const MachineInstr *UseMI = LIS.getInstructionFromIndex(UseIdx);
  if (!UseMI)
    return false;

  // Currently code assumes rematerialization only happens for a def at 0.
  const unsigned DefOperandIdx = 0;
  // We want to compute the static register class constraint for the instruction
  // def. If it is a smaller subclass than getLargestLegalSuperClass at the use
  // site, then rematerializing it will increase the constraints.
  const TargetRegisterClass *DefConstrainRC =
      DefMI->getRegClassConstraint(DefOperandIdx, &TII, &TRI);
  if (!DefConstrainRC)
    return false;

  const TargetRegisterClass *RC = MRI.getRegClass(Edit->getReg());

  // We want to find the register class that can be inflated to after the split
  // occurs in recomputeRegClass
  const TargetRegisterClass *SuperRC =
      TRI.getLargestLegalSuperClass(RC, *MBB.getParent());

  Register DefReg = DefMI->getOperand(DefOperandIdx).getReg();
  const TargetRegisterClass *UseConstrainRC =
      UseMI->getRegClassConstraintEffectForVReg(DefReg, SuperRC, &TII, &TRI,
                                                /*ExploreBundle=*/true);
  // Remat is harmful iff the def's class is a strict subclass of what the
  // use site would otherwise allow.
  return UseConstrainRC->hasSubClass(DefConstrainRC);
}
624
/// Define a value in interval RegIdx at UseIdx by materializing ParentVNI's
/// value before iterator I in MBB: preferably by cheap rematerialization,
/// otherwise by a (possibly partial) copy, or an IMPLICIT_DEF when no lanes
/// of the original interval are live at UseIdx. Returns the new VNInfo.
VNInfo *SplitEditor::defFromParent(unsigned RegIdx, const VNInfo *ParentVNI,
                                   SlotIndex UseIdx, MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I) {
  LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));

  // We may be trying to avoid interference that ends at a deleted instruction,
  // so always begin RegIdx 0 early and all others late.
  bool Late = RegIdx != 0;

  // Attempt cheap-as-a-copy rematerialization.
  Register Original = VRM.getOriginal(Edit->get(RegIdx));
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);

  Register Reg = LI->reg();
  if (OrigVNI) {
    LiveRangeEdit::Remat RM(ParentVNI);
    RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
    if (RM.OrigMI && TII.isAsCheapAsAMove(*RM.OrigMI) &&
        Edit->canRematerializeAt(RM, UseIdx)) {
      if (!rematWillIncreaseRestriction(RM.OrigMI, MBB, UseIdx)) {
        // Restrict the remat to the lanes actually live at UseIdx.
        LaneBitmask UsedLanes = LaneBitmask::getAll();
        if (OrigLI.hasSubRanges()) {
          UsedLanes = LaneBitmask::getNone();
          for (const LiveInterval::SubRange &SR : OrigLI.subranges())
            if (SR.liveAt(UseIdx))
              UsedLanes |= SR.LaneMask;
        }
        SlotIndex Def = Edit->rematerializeAt(MBB, I, Reg, RM, TRI, Late, 0,
                                              nullptr, UsedLanes);
        ++NumRemats;
        // Define the value in Reg.
        return defValue(RegIdx, ParentVNI, Def, false);
      }
      LLVM_DEBUG(
          dbgs() << "skipping rematerialize of " << printReg(Reg) << " at "
                 << UseIdx
                 << " since it will increase register class restrictions\n");
    }
  }

  // Compute the lanes of the original interval live at UseIdx; only these
  // need to be copied.
  LaneBitmask LaneMask;
  if (OrigLI.hasSubRanges()) {
    LaneMask = LaneBitmask::getNone();
    for (LiveInterval::SubRange &S : OrigLI.subranges()) {
      if (S.liveAt(UseIdx))
        LaneMask |= S.LaneMask;
    }
  } else {
    LaneMask = LaneBitmask::getAll();
  }

  SlotIndex Def;
  if (LaneMask.none()) {
    // Nothing live here: an IMPLICIT_DEF is enough.
    const MCInstrDesc &Desc = TII.get(TargetOpcode::IMPLICIT_DEF);
    MachineInstr *ImplicitDef = BuildMI(MBB, I, DebugLoc(), Desc, Reg);
    SlotIndexes &Indexes = *LIS.getSlotIndexes();
    Def = Indexes.insertMachineInstrInMaps(*ImplicitDef, Late).getRegSlot();
  } else {
    ++NumCopies;
    Def = buildCopy(Edit->getReg(), Reg, LaneMask, MBB, I, Late, RegIdx);
  }

  // Define the value in Reg.
  return defValue(RegIdx, ParentVNI, Def, false);
}
691
/// Create a new virtual register and live interval.
/// Index 0 is reserved for the complement interval; the newly opened
/// interval gets the next free index, which is returned and also becomes
/// the current OpenIdx.
unsigned SplitEditor::openIntv() {
  // Create the complement as index 0.
  if (Edit->empty())
    Edit->createEmptyInterval();

  // Create the open interval.
  OpenIdx = Edit->size();
  Edit->createEmptyInterval();
  return OpenIdx;
}
703
/// Make a previously opened interval (never the complement, index 0) the
/// current target of enter/use/leave operations.
void SplitEditor::selectIntv(unsigned Idx) {
  assert(Idx != 0 && "Cannot select the complement interval");
  assert(Idx < Edit->size() && "Can only select previously opened interval");
  LLVM_DEBUG(dbgs() << " selectIntv " << OpenIdx << " -> " << Idx << '\n');
  OpenIdx = Idx;
}
710
/// Enter the open interval before the instruction at Idx: a copy/remat def
/// is inserted before that instruction. Returns the def index, or Idx itself
/// when the parent is not live there.
SlotIndex SplitEditor::enterIntvBefore(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before enterIntvBefore");
  LLVM_DEBUG(dbgs() << " enterIntvBefore " << Idx);
  Idx = Idx.getBaseIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Idx;
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
  MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
  assert(MI && "enterIntvBefore called with invalid index");

  VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(), MI);
  return VNI->def;
}
727
/// Enter the open interval after the instruction at Idx: the def is inserted
/// just past that instruction. Returns the def index, or the boundary index
/// when the parent is not live there.
SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before enterIntvAfter");
  LLVM_DEBUG(dbgs() << " enterIntvAfter " << Idx);
  Idx = Idx.getBoundaryIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Idx;
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
  MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
  assert(MI && "enterIntvAfter called with invalid index");

  VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(),
                              std::next(MachineBasicBlock::iterator(MI)));
  return VNI->def;
}
745
/// Enter the open interval at the end of MBB: a def is inserted at the last
/// split point and the open interval is assigned from that def to the block
/// end. Returns the def index, or the block-end index when the parent is not
/// live at the insertion point.
SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
  assert(OpenIdx && "openIntv not called before enterIntvAtEnd");
  SlotIndex End = LIS.getMBBEndIdx(&MBB);
  SlotIndex Last = End.getPrevSlot();
  LLVM_DEBUG(dbgs() << " enterIntvAtEnd " << printMBBReference(MBB) << ", "
                    << Last);
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Last);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return End;
  }
  // The copy cannot go past the last split point; re-query the parent value
  // there if the split point precedes the last slot.
  SlotIndex LSP = SA.getLastSplitPoint(&MBB);
  if (LSP < Last) {
    // It could be that the use after LSP is a def, and thus the ParentVNI
    // just selected starts at that def. For this case to exist, the def
    // must be part of a tied def/use pair (as otherwise we'd have split
    // distinct live ranges into individual live intervals), and thus we
    // can insert the def into the VNI of the use and the tied def/use
    // pair can live in the resulting interval.
    Last = LSP;
    ParentVNI = Edit->getParent().getVNInfoAt(Last);
    if (!ParentVNI) {
      // undef use --> undef tied def
      LLVM_DEBUG(dbgs() << ": tied use not live\n");
      return End;
    }
  }

  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id);
  VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Last, MBB,
                              SA.getLastSplitPointIter(&MBB));
  RegAssign.insert(VNI->def, End, OpenIdx);
  LLVM_DEBUG(dump());
  return VNI->def;
}
781
/// useIntv - indicate that all instructions in MBB should use OpenLI.
void SplitEditor::useIntv(const MachineBasicBlock &MBB) {
  useIntv(LIS.getMBBStartIdx(&MBB), LIS.getMBBEndIdx(&MBB));
}
786
/// Assign the half-open slot range [Start;End) to the currently open
/// interval in the RegAssign map.
void SplitEditor::useIntv(SlotIndex Start, SlotIndex End) {
  assert(OpenIdx && "openIntv not called before useIntv");
  LLVM_DEBUG(dbgs() << " useIntv [" << Start << ';' << End << "):");
  RegAssign.insert(Start, End, OpenIdx);
  LLVM_DEBUG(dump());
}
793
/// Leave the open interval after the instruction at Idx: the complement
/// (interval 0) takes over with a copy inserted after (or, in spill mode,
/// before) that instruction. Returns the index where the complement's value
/// begins.
SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before leaveIntvAfter");
  LLVM_DEBUG(dbgs() << " leaveIntvAfter " << Idx);

  // The interval must be live beyond the instruction at Idx.
  SlotIndex Boundary = Idx.getBoundaryIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Boundary);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Boundary.getNextSlot();
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
  MachineInstr *MI = LIS.getInstructionFromIndex(Boundary);
  assert(MI && "No instruction at index");

  // In spill mode, make live ranges as short as possible by inserting the copy
  // before MI. This is only possible if that instruction doesn't redefine the
  // value. The inserted COPY is not a kill, and we don't need to recompute
  // the source live range. The spiller also won't try to hoist this copy.
  if (SpillMode && !SlotIndex::isSameInstr(ParentVNI->def, Idx) &&
      MI->readsVirtualRegister(Edit->getReg())) {
    forceRecompute(0, *ParentVNI);
    defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
    return Idx;
  }

  VNInfo *VNI = defFromParent(0, ParentVNI, Boundary, *MI->getParent(),
                              std::next(MachineBasicBlock::iterator(MI)));
  return VNI->def;
}
824
/// Leave the open interval before the instruction at Idx: the complement
/// (interval 0) gets a def from a copy inserted before that instruction.
/// Returns the index of the new def.
SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before leaveIntvBefore");
  LLVM_DEBUG(dbgs() << " leaveIntvBefore " << Idx);

  // The interval must be live into the instruction at Idx.
  Idx = Idx.getBaseIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Idx.getNextSlot();
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');

  MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
  assert(MI && "No instruction at index");
  VNInfo *VNI = defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
  return VNI->def;
}
843
/// Transfer the value live into MBB back to the complement at the top of the
/// block. The COPY is inserted after any PHIs, labels and debug instructions,
/// and the open interval is assigned to the range [block start, copy def).
/// Returns the complement's def index, or the block start if the parent
/// value is not live-in.
SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
  assert(OpenIdx && "openIntv not called before leaveIntvAtTop");
  SlotIndex Start = LIS.getMBBStartIdx(mbb: &MBB);
  LLVM_DEBUG(dbgs() << " leaveIntvAtTop " << printMBBReference(MBB) << ", "
                    << Start);

  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx: Start);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Start;
  }

  // The back-copy always targets the complement interval, RegIdx 0.
  unsigned RegIdx = 0;
  Register Reg = LIS.getInterval(Reg: Edit->get(idx: RegIdx)).reg();
  VNInfo *VNI = defFromParent(RegIdx, ParentVNI, UseIdx: Start, MBB,
                              I: MBB.SkipPHIsLabelsAndDebug(I: MBB.begin(), Reg));
  // The open interval owns everything from the block start up to the copy.
  RegAssign.insert(a: Start, b: VNI->def, y: OpenIdx);
  LLVM_DEBUG(dump());
  return VNI->def;
}
864
865static bool hasTiedUseOf(MachineInstr &MI, Register Reg) {
866 return any_of(Range: MI.defs(), P: [Reg](const MachineOperand &MO) {
867 return MO.isReg() && MO.isTied() && MO.getReg() == Reg;
868 });
869}
870
/// Let the open interval overlap the complement on [Start, End). Both ranges
/// carry the same parent value over the span, so the complement is marked for
/// recomputation instead of being blitted. The overlap is skipped entirely
/// when the instruction at End has a def tied to a use of the register,
/// because the tied def/use pair must stay in a single interval.
void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
  assert(OpenIdx && "openIntv not called before overlapIntv");
  const VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx: Start);
  assert(ParentVNI == Edit->getParent().getVNInfoBefore(End) &&
         "Parent changes value in extended range");
  assert(LIS.getMBBFromIndex(Start) == LIS.getMBBFromIndex(End) &&
         "Range cannot span basic blocks");

  // The complement interval will be extended as needed by LICalc.extend().
  if (ParentVNI)
    forceRecompute(RegIdx: 0, ParentVNI: *ParentVNI);

  // If the last use is tied to a def, we can't mark it as live for the
  // interval which includes only the use. That would cause the tied pair
  // to end up in two different intervals.
  if (auto *MI = LIS.getInstructionFromIndex(index: End))
    if (hasTiedUseOf(MI&: *MI, Reg: Edit->getReg())) {
      LLVM_DEBUG(dbgs() << "skip overlap due to tied def at end\n");
      return;
    }

  LLVM_DEBUG(dbgs() << " overlapIntv [" << Start << ';' << End << "):");
  RegAssign.insert(a: Start, b: End, y: OpenIdx);
  LLVM_DEBUG(dump());
}
896
897//===----------------------------------------------------------------------===//
898// Spill modes
899//===----------------------------------------------------------------------===//
900
/// Remove the back-copy defs in Copies from the complement interval
/// (RegIdx 0) and erase the copy instructions. For each erased copy, the
/// RegAssign segment that was killed at the copy is either truncated to the
/// previous read of the register in the same block, or the affected value is
/// marked for full recomputation.
void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
  LiveInterval *LI = &LIS.getInterval(Reg: Edit->get(idx: 0));
  LLVM_DEBUG(dbgs() << "Removing " << Copies.size() << " back-copies.\n");
  RegAssignMap::iterator AssignI;
  AssignI.setMap(RegAssign);

  for (const VNInfo *C : Copies) {
    SlotIndex Def = C->def;
    MachineInstr *MI = LIS.getInstructionFromIndex(index: Def);
    assert(MI && "No instruction for back-copy");

    MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::iterator MBBI(MI);
    // Step MBBI back to the previous non-debug instruction, noting whether MI
    // sits at the start of the block.
    bool AtBegin;
    do AtBegin = MBBI == MBB->begin();
    while (!AtBegin && (--MBBI)->isDebugOrPseudoInstr());

    LLVM_DEBUG(dbgs() << "Removing " << Def << '\t' << *MI);
    LIS.removeVRegDefAt(LI&: *LI, Pos: Def);
    LIS.RemoveMachineInstrFromMaps(MI&: *MI);
    MI->eraseFromParent();

    // Adjust RegAssign if a register assignment is killed at Def. We want to
    // avoid calculating the live range of the source register if possible.
    AssignI.find(x: Def.getPrevSlot());
    if (!AssignI.valid() || AssignI.start() >= Def)
      continue;
    // If MI doesn't kill the assigned register, just leave it.
    if (AssignI.stop() != Def)
      continue;
    unsigned RegIdx = AssignI.value();
    // We could hoist back-copy right after another back-copy. As a result
    // MMBI points to copy instruction which is actually dead now.
    // We cannot set its stop to MBBI which will be the same as start and
    // interval does not support that.
    SlotIndex Kill =
        AtBegin ? SlotIndex() : LIS.getInstructionIndex(Instr: *MBBI).getRegSlot();
    if (AtBegin || !MBBI->readsVirtualRegister(Reg: Edit->getReg()) ||
        Kill <= AssignI.start()) {
      LLVM_DEBUG(dbgs() << " cannot find simple kill of RegIdx " << RegIdx
                        << '\n');
      forceRecompute(RegIdx, ParentVNI: *Edit->getParent().getVNInfoAt(Idx: Def));
    } else {
      LLVM_DEBUG(dbgs() << " move kill to " << Kill << '\t' << *MBBI);
      AssignI.setStop(Kill);
    }
  }
}
949
/// Starting from MBB, walk up the dominator tree looking for a dominating
/// block at a lower loop depth than MBB, without going above DefMBB's
/// dominance region. The walk jumps from each loop to the immediate dominator
/// of the loop's header, stopping as soon as it leaves all loops or re-enters
/// DefMBB's own loop. Returns the least-deep dominated block found.
MachineBasicBlock*
SplitEditor::findShallowDominator(MachineBasicBlock *MBB,
                                  MachineBasicBlock *DefMBB) {
  if (MBB == DefMBB)
    return MBB;
  assert(MDT.dominates(DefMBB, MBB) && "MBB must be dominated by the def.");

  const MachineLoopInfo &Loops = SA.Loops;
  const MachineLoop *DefLoop = Loops.getLoopFor(BB: DefMBB);
  MachineDomTreeNode *DefDomNode = MDT[DefMBB];

  // Best candidate so far.
  MachineBasicBlock *BestMBB = MBB;
  unsigned BestDepth = std::numeric_limits<unsigned>::max();

  while (true) {
    const MachineLoop *Loop = Loops.getLoopFor(BB: MBB);

    // MBB isn't in a loop, it doesn't get any better. All dominators have a
    // higher frequency by definition.
    if (!Loop) {
      LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
                        << " dominates " << printMBBReference(*MBB)
                        << " at depth 0\n");
      return MBB;
    }

    // We'll never be able to exit the DefLoop.
    if (Loop == DefLoop) {
      LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
                        << " dominates " << printMBBReference(*MBB)
                        << " in the same loop\n");
      return MBB;
    }

    // Least busy dominator seen so far.
    unsigned Depth = Loop->getLoopDepth();
    if (Depth < BestDepth) {
      BestMBB = MBB;
      BestDepth = Depth;
      LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
                        << " dominates " << printMBBReference(*MBB)
                        << " at depth " << Depth << '\n');
    }

    // Leave loop by going to the immediate dominator of the loop header.
    // This is a bigger stride than simply walking up the dominator tree.
    MachineDomTreeNode *IDom = MDT[Loop->getHeader()]->getIDom();

    // Too far up the dominator tree?
    if (!IDom || !MDT.dominates(A: DefDomNode, B: IDom))
      return BestMBB;

    MBB = IDom->getBlock();
  }
}
1006
/// For each parent value in NotToHoistSet (values whose back-copies will not
/// be hoisted), find back-copies of that value that are dominated by another
/// back-copy of the same value and append them to BackCopies so they can be
/// removed. Dominance within a block is decided by slot-index order.
void SplitEditor::computeRedundantBackCopies(
    DenseSet<unsigned> &NotToHoistSet, SmallVectorImpl<VNInfo *> &BackCopies) {
  LiveInterval *LI = &LIS.getInterval(Reg: Edit->get(idx: 0));
  const LiveInterval *Parent = &Edit->getParent();
  SmallVector<SmallPtrSet<VNInfo *, 8>, 8> EqualVNs(Parent->getNumValNums());
  SmallPtrSet<VNInfo *, 8> DominatedVNIs;

  // Aggregate VNIs having the same value as ParentVNI.
  for (VNInfo *VNI : LI->valnos) {
    if (VNI->isUnused())
      continue;
    VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx: VNI->def);
    EqualVNs[ParentVNI->id].insert(Ptr: VNI);
  }

  // For VNI aggregation of each ParentVNI, collect dominated, i.e.,
  // redundant VNIs to BackCopies.
  for (unsigned i = 0, e = Parent->getNumValNums(); i != e; ++i) {
    const VNInfo *ParentVNI = Parent->getValNumInfo(ValNo: i);
    if (!NotToHoistSet.count(V: ParentVNI->id))
      continue;
    // Pairwise comparison of all back-copies of this parent value.
    SmallPtrSetIterator<VNInfo *> It1 = EqualVNs[ParentVNI->id].begin();
    SmallPtrSetIterator<VNInfo *> It2 = It1;
    for (; It1 != EqualVNs[ParentVNI->id].end(); ++It1) {
      It2 = It1;
      for (++It2; It2 != EqualVNs[ParentVNI->id].end(); ++It2) {
        if (DominatedVNIs.count(Ptr: *It1) || DominatedVNIs.count(Ptr: *It2))
          continue;

        MachineBasicBlock *MBB1 = LIS.getMBBFromIndex(index: (*It1)->def);
        MachineBasicBlock *MBB2 = LIS.getMBBFromIndex(index: (*It2)->def);
        if (MBB1 == MBB2) {
          // Same block: the later def is redundant.
          DominatedVNIs.insert(Ptr: (*It1)->def < (*It2)->def ? (*It2) : (*It1));
        } else if (MDT.dominates(A: MBB1, B: MBB2)) {
          DominatedVNIs.insert(Ptr: *It2);
        } else if (MDT.dominates(A: MBB2, B: MBB1)) {
          DominatedVNIs.insert(Ptr: *It1);
        }
      }
    }
    if (!DominatedVNIs.empty()) {
      forceRecompute(RegIdx: 0, ParentVNI: *ParentVNI);
      append_range(C&: BackCopies, R&: DominatedVNIs);
      DominatedVNIs.clear();
    }
  }
}
1054
/// For SM_Size mode, find a common dominator for all the back-copies for
/// the same ParentVNI and hoist the backcopies to the dominator BB.
/// For SM_Speed mode, if the common dominator is hot and it is not beneficial
/// to do the hoisting, simply remove the dominated backcopies for the same
/// ParentVNI.
void SplitEditor::hoistCopies() {
  // Get the complement interval, always RegIdx 0.
  LiveInterval *LI = &LIS.getInterval(Reg: Edit->get(idx: 0));
  const LiveInterval *Parent = &Edit->getParent();

  // Track the nearest common dominator for all back-copies for each ParentVNI,
  // indexed by ParentVNI->id.
  using DomPair = std::pair<MachineBasicBlock *, SlotIndex>;
  SmallVector<DomPair, 8> NearestDom(Parent->getNumValNums());
  // The total cost of all the back-copies for each ParentVNI.
  SmallVector<BlockFrequency, 8> Costs(Parent->getNumValNums());
  // The ParentVNI->id set for which hoisting back-copies are not beneficial
  // for Speed.
  DenseSet<unsigned> NotToHoistSet;

  // Find the nearest common dominator for parent values with multiple
  // back-copies. If a single back-copy dominates, put it in DomPair.second.
  for (VNInfo *VNI : LI->valnos) {
    if (VNI->isUnused())
      continue;
    VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx: VNI->def);
    assert(ParentVNI && "Parent not live at complement def");

    // Don't hoist remats. The complement is probably going to disappear
    // completely anyway.
    if (Edit->didRematerialize(ParentVNI))
      continue;

    MachineBasicBlock *ValMBB = LIS.getMBBFromIndex(index: VNI->def);

    DomPair &Dom = NearestDom[ParentVNI->id];

    // Keep directly defined parent values. This is either a PHI or an
    // instruction in the complement range. All other copies of ParentVNI
    // should be eliminated.
    if (VNI->def == ParentVNI->def) {
      LLVM_DEBUG(dbgs() << "Direct complement def at " << VNI->def << '\n');
      Dom = DomPair(ValMBB, VNI->def);
      continue;
    }
    // Skip the singly mapped values. There is nothing to gain from hoisting a
    // single back-copy.
    if (Values.lookup(Val: std::make_pair(x: 0, y&: ParentVNI->id)).getPointer()) {
      LLVM_DEBUG(dbgs() << "Single complement def at " << VNI->def << '\n');
      continue;
    }

    if (!Dom.first) {
      // First time we see ParentVNI. VNI dominates itself.
      Dom = DomPair(ValMBB, VNI->def);
    } else if (Dom.first == ValMBB) {
      // Two defs in the same block. Pick the earlier def.
      if (!Dom.second.isValid() || VNI->def < Dom.second)
        Dom.second = VNI->def;
    } else {
      // Different basic blocks. Check if one dominates.
      MachineBasicBlock *Near =
          MDT.findNearestCommonDominator(A: Dom.first, B: ValMBB);
      if (Near == ValMBB)
        // Def ValMBB dominates.
        Dom = DomPair(ValMBB, VNI->def);
      else if (Near != Dom.first)
        // None dominate. Hoist to common dominator, need new def.
        Dom = DomPair(Near, SlotIndex());
      Costs[ParentVNI->id] += MBFI.getBlockFreq(MBB: ValMBB);
    }

    LLVM_DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@'
                      << VNI->def << " for parent " << ParentVNI->id << '@'
                      << ParentVNI->def << " hoist to "
                      << printMBBReference(*Dom.first) << ' ' << Dom.second
                      << '\n');
  }

  // Insert the hoisted copies.
  for (unsigned i = 0, e = Parent->getNumValNums(); i != e; ++i) {
    DomPair &Dom = NearestDom[i];
    // Only values that need a new dominating def (Dom.second invalid).
    if (!Dom.first || Dom.second.isValid())
      continue;
    // This value needs a hoisted copy inserted at the end of Dom.first.
    const VNInfo *ParentVNI = Parent->getValNumInfo(ValNo: i);
    MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(index: ParentVNI->def);
    // Get a less loopy dominator than Dom.first.
    Dom.first = findShallowDominator(MBB: Dom.first, DefMBB);
    // In speed mode, give up when the hoist destination is hotter than all
    // the back-copies combined.
    if (SpillMode == SM_Speed &&
        MBFI.getBlockFreq(MBB: Dom.first) > Costs[ParentVNI->id]) {
      NotToHoistSet.insert(V: ParentVNI->id);
      continue;
    }
    SlotIndex LSP = SA.getLastSplitPoint(BB: Dom.first);
    if (LSP <= ParentVNI->def) {
      NotToHoistSet.insert(V: ParentVNI->id);
      continue;
    }
    Dom.second = defFromParent(RegIdx: 0, ParentVNI, UseIdx: LSP, MBB&: *Dom.first,
                               I: SA.getLastSplitPointIter(BB: Dom.first))->def;
  }

  // Remove redundant back-copies that are now known to be dominated by another
  // def with the same value.
  SmallVector<VNInfo*, 8> BackCopies;
  for (VNInfo *VNI : LI->valnos) {
    if (VNI->isUnused())
      continue;
    VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx: VNI->def);
    const DomPair &Dom = NearestDom[ParentVNI->id];
    if (!Dom.first || Dom.second == VNI->def ||
        NotToHoistSet.count(V: ParentVNI->id))
      continue;
    BackCopies.push_back(Elt: VNI);
    forceRecompute(RegIdx: 0, ParentVNI: *ParentVNI);
  }

  // If it is not beneficial to hoist all the BackCopies, simply remove
  // redundant BackCopies in speed mode.
  if (SpillMode == SM_Speed && !NotToHoistSet.empty())
    computeRedundantBackCopies(NotToHoistSet, BackCopies);

  removeBackCopies(Copies&: BackCopies);
}
1180
/// transferValues - Transfer all possible values to the new live ranges.
/// Values that were rematerialized are left alone, they need LICalc.extend().
///
/// Walks the parent interval's segments and, using RegAssign, maps each
/// sub-span [Start;End) to the register index it was assigned to. Simply
/// defined values are blitted directly into the target interval; force-
/// recompute values are skipped (caller must extend them later); everything
/// else is fed to the per-RegIdx LiveIntervalCalc as live-in/live-out block
/// info and resolved by calculateValues() at the end.
/// Returns true if any value was skipped and still needs extension.
bool SplitEditor::transferValues() {
  bool Skipped = false;
  RegAssignMap::const_iterator AssignI = RegAssign.begin();
  for (const LiveRange::Segment &S : Edit->getParent()) {
    LLVM_DEBUG(dbgs() << " blit " << S << ':');
    VNInfo *ParentVNI = S.valno;
    // RegAssign has holes where RegIdx 0 should be used.
    SlotIndex Start = S.start;
    AssignI.advanceTo(x: Start);
    do {
      unsigned RegIdx;
      SlotIndex End = S.end;
      if (!AssignI.valid()) {
        // Past the last assignment: the hole belongs to the complement.
        RegIdx = 0;
      } else if (AssignI.start() <= Start) {
        // Inside an assignment; clip End to where it stops.
        RegIdx = AssignI.value();
        if (AssignI.stop() < End) {
          End = AssignI.stop();
          ++AssignI;
        }
      } else {
        // In a hole before the next assignment.
        RegIdx = 0;
        End = std::min(a: End, b: AssignI.start());
      }

      // The interval [Start;End) is continuously mapped to RegIdx, ParentVNI.
      LLVM_DEBUG(dbgs() << " [" << Start << ';' << End << ")=" << RegIdx << '('
                        << printReg(Edit->get(RegIdx)) << ')');
      LiveInterval &LI = LIS.getInterval(Reg: Edit->get(idx: RegIdx));

      // Check for a simply defined value that can be blitted directly.
      ValueForcePair VFP = Values.lookup(Val: std::make_pair(x&: RegIdx, y&: ParentVNI->id));
      if (VNInfo *VNI = VFP.getPointer()) {
        LLVM_DEBUG(dbgs() << ':' << VNI->id);
        LI.addSegment(S: LiveInterval::Segment(Start, End, VNI));
        Start = End;
        continue;
      }

      // Skip values with forced recomputation.
      if (VFP.getInt()) {
        LLVM_DEBUG(dbgs() << "(recalc)");
        Skipped = true;
        Start = End;
        continue;
      }

      LiveIntervalCalc &LIC = getLICalc(RegIdx);

      // This value has multiple defs in RegIdx, but it wasn't rematerialized,
      // so the live range is accurate. Add live-in blocks in [Start;End) to the
      // LiveInBlocks.
      MachineFunction::iterator MBB = LIS.getMBBFromIndex(index: Start)->getIterator();
      SlotIndex BlockStart, BlockEnd;
      std::tie(args&: BlockStart, args&: BlockEnd) = LIS.getSlotIndexes()->getMBBRange(MBB: &*MBB);

      // The first block may be live-in, or it may have its own def.
      if (Start != BlockStart) {
        VNInfo *VNI = LI.extendInBlock(StartIdx: BlockStart, Kill: std::min(a: BlockEnd, b: End));
        assert(VNI && "Missing def for complex mapped value");
        LLVM_DEBUG(dbgs() << ':' << VNI->id << "*" << printMBBReference(*MBB));
        // MBB has its own def. Is it also live-out?
        if (BlockEnd <= End)
          LIC.setLiveOutValue(MBB: &*MBB, VNI);

        // Skip to the next block for live-in.
        ++MBB;
        BlockStart = BlockEnd;
      }

      // Handle the live-in blocks covered by [Start;End).
      assert(Start <= BlockStart && "Expected live-in block");
      while (BlockStart < End) {
        LLVM_DEBUG(dbgs() << ">" << printMBBReference(*MBB));
        BlockEnd = LIS.getMBBEndIdx(mbb: &*MBB);
        if (BlockStart == ParentVNI->def) {
          // This block has the def of a parent PHI, so it isn't live-in.
          assert(ParentVNI->isPHIDef() && "Non-phi defined at block start?");
          VNInfo *VNI = LI.extendInBlock(StartIdx: BlockStart, Kill: std::min(a: BlockEnd, b: End));
          assert(VNI && "Missing def for complex mapped parent PHI");
          if (End >= BlockEnd)
            LIC.setLiveOutValue(MBB: &*MBB, VNI); // Live-out as well.
        } else {
          // This block needs a live-in value. The last block covered may not
          // be live-out.
          if (End < BlockEnd)
            LIC.addLiveInBlock(LR&: LI, DomNode: MDT[&*MBB], Kill: End);
          else {
            // Live-through, and we don't know the value.
            LIC.addLiveInBlock(LR&: LI, DomNode: MDT[&*MBB]);
            LIC.setLiveOutValue(MBB: &*MBB, VNI: nullptr);
          }
        }
        BlockStart = BlockEnd;
        ++MBB;
      }
      Start = End;
    } while (Start != S.end);
    LLVM_DEBUG(dbgs() << '\n');
  }

  // Resolve all the accumulated live-in blocks.
  LICalc[0].calculateValues();
  if (SpillMode)
    LICalc[1].calculateValues();

  return Skipped;
}
1290
1291static bool removeDeadSegment(SlotIndex Def, LiveRange &LR) {
1292 const LiveRange::Segment *Seg = LR.getSegmentContaining(Idx: Def);
1293 if (Seg == nullptr)
1294 return true;
1295 if (Seg->end != Def.getDeadSlot())
1296 return false;
1297 // This is a dead PHI. Remove it.
1298 LR.removeSegment(S: *Seg, RemoveDeadValNo: true);
1299 return true;
1300}
1301
/// Extend LR (restricted to the lanes in LM) to the end of every predecessor
/// of B whose corresponding parent range is live-out there, mirroring the
/// PHI's incoming values. Predecessors without a live-out parent value are
/// treated like undef PHI operands and skipped. Undefs lists the subrange
/// undef points passed through to LIC.extend().
void SplitEditor::extendPHIRange(MachineBasicBlock &B, LiveIntervalCalc &LIC,
                                 LiveRange &LR, LaneBitmask LM,
                                 ArrayRef<SlotIndex> Undefs) {
  for (MachineBasicBlock *P : B.predecessors()) {
    SlotIndex End = LIS.getMBBEndIdx(mbb: P);
    SlotIndex LastUse = End.getPrevSlot();
    // The predecessor may not have a live-out value. That is OK, like an
    // undef PHI operand.
    const LiveInterval &PLI = Edit->getParent();
    // Need the cast because the inputs to ?: would otherwise be deemed
    // "incompatible": SubRange vs LiveInterval.
    const LiveRange &PSR = !LM.all() ? getSubRangeForMaskExact(LM, LI: PLI)
                                     : static_cast<const LiveRange &>(PLI);
    if (PSR.liveAt(index: LastUse))
      LIC.extend(LR, Use: End, /*PhysReg=*/0, Undefs);
  }
}
1319
/// Extend the new live ranges so every parent PHI value remains live-out of
/// its predecessors, both for the main ranges and for each subrange.
void SplitEditor::extendPHIKillRanges() {
  // Extend live ranges to be live-out for successor PHI values.

  // Visit each PHI def slot in the parent live interval. If the def is dead,
  // remove it. Otherwise, extend the live interval to reach the end indexes
  // of all predecessor blocks.

  const LiveInterval &ParentLI = Edit->getParent();
  for (const VNInfo *V : ParentLI.valnos) {
    if (V->isUnused() || !V->isPHIDef())
      continue;

    unsigned RegIdx = RegAssign.lookup(x: V->def);
    LiveInterval &LI = LIS.getInterval(Reg: Edit->get(idx: RegIdx));
    LiveIntervalCalc &LIC = getLICalc(RegIdx);
    MachineBasicBlock &B = *LIS.getMBBFromIndex(index: V->def);
    if (!removeDeadSegment(Def: V->def, LR&: LI))
      extendPHIRange(B, LIC, LR&: LI, LM: LaneBitmask::getAll(), /*Undefs=*/{});
  }

  // Repeat for each parent subrange, using a private calculator since the
  // lane-restricted extension needs its own state and undef points.
  SmallVector<SlotIndex, 4> Undefs;
  LiveIntervalCalc SubLIC;

  for (const LiveInterval::SubRange &PS : ParentLI.subranges()) {
    for (const VNInfo *V : PS.valnos) {
      if (V->isUnused() || !V->isPHIDef())
        continue;
      unsigned RegIdx = RegAssign.lookup(x: V->def);
      LiveInterval &LI = LIS.getInterval(Reg: Edit->get(idx: RegIdx));
      LiveInterval::SubRange &S = getSubRangeForMaskExact(LM: PS.LaneMask, LI);
      if (removeDeadSegment(Def: V->def, LR&: S))
        continue;

      MachineBasicBlock &B = *LIS.getMBBFromIndex(index: V->def);
      SubLIC.reset(mf: &VRM.getMachineFunction(), SI: LIS.getSlotIndexes(), MDT: &MDT,
                   VNIA: &LIS.getVNInfoAllocator());
      Undefs.clear();
      LI.computeSubRangeUndefs(Undefs, LaneMask: PS.LaneMask, MRI, Indexes: *LIS.getSlotIndexes());
      extendPHIRange(B, LIC&: SubLIC, LR&: S, LM: PS.LaneMask, Undefs);
    }
  }
}
1362
/// rewriteAssigned - Rewrite all uses of Edit->getReg().
///
/// Each operand is rewritten to the new virtual register chosen by RegAssign
/// at the operand's slot index. When ExtendRanges is set, the live range of
/// the chosen interval is also extended to reach each use; for intervals with
/// subranges this is deferred (via ExtPoints) until all operands have been
/// rewritten, because <def,read-undef> operands create undef points that must
/// be known before extending.
void SplitEditor::rewriteAssigned(bool ExtendRanges) {
  // A deferred subrange extension: the rewritten operand, the interval it was
  // assigned to, and the slot to extend to.
  struct ExtPoint {
    ExtPoint(const MachineOperand &O, unsigned R, SlotIndex N)
        : MO(O), RegIdx(R), Next(N) {}

    MachineOperand MO;
    unsigned RegIdx;
    SlotIndex Next;
  };

  SmallVector<ExtPoint,4> ExtPoints;

  for (MachineOperand &MO :
       llvm::make_early_inc_range(Range: MRI.reg_operands(Reg: Edit->getReg()))) {
    MachineInstr *MI = MO.getParent();
    // LiveDebugVariables should have handled all DBG_VALUE instructions.
    if (MI->isDebugValue()) {
      LLVM_DEBUG(dbgs() << "Zapping " << *MI);
      MO.setReg(0);
      continue;
    }

    // <undef> operands don't really read the register, so it doesn't matter
    // which register we choose. When the use operand is tied to a def, we must
    // use the same register as the def, so just do that always.
    SlotIndex Idx = LIS.getInstructionIndex(Instr: *MI);
    if (MO.isDef() || MO.isUndef())
      Idx = Idx.getRegSlot(EC: MO.isEarlyClobber());

    // Rewrite to the mapped register at Idx.
    unsigned RegIdx = RegAssign.lookup(x: Idx);
    LiveInterval &LI = LIS.getInterval(Reg: Edit->get(idx: RegIdx));
    MO.setReg(LI.reg());
    LLVM_DEBUG(dbgs() << " rewr " << printMBBReference(*MI->getParent())
                      << '\t' << Idx << ':' << RegIdx << '\t' << *MI);

    // Extend liveness to Idx if the instruction reads reg.
    if (!ExtendRanges || MO.isUndef())
      continue;

    // Skip instructions that don't read Reg.
    if (MO.isDef()) {
      if (!MO.getSubReg() && !MO.isEarlyClobber())
        continue;
      // We may want to extend a live range for a partial redef, or for a use
      // tied to an early clobber.
      if (!Edit->getParent().liveAt(index: Idx.getPrevSlot()))
        continue;
    } else {
      assert(MO.isUse());
      bool IsEarlyClobber = false;
      if (MO.isTied()) {
        // We want to extend a live range into `e` slot rather than `r` slot if
        // tied-def is early clobber, because the `e` slot already contained
        // in the live range of early-clobber tied-def operand, give an example
        // here:
        //  0  %0 = ...
        //  16 early-clobber %0 = Op %0 (tied-def 0), ...
        //  32 ... = Op %0
        // Before extend:
        //   %0 = [0r, 0d) [16e, 32d)
        // The point we want to extend is 0d to 16e not 16r in this case, but if
        // we use 16r here we will extend nothing because that already contained
        // in [16e, 32d).
        unsigned OpIdx = MO.getOperandNo();
        unsigned DefOpIdx = MI->findTiedOperandIdx(OpIdx);
        const MachineOperand &DefOp = MI->getOperand(i: DefOpIdx);
        IsEarlyClobber = DefOp.isEarlyClobber();
      }

      Idx = Idx.getRegSlot(EC: IsEarlyClobber);
    }

    SlotIndex Next = Idx;
    if (LI.hasSubRanges()) {
      // We have to delay extending subranges until we have seen all operands
      // defining the register. This is because a <def,read-undef> operand
      // will create an "undef" point, and we cannot extend any subranges
      // until all of them have been accounted for.
      if (MO.isUse())
        ExtPoints.push_back(Elt: ExtPoint(MO, RegIdx, Next));
    } else {
      LiveIntervalCalc &LIC = getLICalc(RegIdx);
      LIC.extend(LR&: LI, Use: Next, PhysReg: 0, Undefs: ArrayRef<SlotIndex>());
    }
  }

  // Now that all operands are rewritten, extend the deferred subrange points.
  for (ExtPoint &EP : ExtPoints) {
    LiveInterval &LI = LIS.getInterval(Reg: Edit->get(idx: EP.RegIdx));
    assert(LI.hasSubRanges());

    LiveIntervalCalc SubLIC;
    Register Reg = EP.MO.getReg();
    unsigned Sub = EP.MO.getSubReg();
    LaneBitmask LM = Sub != 0 ? TRI.getSubRegIndexLaneMask(SubIdx: Sub)
                              : MRI.getMaxLaneMaskForVReg(Reg);
    for (LiveInterval::SubRange &S : LI.subranges()) {
      if ((S.LaneMask & LM).none())
        continue;
      // The problem here can be that the new register may have been created
      // for a partially defined original register. For example:
      //   %0:subreg_hireg<def,read-undef> = ...
      //   ...
      //   %1 = COPY %0
      if (S.empty())
        continue;
      SubLIC.reset(mf: &VRM.getMachineFunction(), SI: LIS.getSlotIndexes(), MDT: &MDT,
                   VNIA: &LIS.getVNInfoAllocator());
      SmallVector<SlotIndex, 4> Undefs;
      LI.computeSubRangeUndefs(Undefs, LaneMask: S.LaneMask, MRI, Indexes: *LIS.getSlotIndexes());
      SubLIC.extend(LR&: S, Use: EP.Next, PhysReg: 0, Undefs);
    }
  }

  // Rebuild each main range from its subranges, now that they are final.
  for (Register R : *Edit) {
    LiveInterval &LI = LIS.getInterval(Reg: R);
    if (!LI.hasSubRanges())
      continue;
    LI.clear();
    LI.removeEmptySubRanges();
    LIS.constructMainRangeFromSubranges(LI);
  }
}
1487
/// Delete original defs that were rematerialized everywhere and are now dead:
/// for every non-PHI value whose segment ends at its own dead slot, mark the
/// def dead on the instruction, and hand instructions with all defs dead to
/// LiveRangeEdit::eliminateDeadDefs for removal.
void SplitEditor::deleteRematVictims() {
  SmallVector<MachineInstr*, 8> Dead;
  for (const Register &R : *Edit) {
    LiveInterval *LI = &LIS.getInterval(Reg: R);
    for (const LiveRange::Segment &S : LI->segments) {
      // Dead defs end at the dead slot.
      if (S.end != S.valno->def.getDeadSlot())
        continue;
      // PHI defs have no instruction to delete.
      if (S.valno->isPHIDef())
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(index: S.valno->def);
      assert(MI && "Missing instruction for dead def");
      MI->addRegisterDead(Reg: LI->reg(), RegInfo: &TRI);

      if (!MI->allDefsAreDead())
        continue;

      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      Dead.push_back(Elt: MI);
    }
  }

  if (Dead.empty())
    return;

  Edit->eliminateDeadDefs(Dead, RegsBeingSpilled: {});
}
1515
/// Mark ParentVNI for recomputation in every new interval. For PHI defs the
/// marking is propagated transitively through the PHI's incoming values,
/// since truncating one incoming range can change the PHI's value too.
void SplitEditor::forceRecomputeVNI(const VNInfo &ParentVNI) {
  // Fast-path for common case.
  if (!ParentVNI.isPHIDef()) {
    for (unsigned I = 0, E = Edit->size(); I != E; ++I)
      forceRecompute(RegIdx: I, ParentVNI);
    return;
  }

  // Trace value through phis.
  ///< whether VNI was/is in worklist.
  SmallPtrSet<const VNInfo *, 8> Visited = {&ParentVNI};
  SmallVector<const VNInfo *, 4> WorkList = {&ParentVNI};

  const LiveInterval &ParentLI = Edit->getParent();
  const SlotIndexes &Indexes = *LIS.getSlotIndexes();
  do {
    const VNInfo &VNI = *WorkList.pop_back_val();
    // Force this value in every new interval.
    for (unsigned I = 0, E = Edit->size(); I != E; ++I)
      forceRecompute(RegIdx: I, ParentVNI: VNI);
    if (!VNI.isPHIDef())
      continue;

    // Enqueue the value live out of each predecessor of the PHI block.
    MachineBasicBlock &MBB = *Indexes.getMBBFromIndex(index: VNI.def);
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      SlotIndex PredEnd = Indexes.getMBBEndIdx(mbb: Pred);
      VNInfo *PredVNI = ParentLI.getVNInfoBefore(Idx: PredEnd);
      assert(PredVNI && "Value available in PhiVNI predecessor");
      if (Visited.insert(Ptr: PredVNI).second)
        WorkList.push_back(Elt: PredVNI);
    }
  } while(!WorkList.empty());
}
1548
/// Finalize the split: materialize parent defs in the new intervals, hoist or
/// remove back-copies per spill mode, transfer/extend live ranges, rewrite
/// all register operands, delete fully-rematerialized defs, split connected
/// components, and compute spill weights. If LRMap is given, it is filled
/// with the mapping from each resulting interval back to the Edit index that
/// produced it.
void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
  ++NumFinished;

  // At this point, the live intervals in Edit contain VNInfos corresponding to
  // the inserted copies.

  // Add the original defs from the parent interval.
  for (const VNInfo *ParentVNI : Edit->getParent().valnos) {
    if (ParentVNI->isUnused())
      continue;
    unsigned RegIdx = RegAssign.lookup(x: ParentVNI->def);
    defValue(RegIdx, ParentVNI, Idx: ParentVNI->def, Original: true);

    // Force rematted values to be recomputed everywhere.
    // The new live ranges may be truncated.
    if (Edit->didRematerialize(ParentVNI))
      forceRecomputeVNI(ParentVNI: *ParentVNI);
  }

  // Hoist back-copies to the complement interval when in spill mode.
  switch (SpillMode) {
  case SM_Partition:
    // Leave all back-copies as is.
    break;
  case SM_Size:
  case SM_Speed:
    // hoistCopies will behave differently between size and speed.
    hoistCopies();
  }

  // Transfer the simply mapped values, check if any are skipped.
  bool Skipped = transferValues();

  // Rewrite virtual registers, possibly extending ranges.
  rewriteAssigned(ExtendRanges: Skipped);

  if (Skipped)
    extendPHIKillRanges();
  else
    ++NumSimple;

  // Delete defs that were rematted everywhere.
  if (Skipped)
    deleteRematVictims();

  // Get rid of unused values and set phi-kill flags.
  for (Register Reg : *Edit) {
    LiveInterval &LI = LIS.getInterval(Reg);
    LI.removeEmptySubRanges();
    LI.RenumberValues();
  }

  // Provide a reverse mapping from original indices to Edit ranges.
  if (LRMap) {
    auto Seq = llvm::seq<unsigned>(Begin: 0, End: Edit->size());
    LRMap->assign(in_start: Seq.begin(), in_end: Seq.end());
  }

  // Now check if any registers were separated into multiple components.
  ConnectedVNInfoEqClasses ConEQ(LIS);
  for (unsigned i = 0, e = Edit->size(); i != e; ++i) {
    // Don't use iterators, they are invalidated by create() below.
    Register VReg = Edit->get(idx: i);
    LiveInterval &LI = LIS.getInterval(Reg: VReg);
    SmallVector<LiveInterval*, 8> SplitLIs;
    LIS.splitSeparateComponents(LI, SplitLIs);
    Register Original = VRM.getOriginal(VirtReg: VReg);
    for (LiveInterval *SplitLI : SplitLIs)
      VRM.setIsSplitFromReg(virtReg: SplitLI->reg(), SReg: Original);

    // The new intervals all map back to i.
    if (LRMap)
      LRMap->resize(N: Edit->size(), NV: i);
  }

  // Calculate spill weight and allocation hints for new intervals.
  Edit->calculateRegClassAndHint(VRM.getMachineFunction(), VRAI);

  assert(!LRMap || LRMap->size() == Edit->size());
}
1629
1630//===----------------------------------------------------------------------===//
1631// Single Block Splitting
1632//===----------------------------------------------------------------------===//
1633
1634bool SplitAnalysis::shouldSplitSingleBlock(const BlockInfo &BI,
1635 bool SingleInstrs) const {
1636 // Always split for multiple instructions.
1637 if (!BI.isOneInstr())
1638 return true;
1639 // Don't split for single instructions unless explicitly requested.
1640 if (!SingleInstrs)
1641 return false;
1642 // Splitting a live-through range always makes progress.
1643 if (BI.LiveIn && BI.LiveOut)
1644 return true;
1645 // No point in isolating a copy. It has no register class constraints.
1646 MachineInstr *MI = LIS.getInstructionFromIndex(index: BI.FirstInstr);
1647 bool copyLike = TII.isCopyInstr(MI: *MI) || MI->isSubregToReg();
1648 if (copyLike)
1649 return false;
1650 // Finally, don't isolate an end point that was created by earlier splits.
1651 return isOriginalEndpoint(Idx: BI.FirstInstr);
1652}
1653
/// Create a new interval covering the uses inside BI's block: enter before
/// the first instruction (or the last split point, whichever is earlier) and
/// leave after the last instruction. When the last use lies past the last
/// valid split point, leave at the split point instead and overlap the
/// complement up to the last use.
void SplitEditor::splitSingleBlock(const SplitAnalysis::BlockInfo &BI) {
  openIntv();
  SlotIndex LastSplitPoint = SA.getLastSplitPoint(BB: BI.MBB);
  SlotIndex SegStart = enterIntvBefore(Idx: std::min(a: BI.FirstInstr,
                                                b: LastSplitPoint));
  if (!BI.LiveOut || BI.LastInstr < LastSplitPoint) {
    useIntv(Start: SegStart, End: leaveIntvAfter(Idx: BI.LastInstr));
  } else {
    // The last use is after the last valid split point.
    SlotIndex SegStop = leaveIntvBefore(Idx: LastSplitPoint);
    useIntv(Start: SegStart, End: SegStop);
    overlapIntv(Start: SegStop, End: BI.LastInstr);
  }
}
1668
1669//===----------------------------------------------------------------------===//
1670// Global Live Range Splitting Support
1671//===----------------------------------------------------------------------===//
1672
1673// These methods support a method of global live range splitting that uses a
1674// global algorithm to decide intervals for CFG edges. They will insert split
1675// points and color intervals in basic blocks while avoiding interference.
1676//
1677// Note that splitSingleBlock is also useful for blocks where both CFG edges
1678// are on the stack.
1679
/// Split a live range that is live through basic block number \p MBBNum.
///
/// \param MBBNum      Block to split in.
/// \param IntvIn      Interval index the value uses on entry; 0 when the value
///                    enters on the stack (at least one of IntvIn/IntvOut must
///                    be nonzero, see the assert below).
/// \param LeaveBefore If valid, interference forces IntvIn to be left before
///                    this index.
/// \param IntvOut     Interval index the value uses on exit; 0 when the value
///                    leaves on the stack.
/// \param EnterAfter  If valid, interference allows IntvOut to be entered only
///                    after this index.
void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
                                        unsigned IntvIn, SlotIndex LeaveBefore,
                                        unsigned IntvOut, SlotIndex EnterAfter){
  SlotIndex Start, Stop;
  std::tie(args&: Start, args&: Stop) = LIS.getSlotIndexes()->getMBBRange(Num: MBBNum);

  LLVM_DEBUG(dbgs() << "%bb." << MBBNum << " [" << Start << ';' << Stop
                    << ") intf " << LeaveBefore << '-' << EnterAfter
                    << ", live-through " << IntvIn << " -> " << IntvOut);

  assert((IntvIn || IntvOut) && "Use splitSingleBlock for isolated blocks");

  // Sanity-check the interference bounds against the block range.
  assert((!LeaveBefore || LeaveBefore < Stop) && "Interference after block");
  assert((!IntvIn || !LeaveBefore || LeaveBefore > Start) && "Impossible intf");
  assert((!EnterAfter || EnterAfter >= Start) && "Interference before block");

  MachineBasicBlock *MBB = VRM.getMachineFunction().getBlockNumbered(N: MBBNum);

  // No register interval on exit: spill at the top of the block.
  if (!IntvOut) {
    LLVM_DEBUG(dbgs() << ", spill on entry.\n");
    //
    //        <<<<<<<<<    Possible LeaveBefore interference.
    //    |-----------|    Live through.
    //    -____________    Spill on entry.
    //
    selectIntv(Idx: IntvIn);
    SlotIndex Idx = leaveIntvAtTop(MBB&: *MBB);
    assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    (void)Idx;
    return;
  }

  // No register interval on entry: reload at the bottom of the block.
  if (!IntvIn) {
    LLVM_DEBUG(dbgs() << ", reload on exit.\n");
    //
    //    >>>>>>>          Possible EnterAfter interference.
    //    |-----------|    Live through.
    //    ___________--    Reload on exit.
    //
    selectIntv(Idx: IntvOut);
    SlotIndex Idx = enterIntvAtEnd(MBB&: *MBB);
    assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
    (void)Idx;
    return;
  }

  // Same interval on both sides and no interference: cover the whole block.
  if (IntvIn == IntvOut && !LeaveBefore && !EnterAfter) {
    LLVM_DEBUG(dbgs() << ", straight through.\n");
    //
    //    |-----------|    Live through.
    //    -------------    Straight through, same intv, no interference.
    //
    selectIntv(Idx: IntvOut);
    useIntv(Start, End: Stop);
    return;
  }

  // We cannot legally insert splits after LSP.
  SlotIndex LSP = SA.getLastSplitPoint(Num: MBBNum);
  assert((!IntvOut || !EnterAfter || EnterAfter < LSP) && "Impossible intf");

  // Different intervals in and out, and the interference regions don't
  // overlap: switch from IntvIn to IntvOut in the gap between them.
  if (IntvIn != IntvOut && (!LeaveBefore || !EnterAfter ||
                  LeaveBefore.getBaseIndex() > EnterAfter.getBoundaryIndex())) {
    LLVM_DEBUG(dbgs() << ", switch avoiding interference.\n");
    //
    //    >>>>     <<<<    Non-overlapping EnterAfter/LeaveBefore interference.
    //    |-----------|    Live through.
    //    ------=======    Switch intervals between interference.
    //
    selectIntv(Idx: IntvOut);
    SlotIndex Idx;
    if (LeaveBefore && LeaveBefore < LSP) {
      Idx = enterIntvBefore(Idx: LeaveBefore);
      useIntv(Start: Idx, End: Stop);
    } else {
      Idx = enterIntvAtEnd(MBB&: *MBB);
    }
    selectIntv(Idx: IntvIn);
    useIntv(Start, End: Idx);
    assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
    return;
  }

  // Overlapping interference: use IntvIn/IntvOut only at the block edges and
  // leave the middle of the block to a spill (the currently open interval).
  LLVM_DEBUG(dbgs() << ", create local intv for interference.\n");
  //
  //    >>><><><><<<<    Overlapping EnterAfter/LeaveBefore interference.
  //    |-----------|    Live through.
  //    ==---------==    Switch intervals before/after interference.
  //
  assert(LeaveBefore <= EnterAfter && "Missed case");

  selectIntv(Idx: IntvOut);
  SlotIndex Idx = enterIntvAfter(Idx: EnterAfter);
  useIntv(Start: Idx, End: Stop);
  assert((!EnterAfter || Idx >= EnterAfter) && "Interference");

  selectIntv(Idx: IntvIn);
  Idx = leaveIntvBefore(Idx: LeaveBefore);
  useIntv(Start, End: Idx);
  assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
}
1782
/// Split a live range that enters BI.MBB in a register and is not live-out in
/// a register (it is either killed in the block or live-out on the stack).
///
/// \param BI          Use information for the block (must be live-in).
/// \param IntvIn      Interval index live-in to the block; must be nonzero.
/// \param LeaveBefore If valid, interference forces IntvIn to be left before
///                    this index.
void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
                                  unsigned IntvIn, SlotIndex LeaveBefore) {
  SlotIndex Start, Stop;
  std::tie(args&: Start, args&: Stop) = LIS.getSlotIndexes()->getMBBRange(MBB: BI.MBB);

  LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';'
                    << Stop << "), uses " << BI.FirstInstr << '-'
                    << BI.LastInstr << ", reg-in " << IntvIn
                    << ", leave before " << LeaveBefore
                    << (BI.LiveOut ? ", stack-out" : ", killed in block"));

  assert(IntvIn && "Must have register in");
  assert(BI.LiveIn && "Must be live-in");
  assert((!LeaveBefore || LeaveBefore > Start) && "Bad interference");

  // Killed in the block with no interference before the kill: IntvIn can
  // simply cover all the uses.
  if (!BI.LiveOut && (!LeaveBefore || LeaveBefore >= BI.LastInstr)) {
    LLVM_DEBUG(dbgs() << " before interference.\n");
    //
    //               <<<    Interference after kill.
    //     |---o---x   |    Killed in block.
    //     =========        Use IntvIn everywhere.
    //
    selectIntv(Idx: IntvIn);
    useIntv(Start, End: BI.LastInstr);
    return;
  }

  SlotIndex LSP = SA.getLastSplitPoint(BB: BI.MBB);

  // Interference (if any) starts only after the last use: IntvIn covers all
  // uses and is spilled afterwards.
  if (!LeaveBefore || LeaveBefore > BI.LastInstr.getBoundaryIndex()) {
    //
    //               <<<    Possible interference after last use.
    //     |---o---o---|    Live-out on stack.
    //     =========____    Leave IntvIn after last use.
    //
    //                 <    Interference after last use.
    //     |---o---o--o|    Live-out on stack, late last use.
    //     ============     Copy to stack after LSP, overlap IntvIn.
    //            \_____    Stack interval is live-out.
    //
    if (BI.LastInstr < LSP) {
      LLVM_DEBUG(dbgs() << ", spill after last use before interference.\n");
      selectIntv(Idx: IntvIn);
      SlotIndex Idx = leaveIntvAfter(Idx: BI.LastInstr);
      useIntv(Start, End: Idx);
      assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    } else {
      // The last use is past the last legal split point: spill before LSP
      // and let IntvIn overlap the remaining uses.
      LLVM_DEBUG(dbgs() << ", spill before last split point.\n");
      selectIntv(Idx: IntvIn);
      SlotIndex Idx = leaveIntvBefore(Idx: LSP);
      overlapIntv(Start: Idx, End: BI.LastInstr);
      useIntv(Start, End: Idx);
      assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    }
    return;
  }

  // The interference is overlapping somewhere we wanted to use IntvIn. That
  // means we need to create a local interval that can be allocated a
  // different register.
  unsigned LocalIntv = openIntv();
  (void)LocalIntv;
  LLVM_DEBUG(dbgs() << ", creating local interval " << LocalIntv << ".\n");

  // The local interval carries the uses past the interference; IntvIn covers
  // the uses before it.
  if (!BI.LiveOut || BI.LastInstr < LSP) {
    //
    //           <<<<<<<    Interference overlapping uses.
    //     |---o---o---|    Live-out on stack.
    //     =====----____    Leave IntvIn before interference, then spill.
    //
    SlotIndex To = leaveIntvAfter(Idx: BI.LastInstr);
    SlotIndex From = enterIntvBefore(Idx: LeaveBefore);
    useIntv(Start: From, End: To);
    selectIntv(Idx: IntvIn);
    useIntv(Start, End: From);
    assert((!LeaveBefore || From <= LeaveBefore) && "Interference");
    return;
  }

  //           <<<<<<<    Interference overlapping uses.
  //     |---o---o--o|    Live-out on stack, late last use.
  //     =====-------     Copy to stack before LSP, overlap LocalIntv.
  //            \_____    Stack interval is live-out.
  //
  SlotIndex To = leaveIntvBefore(Idx: LSP);
  overlapIntv(Start: To, End: BI.LastInstr);
  SlotIndex From = enterIntvBefore(Idx: std::min(a: To, b: LeaveBefore));
  useIntv(Start: From, End: To);
  selectIntv(Idx: IntvIn);
  useIntv(Start, End: From);
  assert((!LeaveBefore || From <= LeaveBefore) && "Interference");
}
1875
/// Split a live range that leaves BI.MBB in a register and is not live-in in
/// a register (it is either defined in the block or live-in on the stack).
///
/// \param BI         Use information for the block (must be live-out).
/// \param IntvOut    Interval index live-out of the block; must be nonzero.
/// \param EnterAfter If valid, interference allows IntvOut to be entered only
///                   after this index.
void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
                                   unsigned IntvOut, SlotIndex EnterAfter) {
  SlotIndex Start, Stop;
  std::tie(args&: Start, args&: Stop) = LIS.getSlotIndexes()->getMBBRange(MBB: BI.MBB);

  LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';'
                    << Stop << "), uses " << BI.FirstInstr << '-'
                    << BI.LastInstr << ", reg-out " << IntvOut
                    << ", enter after " << EnterAfter
                    << (BI.LiveIn ? ", stack-in" : ", defined in block"));

  SlotIndex LSP = SA.getLastSplitPoint(BB: BI.MBB);

  assert(IntvOut && "Must have register out");
  assert(BI.LiveOut && "Must be live-out");
  assert((!EnterAfter || EnterAfter < LSP) && "Bad interference");

  // Defined in the block with no interference after the def: IntvOut can
  // simply cover everything from the first use to the end of the block.
  if (!BI.LiveIn && (!EnterAfter || EnterAfter <= BI.FirstInstr)) {
    LLVM_DEBUG(dbgs() << " after interference.\n");
    //
    //    >>>>             Interference before def.
    //    |   o---o---|    Defined in block.
    //        =========    Use IntvOut everywhere.
    //
    selectIntv(Idx: IntvOut);
    useIntv(Start: BI.FirstInstr, End: Stop);
    return;
  }

  // Interference (if any) ends before the first use: reload into IntvOut
  // just before the first use.
  if (!EnterAfter || EnterAfter < BI.FirstInstr.getBaseIndex()) {
    LLVM_DEBUG(dbgs() << ", reload after interference.\n");
    //
    //    >>>>             Interference before def.
    //    |---o---o---|    Live-through, stack-in.
    //    ____=========    Enter IntvOut before first use.
    //
    selectIntv(Idx: IntvOut);
    SlotIndex Idx = enterIntvBefore(Idx: std::min(a: LSP, b: BI.FirstInstr));
    useIntv(Start: Idx, End: Stop);
    assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
    return;
  }

  // The interference is overlapping somewhere we wanted to use IntvOut. That
  // means we need to create a local interval that can be allocated a
  // different register.
  LLVM_DEBUG(dbgs() << ", interference overlaps uses.\n");
  //
  //    >>>>>>>          Interference overlapping uses.
  //    |---o---o---|    Live-through, stack-in.
  //    ____---======    Create local interval for interference range.
  //
  selectIntv(Idx: IntvOut);
  SlotIndex Idx = enterIntvAfter(Idx: EnterAfter);
  useIntv(Start: Idx, End: Stop);
  assert((!EnterAfter || Idx >= EnterAfter) && "Interference");

  // Local interval covering the uses that overlap the interference.
  openIntv();
  SlotIndex From = enterIntvBefore(Idx: std::min(a: Idx, b: BI.FirstInstr));
  useIntv(Start: From, End: Idx);
}
1937
1938void SplitAnalysis::BlockInfo::print(raw_ostream &OS) const {
1939 OS << "{" << printMBBReference(MBB: *MBB) << ", "
1940 << "uses " << FirstInstr << " to " << LastInstr << ", "
1941 << "1st def " << FirstDef << ", "
1942 << (LiveIn ? "live in" : "dead in") << ", "
1943 << (LiveOut ? "live out" : "dead out") << "}";
1944}
1945
1946void SplitAnalysis::BlockInfo::dump() const {
1947 print(OS&: dbgs());
1948 dbgs() << "\n";
1949}
1950