//===- SplitKit.cpp - Toolkit for splitting live ranges -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the SplitAnalysis class as well as mutator functions for
// live range splitting.
//
//===----------------------------------------------------------------------===//

#include "SplitKit.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

static cl::opt<bool>
    EnableLoopIVHeuristic("enable-split-loopiv-heuristic",
                          cl::desc("Enable loop iv regalloc heuristic"),
                          cl::init(true));

STATISTIC(NumFinished, "Number of splits finished");
STATISTIC(NumSimple, "Number of splits that were simple");
STATISTIC(NumCopies, "Number of copies inserted for splitting");
STATISTIC(NumRemats, "Number of rematerialized defs for splitting");

//===----------------------------------------------------------------------===//
// Last Insert Point Analysis
//===----------------------------------------------------------------------===//

InsertPointAnalysis::InsertPointAnalysis(const LiveIntervals &lis,
                                         unsigned BBNum)
    : LIS(lis), LastInsertPoint(BBNum) {}

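/// Compute the last insert point for CurLI in MBB: normally the first
/// terminator, but when CurLI is live into an EH pad or inlineasm_br
/// successor, the insert point is pulled back before the call or INLINEASM_BR
/// feeding that successor. The computed pair is cached in LastInsertPoint.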
SlotIndex
InsertPointAnalysis::computeLastInsertPoint(const LiveInterval &CurLI,
                                            const MachineBasicBlock &MBB) {
  unsigned Num = MBB.getNumber();
  std::pair<SlotIndex, SlotIndex> &LIP = LastInsertPoint[Num];
  SlotIndex MBBEnd = LIS.getMBBEndIdx(&MBB);

  SmallVector<const MachineBasicBlock *, 1> ExceptionalSuccessors;
  bool EHPadSuccessor = false;
  for (const MachineBasicBlock *SMBB : MBB.successors()) {
    if (SMBB->isEHPad()) {
      ExceptionalSuccessors.push_back(SMBB);
      EHPadSuccessor = true;
    } else if (SMBB->isInlineAsmBrIndirectTarget())
      ExceptionalSuccessors.push_back(SMBB);
  }

  // Compute insert points on the first call. The pair is independent of the
  // current live interval.
  if (!LIP.first.isValid()) {
    MachineBasicBlock::const_iterator FirstTerm = MBB.getFirstTerminator();
    if (FirstTerm == MBB.end())
      LIP.first = MBBEnd;
    else
      LIP.first = LIS.getInstructionIndex(*FirstTerm);

    // If there is a landing pad or inlineasm_br successor, also find the
    // instruction. If there is no such instruction, we don't need to do
    // anything special. We assume there cannot be multiple instructions that
    // are Calls with EHPad successors or INLINEASM_BR in a block. Further, we
    // assume that if there are any, they will be after any other call
    // instructions in the block.
    if (ExceptionalSuccessors.empty())
      return LIP.first;
    for (const MachineInstr &MI : llvm::reverse(MBB)) {
      if ((EHPadSuccessor && MI.isCall()) ||
          MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        LIP.second = LIS.getInstructionIndex(MI);
        break;
      }
    }
  }

  // If CurLI is live into a landing pad successor, move the last insert point
  // back to the call that may throw.
  if (!LIP.second)
    return LIP.first;

  if (none_of(ExceptionalSuccessors, [&](const MachineBasicBlock *EHPad) {
        return LIS.isLiveInToMBB(CurLI, EHPad);
      }))
    return LIP.first;

  // Find the value leaving MBB.
  const VNInfo *VNI = CurLI.getVNInfoBefore(MBBEnd);
  if (!VNI)
    return LIP.first;

  // The def of a statepoint instruction is a gc relocation and it must remain
  // live in the landing pad, so we cannot split the interval after the
  // statepoint instruction.
  if (SlotIndex::isSameInstr(VNI->def, LIP.second))
    if (auto *I = LIS.getInstructionFromIndex(LIP.second))
      if (I->getOpcode() == TargetOpcode::STATEPOINT)
        return LIP.second;

  // If the value leaving MBB was defined after the call in MBB, it can't
  // really be live-in to the landing pad. This can happen if the landing pad
  // has a PHI, and this register is undef on the exceptional edge.
  if (!SlotIndex::isEarlierInstr(VNI->def, LIP.second) && VNI->def < MBBEnd)
    return LIP.first;

  // Value is properly live-in to the landing pad.
  // Only allow inserts before the call.
  return LIP.second;
}

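/// Return the last insert point as an iterator, or MBB.end() when the insert
/// point is the end of the block.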
MachineBasicBlock::iterator
InsertPointAnalysis::getLastInsertPointIter(const LiveInterval &CurLI,
                                            MachineBasicBlock &MBB) {
  SlotIndex LIP = getLastInsertPoint(CurLI, MBB);
  if (LIP == LIS.getMBBEndIdx(&MBB))
    return MBB.end();
  return LIS.getInstructionFromIndex(LIP);
}

//===----------------------------------------------------------------------===//
// Split Analysis
//===----------------------------------------------------------------------===//

SplitAnalysis::SplitAnalysis(const VirtRegMap &vrm, const LiveIntervals &lis,
                             const MachineLoopInfo &mli)
    : MF(vrm.getMachineFunction()), VRM(vrm), LIS(lis), Loops(mli),
      TII(*MF.getSubtarget().getInstrInfo()), IPA(lis, MF.getNumBlockIDs()) {}

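/// clear - Reset all analysis results so a new interval can be analyzed.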
void SplitAnalysis::clear() {
  UseSlots.clear();
  UseBlocks.clear();
  ThroughBlocks.clear();
  CurLI = nullptr;
}

/// analyzeUses - Count instructions, basic blocks, and loops using CurLI.
void SplitAnalysis::analyzeUses() {
  assert(UseSlots.empty() && "Call clear first");

  // First get all the defs from the interval values. This provides the correct
  // slots for early clobbers.
  for (const VNInfo *VNI : CurLI->valnos)
    if (!VNI->isPHIDef() && !VNI->isUnused())
      UseSlots.push_back(VNI->def);

  // Get use slots from the use-def chain.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineOperand &MO : MRI.use_nodbg_operands(CurLI->reg()))
    if (!MO.isUndef())
      UseSlots.push_back(LIS.getInstructionIndex(*MO.getParent()).getRegSlot());

  array_pod_sort(UseSlots.begin(), UseSlots.end());

  // Remove duplicates, keeping the smaller slot for each instruction.
  // That is what we want for early clobbers.
  UseSlots.erase(llvm::unique(UseSlots, SlotIndex::isSameInstr),
                 UseSlots.end());

  // Compute per-live block info.
  calcLiveBlockInfo();

  LLVM_DEBUG(dbgs() << "Analyze counted " << UseSlots.size() << " instrs in "
                    << UseBlocks.size() << " blocks, through "
                    << NumThroughBlocks << " blocks.\n");
}

/// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks
/// where CurLI is live.
void SplitAnalysis::calcLiveBlockInfo() {
  ThroughBlocks.resize(MF.getNumBlockIDs());
  NumThroughBlocks = NumGapBlocks = 0;
  if (CurLI->empty())
    return;

  LiveInterval::const_iterator LVI = CurLI->begin();
  LiveInterval::const_iterator LVE = CurLI->end();

  SmallVectorImpl<SlotIndex>::const_iterator UseI, UseE;
  UseI = UseSlots.begin();
  UseE = UseSlots.end();

  // Loop over basic blocks where CurLI is live.
  MachineFunction::iterator MFI =
      LIS.getMBBFromIndex(LVI->start)->getIterator();
  while (true) {
    BlockInfo BI;
    BI.MBB = &*MFI;
    SlotIndex Start, Stop;
    std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);

    // If the block contains no uses, the range must be live through. At one
    // point, RegisterCoalescer could create dangling ranges that ended
    // mid-block.
    if (UseI == UseE || *UseI >= Stop) {
      ++NumThroughBlocks;
      ThroughBlocks.set(BI.MBB->getNumber());
      // The range shouldn't end mid-block if there are no uses. This shouldn't
      // happen.
      assert(LVI->end >= Stop && "range ends mid block with no uses");
    } else {
      // This block has uses. Find the first and last uses in the block.
      BI.FirstInstr = *UseI;
      assert(BI.FirstInstr >= Start);
      do ++UseI;
      while (UseI != UseE && *UseI < Stop);
      BI.LastInstr = UseI[-1];
      assert(BI.LastInstr < Stop);

      // LVI is the first live segment overlapping MBB.
      BI.LiveIn = LVI->start <= Start;

      // When not live in, the first use should be a def.
      if (!BI.LiveIn) {
        assert(LVI->start == LVI->valno->def && "Dangling Segment start");
        assert(LVI->start == BI.FirstInstr && "First instr should be a def");
        BI.FirstDef = BI.FirstInstr;
      }

      // Look for gaps in the live range.
      BI.LiveOut = true;
      while (LVI->end < Stop) {
        SlotIndex LastStop = LVI->end;
        if (++LVI == LVE || LVI->start >= Stop) {
          BI.LiveOut = false;
          BI.LastInstr = LastStop;
          break;
        }

        if (LastStop < LVI->start) {
          // There is a gap in the live range. Create duplicate entries for the
          // live-in snippet and the live-out snippet.
          ++NumGapBlocks;

          // Push the Live-in part.
          BI.LiveOut = false;
          UseBlocks.push_back(BI);
          UseBlocks.back().LastInstr = LastStop;

          // Set up BI for the live-out part.
          BI.LiveIn = false;
          BI.LiveOut = true;
          BI.FirstInstr = BI.FirstDef = LVI->start;
        }

        // A Segment that starts in the middle of the block must be a def.
        assert(LVI->start == LVI->valno->def && "Dangling Segment start");
        if (!BI.FirstDef)
          BI.FirstDef = LVI->start;
      }

      UseBlocks.push_back(BI);

      // LVI is now at LVE or LVI->end >= Stop.
      if (LVI == LVE)
        break;
    }

    // Live segment ends exactly at Stop. Move to the next segment.
    if (LVI->end == Stop && ++LVI == LVE)
      break;

    // Pick the next basic block.
    if (LVI->start < Stop)
      ++MFI;
    else
      MFI = LIS.getMBBFromIndex(LVI->start)->getIterator();
  }

  LooksLikeLoopIV = EnableLoopIVHeuristic && UseBlocks.size() == 2 &&
                    any_of(UseBlocks, [this](BlockInfo &BI) {
                      MachineLoop *L = Loops.getLoopFor(BI.MBB);
                      return BI.LiveIn && BI.LiveOut && BI.FirstDef && L &&
                             L->isLoopLatch(BI.MBB);
                    });

  assert(getNumLiveBlocks() == countLiveBlocks(CurLI) && "Bad block count");
}

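/// countLiveBlocks - Count the number of basic blocks where cli is live. Used
/// to verify the block count computed by calcLiveBlockInfo().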
unsigned SplitAnalysis::countLiveBlocks(const LiveInterval *cli) const {
  if (cli->empty())
    return 0;
  LiveInterval *li = const_cast<LiveInterval*>(cli);
  LiveInterval::iterator LVI = li->begin();
  LiveInterval::iterator LVE = li->end();
  unsigned Count = 0;

  // Loop over basic blocks where li is live.
  MachineFunction::const_iterator MFI =
      LIS.getMBBFromIndex(LVI->start)->getIterator();
  SlotIndex Stop = LIS.getMBBEndIdx(&*MFI);
  while (true) {
    ++Count;
    LVI = li->advanceTo(LVI, Stop);
    if (LVI == LVE)
      return Count;
    do {
      ++MFI;
      Stop = LIS.getMBBEndIdx(&*MFI);
    } while (Stop <= LVI->start);
  }
}

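/// isOriginalEndpoint - Return true if Idx is a segment boundary (a def or a
/// kill) in the original live interval that CurLI was split from.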
bool SplitAnalysis::isOriginalEndpoint(SlotIndex Idx) const {
  Register OrigReg = VRM.getOriginal(CurLI->reg());
  const LiveInterval &Orig = LIS.getInterval(OrigReg);
  assert(!Orig.empty() && "Splitting empty interval?");
  LiveInterval::const_iterator I = Orig.find(Idx);

  // Range containing Idx should begin at Idx.
  if (I != Orig.end() && I->start <= Idx)
    return I->start == Idx;

  // Range does not contain Idx, previous must end at Idx.
  return I != Orig.begin() && (--I)->end == Idx;
}

void SplitAnalysis::analyze(const LiveInterval *li) {
  clear();
  CurLI = li;
  analyzeUses();
}

//===----------------------------------------------------------------------===//
// Split Editor
//===----------------------------------------------------------------------===//

/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
SplitEditor::SplitEditor(SplitAnalysis &SA, LiveIntervals &LIS, VirtRegMap &VRM,
                         MachineDominatorTree &MDT,
                         MachineBlockFrequencyInfo &MBFI, VirtRegAuxInfo &VRAI)
    : SA(SA), LIS(LIS), VRM(VRM), MRI(VRM.getMachineFunction().getRegInfo()),
      MDT(MDT), TII(*VRM.getMachineFunction().getSubtarget().getInstrInfo()),
      TRI(*VRM.getMachineFunction().getSubtarget().getRegisterInfo()),
      MBFI(MBFI), VRAI(VRAI), RegAssign(Allocator) {}

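/// Prepare the editor for a new round of splitting described by LRE: clear any
/// state left over from a previous split, configure the LiveIntervalCalc
/// instances for the requested spill mode, and scan the parent interval for
/// rematerializable values.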
void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
  Edit = &LRE;
  SpillMode = SM;
  OpenIdx = 0;
  RegAssign.clear();
  Values.clear();

  // Reset the LiveIntervalCalc instances needed for this spill mode.
  LICalc[0].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
                  &LIS.getVNInfoAllocator());
  if (SpillMode)
    LICalc[1].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
                    &LIS.getVNInfoAllocator());

  Edit->anyRematerializable();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SplitEditor::dump() const {
  if (RegAssign.empty()) {
    dbgs() << " empty\n";
    return;
  }

  for (RegAssignMap::const_iterator I = RegAssign.begin(); I.valid(); ++I)
    dbgs() << " [" << I.start() << ';' << I.stop() << "):" << I.value();
  dbgs() << '\n';
}
#endif

/// Find a subrange corresponding to the exact lane mask @p LM in the live
/// interval @p LI. The interval @p LI is assumed to contain such a subrange.
/// This function is used to find corresponding subranges between the
/// original interval and the new intervals.
template <typename T> auto &getSubrangeImpl(LaneBitmask LM, T &LI) {
  for (auto &S : LI.subranges())
    if (S.LaneMask == LM)
      return S;
  llvm_unreachable("SubRange for this mask not found");
}

LiveInterval::SubRange &getSubRangeForMaskExact(LaneBitmask LM,
                                                LiveInterval &LI) {
  return getSubrangeImpl(LM, LI);
}

const LiveInterval::SubRange &getSubRangeForMaskExact(LaneBitmask LM,
                                                      const LiveInterval &LI) {
  return getSubrangeImpl(LM, LI);
}

/// Find a subrange corresponding to the lane mask @p LM, or a superset of it,
/// in the live interval @p LI. The interval @p LI is assumed to contain such
/// a subrange. This function is used to find corresponding subranges between
/// the original interval and the new intervals.
const LiveInterval::SubRange &getSubRangeForMask(LaneBitmask LM,
                                                 const LiveInterval &LI) {
  for (const LiveInterval::SubRange &S : LI.subranges())
    if ((S.LaneMask & LM) == LM)
      return S;
  llvm_unreachable("SubRange for this mask not found");
}

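/// Create a dead def of VNI's value in LI. When LI has subranges, only the
/// lanes actually defined at that point get a dead def as well: for original
/// defs those are taken from the parent's subranges, for new defs from the
/// defining instruction.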
void SplitEditor::addDeadDef(LiveInterval &LI, VNInfo *VNI, bool Original) {
  if (!LI.hasSubRanges()) {
    LI.createDeadDef(VNI);
    return;
  }

  SlotIndex Def = VNI->def;
  if (Original) {
    // If we are transferring a def from the original interval, make sure
    // to only update the subranges for which the original subranges had
    // a def at this location.
    for (LiveInterval::SubRange &S : LI.subranges()) {
      auto &PS = getSubRangeForMask(S.LaneMask, Edit->getParent());
      VNInfo *PV = PS.getVNInfoAt(Def);
      if (PV != nullptr && PV->def == Def)
        S.createDeadDef(Def, LIS.getVNInfoAllocator());
    }
  } else {
    // This is a new def: either from rematerialization, or from an inserted
    // copy. Since rematerialization can regenerate a definition of a sub-
    // register, we need to check which subranges need to be updated.
    const MachineInstr *DefMI = LIS.getInstructionFromIndex(Def);
    assert(DefMI != nullptr);
    LaneBitmask LM;
    for (const MachineOperand &DefOp : DefMI->defs()) {
      Register R = DefOp.getReg();
      if (R != LI.reg())
        continue;
      if (unsigned SR = DefOp.getSubReg())
        LM |= TRI.getSubRegIndexLaneMask(SR);
      else {
        LM = MRI.getMaxLaneMaskForVReg(R);
        break;
      }
    }
    for (LiveInterval::SubRange &S : LI.subranges())
      if ((S.LaneMask & LM).any())
        S.createDeadDef(Def, LIS.getVNInfoAllocator());
  }
}

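/// Map ParentVNI to a new value defined at Idx in the interval for RegIdx.
/// First-time, unforced mappings are kept as simple defs without liveness;
/// repeated or forced mappings become complex and receive dead defs that are
/// extended later.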
VNInfo *SplitEditor::defValue(unsigned RegIdx,
                              const VNInfo *ParentVNI,
                              SlotIndex Idx,
                              bool Original) {
  assert(ParentVNI && "Mapping NULL value");
  assert(Idx.isValid() && "Invalid SlotIndex");
  assert(Edit->getParent().getVNInfoAt(Idx) == ParentVNI && "Bad Parent VNI");
  LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));

  // Create a new value.
  VNInfo *VNI = LI->getNextValue(Idx, LIS.getVNInfoAllocator());

  bool Force = LI->hasSubRanges();
  ValueForcePair FP(Force ? nullptr : VNI, Force);
  // Use insert for lookup, so we can add missing values with a second lookup.
  std::pair<ValueMap::iterator, bool> InsP =
      Values.insert(std::make_pair(std::make_pair(RegIdx, ParentVNI->id), FP));

  // This was the first time (RegIdx, ParentVNI) was mapped, and it is not
  // forced. Keep it as a simple def without any liveness.
  if (!Force && InsP.second)
    return VNI;

  // If the previous value was a simple mapping, add liveness for it now.
  if (VNInfo *OldVNI = InsP.first->second.getPointer()) {
    addDeadDef(*LI, OldVNI, Original);

    // No longer a simple mapping. Switch to a complex mapping. If the
    // interval has subranges, make it a forced mapping.
    InsP.first->second = ValueForcePair(nullptr, Force);
  }

  // This is a complex mapping, add liveness for VNI
  addDeadDef(*LI, VNI, Original);
  return VNI;
}

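/// Mark the (RegIdx, ParentVNI) mapping as complex and forced so that its live
/// range will be recomputed by LiveIntervalCalc instead of being copied from
/// the parent interval.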
void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo &ParentVNI) {
  ValueForcePair &VFP = Values[std::make_pair(RegIdx, ParentVNI.id)];
  VNInfo *VNI = VFP.getPointer();

  // ParentVNI was either unmapped or already complex mapped. Either way, just
  // set the force bit.
  if (!VNI) {
    VFP.setInt(true);
    return;
  }

  // This was previously a single mapping. Make sure the old def is represented
  // by a trivial live range.
  addDeadDef(LIS.getInterval(Edit->get(RegIdx)), VNI, false);

  // Mark as complex mapped, forced.
  VFP = ValueForcePair(nullptr, true);
}

SlotIndex SplitEditor::buildSingleSubRegCopy(
    Register FromReg, Register ToReg, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, unsigned SubIdx,
    LiveInterval &DestLI, bool Late, SlotIndex Def, const MCInstrDesc &Desc) {
  bool FirstCopy = !Def.isValid();
  MachineInstr *CopyMI = BuildMI(MBB, InsertBefore, DebugLoc(), Desc)
      .addReg(ToReg, RegState::Define | getUndefRegState(FirstCopy)
              | getInternalReadRegState(!FirstCopy), SubIdx)
      .addReg(FromReg, 0, SubIdx);

  SlotIndexes &Indexes = *LIS.getSlotIndexes();
  if (FirstCopy) {
    Def = Indexes.insertMachineInstrInMaps(*CopyMI, Late).getRegSlot();
  } else {
    CopyMI->bundleWithPred();
  }
  return Def;
}

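/// Insert a copy of FromReg to ToReg before InsertBefore. A single full
/// register COPY is used when all live lanes are covered; otherwise a sequence
/// of subregister copies covering LaneMask is built and the subranges of the
/// destination interval get dead defs at the copy.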
SlotIndex SplitEditor::buildCopy(Register FromReg, Register ToReg,
    LaneBitmask LaneMask, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, bool Late, unsigned RegIdx) {
  const MCInstrDesc &Desc =
      TII.get(TII.getLiveRangeSplitOpcode(FromReg, *MBB.getParent()));
  SlotIndexes &Indexes = *LIS.getSlotIndexes();
  if (LaneMask.all() || LaneMask == MRI.getMaxLaneMaskForVReg(FromReg)) {
    // The full vreg is copied.
    MachineInstr *CopyMI =
        BuildMI(MBB, InsertBefore, DebugLoc(), Desc, ToReg).addReg(FromReg);
    return Indexes.insertMachineInstrInMaps(*CopyMI, Late).getRegSlot();
  }

  // Only a subset of lanes needs to be copied. The following is a simple
  // heuristic to construct a sequence of COPYs. We could add a target
  // specific callback if this turns out to be suboptimal.
  LiveInterval &DestLI = LIS.getInterval(Edit->get(RegIdx));

  // First pass: Try to find a perfectly matching subregister index. If none
  // exists find the one covering the most lanemask bits.
  const TargetRegisterClass *RC = MRI.getRegClass(FromReg);
  assert(RC == MRI.getRegClass(ToReg) && "Should have same reg class");

  SmallVector<unsigned, 8> SubIndexes;

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (!TRI.getCoveringSubRegIndexes(MRI, RC, LaneMask, SubIndexes))
    report_fatal_error("Impossible to implement partial COPY");

  SlotIndex Def;
  for (unsigned BestIdx : SubIndexes) {
    Def = buildSingleSubRegCopy(FromReg, ToReg, MBB, InsertBefore, BestIdx,
                                DestLI, Late, Def, Desc);
  }

  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  DestLI.refineSubRanges(
      Allocator, LaneMask,
      [Def, &Allocator](LiveInterval::SubRange &SR) {
        SR.createDeadDef(Def, Allocator);
      },
      Indexes, TRI);

  return Def;
}

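/// Define a value in the interval for RegIdx that carries ParentVNI across
/// UseIdx, either by rematerializing the original def when that is as cheap as
/// a copy, or by inserting a COPY (or an IMPLICIT_DEF when no lanes are live)
/// from the register being split.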
VNInfo *SplitEditor::defFromParent(unsigned RegIdx, const VNInfo *ParentVNI,
                                   SlotIndex UseIdx, MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I) {
  SlotIndex Def;
  LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));

  // We may be trying to avoid interference that ends at a deleted instruction,
  // so always begin RegIdx 0 early and all others late.
  bool Late = RegIdx != 0;

  // Attempt cheap-as-a-copy rematerialization.
  Register Original = VRM.getOriginal(Edit->get(RegIdx));
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);

  Register Reg = LI->reg();
  bool DidRemat = false;
  if (OrigVNI) {
    LiveRangeEdit::Remat RM(ParentVNI);
    RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
    if (Edit->canRematerializeAt(RM, OrigVNI, UseIdx, true)) {
      Def = Edit->rematerializeAt(MBB, I, Reg, RM, TRI, Late);
      ++NumRemats;
      DidRemat = true;
    }
  }
  if (!DidRemat) {
    LaneBitmask LaneMask;
    if (OrigLI.hasSubRanges()) {
      LaneMask = LaneBitmask::getNone();
      for (LiveInterval::SubRange &S : OrigLI.subranges()) {
        if (S.liveAt(UseIdx))
          LaneMask |= S.LaneMask;
      }
    } else {
      LaneMask = LaneBitmask::getAll();
    }

    if (LaneMask.none()) {
      const MCInstrDesc &Desc = TII.get(TargetOpcode::IMPLICIT_DEF);
      MachineInstr *ImplicitDef = BuildMI(MBB, I, DebugLoc(), Desc, Reg);
      SlotIndexes &Indexes = *LIS.getSlotIndexes();
      Def = Indexes.insertMachineInstrInMaps(*ImplicitDef, Late).getRegSlot();
    } else {
      ++NumCopies;
      Def = buildCopy(Edit->getReg(), Reg, LaneMask, MBB, I, Late, RegIdx);
    }
  }

  // Define the value in Reg.
  return defValue(RegIdx, ParentVNI, Def, false);
}

/// Create a new virtual register and live interval.
unsigned SplitEditor::openIntv() {
  // Create the complement as index 0.
  if (Edit->empty())
    Edit->createEmptyInterval();

  // Create the open interval.
  OpenIdx = Edit->size();
  Edit->createEmptyInterval();
  return OpenIdx;
}

void SplitEditor::selectIntv(unsigned Idx) {
  assert(Idx != 0 && "Cannot select the complement interval");
  assert(Idx < Edit->size() && "Can only select previously opened interval");
  LLVM_DEBUG(dbgs() << " selectIntv " << OpenIdx << " -> " << Idx << '\n');
  OpenIdx = Idx;
}

SlotIndex SplitEditor::enterIntvBefore(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before enterIntvBefore");
  LLVM_DEBUG(dbgs() << " enterIntvBefore " << Idx);
  Idx = Idx.getBaseIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Idx;
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
  MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
  assert(MI && "enterIntvBefore called with invalid index");

  VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(), MI);
  return VNI->def;
}

SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before enterIntvAfter");
  LLVM_DEBUG(dbgs() << " enterIntvAfter " << Idx);
  Idx = Idx.getBoundaryIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Idx;
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
  MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
  assert(MI && "enterIntvAfter called with invalid index");

  VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(),
                              std::next(MachineBasicBlock::iterator(MI)));
  return VNI->def;
}

SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
  assert(OpenIdx && "openIntv not called before enterIntvAtEnd");
  SlotIndex End = LIS.getMBBEndIdx(&MBB);
  SlotIndex Last = End.getPrevSlot();
  LLVM_DEBUG(dbgs() << " enterIntvAtEnd " << printMBBReference(MBB) << ", "
                    << Last);
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Last);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return End;
  }
  SlotIndex LSP = SA.getLastSplitPoint(&MBB);
  if (LSP < Last) {
    // It could be that the use after LSP is a def, and thus the ParentVNI
    // just selected starts at that def. For this case to exist, the def
    // must be part of a tied def/use pair (as otherwise we'd have split
    // distinct live ranges into individual live intervals), and thus we
    // can insert the def into the VNI of the use and the tied def/use
    // pair can live in the resulting interval.
    Last = LSP;
    ParentVNI = Edit->getParent().getVNInfoAt(Last);
    if (!ParentVNI) {
      // undef use --> undef tied def
      LLVM_DEBUG(dbgs() << ": tied use not live\n");
      return End;
    }
  }

  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id);
  VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Last, MBB,
                              SA.getLastSplitPointIter(&MBB));
  RegAssign.insert(VNI->def, End, OpenIdx);
  LLVM_DEBUG(dump());
  return VNI->def;
}

/// useIntv - indicate that all instructions in MBB should use OpenLI.
void SplitEditor::useIntv(const MachineBasicBlock &MBB) {
  useIntv(LIS.getMBBStartIdx(&MBB), LIS.getMBBEndIdx(&MBB));
}

void SplitEditor::useIntv(SlotIndex Start, SlotIndex End) {
  assert(OpenIdx && "openIntv not called before useIntv");
  LLVM_DEBUG(dbgs() << " useIntv [" << Start << ';' << End << "):");
  RegAssign.insert(Start, End, OpenIdx);
  LLVM_DEBUG(dump());
}

SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before leaveIntvAfter");
  LLVM_DEBUG(dbgs() << " leaveIntvAfter " << Idx);

  // The interval must be live beyond the instruction at Idx.
  SlotIndex Boundary = Idx.getBoundaryIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Boundary);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Boundary.getNextSlot();
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
  MachineInstr *MI = LIS.getInstructionFromIndex(Boundary);
  assert(MI && "No instruction at index");

  // In spill mode, make live ranges as short as possible by inserting the copy
  // before MI. This is only possible if that instruction doesn't redefine the
  // value. The inserted COPY is not a kill, and we don't need to recompute
  // the source live range. The spiller also won't try to hoist this copy.
  if (SpillMode && !SlotIndex::isSameInstr(ParentVNI->def, Idx) &&
      MI->readsVirtualRegister(Edit->getReg())) {
    forceRecompute(0, *ParentVNI);
    defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
    return Idx;
  }

  VNInfo *VNI = defFromParent(0, ParentVNI, Boundary, *MI->getParent(),
                              std::next(MachineBasicBlock::iterator(MI)));
  return VNI->def;
}

SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) {
  assert(OpenIdx && "openIntv not called before leaveIntvBefore");
  LLVM_DEBUG(dbgs() << " leaveIntvBefore " << Idx);

  // The interval must be live into the instruction at Idx.
  Idx = Idx.getBaseIndex();
  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Idx.getNextSlot();
  }
  LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');

  MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
  assert(MI && "No instruction at index");
  VNInfo *VNI = defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
  return VNI->def;
}

SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
  assert(OpenIdx && "openIntv not called before leaveIntvAtTop");
  SlotIndex Start = LIS.getMBBStartIdx(&MBB);
  LLVM_DEBUG(dbgs() << " leaveIntvAtTop " << printMBBReference(MBB) << ", "
                    << Start);

  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start);
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << ": not live\n");
    return Start;
  }

  unsigned RegIdx = 0;
  Register Reg = LIS.getInterval(Edit->get(RegIdx)).reg();
  VNInfo *VNI = defFromParent(RegIdx, ParentVNI, Start, MBB,
                              MBB.SkipPHIsLabelsAndDebug(MBB.begin(), Reg));
  RegAssign.insert(Start, VNI->def, OpenIdx);
  LLVM_DEBUG(dump());
  return VNI->def;
}

static bool hasTiedUseOf(MachineInstr &MI, unsigned Reg) {
  return any_of(MI.defs(), [Reg](const MachineOperand &MO) {
    return MO.isReg() && MO.isTied() && MO.getReg() == Reg;
  });
}

void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
  assert(OpenIdx && "openIntv not called before overlapIntv");
  const VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start);
  assert(ParentVNI == Edit->getParent().getVNInfoBefore(End) &&
         "Parent changes value in extended range");
  assert(LIS.getMBBFromIndex(Start) == LIS.getMBBFromIndex(End) &&
         "Range cannot span basic blocks");

  // The complement interval will be extended as needed by LICalc.extend().
  if (ParentVNI)
    forceRecompute(0, *ParentVNI);

  // If the last use is tied to a def, we can't mark it as live for the
  // interval which includes only the use. That would cause the tied pair
  // to end up in two different intervals.
  if (auto *MI = LIS.getInstructionFromIndex(End))
    if (hasTiedUseOf(*MI, Edit->getReg())) {
      LLVM_DEBUG(dbgs() << "skip overlap due to tied def at end\n");
      return;
    }

  LLVM_DEBUG(dbgs() << " overlapIntv [" << Start << ';' << End << "):");
  RegAssign.insert(Start, End, OpenIdx);
  LLVM_DEBUG(dump());
}

//===----------------------------------------------------------------------===//
// Spill modes
//===----------------------------------------------------------------------===//

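/// Remove the back-copy instructions identified by Copies from the complement
/// interval, trimming the register assignments they killed or forcing those
/// values to be recomputed.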
void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
  LiveInterval *LI = &LIS.getInterval(Edit->get(0));
  LLVM_DEBUG(dbgs() << "Removing " << Copies.size() << " back-copies.\n");
  RegAssignMap::iterator AssignI;
  AssignI.setMap(RegAssign);

  for (const VNInfo *C : Copies) {
    SlotIndex Def = C->def;
    MachineInstr *MI = LIS.getInstructionFromIndex(Def);
    assert(MI && "No instruction for back-copy");

    MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::iterator MBBI(MI);
    bool AtBegin;
    do AtBegin = MBBI == MBB->begin();
    while (!AtBegin && (--MBBI)->isDebugOrPseudoInstr());

    LLVM_DEBUG(dbgs() << "Removing " << Def << '\t' << *MI);
    LIS.removeVRegDefAt(*LI, Def);
    LIS.RemoveMachineInstrFromMaps(*MI);
    MI->eraseFromParent();

    // Adjust RegAssign if a register assignment is killed at Def. We want to
    // avoid calculating the live range of the source register if possible.
    AssignI.find(Def.getPrevSlot());
    if (!AssignI.valid() || AssignI.start() >= Def)
      continue;
    // If MI doesn't kill the assigned register, just leave it.
    if (AssignI.stop() != Def)
      continue;
    unsigned RegIdx = AssignI.value();
    // The back-copy may have been hoisted to a point right after another
    // back-copy. In that case MBBI points to a copy instruction that is itself
    // dead by now, and we cannot set the segment's stop to MBBI because it
    // would equal the start, which the interval map does not support.
    SlotIndex Kill =
        AtBegin ? SlotIndex() : LIS.getInstructionIndex(*MBBI).getRegSlot();
    if (AtBegin || !MBBI->readsVirtualRegister(Edit->getReg()) ||
        Kill <= AssignI.start()) {
      LLVM_DEBUG(dbgs() << " cannot find simple kill of RegIdx " << RegIdx
                        << '\n');
      forceRecompute(RegIdx, *Edit->getParent().getVNInfoAt(Def));
    } else {
      LLVM_DEBUG(dbgs() << " move kill to " << Kill << '\t' << *MBBI);
      AssignI.setStop(Kill);
    }
  }
}

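/// Find a dominator of MBB that is still dominated by DefMBB but sits at the
/// smallest loop depth seen, so that hoisted back-copies land in less
/// frequently executed code. Loops are left through the immediate dominator of
/// the loop header.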
MachineBasicBlock*
SplitEditor::findShallowDominator(MachineBasicBlock *MBB,
                                  MachineBasicBlock *DefMBB) {
  if (MBB == DefMBB)
    return MBB;
  assert(MDT.dominates(DefMBB, MBB) && "MBB must be dominated by the def.");

  const MachineLoopInfo &Loops = SA.Loops;
  const MachineLoop *DefLoop = Loops.getLoopFor(DefMBB);
  MachineDomTreeNode *DefDomNode = MDT[DefMBB];

  // Best candidate so far.
  MachineBasicBlock *BestMBB = MBB;
  unsigned BestDepth = std::numeric_limits<unsigned>::max();

  while (true) {
    const MachineLoop *Loop = Loops.getLoopFor(MBB);

    // MBB isn't in a loop, it doesn't get any better. All dominators have a
    // higher frequency by definition.
    if (!Loop) {
      LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
                        << " dominates " << printMBBReference(*MBB)
                        << " at depth 0\n");
      return MBB;
    }

    // We'll never be able to exit the DefLoop.
    if (Loop == DefLoop) {
      LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
                        << " dominates " << printMBBReference(*MBB)
                        << " in the same loop\n");
      return MBB;
    }

    // Least busy dominator seen so far.
    unsigned Depth = Loop->getLoopDepth();
    if (Depth < BestDepth) {
      BestMBB = MBB;
      BestDepth = Depth;
      LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
                        << " dominates " << printMBBReference(*MBB)
                        << " at depth " << Depth << '\n');
    }

    // Leave loop by going to the immediate dominator of the loop header.
    // This is a bigger stride than simply walking up the dominator tree.
    MachineDomTreeNode *IDom = MDT[Loop->getHeader()]->getIDom();

    // Too far up the dominator tree?
    if (!IDom || !MDT.dominates(DefDomNode, IDom))
      return BestMBB;

    MBB = IDom->getBlock();
  }
}

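/// For parent values in NotToHoistSet, collect back-copies that are dominated
/// by another back-copy of the same value into BackCopies so they can be
/// removed as redundant.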
void SplitEditor::computeRedundantBackCopies(
    DenseSet<unsigned> &NotToHoistSet, SmallVectorImpl<VNInfo *> &BackCopies) {
  LiveInterval *LI = &LIS.getInterval(Edit->get(0));
  const LiveInterval *Parent = &Edit->getParent();
  SmallVector<SmallPtrSet<VNInfo *, 8>, 8> EqualVNs(Parent->getNumValNums());
  SmallPtrSet<VNInfo *, 8> DominatedVNIs;

  // Aggregate VNIs having the same value as ParentVNI.
  for (VNInfo *VNI : LI->valnos) {
    if (VNI->isUnused())
      continue;
    VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);
    EqualVNs[ParentVNI->id].insert(VNI);
  }

  // For VNI aggregation of each ParentVNI, collect dominated, i.e.,
  // redundant VNIs to BackCopies.
  for (unsigned i = 0, e = Parent->getNumValNums(); i != e; ++i) {
    const VNInfo *ParentVNI = Parent->getValNumInfo(i);
    if (!NotToHoistSet.count(ParentVNI->id))
      continue;
    SmallPtrSetIterator<VNInfo *> It1 = EqualVNs[ParentVNI->id].begin();
    SmallPtrSetIterator<VNInfo *> It2 = It1;
    for (; It1 != EqualVNs[ParentVNI->id].end(); ++It1) {
      It2 = It1;
      for (++It2; It2 != EqualVNs[ParentVNI->id].end(); ++It2) {
        if (DominatedVNIs.count(*It1) || DominatedVNIs.count(*It2))
          continue;

        MachineBasicBlock *MBB1 = LIS.getMBBFromIndex((*It1)->def);
        MachineBasicBlock *MBB2 = LIS.getMBBFromIndex((*It2)->def);
        if (MBB1 == MBB2) {
          DominatedVNIs.insert((*It1)->def < (*It2)->def ? (*It2) : (*It1));
        } else if (MDT.dominates(MBB1, MBB2)) {
          DominatedVNIs.insert(*It2);
        } else if (MDT.dominates(MBB2, MBB1)) {
          DominatedVNIs.insert(*It1);
        }
      }
    }
    if (!DominatedVNIs.empty()) {
      forceRecompute(0, *ParentVNI);
      append_range(BackCopies, DominatedVNIs);
      DominatedVNIs.clear();
    }
  }
}

/// For SM_Size mode, find a common dominator for all the back-copies for
/// the same ParentVNI and hoist the backcopies to the dominator BB.
/// For SM_Speed mode, if the common dominator is hot and it is not beneficial
/// to do the hoisting, simply remove the dominated backcopies for the same
/// ParentVNI.
void SplitEditor::hoistCopies() {
  // Get the complement interval, always RegIdx 0.
  LiveInterval *LI = &LIS.getInterval(Edit->get(0));
  const LiveInterval *Parent = &Edit->getParent();

  // Track the nearest common dominator for all back-copies for each ParentVNI,
  // indexed by ParentVNI->id.
  using DomPair = std::pair<MachineBasicBlock *, SlotIndex>;
  SmallVector<DomPair, 8> NearestDom(Parent->getNumValNums());
  // The total cost of all the back-copies for each ParentVNI.
  SmallVector<BlockFrequency, 8> Costs(Parent->getNumValNums());
  // The ParentVNI->id set for which hoisting back-copies are not beneficial
  // for Speed.
  DenseSet<unsigned> NotToHoistSet;

  // Find the nearest common dominator for parent values with multiple
  // back-copies. If a single back-copy dominates, put it in DomPair.second.
  for (VNInfo *VNI : LI->valnos) {
    if (VNI->isUnused())
      continue;
    VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);
    assert(ParentVNI && "Parent not live at complement def");

    // Don't hoist remats. The complement is probably going to disappear
    // completely anyway.
    if (Edit->didRematerialize(ParentVNI))
      continue;

    MachineBasicBlock *ValMBB = LIS.getMBBFromIndex(VNI->def);

    DomPair &Dom = NearestDom[ParentVNI->id];

    // Keep directly defined parent values. This is either a PHI or an
    // instruction in the complement range. All other copies of ParentVNI
    // should be eliminated.
    if (VNI->def == ParentVNI->def) {
      LLVM_DEBUG(dbgs() << "Direct complement def at " << VNI->def << '\n');
      Dom = DomPair(ValMBB, VNI->def);
      continue;
    }
    // Skip the singly mapped values. There is nothing to gain from hoisting a
    // single back-copy.
    if (Values.lookup(std::make_pair(0, ParentVNI->id)).getPointer()) {
      LLVM_DEBUG(dbgs() << "Single complement def at " << VNI->def << '\n');
      continue;
    }

    if (!Dom.first) {
      // First time we see ParentVNI. VNI dominates itself.
      Dom = DomPair(ValMBB, VNI->def);
    } else if (Dom.first == ValMBB) {
      // Two defs in the same block. Pick the earlier def.
      if (!Dom.second.isValid() || VNI->def < Dom.second)
        Dom.second = VNI->def;
    } else {
      // Different basic blocks. Check if one dominates.
      MachineBasicBlock *Near =
          MDT.findNearestCommonDominator(Dom.first, ValMBB);
      if (Near == ValMBB)
        // Def ValMBB dominates.
        Dom = DomPair(ValMBB, VNI->def);
      else if (Near != Dom.first)
        // None dominate. Hoist to common dominator, need new def.
        Dom = DomPair(Near, SlotIndex());
      Costs[ParentVNI->id] += MBFI.getBlockFreq(ValMBB);
    }

    LLVM_DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@'
                      << VNI->def << " for parent " << ParentVNI->id << '@'
                      << ParentVNI->def << " hoist to "
                      << printMBBReference(*Dom.first) << ' ' << Dom.second
                      << '\n');
  }

  // Insert the hoisted copies.
  for (unsigned i = 0, e = Parent->getNumValNums(); i != e; ++i) {
    DomPair &Dom = NearestDom[i];
    if (!Dom.first || Dom.second.isValid())
      continue;
    // This value needs a hoisted copy inserted at the end of Dom.first.
    const VNInfo *ParentVNI = Parent->getValNumInfo(i);
    MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(ParentVNI->def);
    // Get a less loopy dominator than Dom.first.
    Dom.first = findShallowDominator(Dom.first, DefMBB);
    if (SpillMode == SM_Speed &&
        MBFI.getBlockFreq(Dom.first) > Costs[ParentVNI->id]) {
      NotToHoistSet.insert(ParentVNI->id);
      continue;
    }
    SlotIndex LSP = SA.getLastSplitPoint(Dom.first);
    if (LSP <= ParentVNI->def) {
      NotToHoistSet.insert(ParentVNI->id);
      continue;
    }
    Dom.second = defFromParent(0, ParentVNI, LSP, *Dom.first,
                               SA.getLastSplitPointIter(Dom.first))->def;
  }

  // Remove redundant back-copies that are now known to be dominated by another
  // def with the same value.
  SmallVector<VNInfo*, 8> BackCopies;
  for (VNInfo *VNI : LI->valnos) {
    if (VNI->isUnused())
      continue;
    VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);
    const DomPair &Dom = NearestDom[ParentVNI->id];
    if (!Dom.first || Dom.second == VNI->def ||
        NotToHoistSet.count(ParentVNI->id))
      continue;
    BackCopies.push_back(VNI);
    forceRecompute(0, *ParentVNI);
  }

  // If it is not beneficial to hoist all the BackCopies, simply remove
  // redundant BackCopies in speed mode.
  if (SpillMode == SM_Speed && !NotToHoistSet.empty())
    computeRedundantBackCopies(NotToHoistSet, BackCopies);

  removeBackCopies(BackCopies);
}

/// transferValues - Transfer all possible values to the new live ranges.
/// Values that were rematerialized are left alone, they need LICalc.extend().
bool SplitEditor::transferValues() {
  bool Skipped = false;
  RegAssignMap::const_iterator AssignI = RegAssign.begin();
  for (const LiveRange::Segment &S : Edit->getParent()) {
    LLVM_DEBUG(dbgs() << " blit " << S << ':');
    VNInfo *ParentVNI = S.valno;
    // RegAssign has holes where RegIdx 0 should be used.
    SlotIndex Start = S.start;
    AssignI.advanceTo(Start);
    do {
      unsigned RegIdx;
      SlotIndex End = S.end;
      if (!AssignI.valid()) {
        RegIdx = 0;
      } else if (AssignI.start() <= Start) {
        RegIdx = AssignI.value();
        if (AssignI.stop() < End) {
          End = AssignI.stop();
          ++AssignI;
        }
      } else {
        RegIdx = 0;
        End = std::min(End, AssignI.start());
      }

      // The interval [Start;End) is continuously mapped to RegIdx, ParentVNI.
      LLVM_DEBUG(dbgs() << " [" << Start << ';' << End << ")=" << RegIdx << '('
                        << printReg(Edit->get(RegIdx)) << ')');
      LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx));

      // Check for a simply defined value that can be blitted directly.
      ValueForcePair VFP = Values.lookup(std::make_pair(RegIdx, ParentVNI->id));
      if (VNInfo *VNI = VFP.getPointer()) {
        LLVM_DEBUG(dbgs() << ':' << VNI->id);
        LI.addSegment(LiveInterval::Segment(Start, End, VNI));
        Start = End;
        continue;
      }

      // Skip values with forced recomputation.
      if (VFP.getInt()) {
        LLVM_DEBUG(dbgs() << "(recalc)");
        Skipped = true;
        Start = End;
        continue;
      }

      LiveIntervalCalc &LIC = getLICalc(RegIdx);

      // This value has multiple defs in RegIdx, but it wasn't rematerialized,
      // so the live range is accurate. Add live-in blocks in [Start;End) to the
      // LiveInBlocks.
      MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start)->getIterator();
      SlotIndex BlockStart, BlockEnd;
      std::tie(BlockStart, BlockEnd) = LIS.getSlotIndexes()->getMBBRange(&*MBB);

      // The first block may be live-in, or it may have its own def.
      if (Start != BlockStart) {
        VNInfo *VNI = LI.extendInBlock(BlockStart, std::min(BlockEnd, End));
        assert(VNI && "Missing def for complex mapped value");
        LLVM_DEBUG(dbgs() << ':' << VNI->id << "*" << printMBBReference(*MBB));
        // MBB has its own def. Is it also live-out?
        if (BlockEnd <= End)
          LIC.setLiveOutValue(&*MBB, VNI);

        // Skip to the next block for live-in.
        ++MBB;
        BlockStart = BlockEnd;
      }

      // Handle the live-in blocks covered by [Start;End).
      assert(Start <= BlockStart && "Expected live-in block");
      while (BlockStart < End) {
        LLVM_DEBUG(dbgs() << ">" << printMBBReference(*MBB));
        BlockEnd = LIS.getMBBEndIdx(&*MBB);
        if (BlockStart == ParentVNI->def) {
          // This block has the def of a parent PHI, so it isn't live-in.
          assert(ParentVNI->isPHIDef() && "Non-phi defined at block start?");
          VNInfo *VNI = LI.extendInBlock(BlockStart, std::min(BlockEnd, End));
          assert(VNI && "Missing def for complex mapped parent PHI");
          if (End >= BlockEnd)
            LIC.setLiveOutValue(&*MBB, VNI); // Live-out as well.
        } else {
          // This block needs a live-in value. The last block covered may not
          // be live-out.
          if (End < BlockEnd)
            LIC.addLiveInBlock(LI, MDT[&*MBB], End);
          else {
            // Live-through, and we don't know the value.
            LIC.addLiveInBlock(LI, MDT[&*MBB]);
            LIC.setLiveOutValue(&*MBB, nullptr);
          }
        }
        BlockStart = BlockEnd;
        ++MBB;
      }
      Start = End;
    } while (Start != S.end);
    LLVM_DEBUG(dbgs() << '\n');
  }

  LICalc[0].calculateValues();
  if (SpillMode)
    LICalc[1].calculateValues();

  return Skipped;
}

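/// If the segment of LR containing Def is a dead PHI def, remove it. Returns
/// true when the segment was removed or did not exist in the first place.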
static bool removeDeadSegment(SlotIndex Def, LiveRange &LR) {
  const LiveRange::Segment *Seg = LR.getSegmentContaining(Def);
  if (Seg == nullptr)
    return true;
  if (Seg->end != Def.getDeadSlot())
    return false;
  // This is a dead PHI. Remove it.
  LR.removeSegment(*Seg, true);
  return true;
}

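/// Extend LR so that the PHI value defined at the start of block B is reached
/// from every predecessor in which the corresponding parent (sub)range is
/// live out.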
void SplitEditor::extendPHIRange(MachineBasicBlock &B, LiveIntervalCalc &LIC,
                                 LiveRange &LR, LaneBitmask LM,
                                 ArrayRef<SlotIndex> Undefs) {
  for (MachineBasicBlock *P : B.predecessors()) {
    SlotIndex End = LIS.getMBBEndIdx(P);
    SlotIndex LastUse = End.getPrevSlot();
    // The predecessor may not have a live-out value. That is OK, like an
    // undef PHI operand.
    const LiveInterval &PLI = Edit->getParent();
    // Need the cast because the inputs to ?: would otherwise be deemed
    // "incompatible": SubRange vs LiveInterval.
    const LiveRange &PSR = !LM.all() ? getSubRangeForMaskExact(LM, PLI)
                                     : static_cast<const LiveRange &>(PLI);
    if (PSR.liveAt(LastUse))
      LIC.extend(LR, End, /*PhysReg=*/0, Undefs);
  }
}

void SplitEditor::extendPHIKillRanges() {
  // Extend live ranges to be live-out for successor PHI values.

  // Visit each PHI def slot in the parent live interval. If the def is dead,
  // remove it. Otherwise, extend the live interval to reach the end indexes
  // of all predecessor blocks.

  const LiveInterval &ParentLI = Edit->getParent();
  for (const VNInfo *V : ParentLI.valnos) {
    if (V->isUnused() || !V->isPHIDef())
      continue;

    unsigned RegIdx = RegAssign.lookup(V->def);
    LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx));
    LiveIntervalCalc &LIC = getLICalc(RegIdx);
    MachineBasicBlock &B = *LIS.getMBBFromIndex(V->def);
    if (!removeDeadSegment(V->def, LI))
      extendPHIRange(B, LIC, LI, LaneBitmask::getAll(), /*Undefs=*/{});
  }

  SmallVector<SlotIndex, 4> Undefs;
  LiveIntervalCalc SubLIC;

  for (const LiveInterval::SubRange &PS : ParentLI.subranges()) {
    for (const VNInfo *V : PS.valnos) {
      if (V->isUnused() || !V->isPHIDef())
        continue;
      unsigned RegIdx = RegAssign.lookup(V->def);
      LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx));
      LiveInterval::SubRange &S = getSubRangeForMaskExact(PS.LaneMask, LI);
      if (removeDeadSegment(V->def, S))
        continue;

      MachineBasicBlock &B = *LIS.getMBBFromIndex(V->def);
      SubLIC.reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
                   &LIS.getVNInfoAllocator());
      Undefs.clear();
      LI.computeSubRangeUndefs(Undefs, PS.LaneMask, MRI, *LIS.getSlotIndexes());
      extendPHIRange(B, SubLIC, S, PS.LaneMask, Undefs);
    }
  }
}

/// rewriteAssigned - Rewrite all uses of Edit->getReg().
void SplitEditor::rewriteAssigned(bool ExtendRanges) {
  struct ExtPoint {
    ExtPoint(const MachineOperand &O, unsigned R, SlotIndex N)
        : MO(O), RegIdx(R), Next(N) {}

    MachineOperand MO;
    unsigned RegIdx;
    SlotIndex Next;
  };

  SmallVector<ExtPoint,4> ExtPoints;

  for (MachineOperand &MO :
       llvm::make_early_inc_range(MRI.reg_operands(Edit->getReg()))) {
    MachineInstr *MI = MO.getParent();
    // LiveDebugVariables should have handled all DBG_VALUE instructions.
    if (MI->isDebugValue()) {
      LLVM_DEBUG(dbgs() << "Zapping " << *MI);
      MO.setReg(0);
      continue;
    }

    // <undef> operands don't really read the register, so it doesn't matter
    // which register we choose. When the use operand is tied to a def, we must
    // use the same register as the def, so just do that always.
    SlotIndex Idx = LIS.getInstructionIndex(*MI);
    if (MO.isDef() || MO.isUndef())
      Idx = Idx.getRegSlot(MO.isEarlyClobber());

    // Rewrite to the mapped register at Idx.
    unsigned RegIdx = RegAssign.lookup(Idx);
    LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx));
    MO.setReg(LI.reg());
    LLVM_DEBUG(dbgs() << " rewr " << printMBBReference(*MI->getParent())
                      << '\t' << Idx << ':' << RegIdx << '\t' << *MI);

    // Extend liveness to Idx if the instruction reads reg.
    if (!ExtendRanges || MO.isUndef())
      continue;

    // Skip instructions that don't read Reg.
    if (MO.isDef()) {
      if (!MO.getSubReg() && !MO.isEarlyClobber())
        continue;
      // We may want to extend a live range for a partial redef, or for a use
      // tied to an early clobber.
      if (!Edit->getParent().liveAt(Idx.getPrevSlot()))
        continue;
    } else {
      assert(MO.isUse());
      bool IsEarlyClobber = false;
      if (MO.isTied()) {
        // If the tied def is an early clobber, extend the live range to the
        // `e` slot rather than the `r` slot: the `e` slot is where the live
        // range of the early-clobber tied-def operand begins. For example:
        //   0  %0 = ...
        //   16 early-clobber %0 = Op %0 (tied-def 0), ...
        //   32 ... = Op %0
        // Before extending:
        //   %0 = [0r, 0d) [16e, 32d)
        // We want to extend from 0d to 16e, not 16r: extending to 16r would
        // do nothing, because that point is already contained in [16e, 32d).
        unsigned OpIdx = MO.getOperandNo();
        unsigned DefOpIdx = MI->findTiedOperandIdx(OpIdx);
        const MachineOperand &DefOp = MI->getOperand(DefOpIdx);
        IsEarlyClobber = DefOp.isEarlyClobber();
      }

      Idx = Idx.getRegSlot(IsEarlyClobber);
    }

    SlotIndex Next = Idx;
    if (LI.hasSubRanges()) {
      // We have to delay extending subranges until we have seen all operands
      // defining the register. This is because a <def,read-undef> operand
      // will create an "undef" point, and we cannot extend any subranges
      // until all of them have been accounted for.
      if (MO.isUse())
        ExtPoints.push_back(ExtPoint(MO, RegIdx, Next));
    } else {
      LiveIntervalCalc &LIC = getLICalc(RegIdx);
      LIC.extend(LI, Next, 0, ArrayRef<SlotIndex>());
    }
  }

  for (ExtPoint &EP : ExtPoints) {
    LiveInterval &LI = LIS.getInterval(Edit->get(EP.RegIdx));
    assert(LI.hasSubRanges());

    LiveIntervalCalc SubLIC;
    Register Reg = EP.MO.getReg(), Sub = EP.MO.getSubReg();
    LaneBitmask LM = Sub != 0 ? TRI.getSubRegIndexLaneMask(Sub)
                              : MRI.getMaxLaneMaskForVReg(Reg);
    for (LiveInterval::SubRange &S : LI.subranges()) {
      if ((S.LaneMask & LM).none())
        continue;
      // The problem here can be that the new register may have been created
      // for a partially defined original register. For example:
      //   %0:subreg_hireg<def,read-undef> = ...
      //   ...
      //   %1 = COPY %0
      if (S.empty())
        continue;
      SubLIC.reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
                   &LIS.getVNInfoAllocator());
      SmallVector<SlotIndex, 4> Undefs;
      LI.computeSubRangeUndefs(Undefs, S.LaneMask, MRI, *LIS.getSlotIndexes());
      SubLIC.extend(S, EP.Next, 0, Undefs);
    }
  }

  for (Register R : *Edit) {
    LiveInterval &LI = LIS.getInterval(R);
    if (!LI.hasSubRanges())
      continue;
    LI.clear();
    LI.removeEmptySubRanges();
    LIS.constructMainRangeFromSubranges(LI);
  }
}

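/// Delete rematerialization victims: original def instructions whose remaining
/// defs are all dead now that the value has been rematerialized elsewhere.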
void SplitEditor::deleteRematVictims() {
  SmallVector<MachineInstr*, 8> Dead;
  for (const Register &R : *Edit) {
    LiveInterval *LI = &LIS.getInterval(R);
    for (const LiveRange::Segment &S : LI->segments) {
      // Dead defs end at the dead slot.
      if (S.end != S.valno->def.getDeadSlot())
        continue;
      if (S.valno->isPHIDef())
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(S.valno->def);
      assert(MI && "Missing instruction for dead def");
      MI->addRegisterDead(LI->reg(), &TRI);

      if (!MI->allDefsAreDead())
        continue;

      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      Dead.push_back(MI);
    }
  }

  if (Dead.empty())
    return;

  Edit->eliminateDeadDefs(Dead, std::nullopt);
}

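/// Force ParentVNI to be recomputed in every new interval. For PHI values the
/// predecessor values feeding the PHI are traced and forced as well.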
1468void SplitEditor::forceRecomputeVNI(const VNInfo &ParentVNI) {
1469 // Fast-path for common case.
1470 if (!ParentVNI.isPHIDef()) {
1471 for (unsigned I = 0, E = Edit->size(); I != E; ++I)
1472 forceRecompute(RegIdx: I, ParentVNI);
1473 return;
1474 }
1475
1476 // Trace value through phis.
1477 SmallPtrSet<const VNInfo *, 8> Visited; ///< whether VNI was/is in worklist.
1478 SmallVector<const VNInfo *, 4> WorkList;
1479 Visited.insert(Ptr: &ParentVNI);
1480 WorkList.push_back(Elt: &ParentVNI);
1481
1482 const LiveInterval &ParentLI = Edit->getParent();
1483 const SlotIndexes &Indexes = *LIS.getSlotIndexes();
1484 do {
1485 const VNInfo &VNI = *WorkList.back();
1486 WorkList.pop_back();
1487 for (unsigned I = 0, E = Edit->size(); I != E; ++I)
1488 forceRecompute(RegIdx: I, ParentVNI: VNI);
1489 if (!VNI.isPHIDef())
1490 continue;
1491
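    // This is a PHI value, so the values live out of the predecessors feed
    // into it; queue them so their ranges are recomputed as well.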
    MachineBasicBlock &MBB = *Indexes.getMBBFromIndex(VNI.def);
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      SlotIndex PredEnd = Indexes.getMBBEndIdx(Pred);
      VNInfo *PredVNI = ParentLI.getVNInfoBefore(PredEnd);
      assert(PredVNI && "Value available in PhiVNI predecessor");
      if (Visited.insert(PredVNI).second)
        WorkList.push_back(PredVNI);
    }
  } while (!WorkList.empty());
}

void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
  ++NumFinished;

  // At this point, the live intervals in Edit contain VNInfos corresponding to
  // the inserted copies.

  // Add the original defs from the parent interval.
  for (const VNInfo *ParentVNI : Edit->getParent().valnos) {
    if (ParentVNI->isUnused())
      continue;
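    // RegAssign only holds explicit assignments; indexes that were never
    // assigned map to 0, the complement interval.  Passing Original = true
    // tells defValue() this is the parent's own def, not an inserted copy.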
    unsigned RegIdx = RegAssign.lookup(ParentVNI->def);
    defValue(RegIdx, ParentVNI, ParentVNI->def, true);

    // Force rematted values to be recomputed everywhere.
    // The new live ranges may be truncated.
    if (Edit->didRematerialize(ParentVNI))
      forceRecomputeVNI(*ParentVNI);
  }

  // Hoist back-copies to the complement interval when in spill mode.
  switch (SpillMode) {
  case SM_Partition:
    // Leave all back-copies as is.
    break;
  case SM_Size:
  case SM_Speed:
    // hoistCopies will behave differently between size and speed.
    hoistCopies();
  }

  // Transfer the simply mapped values, check if any are skipped.
  bool Skipped = transferValues();

  // Rewrite virtual registers, possibly extending ranges.
  rewriteAssigned(Skipped);

  if (Skipped)
    extendPHIKillRanges();
  else
    ++NumSimple;

  // Delete defs that were rematted everywhere.
  if (Skipped)
    deleteRematVictims();

  // Get rid of unused values and set phi-kill flags.
  for (Register Reg : *Edit) {
    LiveInterval &LI = LIS.getInterval(Reg);
    LI.removeEmptySubRanges();
    LI.RenumberValues();
  }

  // Provide a reverse mapping from original indices to Edit ranges.
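  // The map starts out as the identity, e.g. {0, 1, 2} for three registers;
  // entries appended for connected components below map back to the index of
  // the register they were split from.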
  if (LRMap) {
    auto Seq = llvm::seq<unsigned>(0, Edit->size());
    LRMap->assign(Seq.begin(), Seq.end());
  }

  // Now check if any registers were separated into multiple components.
  ConnectedVNInfoEqClasses ConEQ(LIS);
  for (unsigned i = 0, e = Edit->size(); i != e; ++i) {
    // Don't use iterators, they are invalidated by create() below.
    Register VReg = Edit->get(i);
    LiveInterval &LI = LIS.getInterval(VReg);
    SmallVector<LiveInterval*, 8> SplitLIs;
    LIS.splitSeparateComponents(LI, SplitLIs);
    Register Original = VRM.getOriginal(VReg);
    for (LiveInterval *SplitLI : SplitLIs)
      VRM.setIsSplitFromReg(SplitLI->reg(), Original);

    // The new intervals all map back to i.
    if (LRMap)
      LRMap->resize(Edit->size(), i);
  }

  // Calculate spill weight and allocation hints for new intervals.
  Edit->calculateRegClassAndHint(VRM.getMachineFunction(), VRAI);

  assert(!LRMap || LRMap->size() == Edit->size());
}

//===----------------------------------------------------------------------===//
// Single Block Splitting
//===----------------------------------------------------------------------===//

bool SplitAnalysis::shouldSplitSingleBlock(const BlockInfo &BI,
                                           bool SingleInstrs) const {
  // Always split for multiple instructions.
  if (!BI.isOneInstr())
    return true;
  // Don't split for single instructions unless explicitly requested.
  if (!SingleInstrs)
    return false;
  // Splitting a live-through range always makes progress.
  if (BI.LiveIn && BI.LiveOut)
    return true;
  // No point in isolating a copy. It has no register class constraints.
  MachineInstr *MI = LIS.getInstructionFromIndex(BI.FirstInstr);
  bool copyLike = TII.isCopyInstr(*MI) || MI->isSubregToReg();
  if (copyLike)
    return false;
  // Finally, don't isolate an end point that was created by earlier splits.
  return isOriginalEndpoint(BI.FirstInstr);
}

void SplitEditor::splitSingleBlock(const SplitAnalysis::BlockInfo &BI) {
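  // Create a new interval for the uses in this block: enter it before the
  // first instruction (never after the block's last split point) and leave it
  // after the last use.  If the last use is past the split point, the new and
  // old intervals overlap over the tail of the block.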
  openIntv();
  SlotIndex LastSplitPoint = SA.getLastSplitPoint(BI.MBB);
  SlotIndex SegStart = enterIntvBefore(std::min(BI.FirstInstr,
                                                LastSplitPoint));
  if (!BI.LiveOut || BI.LastInstr < LastSplitPoint) {
    useIntv(SegStart, leaveIntvAfter(BI.LastInstr));
  } else {
    // The last use is after the last valid split point.
    SlotIndex SegStop = leaveIntvBefore(LastSplitPoint);
    useIntv(SegStart, SegStop);
    overlapIntv(SegStop, BI.LastInstr);
  }
}

//===----------------------------------------------------------------------===//
// Global Live Range Splitting Support
//===----------------------------------------------------------------------===//

// These methods support global live range splitting where a global algorithm
// decides which interval should be live across each CFG edge. They insert
// split points and color intervals within basic blocks while avoiding
// interference.
//
// Note that splitSingleBlock is also useful for blocks where both CFG edges
// are on the stack.

void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
                                        unsigned IntvIn, SlotIndex LeaveBefore,
                                        unsigned IntvOut, SlotIndex EnterAfter){
  SlotIndex Start, Stop;
  std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum);

  LLVM_DEBUG(dbgs() << "%bb." << MBBNum << " [" << Start << ';' << Stop
                    << ") intf " << LeaveBefore << '-' << EnterAfter
                    << ", live-through " << IntvIn << " -> " << IntvOut);

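  // IntvIn and IntvOut are the interval indexes expected to be live into and
  // out of the block; an index of 0 means the value stays in the complement
  // (spilled) interval on that side.  When valid, LeaveBefore is the point by
  // which IntvIn must have been left, and EnterAfter the earliest point at
  // which IntvOut may be entered.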
  assert((IntvIn || IntvOut) && "Use splitSingleBlock for isolated blocks");

  assert((!LeaveBefore || LeaveBefore < Stop) && "Interference after block");
  assert((!IntvIn || !LeaveBefore || LeaveBefore > Start) && "Impossible intf");
  assert((!EnterAfter || EnterAfter >= Start) && "Interference before block");

  MachineBasicBlock *MBB = VRM.getMachineFunction().getBlockNumbered(MBBNum);

  if (!IntvOut) {
    LLVM_DEBUG(dbgs() << ", spill on entry.\n");
    //
    //        <<<<<<<<<    Possible LeaveBefore interference.
    //    |-----------|    Live through.
    //    -____________    Spill on entry.
    //
    selectIntv(IntvIn);
    SlotIndex Idx = leaveIntvAtTop(*MBB);
    assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    (void)Idx;
    return;
  }

  if (!IntvIn) {
    LLVM_DEBUG(dbgs() << ", reload on exit.\n");
    //
    //    >>>>>>>          Possible EnterAfter interference.
    //    |-----------|    Live through.
    //    ___________--    Reload on exit.
    //
    selectIntv(IntvOut);
    SlotIndex Idx = enterIntvAtEnd(*MBB);
    assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
    (void)Idx;
    return;
  }

  if (IntvIn == IntvOut && !LeaveBefore && !EnterAfter) {
    LLVM_DEBUG(dbgs() << ", straight through.\n");
    //
    //    |-----------|    Live through.
    //    -------------    Straight through, same intv, no interference.
    //
    selectIntv(IntvOut);
    useIntv(Start, Stop);
    return;
  }

  // We cannot legally insert splits after LSP.
  SlotIndex LSP = SA.getLastSplitPoint(MBBNum);
  assert((!IntvOut || !EnterAfter || EnterAfter < LSP) && "Impossible intf");

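  // A single switch point is enough when the interference blocking IntvOut
  // ends (EnterAfter) before the interference blocking IntvIn begins
  // (LeaveBefore), or when one of them is absent altogether; that leaves a
  // gap in which to switch intervals.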
  if (IntvIn != IntvOut && (!LeaveBefore || !EnterAfter ||
                  LeaveBefore.getBaseIndex() > EnterAfter.getBoundaryIndex())) {
    LLVM_DEBUG(dbgs() << ", switch avoiding interference.\n");
    //
    //    >>>>     <<<<    Non-overlapping EnterAfter/LeaveBefore interference.
    //    |-----------|    Live through.
    //    ------=======    Switch intervals between interference.
    //
    selectIntv(IntvOut);
    SlotIndex Idx;
    if (LeaveBefore && LeaveBefore < LSP) {
      Idx = enterIntvBefore(LeaveBefore);
      useIntv(Idx, Stop);
    } else {
      Idx = enterIntvAtEnd(*MBB);
    }
    selectIntv(IntvIn);
    useIntv(Start, Idx);
    assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
    return;
  }

  LLVM_DEBUG(dbgs() << ", create local intv for interference.\n");
  //
  //    >>><><><><<<<    Overlapping EnterAfter/LeaveBefore interference.
  //    |-----------|    Live through.
  //    ==---------==    Switch intervals before/after interference.
  //
  assert(LeaveBefore <= EnterAfter && "Missed case");

  selectIntv(IntvOut);
  SlotIndex Idx = enterIntvAfter(EnterAfter);
  useIntv(Idx, Stop);
  assert((!EnterAfter || Idx >= EnterAfter) && "Interference");

  selectIntv(IntvIn);
  Idx = leaveIntvBefore(LeaveBefore);
  useIntv(Start, Idx);
  assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
}

void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
                                  unsigned IntvIn, SlotIndex LeaveBefore) {
  SlotIndex Start, Stop;
  std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);

  LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';'
                    << Stop << "), uses " << BI.FirstInstr << '-'
                    << BI.LastInstr << ", reg-in " << IntvIn
                    << ", leave before " << LeaveBefore
                    << (BI.LiveOut ? ", stack-out" : ", killed in block"));

  assert(IntvIn && "Must have register in");
  assert(BI.LiveIn && "Must be live-in");
  assert((!LeaveBefore || LeaveBefore > Start) && "Bad interference");

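  // The value enters this block in register interval IntvIn and is either
  // killed here or live out on the stack.  Cover the uses with IntvIn for as
  // long as the interference starting at LeaveBefore allows, then fall back
  // to the stack or to a new local interval.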
  if (!BI.LiveOut && (!LeaveBefore || LeaveBefore >= BI.LastInstr)) {
    LLVM_DEBUG(dbgs() << " before interference.\n");
    //
    //               <<<    Interference after kill.
    //     |---o---x   |    Killed in block.
    //     =========        Use IntvIn everywhere.
    //
    selectIntv(IntvIn);
    useIntv(Start, BI.LastInstr);
    return;
  }

  SlotIndex LSP = SA.getLastSplitPoint(BI.MBB);

  if (!LeaveBefore || LeaveBefore > BI.LastInstr.getBoundaryIndex()) {
    //
    //               <<<    Possible interference after last use.
    //     |---o---o---|    Live-out on stack.
    //     =========____    Leave IntvIn after last use.
    //
    //                 <    Interference after last use.
    //     |---o---o--o|    Live-out on stack, late last use.
    //     ============     Copy to stack after LSP, overlap IntvIn.
    //            \_____    Stack interval is live-out.
    //
    if (BI.LastInstr < LSP) {
      LLVM_DEBUG(dbgs() << ", spill after last use before interference.\n");
      selectIntv(IntvIn);
      SlotIndex Idx = leaveIntvAfter(BI.LastInstr);
      useIntv(Start, Idx);
      assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    } else {
      LLVM_DEBUG(dbgs() << ", spill before last split point.\n");
      selectIntv(IntvIn);
      SlotIndex Idx = leaveIntvBefore(LSP);
      overlapIntv(Idx, BI.LastInstr);
      useIntv(Start, Idx);
      assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
    }
    return;
  }

  // The interference is overlapping somewhere we wanted to use IntvIn. That
  // means we need to create a local interval that can be allocated a
  // different register.
  unsigned LocalIntv = openIntv();
  (void)LocalIntv;
  LLVM_DEBUG(dbgs() << ", creating local interval " << LocalIntv << ".\n");

  if (!BI.LiveOut || BI.LastInstr < LSP) {
    //
    //           <<<<<<<    Interference overlapping uses.
    //     |---o---o---|    Live-out on stack.
    //     =====----____    Leave IntvIn before interference, then spill.
    //
    SlotIndex To = leaveIntvAfter(BI.LastInstr);
    SlotIndex From = enterIntvBefore(LeaveBefore);
    useIntv(From, To);
    selectIntv(IntvIn);
    useIntv(Start, From);
    assert((!LeaveBefore || From <= LeaveBefore) && "Interference");
    return;
  }

  //           <<<<<<<    Interference overlapping uses.
  //     |---o---o--o|    Live-out on stack, late last use.
  //     =====-------     Copy to stack before LSP, overlap LocalIntv.
  //            \_____    Stack interval is live-out.
  //
  SlotIndex To = leaveIntvBefore(LSP);
  overlapIntv(To, BI.LastInstr);
  SlotIndex From = enterIntvBefore(std::min(To, LeaveBefore));
  useIntv(From, To);
  selectIntv(IntvIn);
  useIntv(Start, From);
  assert((!LeaveBefore || From <= LeaveBefore) && "Interference");
}

void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
                                   unsigned IntvOut, SlotIndex EnterAfter) {
  SlotIndex Start, Stop;
  std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);

  LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';'
                    << Stop << "), uses " << BI.FirstInstr << '-'
                    << BI.LastInstr << ", reg-out " << IntvOut
                    << ", enter after " << EnterAfter
                    << (BI.LiveIn ? ", stack-in" : ", defined in block"));

  SlotIndex LSP = SA.getLastSplitPoint(BI.MBB);

  assert(IntvOut && "Must have register out");
  assert(BI.LiveOut && "Must be live-out");
  assert((!EnterAfter || EnterAfter < LSP) && "Bad interference");

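  // The value leaves this block in register interval IntvOut; it is either
  // defined in the block or live-in on the stack.  Enter IntvOut as early as
  // the interference ending at EnterAfter allows; any uses before that point
  // are covered by a new local interval below.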
  if (!BI.LiveIn && (!EnterAfter || EnterAfter <= BI.FirstInstr)) {
    LLVM_DEBUG(dbgs() << " after interference.\n");
    //
    //    >>>>             Interference before def.
    //    |   o---o---|    Defined in block.
    //        =========    Use IntvOut everywhere.
    //
    selectIntv(IntvOut);
    useIntv(BI.FirstInstr, Stop);
    return;
  }

  if (!EnterAfter || EnterAfter < BI.FirstInstr.getBaseIndex()) {
    LLVM_DEBUG(dbgs() << ", reload after interference.\n");
    //
    //    >>>>             Interference before def.
    //    |---o---o---|    Live-through, stack-in.
    //    ____=========    Enter IntvOut before first use.
    //
    selectIntv(IntvOut);
    SlotIndex Idx = enterIntvBefore(std::min(LSP, BI.FirstInstr));
    useIntv(Idx, Stop);
    assert((!EnterAfter || Idx >= EnterAfter) && "Interference");
    return;
  }

  // The interference is overlapping somewhere we wanted to use IntvOut. That
  // means we need to create a local interval that can be allocated a
  // different register.
  LLVM_DEBUG(dbgs() << ", interference overlaps uses.\n");
  //
  //    >>>>>>>          Interference overlapping uses.
  //    |---o---o---|    Live-through, stack-in.
  //    ____---======    Create local interval for interference range.
  //
  selectIntv(IntvOut);
  SlotIndex Idx = enterIntvAfter(EnterAfter);
  useIntv(Idx, Stop);
  assert((!EnterAfter || Idx >= EnterAfter) && "Interference");

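  // Create the local interval for the uses before Idx; it covers the range
  // from just before the first use until IntvOut takes over at Idx.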
  openIntv();
  SlotIndex From = enterIntvBefore(std::min(Idx, BI.FirstInstr));
  useIntv(From, Idx);
}

void SplitAnalysis::BlockInfo::print(raw_ostream &OS) const {
  OS << "{" << printMBBReference(*MBB) << ", "
     << "uses " << FirstInstr << " to " << LastInstr << ", "
     << "1st def " << FirstDef << ", "
     << (LiveIn ? "live in" : "dead in") << ", "
     << (LiveOut ? "live out" : "dead out") << "}";
}

void SplitAnalysis::BlockInfo::dump() const {
  print(dbgs());
  dbgs() << "\n";
}