//===- TargetRegisterInfo.cpp - Target Register Information Implementation ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//
12
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
#include <utility>
41
42#define DEBUG_TYPE "target-reg-info"
43
44using namespace llvm;
45
46static cl::opt<unsigned>
47 HugeSizeForSplit("huge-size-for-split", cl::Hidden,
48 cl::desc("A threshold of live range size which may cause "
49 "high compile time cost in global splitting."),
50 cl::init(Val: 5000));
51
52TargetRegisterInfo::TargetRegisterInfo(
53 const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
54 regclass_iterator RCE, const char *SRIStrings,
55 ArrayRef<uint32_t> SRINameOffsets, const SubRegCoveredBits *SubIdxRanges,
56 const LaneBitmask *SRILaneMasks, LaneBitmask SRICoveringLanes,
57 const RegClassInfo *const RCIs, const MVT::SimpleValueType *const RCVTLists,
58 unsigned Mode)
59 : InfoDesc(ID), SubRegIndexStrings(SRIStrings),
60 SubRegIndexNameOffsets(SRINameOffsets), SubRegIdxRanges(SubIdxRanges),
61 SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
62 CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
63 HwMode(Mode) {}
64
65TargetRegisterInfo::~TargetRegisterInfo() = default;
66
67bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
68 const MachineFunction &MF, const LiveInterval &VirtReg) const {
69 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
70 const MachineRegisterInfo &MRI = MF.getRegInfo();
71 MachineInstr *MI = MRI.getUniqueVRegDef(Reg: VirtReg.reg());
72 if (MI && TII->isTriviallyReMaterializable(MI: *MI) &&
73 VirtReg.size() > HugeSizeForSplit)
74 return false;
75 return true;
76}
77
78void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
79 MCRegister Reg) const {
80 for (MCPhysReg SR : superregs_inclusive(Reg))
81 RegisterSet.set(SR);
82}
83
84bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
85 ArrayRef<MCPhysReg> Exceptions) const {
86 // Check that all super registers of reserved regs are reserved as well.
87 BitVector Checked(getNumRegs());
88 for (unsigned Reg : RegisterSet.set_bits()) {
89 if (Checked[Reg])
90 continue;
91 for (MCPhysReg SR : superregs(Reg)) {
92 if (!RegisterSet[SR] && !is_contained(Range&: Exceptions, Element: Reg)) {
93 dbgs() << "Error: Super register " << printReg(Reg: SR, TRI: this)
94 << " of reserved register " << printReg(Reg, TRI: this)
95 << " is not reserved.\n";
96 return false;
97 }
98
99 // We transitively check superregs. So we can remember this for later
100 // to avoid compiletime explosion in deep register hierarchies.
101 Checked.set(SR);
102 }
103 }
104 return true;
105}
106
107Printable llvm::printReg(Register Reg, const TargetRegisterInfo *TRI,
108 unsigned SubIdx, const MachineRegisterInfo *MRI) {
109 return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
110 if (!Reg)
111 OS << "$noreg";
112 else if (Reg.isStack())
113 OS << "SS#" << Reg.stackSlotIndex();
114 else if (Reg.isVirtual()) {
115 StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
116 if (Name != "") {
117 OS << '%' << Name;
118 } else {
119 OS << '%' << Reg.virtRegIndex();
120 }
121 } else if (!TRI)
122 OS << '$' << "physreg" << Reg.id();
123 else if (Reg < TRI->getNumRegs()) {
124 OS << '$';
125 printLowerCase(String: TRI->getName(RegNo: Reg), Out&: OS);
126 } else
127 llvm_unreachable("Register kind is unsupported.");
128
129 if (SubIdx) {
130 if (TRI)
131 OS << ':' << TRI->getSubRegIndexName(SubIdx);
132 else
133 OS << ":sub(" << SubIdx << ')';
134 }
135 });
136}
137
138Printable llvm::printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI) {
139 return Printable([Unit, TRI](raw_ostream &OS) {
140 // Generic printout when TRI is missing.
141 if (!TRI) {
142 OS << "Unit~" << static_cast<unsigned>(Unit);
143 return;
144 }
145
146 // Check for invalid register units.
147 if (static_cast<unsigned>(Unit) >= TRI->getNumRegUnits()) {
148 OS << "BadUnit~" << static_cast<unsigned>(Unit);
149 return;
150 }
151
152 // Normal units have at least one root.
153 MCRegUnitRootIterator Roots(Unit, TRI);
154 assert(Roots.isValid() && "Unit has no roots.");
155 OS << TRI->getName(RegNo: *Roots);
156 for (++Roots; Roots.isValid(); ++Roots)
157 OS << '~' << TRI->getName(RegNo: *Roots);
158 });
159}
160
161Printable llvm::printVRegOrUnit(VirtRegOrUnit VRegOrUnit,
162 const TargetRegisterInfo *TRI) {
163 return Printable([VRegOrUnit, TRI](raw_ostream &OS) {
164 if (VRegOrUnit.isVirtualReg()) {
165 OS << '%' << VRegOrUnit.asVirtualReg().virtRegIndex();
166 } else {
167 OS << printRegUnit(Unit: VRegOrUnit.asMCRegUnit(), TRI);
168 }
169 });
170}
171
172Printable llvm::printRegClassOrBank(Register Reg,
173 const MachineRegisterInfo &RegInfo,
174 const TargetRegisterInfo *TRI) {
175 return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
176 if (RegInfo.getRegClassOrNull(Reg))
177 OS << StringRef(TRI->getRegClassName(Class: RegInfo.getRegClass(Reg))).lower();
178 else if (RegInfo.getRegBankOrNull(Reg))
179 OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
180 else {
181 OS << "_";
182 assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
183 "Generic registers must have a valid type");
184 }
185 });
186}
187
188/// getAllocatableClass - Return the maximal subclass of the given register
189/// class that is alloctable, or NULL.
190const TargetRegisterClass *
191TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
192 if (!RC || RC->isAllocatable())
193 return RC;
194
195 for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
196 ++It) {
197 const TargetRegisterClass *SubRC = getRegClass(i: It.getID());
198 if (SubRC->isAllocatable())
199 return SubRC;
200 }
201 return nullptr;
202}
203
204template <typename TypeT>
205static const TargetRegisterClass *
206getMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg,
207 TypeT Ty) {
208 static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
209 assert(Reg.isPhysical() && "reg must be a physical register");
210
211 bool IsDefault = [&]() {
212 if constexpr (std::is_same_v<TypeT, MVT>)
213 return Ty == MVT::Other;
214 else
215 return !Ty.isValid();
216 }();
217
218 // Pick the most sub register class of the right type that contains
219 // this physreg.
220 const TargetRegisterClass *BestRC = nullptr;
221 for (const TargetRegisterClass *RC : TRI->regclasses()) {
222 if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) && RC->contains(Reg) &&
223 (!BestRC || BestRC->hasSubClass(RC)))
224 BestRC = RC;
225 }
226
227 if constexpr (std::is_same_v<TypeT, MVT>)
228 assert(BestRC && "Couldn't find the register class");
229 return BestRC;
230}
231
232template <typename TypeT>
233static const TargetRegisterClass *
234getCommonMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg1,
235 MCRegister Reg2, TypeT Ty) {
236 static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
237 assert(Reg1.isPhysical() && Reg2.isPhysical() &&
238 "Reg1/Reg2 must be a physical register");
239
240 bool IsDefault = [&]() {
241 if constexpr (std::is_same_v<TypeT, MVT>)
242 return Ty == MVT::Other;
243 else
244 return !Ty.isValid();
245 }();
246
247 // Pick the most sub register class of the right type that contains
248 // this physreg.
249 const TargetRegisterClass *BestRC = nullptr;
250 for (const TargetRegisterClass *RC : TRI->regclasses()) {
251 if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) &&
252 RC->contains(Reg1, Reg2) && (!BestRC || BestRC->hasSubClass(RC)))
253 BestRC = RC;
254 }
255
256 if constexpr (std::is_same_v<TypeT, MVT>)
257 assert(BestRC && "Couldn't find the register class");
258 return BestRC;
259}
260
261const TargetRegisterClass *
262TargetRegisterInfo::getMinimalPhysRegClass(MCRegister Reg, MVT VT) const {
263 return ::getMinimalPhysRegClass(TRI: this, Reg, Ty: VT);
264}
265
266const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClass(
267 MCRegister Reg1, MCRegister Reg2, MVT VT) const {
268 return ::getCommonMinimalPhysRegClass(TRI: this, Reg1, Reg2, Ty: VT);
269}
270
271const TargetRegisterClass *
272TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister Reg, LLT Ty) const {
273 return ::getMinimalPhysRegClass(TRI: this, Reg, Ty);
274}
275
276const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClassLLT(
277 MCRegister Reg1, MCRegister Reg2, LLT Ty) const {
278 return ::getCommonMinimalPhysRegClass(TRI: this, Reg1, Reg2, Ty);
279}
280
281/// getAllocatableSetForRC - Toggle the bits that represent allocatable
282/// registers for the specific register class.
283static void getAllocatableSetForRC(const MachineFunction &MF,
284 const TargetRegisterClass *RC, BitVector &R){
285 assert(RC->isAllocatable() && "invalid for nonallocatable sets");
286 ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
287 for (MCPhysReg PR : Order)
288 R.set(PR);
289}
290
291BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
292 const TargetRegisterClass *RC) const {
293 BitVector Allocatable(getNumRegs());
294 if (RC) {
295 // A register class with no allocatable subclass returns an empty set.
296 const TargetRegisterClass *SubClass = getAllocatableClass(RC);
297 if (SubClass)
298 getAllocatableSetForRC(MF, RC: SubClass, R&: Allocatable);
299 } else {
300 for (const TargetRegisterClass *C : regclasses())
301 if (C->isAllocatable())
302 getAllocatableSetForRC(MF, RC: C, R&: Allocatable);
303 }
304
305 // Mask out the reserved registers
306 const MachineRegisterInfo &MRI = MF.getRegInfo();
307 const BitVector &Reserved = MRI.getReservedRegs();
308 Allocatable.reset(RHS: Reserved);
309
310 return Allocatable;
311}
312
313static inline
314const TargetRegisterClass *firstCommonClass(const uint32_t *A,
315 const uint32_t *B,
316 const TargetRegisterInfo *TRI) {
317 for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
318 if (unsigned Common = *A++ & *B++)
319 return TRI->getRegClass(i: I + llvm::countr_zero(Val: Common));
320 return nullptr;
321}
322
323const TargetRegisterClass *
324TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
325 const TargetRegisterClass *B) const {
326 // First take care of the trivial cases.
327 if (A == B)
328 return A;
329 if (!A || !B)
330 return nullptr;
331
332 // Register classes are ordered topologically, so the largest common
333 // sub-class it the common sub-class with the smallest ID.
334 return firstCommonClass(A: A->getSubClassMask(), B: B->getSubClassMask(), TRI: this);
335}
336
337const TargetRegisterClass *
338TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
339 const TargetRegisterClass *B,
340 unsigned Idx) const {
341 assert(A && B && "Missing register class");
342 assert(Idx && "Bad sub-register index");
343
344 // Find Idx in the list of super-register indices.
345 for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
346 if (RCI.getSubReg() == Idx)
347 // The bit mask contains all register classes that are projected into B
348 // by Idx. Find a class that is also a sub-class of A.
349 return firstCommonClass(A: RCI.getMask(), B: A->getSubClassMask(), TRI: this);
350 return nullptr;
351}
352
353const TargetRegisterClass *TargetRegisterInfo::
354getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
355 const TargetRegisterClass *RCB, unsigned SubB,
356 unsigned &PreA, unsigned &PreB) const {
357 assert(RCA && SubA && RCB && SubB && "Invalid arguments");
358
359 // Search all pairs of sub-register indices that project into RCA and RCB
360 // respectively. This is quadratic, but usually the sets are very small. On
361 // most targets like X86, there will only be a single sub-register index
362 // (e.g., sub_16bit projecting into GR16).
363 //
364 // The worst case is a register class like DPR on ARM.
365 // We have indices dsub_0..dsub_7 projecting into that class.
366 //
367 // It is very common that one register class is a sub-register of the other.
368 // Arrange for RCA to be the larger register so the answer will be found in
369 // the first iteration. This makes the search linear for the most common
370 // case.
371 const TargetRegisterClass *BestRC = nullptr;
372 unsigned *BestPreA = &PreA;
373 unsigned *BestPreB = &PreB;
374 if (getRegSizeInBits(RC: *RCA) < getRegSizeInBits(RC: *RCB)) {
375 std::swap(a&: RCA, b&: RCB);
376 std::swap(a&: SubA, b&: SubB);
377 std::swap(a&: BestPreA, b&: BestPreB);
378 }
379
380 // Also terminate the search one we have found a register class as small as
381 // RCA.
382 unsigned MinSize = getRegSizeInBits(RC: *RCA);
383
384 for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
385 unsigned FinalA = composeSubRegIndices(a: IA.getSubReg(), b: SubA);
386 for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
387 // Check if a common super-register class exists for this index pair.
388 const TargetRegisterClass *RC =
389 firstCommonClass(A: IA.getMask(), B: IB.getMask(), TRI: this);
390 if (!RC || getRegSizeInBits(RC: *RC) < MinSize)
391 continue;
392
393 // The indexes must compose identically: PreA+SubA == PreB+SubB.
394 unsigned FinalB = composeSubRegIndices(a: IB.getSubReg(), b: SubB);
395 if (FinalA != FinalB)
396 continue;
397
398 // Is RC a better candidate than BestRC?
399 if (BestRC && getRegSizeInBits(RC: *RC) >= getRegSizeInBits(RC: *BestRC))
400 continue;
401
402 // Yes, RC is the smallest super-register seen so far.
403 BestRC = RC;
404 *BestPreA = IA.getSubReg();
405 *BestPreB = IB.getSubReg();
406
407 // Bail early if we reached MinSize. We won't find a better candidate.
408 if (getRegSizeInBits(RC: *BestRC) == MinSize)
409 return BestRC;
410 }
411 }
412 return BestRC;
413}
414
415const TargetRegisterClass *TargetRegisterInfo::findCommonRegClass(
416 const TargetRegisterClass *DefRC, unsigned DefSubReg,
417 const TargetRegisterClass *SrcRC, unsigned SrcSubReg) const {
418 // Same register class.
419 //
420 // When processing uncoalescable copies / bitcasts, it is possible we reach
421 // here with the same register class, but mismatched subregister indices.
422 if (DefRC == SrcRC && DefSubReg == SrcSubReg)
423 return DefRC;
424
425 // Both operands are sub registers. Check if they share a register class.
426 unsigned SrcIdx, DefIdx;
427 if (SrcSubReg && DefSubReg) {
428 return getCommonSuperRegClass(RCA: SrcRC, SubA: SrcSubReg, RCB: DefRC, SubB: DefSubReg, PreA&: SrcIdx,
429 PreB&: DefIdx);
430 }
431
432 // At most one of the register is a sub register, make it Src to avoid
433 // duplicating the test.
434 if (!SrcSubReg) {
435 std::swap(a&: DefSubReg, b&: SrcSubReg);
436 std::swap(a&: DefRC, b&: SrcRC);
437 }
438
439 // One of the register is a sub register, check if we can get a superclass.
440 if (SrcSubReg)
441 return getMatchingSuperRegClass(A: SrcRC, B: DefRC, Idx: SrcSubReg);
442
443 // Plain copy.
444 return getCommonSubClass(A: DefRC, B: SrcRC);
445}
446
447float TargetRegisterInfo::getSpillWeightScaleFactor(
448 const TargetRegisterClass *RC) const {
449 return 1.0;
450}
451
452// Compute target-independent register allocator hints to help eliminate copies.
453bool TargetRegisterInfo::getRegAllocationHints(
454 Register VirtReg, ArrayRef<MCPhysReg> Order,
455 SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
456 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
457 const MachineRegisterInfo &MRI = MF.getRegInfo();
458 const std::pair<unsigned, SmallVector<Register, 4>> *Hints_MRI =
459 MRI.getRegAllocationHints(VReg: VirtReg);
460
461 if (!Hints_MRI)
462 return false;
463
464 SmallSet<Register, 32> HintedRegs;
465 // First hint may be a target hint.
466 bool Skip = (Hints_MRI->first != 0);
467 for (auto Reg : Hints_MRI->second) {
468 if (Skip) {
469 Skip = false;
470 continue;
471 }
472
473 // Target-independent hints are either a physical or a virtual register.
474 Register Phys = Reg;
475 if (VRM && Phys.isVirtual())
476 Phys = VRM->getPhys(virtReg: Phys);
477
478 // Don't add the same reg twice (Hints_MRI may contain multiple virtual
479 // registers allocated to the same physreg).
480 if (!HintedRegs.insert(V: Phys).second)
481 continue;
482 // Check that Phys is a valid hint in VirtReg's register class.
483 if (!Phys.isPhysical())
484 continue;
485 if (MRI.isReserved(PhysReg: Phys))
486 continue;
487 // Check that Phys is in the allocation order. We shouldn't heed hints
488 // from VirtReg's register class if they aren't in the allocation order. The
489 // target probably has a reason for removing the register.
490 if (!is_contained(Range&: Order, Element: Phys))
491 continue;
492
493 // All clear, tell the register allocator to prefer this register.
494 Hints.push_back(Elt: Phys.id());
495 }
496 return false;
497}
498
499bool TargetRegisterInfo::isCalleeSavedPhysReg(
500 MCRegister PhysReg, const MachineFunction &MF) const {
501 if (!PhysReg)
502 return false;
503 const uint32_t *callerPreservedRegs =
504 getCallPreservedMask(MF, MF.getFunction().getCallingConv());
505 if (callerPreservedRegs) {
506 assert(PhysReg.isPhysical() && "Expected physical register");
507 return (callerPreservedRegs[PhysReg.id() / 32] >> PhysReg.id() % 32) & 1;
508 }
509 return false;
510}
511
512bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
513 return MF.getFrameInfo().isStackRealignable();
514}
515
516bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
517 return MF.getFrameInfo().shouldRealignStack();
518}
519
520bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
521 const uint32_t *mask1) const {
522 unsigned N = (getNumRegs()+31) / 32;
523 for (unsigned I = 0; I < N; ++I)
524 if ((mask0[I] & mask1[I]) != mask0[I])
525 return false;
526 return true;
527}
528
529TypeSize
530TargetRegisterInfo::getRegSizeInBits(Register Reg,
531 const MachineRegisterInfo &MRI) const {
532 const TargetRegisterClass *RC{};
533 if (Reg.isPhysical()) {
534 // The size is not directly available for physical registers.
535 // Instead, we need to access a register class that contains Reg and
536 // get the size of that register class.
537 RC = getMinimalPhysRegClass(Reg);
538 assert(RC && "Unable to deduce the register class");
539 return getRegSizeInBits(RC: *RC);
540 }
541 LLT Ty = MRI.getType(Reg);
542 if (Ty.isValid())
543 return Ty.getSizeInBits();
544
545 // Since Reg is not a generic register, it may have a register class.
546 RC = MRI.getRegClass(Reg);
547 assert(RC && "Unable to deduce the register class");
548 return getRegSizeInBits(RC: *RC);
549}
550
551bool TargetRegisterInfo::getCoveringSubRegIndexes(
552 const TargetRegisterClass *RC, LaneBitmask LaneMask,
553 SmallVectorImpl<unsigned> &NeededIndexes) const {
554 SmallVector<unsigned, 8> PossibleIndexes;
555 unsigned BestIdx = 0;
556 unsigned BestCover = 0;
557
558 for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
559 // Is this index even compatible with the given class?
560 if (!isSubRegValidForRegClass(RC, Idx))
561 continue;
562 LaneBitmask SubRegMask = getSubRegIndexLaneMask(SubIdx: Idx);
563 // Early exit if we found a perfect match.
564 if (SubRegMask == LaneMask) {
565 BestIdx = Idx;
566 break;
567 }
568
569 // The index must not cover any lanes outside \p LaneMask.
570 if ((SubRegMask & ~LaneMask).any())
571 continue;
572
573 unsigned PopCount = SubRegMask.getNumLanes();
574 PossibleIndexes.push_back(Elt: Idx);
575 if (PopCount > BestCover) {
576 BestCover = PopCount;
577 BestIdx = Idx;
578 }
579 }
580
581 // Abort if we cannot possibly implement the COPY with the given indexes.
582 if (BestIdx == 0)
583 return false;
584
585 NeededIndexes.push_back(Elt: BestIdx);
586
587 // Greedy heuristic: Keep iterating keeping the best covering subreg index
588 // each time.
589 LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(SubIdx: BestIdx);
590 while (LanesLeft.any()) {
591 unsigned BestIdx = 0;
592 int BestCover = std::numeric_limits<int>::min();
593 for (unsigned Idx : PossibleIndexes) {
594 LaneBitmask SubRegMask = getSubRegIndexLaneMask(SubIdx: Idx);
595 // Early exit if we found a perfect match.
596 if (SubRegMask == LanesLeft) {
597 BestIdx = Idx;
598 break;
599 }
600
601 // Do not cover already-covered lanes to avoid creating cycles
602 // in copy bundles (= bundle contains copies that write to the
603 // registers).
604 if ((SubRegMask & ~LanesLeft).any())
605 continue;
606
607 // Try to cover as many of the remaining lanes as possible.
608 const int Cover = (SubRegMask & LanesLeft).getNumLanes();
609 if (Cover > BestCover) {
610 BestCover = Cover;
611 BestIdx = Idx;
612 }
613 }
614
615 if (BestIdx == 0)
616 return false; // Impossible to handle
617
618 NeededIndexes.push_back(Elt: BestIdx);
619
620 LanesLeft &= ~getSubRegIndexLaneMask(SubIdx: BestIdx);
621 }
622
623 return BestIdx;
624}
625
626unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
627 assert(Idx && Idx < getNumSubRegIndices() &&
628 "This is not a subregister index");
629 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
630}
631
632unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
633 assert(Idx && Idx < getNumSubRegIndices() &&
634 "This is not a subregister index");
635 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
636}
637
638Register
639TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
640 const MachineRegisterInfo *MRI) const {
641 while (true) {
642 const MachineInstr *MI = MRI->getVRegDef(Reg: SrcReg);
643 if (!MI->isCopyLike())
644 return SrcReg;
645
646 Register CopySrcReg;
647 if (MI->isCopy())
648 CopySrcReg = MI->getOperand(i: 1).getReg();
649 else {
650 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
651 CopySrcReg = MI->getOperand(i: 1).getReg();
652 }
653
654 if (!CopySrcReg.isVirtual())
655 return CopySrcReg;
656
657 SrcReg = CopySrcReg;
658 }
659}
660
661Register TargetRegisterInfo::lookThruSingleUseCopyChain(
662 Register SrcReg, const MachineRegisterInfo *MRI) const {
663 while (true) {
664 const MachineInstr *MI = MRI->getVRegDef(Reg: SrcReg);
665 // Found the real definition, return it if it has a single use.
666 if (!MI->isCopyLike())
667 return MRI->hasOneNonDBGUse(RegNo: SrcReg) ? SrcReg : Register();
668
669 Register CopySrcReg;
670 if (MI->isCopy())
671 CopySrcReg = MI->getOperand(i: 1).getReg();
672 else {
673 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
674 CopySrcReg = MI->getOperand(i: 1).getReg();
675 }
676
677 // Continue only if the next definition in the chain is for a virtual
678 // register that has a single use.
679 if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(RegNo: CopySrcReg))
680 return Register();
681
682 SrcReg = CopySrcReg;
683 }
684}
685
686void TargetRegisterInfo::getOffsetOpcodes(
687 const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
688 assert(!Offset.getScalable() && "Scalable offsets are not handled");
689 DIExpression::appendOffset(Ops, Offset: Offset.getFixed());
690}
691
692DIExpression *
693TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
694 unsigned PrependFlags,
695 const StackOffset &Offset) const {
696 assert((PrependFlags &
697 ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
698 DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
699 "Unsupported prepend flag");
700 SmallVector<uint64_t, 16> OffsetExpr;
701 if (PrependFlags & DIExpression::DerefBefore)
702 OffsetExpr.push_back(Elt: dwarf::DW_OP_deref);
703 getOffsetOpcodes(Offset, Ops&: OffsetExpr);
704 if (PrependFlags & DIExpression::DerefAfter)
705 OffsetExpr.push_back(Elt: dwarf::DW_OP_deref);
706 return DIExpression::prependOpcodes(Expr, Ops&: OffsetExpr,
707 StackValue: PrependFlags & DIExpression::StackValue,
708 EntryValue: PrependFlags & DIExpression::EntryValue);
709}
710
711#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
712LLVM_DUMP_METHOD
713void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
714 const TargetRegisterInfo *TRI) {
715 dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
716}
717#endif
718