//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//
12
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
#include <utility>
41
#define DEBUG_TYPE "target-reg-info"

using namespace llvm;
45
46static cl::opt<unsigned>
47 HugeSizeForSplit("huge-size-for-split", cl::Hidden,
48 cl::desc("A threshold of live range size which may cause "
49 "high compile time cost in global splitting."),
50 cl::init(Val: 5000));
51
52TargetRegisterInfo::TargetRegisterInfo(
53 const TargetRegisterInfoDesc *ID,
54 ArrayRef<const TargetRegisterClass *> RegisterClasses,
55 const char *SubRegIndexStrings, ArrayRef<uint32_t> SubRegIndexNameOffsets,
56 const SubRegCoveredBits *SubRegIdxRanges,
57 const LaneBitmask *SubRegIndexLaneMasks, LaneBitmask CoveringLanes,
58 const RegClassInfo *const RCInfos,
59 const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
60 : InfoDesc(ID), SubRegIndexStrings(SubRegIndexStrings),
61 SubRegIndexNameOffsets(SubRegIndexNameOffsets),
62 SubRegIdxRanges(SubRegIdxRanges),
63 SubRegIndexLaneMasks(SubRegIndexLaneMasks),
64 RegClassBegin(RegisterClasses.begin()),
65 RegClassEnd(RegisterClasses.end()), CoveringLanes(CoveringLanes),
66 RCInfos(RCInfos), RCVTLists(RCVTLists), HwMode(Mode) {}
67
68TargetRegisterInfo::~TargetRegisterInfo() = default;
69
70bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
71 const MachineFunction &MF, const LiveInterval &VirtReg) const {
72 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
73 const MachineRegisterInfo &MRI = MF.getRegInfo();
74 MachineInstr *MI = MRI.getUniqueVRegDef(Reg: VirtReg.reg());
75 if (MI && TII->isTriviallyReMaterializable(MI: *MI) &&
76 VirtReg.size() > HugeSizeForSplit)
77 return false;
78 return true;
79}
80
81void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
82 MCRegister Reg) const {
83 for (MCPhysReg SR : superregs_inclusive(Reg))
84 RegisterSet.set(SR);
85}
86
87bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
88 ArrayRef<MCPhysReg> Exceptions) const {
89 // Check that all super registers of reserved regs are reserved as well.
90 BitVector Checked(getNumRegs());
91 for (unsigned Reg : RegisterSet.set_bits()) {
92 if (Checked[Reg])
93 continue;
94 for (MCPhysReg SR : superregs(Reg)) {
95 if (!RegisterSet[SR] && !is_contained(Range&: Exceptions, Element: Reg)) {
96 dbgs() << "Error: Super register " << printReg(Reg: SR, TRI: this)
97 << " of reserved register " << printReg(Reg, TRI: this)
98 << " is not reserved.\n";
99 return false;
100 }
101
102 // We transitively check superregs. So we can remember this for later
103 // to avoid compiletime explosion in deep register hierarchies.
104 Checked.set(SR);
105 }
106 }
107 return true;
108}
109
110Printable llvm::printReg(Register Reg, const TargetRegisterInfo *TRI,
111 unsigned SubIdx, const MachineRegisterInfo *MRI) {
112 return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
113 if (!Reg)
114 OS << "$noreg";
115 else if (Reg.isStack())
116 OS << "SS#" << Reg.stackSlotIndex();
117 else if (Reg.isVirtual()) {
118 StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
119 if (Name != "") {
120 OS << '%' << Name;
121 } else {
122 OS << '%' << Reg.virtRegIndex();
123 }
124 } else if (!TRI)
125 OS << '$' << "physreg" << Reg.id();
126 else if (Reg < TRI->getNumRegs()) {
127 OS << '$';
128 printLowerCase(String: TRI->getName(RegNo: Reg), Out&: OS);
129 } else
130 llvm_unreachable("Register kind is unsupported.");
131
132 if (SubIdx) {
133 if (TRI)
134 OS << ':' << TRI->getSubRegIndexName(SubIdx);
135 else
136 OS << ":sub(" << SubIdx << ')';
137 }
138 });
139}
140
141Printable llvm::printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI) {
142 return Printable([Unit, TRI](raw_ostream &OS) {
143 // Generic printout when TRI is missing.
144 if (!TRI) {
145 OS << "Unit~" << static_cast<unsigned>(Unit);
146 return;
147 }
148
149 // Check for invalid register units.
150 if (static_cast<unsigned>(Unit) >= TRI->getNumRegUnits()) {
151 OS << "BadUnit~" << static_cast<unsigned>(Unit);
152 return;
153 }
154
155 // Normal units have at least one root.
156 MCRegUnitRootIterator Roots(Unit, TRI);
157 assert(Roots.isValid() && "Unit has no roots.");
158 OS << TRI->getName(RegNo: *Roots);
159 for (++Roots; Roots.isValid(); ++Roots)
160 OS << '~' << TRI->getName(RegNo: *Roots);
161 });
162}
163
164Printable llvm::printVRegOrUnit(VirtRegOrUnit VRegOrUnit,
165 const TargetRegisterInfo *TRI) {
166 return Printable([VRegOrUnit, TRI](raw_ostream &OS) {
167 if (VRegOrUnit.isVirtualReg()) {
168 OS << '%' << VRegOrUnit.asVirtualReg().virtRegIndex();
169 } else {
170 OS << printRegUnit(Unit: VRegOrUnit.asMCRegUnit(), TRI);
171 }
172 });
173}
174
175Printable llvm::printRegClassOrBank(Register Reg,
176 const MachineRegisterInfo &RegInfo,
177 const TargetRegisterInfo *TRI) {
178 return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
179 if (RegInfo.getRegClassOrNull(Reg))
180 OS << StringRef(TRI->getRegClassName(Class: RegInfo.getRegClass(Reg))).lower();
181 else if (RegInfo.getRegBankOrNull(Reg))
182 OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
183 else {
184 OS << "_";
185 assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
186 "Generic registers must have a valid type");
187 }
188 });
189}
190
191/// getAllocatableClass - Return the maximal subclass of the given register
192/// class that is alloctable, or NULL.
193const TargetRegisterClass *
194TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
195 if (!RC || RC->isAllocatable())
196 return RC;
197
198 for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
199 ++It) {
200 const TargetRegisterClass *SubRC = getRegClass(i: It.getID());
201 if (SubRC->isAllocatable())
202 return SubRC;
203 }
204 return nullptr;
205}
206
207template <typename TypeT>
208static const TargetRegisterClass *
209getMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg,
210 TypeT Ty) {
211 static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
212 assert(Reg.isPhysical() && "reg must be a physical register");
213
214 bool IsDefault = [&]() {
215 if constexpr (std::is_same_v<TypeT, MVT>)
216 return Ty == MVT::Other;
217 else
218 return !Ty.isValid();
219 }();
220
221 // Pick the most sub register class of the right type that contains
222 // this physreg.
223 const TargetRegisterClass *BestRC = nullptr;
224 for (const TargetRegisterClass *RC : TRI->regclasses()) {
225 if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) && RC->contains(Reg) &&
226 (!BestRC || BestRC->hasSubClass(RC)))
227 BestRC = RC;
228 }
229
230 if constexpr (std::is_same_v<TypeT, MVT>)
231 assert(BestRC && "Couldn't find the register class");
232 return BestRC;
233}
234
235template <typename TypeT>
236static const TargetRegisterClass *
237getCommonMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg1,
238 MCRegister Reg2, TypeT Ty) {
239 static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
240 assert(Reg1.isPhysical() && Reg2.isPhysical() &&
241 "Reg1/Reg2 must be a physical register");
242
243 bool IsDefault = [&]() {
244 if constexpr (std::is_same_v<TypeT, MVT>)
245 return Ty == MVT::Other;
246 else
247 return !Ty.isValid();
248 }();
249
250 // Pick the most sub register class of the right type that contains
251 // this physreg.
252 const TargetRegisterClass *BestRC = nullptr;
253 for (const TargetRegisterClass *RC : TRI->regclasses()) {
254 if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) &&
255 RC->contains(Reg1, Reg2) && (!BestRC || BestRC->hasSubClass(RC)))
256 BestRC = RC;
257 }
258
259 if constexpr (std::is_same_v<TypeT, MVT>)
260 assert(BestRC && "Couldn't find the register class");
261 return BestRC;
262}
263
264const TargetRegisterClass *
265TargetRegisterInfo::getMinimalPhysRegClass(MCRegister Reg, MVT VT) const {
266 return ::getMinimalPhysRegClass(TRI: this, Reg, Ty: VT);
267}
268
269const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClass(
270 MCRegister Reg1, MCRegister Reg2, MVT VT) const {
271 return ::getCommonMinimalPhysRegClass(TRI: this, Reg1, Reg2, Ty: VT);
272}
273
274const TargetRegisterClass *
275TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister Reg, LLT Ty) const {
276 return ::getMinimalPhysRegClass(TRI: this, Reg, Ty);
277}
278
279const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClassLLT(
280 MCRegister Reg1, MCRegister Reg2, LLT Ty) const {
281 return ::getCommonMinimalPhysRegClass(TRI: this, Reg1, Reg2, Ty);
282}
283
284/// getAllocatableSetForRC - Toggle the bits that represent allocatable
285/// registers for the specific register class.
286static void getAllocatableSetForRC(const MachineFunction &MF,
287 const TargetRegisterClass *RC, BitVector &R){
288 assert(RC->isAllocatable() && "invalid for nonallocatable sets");
289 ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
290 for (MCPhysReg PR : Order)
291 R.set(PR);
292}
293
294BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
295 const TargetRegisterClass *RC) const {
296 BitVector Allocatable(getNumRegs());
297 if (RC) {
298 // A register class with no allocatable subclass returns an empty set.
299 const TargetRegisterClass *SubClass = getAllocatableClass(RC);
300 if (SubClass)
301 getAllocatableSetForRC(MF, RC: SubClass, R&: Allocatable);
302 } else {
303 for (const TargetRegisterClass *C : regclasses())
304 if (C->isAllocatable())
305 getAllocatableSetForRC(MF, RC: C, R&: Allocatable);
306 }
307
308 // Mask out the reserved registers
309 const MachineRegisterInfo &MRI = MF.getRegInfo();
310 const BitVector &Reserved = MRI.getReservedRegs();
311 Allocatable.reset(RHS: Reserved);
312
313 return Allocatable;
314}
315
316static inline
317const TargetRegisterClass *firstCommonClass(const uint32_t *A,
318 const uint32_t *B,
319 const TargetRegisterInfo *TRI) {
320 for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
321 if (unsigned Common = *A++ & *B++)
322 return TRI->getRegClass(i: I + llvm::countr_zero(Val: Common));
323 return nullptr;
324}
325
326const TargetRegisterClass *
327TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
328 const TargetRegisterClass *B) const {
329 // First take care of the trivial cases.
330 if (A == B)
331 return A;
332 if (!A || !B)
333 return nullptr;
334
335 // Register classes are ordered topologically, so the largest common
336 // sub-class it the common sub-class with the smallest ID.
337 return firstCommonClass(A: A->getSubClassMask(), B: B->getSubClassMask(), TRI: this);
338}
339
340const TargetRegisterClass *
341TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
342 const TargetRegisterClass *B,
343 unsigned Idx) const {
344 assert(A && B && "Missing register class");
345 assert(Idx && "Bad sub-register index");
346
347 // Find Idx in the list of super-register indices.
348 for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
349 if (RCI.getSubReg() == Idx)
350 // The bit mask contains all register classes that are projected into B
351 // by Idx. Find a class that is also a sub-class of A.
352 return firstCommonClass(A: RCI.getMask(), B: A->getSubClassMask(), TRI: this);
353 return nullptr;
354}
355
356const TargetRegisterClass *TargetRegisterInfo::
357getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
358 const TargetRegisterClass *RCB, unsigned SubB,
359 unsigned &PreA, unsigned &PreB) const {
360 assert(RCA && SubA && RCB && SubB && "Invalid arguments");
361
362 // Search all pairs of sub-register indices that project into RCA and RCB
363 // respectively. This is quadratic, but usually the sets are very small. On
364 // most targets like X86, there will only be a single sub-register index
365 // (e.g., sub_16bit projecting into GR16).
366 //
367 // The worst case is a register class like DPR on ARM.
368 // We have indices dsub_0..dsub_7 projecting into that class.
369 //
370 // It is very common that one register class is a sub-register of the other.
371 // Arrange for RCA to be the larger register so the answer will be found in
372 // the first iteration. This makes the search linear for the most common
373 // case.
374 const TargetRegisterClass *BestRC = nullptr;
375 unsigned *BestPreA = &PreA;
376 unsigned *BestPreB = &PreB;
377 if (getRegSizeInBits(RC: *RCA) < getRegSizeInBits(RC: *RCB)) {
378 std::swap(a&: RCA, b&: RCB);
379 std::swap(a&: SubA, b&: SubB);
380 std::swap(a&: BestPreA, b&: BestPreB);
381 }
382
383 // Also terminate the search one we have found a register class as small as
384 // RCA.
385 unsigned MinSize = getRegSizeInBits(RC: *RCA);
386
387 for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
388 unsigned FinalA = composeSubRegIndices(a: IA.getSubReg(), b: SubA);
389 for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
390 // Check if a common super-register class exists for this index pair.
391 const TargetRegisterClass *RC =
392 firstCommonClass(A: IA.getMask(), B: IB.getMask(), TRI: this);
393 if (!RC || getRegSizeInBits(RC: *RC) < MinSize)
394 continue;
395
396 // The indexes must compose identically: PreA+SubA == PreB+SubB.
397 unsigned FinalB = composeSubRegIndices(a: IB.getSubReg(), b: SubB);
398 if (FinalA != FinalB)
399 continue;
400
401 // Is RC a better candidate than BestRC?
402 if (BestRC && getRegSizeInBits(RC: *RC) >= getRegSizeInBits(RC: *BestRC))
403 continue;
404
405 // Yes, RC is the smallest super-register seen so far.
406 BestRC = RC;
407 *BestPreA = IA.getSubReg();
408 *BestPreB = IB.getSubReg();
409
410 // Bail early if we reached MinSize. We won't find a better candidate.
411 if (getRegSizeInBits(RC: *BestRC) == MinSize)
412 return BestRC;
413 }
414 }
415 return BestRC;
416}
417
418const TargetRegisterClass *TargetRegisterInfo::findCommonRegClass(
419 const TargetRegisterClass *DefRC, unsigned DefSubReg,
420 const TargetRegisterClass *SrcRC, unsigned SrcSubReg) const {
421 // Same register class.
422 //
423 // When processing uncoalescable copies / bitcasts, it is possible we reach
424 // here with the same register class, but mismatched subregister indices.
425 if (DefRC == SrcRC && DefSubReg == SrcSubReg)
426 return DefRC;
427
428 // Both operands are sub registers. Check if they share a register class.
429 unsigned SrcIdx, DefIdx;
430 if (SrcSubReg && DefSubReg) {
431 return getCommonSuperRegClass(RCA: SrcRC, SubA: SrcSubReg, RCB: DefRC, SubB: DefSubReg, PreA&: SrcIdx,
432 PreB&: DefIdx);
433 }
434
435 // At most one of the register is a sub register, make it Src to avoid
436 // duplicating the test.
437 if (!SrcSubReg) {
438 std::swap(a&: DefSubReg, b&: SrcSubReg);
439 std::swap(a&: DefRC, b&: SrcRC);
440 }
441
442 // One of the register is a sub register, check if we can get a superclass.
443 if (SrcSubReg)
444 return getMatchingSuperRegClass(A: SrcRC, B: DefRC, Idx: SrcSubReg);
445
446 // Plain copy.
447 return getCommonSubClass(A: DefRC, B: SrcRC);
448}
449
450float TargetRegisterInfo::getSpillWeightScaleFactor(
451 const TargetRegisterClass *RC) const {
452 return 1.0;
453}
454
455// Compute target-independent register allocator hints to help eliminate copies.
456bool TargetRegisterInfo::getRegAllocationHints(
457 Register VirtReg, ArrayRef<MCPhysReg> Order,
458 SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
459 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
460 const MachineRegisterInfo &MRI = MF.getRegInfo();
461 const std::pair<unsigned, SmallVector<Register, 4>> *Hints_MRI =
462 MRI.getRegAllocationHints(VReg: VirtReg);
463
464 if (!Hints_MRI)
465 return false;
466
467 SmallSet<Register, 32> HintedRegs;
468 // First hint may be a target hint.
469 bool Skip = (Hints_MRI->first != 0);
470 for (auto Reg : Hints_MRI->second) {
471 if (Skip) {
472 Skip = false;
473 continue;
474 }
475
476 // Target-independent hints are either a physical or a virtual register.
477 Register Phys = Reg;
478 if (VRM && Phys.isVirtual())
479 Phys = VRM->getPhys(virtReg: Phys);
480
481 // Don't add the same reg twice (Hints_MRI may contain multiple virtual
482 // registers allocated to the same physreg).
483 if (!HintedRegs.insert(V: Phys).second)
484 continue;
485 // Check that Phys is a valid hint in VirtReg's register class.
486 if (!Phys.isPhysical())
487 continue;
488 if (MRI.isReserved(PhysReg: Phys))
489 continue;
490 // Check that Phys is in the allocation order. We shouldn't heed hints
491 // from VirtReg's register class if they aren't in the allocation order. The
492 // target probably has a reason for removing the register.
493 if (!is_contained(Range&: Order, Element: Phys))
494 continue;
495
496 // All clear, tell the register allocator to prefer this register.
497 Hints.push_back(Elt: Phys.id());
498 }
499 return false;
500}
501
502bool TargetRegisterInfo::isCalleeSavedPhysReg(
503 MCRegister PhysReg, const MachineFunction &MF) const {
504 if (!PhysReg)
505 return false;
506 const uint32_t *callerPreservedRegs =
507 getCallPreservedMask(MF, MF.getFunction().getCallingConv());
508 if (callerPreservedRegs) {
509 assert(PhysReg.isPhysical() && "Expected physical register");
510 return (callerPreservedRegs[PhysReg.id() / 32] >> PhysReg.id() % 32) & 1;
511 }
512 return false;
513}
514
515bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
516 return MF.getFrameInfo().isStackRealignable();
517}
518
519bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
520 return MF.getFrameInfo().shouldRealignStack();
521}
522
523bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
524 const uint32_t *mask1) const {
525 unsigned N = (getNumRegs()+31) / 32;
526 for (unsigned I = 0; I < N; ++I)
527 if ((mask0[I] & mask1[I]) != mask0[I])
528 return false;
529 return true;
530}
531
532TypeSize
533TargetRegisterInfo::getRegSizeInBits(Register Reg,
534 const MachineRegisterInfo &MRI) const {
535 const TargetRegisterClass *RC{};
536 if (Reg.isPhysical()) {
537 // The size is not directly available for physical registers.
538 // Instead, we need to access a register class that contains Reg and
539 // get the size of that register class.
540 RC = getMinimalPhysRegClass(Reg);
541 assert(RC && "Unable to deduce the register class");
542 return getRegSizeInBits(RC: *RC);
543 }
544 LLT Ty = MRI.getType(Reg);
545 if (Ty.isValid())
546 return Ty.getSizeInBits();
547
548 // Since Reg is not a generic register, it may have a register class.
549 RC = MRI.getRegClass(Reg);
550 assert(RC && "Unable to deduce the register class");
551 return getRegSizeInBits(RC: *RC);
552}
553
554bool TargetRegisterInfo::getCoveringSubRegIndexes(
555 const TargetRegisterClass *RC, LaneBitmask LaneMask,
556 SmallVectorImpl<unsigned> &NeededIndexes) const {
557 SmallVector<unsigned, 8> PossibleIndexes;
558 unsigned BestIdx = 0;
559 unsigned BestCover = 0;
560
561 for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
562 // Is this index even compatible with the given class?
563 if (!isSubRegValidForRegClass(RC, Idx))
564 continue;
565 LaneBitmask SubRegMask = getSubRegIndexLaneMask(SubIdx: Idx);
566 // Early exit if we found a perfect match.
567 if (SubRegMask == LaneMask) {
568 BestIdx = Idx;
569 break;
570 }
571
572 // The index must not cover any lanes outside \p LaneMask.
573 if ((SubRegMask & ~LaneMask).any())
574 continue;
575
576 unsigned PopCount = SubRegMask.getNumLanes();
577 PossibleIndexes.push_back(Elt: Idx);
578 if (PopCount > BestCover) {
579 BestCover = PopCount;
580 BestIdx = Idx;
581 }
582 }
583
584 // Abort if we cannot possibly implement the COPY with the given indexes.
585 if (BestIdx == 0)
586 return false;
587
588 NeededIndexes.push_back(Elt: BestIdx);
589
590 // Greedy heuristic: Keep iterating keeping the best covering subreg index
591 // each time.
592 LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(SubIdx: BestIdx);
593 while (LanesLeft.any()) {
594 unsigned BestIdx = 0;
595 int BestCover = std::numeric_limits<int>::min();
596 for (unsigned Idx : PossibleIndexes) {
597 LaneBitmask SubRegMask = getSubRegIndexLaneMask(SubIdx: Idx);
598 // Early exit if we found a perfect match.
599 if (SubRegMask == LanesLeft) {
600 BestIdx = Idx;
601 break;
602 }
603
604 // Do not cover already-covered lanes to avoid creating cycles
605 // in copy bundles (= bundle contains copies that write to the
606 // registers).
607 if ((SubRegMask & ~LanesLeft).any())
608 continue;
609
610 // Try to cover as many of the remaining lanes as possible.
611 const int Cover = (SubRegMask & LanesLeft).getNumLanes();
612 if (Cover > BestCover) {
613 BestCover = Cover;
614 BestIdx = Idx;
615 }
616 }
617
618 if (BestIdx == 0)
619 return false; // Impossible to handle
620
621 NeededIndexes.push_back(Elt: BestIdx);
622
623 LanesLeft &= ~getSubRegIndexLaneMask(SubIdx: BestIdx);
624 }
625
626 return BestIdx;
627}
628
629unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
630 assert(Idx && Idx < getNumSubRegIndices() &&
631 "This is not a subregister index");
632 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
633}
634
635unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
636 assert(Idx && Idx < getNumSubRegIndices() &&
637 "This is not a subregister index");
638 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
639}
640
641Register
642TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
643 const MachineRegisterInfo *MRI) const {
644 while (true) {
645 const MachineInstr *MI = MRI->getVRegDef(Reg: SrcReg);
646 if (!MI->isCopyLike())
647 return SrcReg;
648
649 Register CopySrcReg;
650 if (MI->isCopy())
651 CopySrcReg = MI->getOperand(i: 1).getReg();
652 else {
653 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
654 CopySrcReg = MI->getOperand(i: 1).getReg();
655 }
656
657 if (!CopySrcReg.isVirtual())
658 return CopySrcReg;
659
660 SrcReg = CopySrcReg;
661 }
662}
663
664Register TargetRegisterInfo::lookThruSingleUseCopyChain(
665 Register SrcReg, const MachineRegisterInfo *MRI) const {
666 while (true) {
667 const MachineInstr *MI = MRI->getVRegDef(Reg: SrcReg);
668 // Found the real definition, return it if it has a single use.
669 if (!MI->isCopyLike())
670 return MRI->hasOneNonDBGUse(RegNo: SrcReg) ? SrcReg : Register();
671
672 Register CopySrcReg;
673 if (MI->isCopy())
674 CopySrcReg = MI->getOperand(i: 1).getReg();
675 else {
676 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
677 CopySrcReg = MI->getOperand(i: 1).getReg();
678 }
679
680 // Continue only if the next definition in the chain is for a virtual
681 // register that has a single use.
682 if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(RegNo: CopySrcReg))
683 return Register();
684
685 SrcReg = CopySrcReg;
686 }
687}
688
689void TargetRegisterInfo::getOffsetOpcodes(
690 const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
691 assert(!Offset.getScalable() && "Scalable offsets are not handled");
692 DIExpression::appendOffset(Ops, Offset: Offset.getFixed());
693}
694
695DIExpression *
696TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
697 unsigned PrependFlags,
698 const StackOffset &Offset) const {
699 assert((PrependFlags &
700 ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
701 DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
702 "Unsupported prepend flag");
703 SmallVector<uint64_t, 16> OffsetExpr;
704 if (PrependFlags & DIExpression::DerefBefore)
705 OffsetExpr.push_back(Elt: dwarf::DW_OP_deref);
706 getOffsetOpcodes(Offset, Ops&: OffsetExpr);
707 if (PrependFlags & DIExpression::DerefAfter)
708 OffsetExpr.push_back(Elt: dwarf::DW_OP_deref);
709 return DIExpression::prependOpcodes(Expr, Ops&: OffsetExpr,
710 StackValue: PrependFlags & DIExpression::StackValue,
711 EntryValue: PrependFlags & DIExpression::EntryValue);
712}
713
714#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
715LLVM_DUMP_METHOD
716void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
717 const TargetRegisterInfo *TRI) {
718 dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
719}
720#endif
721