//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Methods common to all machine instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

using namespace llvm;

static cl::opt<bool>
    PrintMIAddrs("print-mi-addrs", cl::Hidden,
                 cl::desc("Print addresses of MachineInstrs when dumping"));

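// Walk up the parent chain to find the enclosing MachineFunction, or return
// null if this instruction is not inserted in a basic block.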
static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
  if (const MachineBasicBlock *MBB = MI.getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF;
  return nullptr;
}

// Try to crawl up to the machine function and get TRI/MRI/TII from it.
static void tryToGetTargetInfo(const MachineInstr &MI,
                               const TargetRegisterInfo *&TRI,
                               const MachineRegisterInfo *&MRI,
                               const TargetInstrInfo *&TII) {
  if (const MachineFunction *MF = getMFIfAvailable(MI)) {
    TRI = MF->getSubtarget().getRegisterInfo();
    MRI = &MF->getRegInfo();
    TII = MF->getSubtarget().getInstrInfo();
  }
}

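// Append all implicit def and use register operands declared by this
// instruction's MCInstrDesc. Called from the constructor unless NoImp is set.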
void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
  for (MCPhysReg ImpDef : MCID->implicit_defs())
    addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
  for (MCPhysReg ImpUse : MCID->implicit_uses())
    addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
}

/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified
/// by the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
                           DebugLoc DL, bool NoImp)
    : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands.
  if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
                        MCID->implicit_uses().size()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  if (!NoImp)
    addImplicitDefUseOperands(MF);
}

/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Does not copy the number from debug instruction numbering, to preserve
/// uniqueness.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
      Opcode(MI.getOpcode()) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Replicate ties between the operands, which addOperand was not
  // able to do reliably.
  for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
    MachineOperand &NewMO = getOperand(i);
    const MachineOperand &OrigMO = MI.getOperand(i);
    NewMO.TiedTo = OrigMO.TiedTo;
  }

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}

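// Replace this instruction's descriptor (and therefore its opcode), giving
// the owning MachineFunction a chance to react to the change first.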
void MachineInstr::setDesc(const MCInstrDesc &TID) {
  if (getParent())
    getMF()->handleChangeDesc(*this, TID);
  MCID = &TID;
  Opcode = TID.Opcode;
}

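// Unlink this instruction from its current basic block and insert it
// immediately before MovePos, which may live in a different block.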
void MachineInstr::moveBefore(MachineInstr *MovePos) {
  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
}

/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
  if (MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

const MachineRegisterInfo *MachineInstr::getRegInfo() const {
  if (const MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

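// The next two helpers detach and reattach all register operands from the
// MachineRegisterInfo use-def lists, e.g. around operations that move the
// operand array in memory.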
void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.removeRegOperandFromUseList(&MO);
}

void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.addRegOperandToUseList(&MO);
}

void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  addOperand(*MF, Op);
}

/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);
  // MachineOperand is a trivially copyable type so we can just use memmove.
  assert(Dst && Src && "Unknown operands");
  std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}

/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
         "Cannot add more operands.");
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand. Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around. See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

  // OpNo now points at the desired insertion point. Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
    // Ensure debug instructions set debug flag on register uses.
    if (NewMO->isUse() && isDebugInstr())
      NewMO->setIsDebug();
  }
}

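// Erase the operand at OpNo, shifting all following operands down. Any tie on
// the removed operand is dropped first; ties on later operands would be
// invalidated by the shift and are rejected by the assertions below.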
void MachineInstr::removeOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}

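// Store the packed "extra info" for this instruction. A single memory operand
// or symbol is stored inline in the pointer-sum-typed Info field; any richer
// combination is allocated out of line in the MachineFunction.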
void MachineInstr::setExtraInfo(MachineFunction &MF,
                                ArrayRef<MachineMemOperand *> MMOs,
                                MCSymbol *PreInstrSymbol,
                                MCSymbol *PostInstrSymbol,
                                MDNode *HeapAllocMarker, MDNode *PCSections,
                                uint32_t CFIType, MDNode *MMRAs) {
  bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
  bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
  bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
  bool HasPCSections = PCSections != nullptr;
  bool HasCFIType = CFIType != 0;
  bool HasMMRAs = MMRAs != nullptr;
  int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
                    HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;

  // Drop all extra info if there is none.
  if (NumPointers <= 0) {
    Info.clear();
    return;
  }

  // If more than one pointer, then store out of line. Store heap alloc markers
  // out of line because PointerSumType cannot hold more than 4 tag types with
  // 32-bit pointers.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
           HasCFIType) {
    Info.set<EIIK_OutOfLine>(
        MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
                             HeapAllocMarker, PCSections, CFIType, MMRAs));
    return;
  }

  // Otherwise store the single pointer inline.
  if (HasPreInstrSymbol)
    Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
  else if (HasPostInstrSymbol)
    Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
  else
    Info.set<EIIK_MMO>(MMOs[0]);
}

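// Drop all memory operands while preserving the remaining extra info.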
void MachineInstr::dropMemRefs(MachineFunction &MF) {
  if (memoperands_empty())
    return;

  setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

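// Replace the memory operand list wholesale, keeping symbols and other extra
// info intact.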
void MachineInstr::setMemRefs(MachineFunction &MF,
                              ArrayRef<MachineMemOperand *> MMOs) {
  if (MMOs.empty()) {
    dropMemRefs(MF);
    return;
  }

  setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::addMemOperand(MachineFunction &MF,
                                 MachineMemOperand *MO) {
  SmallVector<MachineMemOperand *, 2> MMOs;
  MMOs.append(memoperands_begin(), memoperands_end());
  MMOs.push_back(MO);
  setMemRefs(MF, MMOs);
}

void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning memory references!");
  // See if we can just steal the extra info already allocated for the
  // instruction. We can do this whenever the pre- and post-instruction symbols
  // are the same (including null).
  if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
      getPostInstrSymbol() == MI.getPostInstrSymbol() &&
      getHeapAllocMarker() == MI.getHeapAllocMarker() &&
      getPCSections() == MI.getPCSections() &&
      getMMRAMetadata() == MI.getMMRAMetadata()) {
    Info = MI.Info;
    return;
  }

  // Otherwise, fall back on a copy-based clone.
  setMemRefs(MF, MI.memoperands());
}

/// Check to see if the MMOs pointed to by the two MemRefs arrays are
/// identical.
static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
                             ArrayRef<MachineMemOperand *> RHS) {
  if (LHS.size() != RHS.size())
    return false;

  auto LHSPointees = make_pointee_range(LHS);
  auto RHSPointees = make_pointee_range(RHS);
  return std::equal(LHSPointees.begin(), LHSPointees.end(),
                    RHSPointees.begin());
}

void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the
  // only way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged
    // state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}

void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  // Do nothing if old and new symbols are the same.
  if (Symbol == getPreInstrSymbol())
    return;

  // If there was only one symbol and we're removing it, just clear info.
  if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
    Info.clear();
    return;
  }

  setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  // Do nothing if old and new symbols are the same.
  if (Symbol == getPostInstrSymbol())
    return;

  // If there was only one symbol and we're removing it, just clear info.
  if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
    Info.clear();
    return;
  }

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
  // Do nothing if old and new markers are the same.
  if (Marker == getHeapAllocMarker())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               Marker, getPCSections(), getCFIType(), getMMRAMetadata());
}

void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
  // Do nothing if old and new metadata are the same.
  if (PCSections == getPCSections())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), PCSections, getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
  // Do nothing if old and new types are the same.
  if (Type == getCFIType())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata());
}

void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) {
  // Do nothing if old and new metadata are the same.
  if (MMRAs == getMMRAMetadata())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs);
}

void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
                                     const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning instruction symbols!");

  setPreInstrSymbol(MF, MI.getPreInstrSymbol());
  setPostInstrSymbol(MF, MI.getPostInstrSymbol());
  setHeapAllocMarker(MF, MI.getHeapAllocMarker());
  setPCSections(MF, MI.getPCSections());
  setMMRAMetadata(MF, MI.getMMRAMetadata());
}

uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
  // For now, just return the union of the flags. If the flags get more
  // complicated over time, we might need more logic here.
  return getFlags() | Other.getFlags();
}

uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
  uint32_t MIFlags = 0;
  // Copy the wrapping flags.
  if (const OverflowingBinaryOperator *OB =
          dyn_cast<OverflowingBinaryOperator>(&I)) {
    if (OB->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (OB->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
    if (TI->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (TI->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (GEP->hasNoUnsignedSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUSWrap;
    if (GEP->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  }

  // Copy the nonneg flag.
  if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
    if (PNI->hasNonNeg())
      MIFlags |= MachineInstr::MIFlag::NonNeg;
    // Copy the disjoint flag.
  } else if (const PossiblyDisjointInst *PD =
                 dyn_cast<PossiblyDisjointInst>(&I)) {
    if (PD->isDisjoint())
      MIFlags |= MachineInstr::MIFlag::Disjoint;
  }

  // Copy the samesign flag.
  if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
    if (ICmp->hasSameSign())
      MIFlags |= MachineInstr::MIFlag::SameSign;

  // Copy the exact flag.
  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
    if (PE->isExact())
      MIFlags |= MachineInstr::MIFlag::IsExact;

  // Copy the fast-math flags.
  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
    const FastMathFlags Flags = FP->getFastMathFlags();
    if (Flags.noNaNs())
      MIFlags |= MachineInstr::MIFlag::FmNoNans;
    if (Flags.noInfs())
      MIFlags |= MachineInstr::MIFlag::FmNoInfs;
    if (Flags.noSignedZeros())
      MIFlags |= MachineInstr::MIFlag::FmNsz;
    if (Flags.allowReciprocal())
      MIFlags |= MachineInstr::MIFlag::FmArcp;
    if (Flags.allowContract())
      MIFlags |= MachineInstr::MIFlag::FmContract;
    if (Flags.approxFunc())
      MIFlags |= MachineInstr::MIFlag::FmAfn;
    if (Flags.allowReassoc())
      MIFlags |= MachineInstr::MIFlag::FmReassoc;
  }

  if (I.getMetadata(LLVMContext::MD_unpredictable))
    MIFlags |= MachineInstr::MIFlag::Unpredictable;

  return MIFlags;
}

void MachineInstr::copyIRFlags(const Instruction &I) {
  Flags = copyFlagsFromInstruction(I);
}

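// Scan every instruction in the bundle headed by this instruction and combine
// the per-instruction MCID flags according to the query type: AnyInBundle
// succeeds on the first match, AllInBundle requires a match from every
// non-BUNDLE instruction.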
bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}

bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's
    // compare MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we have analyzed the last instruction inside at least one of
    // the bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, the machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  // If pre- or post-instruction symbols do not match then the two instructions
  // are not identical.
  if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
      getPostInstrSymbol() != Other.getPostInstrSymbol())
    return false;
  // Call instructions with different CFI types are not identical.
  if (isCall() && getCFIType() != Other.getCFIType())
    return false;

  return true;
}

bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
  if (!isDebugValueLike() || !Other.isDebugValueLike())
    return false;
  if (getDebugLoc() != Other.getDebugLoc())
    return false;
  if (getDebugVariable() != Other.getDebugVariable())
    return false;
  if (getNumDebugOperands() != Other.getNumDebugOperands())
    return false;
  for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
    if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
      return false;
  if (!DIExpression::isEqualExpression(
          getDebugExpression(), isIndirectDebugValue(),
          Other.getDebugExpression(), Other.isIndirectDebugValue()))
    return false;
  return true;
}

const MachineFunction *MachineInstr::getMF() const {
  return getParent()->getParent();
}

MachineInstr *MachineInstr::removeFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove(this);
}

MachineInstr *MachineInstr::removeFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove_instr(this);
}

void MachineInstr::eraseFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase(this);
}

void MachineInstr::eraseFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase_instr(this);
}

bool MachineInstr::isCandidateForAdditionalCallInfo(QueryType Type) const {
  if (!isCall(Type))
    return false;
  switch (getOpcode()) {
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STACKMAP:
  case TargetOpcode::STATEPOINT:
  case TargetOpcode::FENTRY_CALL:
    return false;
  }
  return true;
}

bool MachineInstr::shouldUpdateAdditionalCallInfo() const {
  if (isBundle())
    return isCandidateForAdditionalCallInfo(MachineInstr::AnyInBundle);
  return isCandidateForAdditionalCallInfo();
}

template <typename Operand, typename Instruction>
static iterator_range<
    filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
getDebugOperandsForRegHelper(Instruction *MI, Register Reg) {
  std::function<bool(Operand & Op)> OpUsesReg(
      [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
  return make_filter_range(MI->debug_operands(), OpUsesReg);
}

iterator_range<filter_iterator<const MachineOperand *,
                               std::function<bool(const MachineOperand &Op)>>>
MachineInstr::getDebugOperandsForReg(Register Reg) const {
  return getDebugOperandsForRegHelper<const MachineOperand, const MachineInstr>(
      this, Reg);
}

iterator_range<
    filter_iterator<MachineOperand *, std::function<bool(MachineOperand &Op)>>>
MachineInstr::getDebugOperandsForReg(Register Reg) {
  return getDebugOperandsForRegHelper<MachineOperand, MachineInstr>(this, Reg);
}

unsigned MachineInstr::getNumExplicitOperands() const {
  unsigned NumOperands = MCID->getNumOperands();
  if (!MCID->isVariadic())
    return NumOperands;

  for (const MachineOperand &MO : operands_impl().drop_front(NumOperands)) {
    // The operands must always be in the following order:
    // - explicit reg defs,
    // - other explicit operands (reg uses, immediates, etc.),
    // - implicit reg defs
    // - implicit reg uses
    if (MO.isReg() && MO.isImplicit())
      break;
    ++NumOperands;
  }
  return NumOperands;
}

unsigned MachineInstr::getNumExplicitDefs() const {
  unsigned NumDefs = MCID->getNumDefs();
  if (!MCID->isVariadic())
    return NumDefs;

  for (const MachineOperand &MO : operands_impl().drop_front(NumDefs)) {
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;
    ++NumDefs;
  }
  return NumDefs;
}

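// The four functions below maintain the pairwise BundledPred/BundledSucc
// flags that stitch adjacent instructions into a bundle; each one updates
// this instruction and its neighbor together so the flags stay consistent.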
void MachineInstr::bundleWithPred() {
  assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
  setFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = getIterator();
  --Pred;
  assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->setFlag(BundledSucc);
}

void MachineInstr::bundleWithSucc() {
  assert(!isBundledWithSucc() && "MI is already bundled with its successor");
  setFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = getIterator();
  ++Succ;
  assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->setFlag(BundledPred);
}

void MachineInstr::unbundleFromPred() {
  assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
  clearFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = getIterator();
  --Pred;
  assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->clearFlag(BundledSucc);
}

void MachineInstr::unbundleFromSucc() {
  assert(isBundledWithSucc() && "MI isn't bundled with its successor");
  clearFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = getIterator();
  ++Succ;
  assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->clearFlag(BundledPred);
}

bool MachineInstr::isStackAligningInlineAsm() const {
  if (isInlineAsm()) {
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      return true;
  }
  return false;
}

InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}

int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}

const DILabel *MachineInstr::getDebugLabel() const {
  assert(isDebugLabel() && "not a DBG_LABEL");
  return cast<DILabel>(getOperand(0).getMetadata());
}

const MachineOperand &MachineInstr::getDebugVariableOp() const {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
  return getOperand(VariableOp);
}

MachineOperand &MachineInstr::getDebugVariableOp() {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
  return getOperand(VariableOp);
}

const DILocalVariable *MachineInstr::getDebugVariable() const {
  return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
}

const MachineOperand &MachineInstr::getDebugExpressionOp() const {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
  return getOperand(ExpressionOp);
}

MachineOperand &MachineInstr::getDebugExpressionOp() {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
  return getOperand(ExpressionOp);
}

const DIExpression *MachineInstr::getDebugExpression() const {
  return cast<DIExpression>(getDebugExpressionOp().getMetadata());
}

bool MachineInstr::isDebugEntryValue() const {
  return isDebugValue() && getDebugExpression()->isEntryValue();
}

const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
  unsigned RCID;
  if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
      F.hasRegClassConstraint(RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (F.isMemKind())
    return TRI->getPointerRegClass(MF);

  return nullptr;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
    unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  assert(CurRC && "Invalid initial register class");
  // Check if Reg is constrained by some of its use/def from MI.
  const MachineOperand &MO = getOperand(OpIdx);
  if (!MO.isReg() || MO.getReg() != Reg)
    return CurRC;
  // If yes, accumulate the constraints through the operand.
  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}

/// Return the number of instructions inside the MI bundle, not counting the
/// header instruction.
unsigned MachineInstr::getBundleSize() const {
  MachineBasicBlock::const_instr_iterator I = getIterator();
  unsigned Size = 0;
  while (I->isBundledWithSucc()) {
    ++Size;
    ++I;
  }
  return Size;
}

/// Returns true if the MachineInstr has an implicit-use operand of exactly
/// the given register (not considering sub/super-registers).
bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
  for (const MachineOperand &MO : implicit_operands()) {
    if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
      return true;
  }
  return false;
}

/// findRegisterUseOperandIdx() - Returns the index of the operand that is a
/// use of the specified register, or -1 if it is not found. It further
/// tightens the search criteria to a use that kills the register if isKill
/// is true.
int MachineInstr::findRegisterUseOperandIdx(Register Reg,
                                            const TargetRegisterInfo *TRI,
                                            bool isKill) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    Register MOReg = MO.getReg();
    if (!MOReg)
      continue;
    if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
      if (!isKill || MO.isKill())
        return i;
  }
  return -1;
}

/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(Register Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}

/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int MachineInstr::findRegisterDefOperandIdx(Register Reg,
                                            const TargetRegisterInfo *TRI,
                                            bool isDead, bool Overlap) const {
  bool isPhys = Reg.isPhysical();
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    if (!Found && TRI && isPhys && MOReg.isPhysical()) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}

/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
  // Don't call MCID.findFirstPredOperandIdx() because this variant
  // is sometimes called on an instruction that's not yet complete, and
  // so the number of operands is less than the MCID indicates. In
  // particular, the PTX target does this.
  const MCInstrDesc &MCID = getDesc();
  if (MCID.isPredicable()) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (MCID.operands()[i].isPredicate())
        return i;
  }

  return -1;
}

// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0: Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax: Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
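///
/// For example, tieOperands(0, 1) records the tie by setting
/// getOperand(1).TiedTo to 1 (DefIdx + 1) and getOperand(0).TiedTo to 2
/// (UseIdx + 1).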
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax) {
    UseMO.TiedTo = DefIdx + 1;
  } else {
    // Inline asm can use the group descriptors to find tied operands, and
    // statepoint tied operands are trivial to match (1-1 reg def with reg
    // use), but on a normal instruction, the tied def must be within the
    // first TiedMax operands.
    assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
           "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}

/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  if (getOpcode() == TargetOpcode::STATEPOINT) {
    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
    // on registers.
    StatepointOpers SO(this);
    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
    assert(CurUseIdx != -1U &&
           "only gc pointer statepoint operands can be tied");
    unsigned NumDefs = getNumDefs();
    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
      while (!getOperand(CurUseIdx).isReg())
        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
      if (OpIdx == CurDefIdx)
        return CurUseIdx;
      if (OpIdx == CurUseIdx)
        return CurDefIdx;
      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!F.isUseOperandTiedToDef(TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}

/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
  for (MachineOperand &MO : operands()) {
    if (MO.isReg() && MO.isUse())
      MO.setIsKill(false);
  }
}

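// Rewrite every operand that currently refers to FromReg so that it refers to
// ToReg instead. For a physical ToReg, SubIdx is folded into ToReg up front;
// for a virtual ToReg, it is composed into each operand's sub-register index.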
void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
                                      unsigned SubIdx,
                                      const TargetRegisterInfo &RegInfo) {
  if (ToReg.isPhysical()) {
    if (SubIdx)
      ToReg = RegInfo.getSubReg(ToReg, SubIdx);
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substPhysReg(ToReg, RegInfo);
    }
  } else {
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substVirtReg(ToReg, SubIdx, RegInfo);
    }
  }
}

/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  // Don't touch instructions that have non-trivial invariants. For example,
  // terminators have to be at the end of a basic block.
  if (isPosition() || isDebugInstr() || isTerminator() ||
      isJumpTableDebugInfo())
    return false;

  // Don't touch instructions which can have non-load/store effects.
  //
  // Inline asm has a "sideeffect" marker to indicate whether the asm has
  // intentional side-effects. Even if an inline asm is not "sideeffect",
  // though, it still can't be speculatively executed: the operation might
  // not be valid on the current target, or for some combinations of operands.
  // (Some transforms that move an instruction don't speculatively execute it;
  // we currently don't try to handle that distinction here.)
  //
  // Other instructions handled here include those that can raise FP
  // exceptions, x86 "DIV" instructions which trap on divide by zero, and
  // stack adjustments.
  if (mayRaiseFPException() || hasProperty(MCID::UnmodeledSideEffects) ||
      isInlineAsm())
    return false;

  // See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and its intended destination.
  // The check for isInvariantLoad gives the target the chance to classify the
  // load as always returning a constant, e.g. a constant pool load.
  if (mayLoad() && !isDereferenceableInvariantLoad())
    // Otherwise, this is a real load. If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}

bool MachineInstr::wouldBeTriviallyDead() const {
  // Don't delete frame allocation labels.
  // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
  if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // Don't delete FAKE_USE.
  // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
  if (isFakeUse())
    return false;

  // LIFETIME markers should be preserved.
  // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
  if (isLifetimeMarker())
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  return isPHI() || isSafeToMove(SawStore);
}

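// Determine whether this instruction is dead and therefore safe to delete:
// all of its defs must be unused (or provably dead), and the instruction
// itself must be free of side effects.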
bool MachineInstr::isDead(const MachineRegisterInfo &MRI,
                          LiveRegUnits *LivePhysRegs) const {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const MachineOperand &MO : all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical()) {
      // Don't delete live physreg defs, or any reserved register defs.
      if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
        return false;
    } else {
      if (MO.isDead())
        continue;
      for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
        if (&Use != this)
          // This def has a non-debug use. Don't delete the instruction!
          return false;
      }
    }
  }

  // Technically speaking, inline asm without side effects and no defs can
  // still be deleted. But there is so much bad inline asm code out there, we
  // should let it be.
  if (isInlineAsm())
    return false;

  // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
  if (isLifetimeMarker())
    return true;

  // If there are no defs with uses, then we call the instruction dead so long
  // as we do not suspect it may have side effects.
  return wouldBeTriviallyDead();
}

1429static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI,
1430 BatchAAResults *AA, bool UseTBAA,
1431 const MachineMemOperand *MMOa,
1432 const MachineMemOperand *MMOb) {
1433 // The following interface to AA is fashioned after DAGCombiner::isAlias and
1434 // operates with MachineMemOperand offset with some important assumptions:
1435 // - LLVM fundamentally assumes flat address spaces.
1436 // - MachineOperand offset can *only* result from legalization and cannot
1437 // affect queries other than the trivial case of overlap checking.
1438 // - These offsets never wrap and never step outside of allocated objects.
1439 // - There should never be any negative offsets here.
1440 //
1441 // FIXME: Modify API to hide this math from "user"
1442 // Even before we go to AA we can reason locally about some memory objects. It
1443 // can save compile time, and possibly catch some corner cases not currently
1444 // covered.
1445
1446 int64_t OffsetA = MMOa->getOffset();
1447 int64_t OffsetB = MMOb->getOffset();
1448 int64_t MinOffset = std::min(a: OffsetA, b: OffsetB);
1449
1450 LocationSize WidthA = MMOa->getSize();
1451 LocationSize WidthB = MMOb->getSize();
1452 bool KnownWidthA = WidthA.hasValue();
1453 bool KnownWidthB = WidthB.hasValue();
1454 bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
1455
1456 const Value *ValA = MMOa->getValue();
1457 const Value *ValB = MMOb->getValue();
1458 bool SameVal = (ValA && ValB && (ValA == ValB));
1459 if (!SameVal) {
1460 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1461 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1462 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1463 return false;
1464 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1465 return false;
1466 if (PSVa && PSVb && (PSVa == PSVb))
1467 SameVal = true;
1468 }
1469
1470 if (SameVal && BothMMONonScalable) {
1471 if (!KnownWidthA || !KnownWidthB)
1472 return true;
1473 int64_t MaxOffset = std::max(a: OffsetA, b: OffsetB);
1474 int64_t LowWidth = (MinOffset == OffsetA)
1475 ? WidthA.getValue().getKnownMinValue()
1476 : WidthB.getValue().getKnownMinValue();
1477 return (MinOffset + LowWidth > MaxOffset);
1478 }
1479
1480 if (!AA)
1481 return true;
1482
1483 if (!ValA || !ValB)
1484 return true;
1485
1486 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1487 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1488
  // If a scalable LocationSize has a non-zero offset, Width + Offset does not
  // work at the moment, so conservatively return true.
1491 if ((WidthA.isScalable() && OffsetA > 0) ||
1492 (WidthB.isScalable() && OffsetB > 0))
1493 return true;
1494
1495 int64_t OverlapA =
1496 KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
1497 : MemoryLocation::UnknownSize;
1498 int64_t OverlapB =
1499 KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
1500 : MemoryLocation::UnknownSize;
1501
1502 LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
1503 ? WidthA
1504 : LocationSize::precise(Value: OverlapA);
1505 LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
1506 ? WidthB
1507 : LocationSize::precise(Value: OverlapB);
1508
1509 return !AA->isNoAlias(
1510 LocA: MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1511 LocB: MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1512}
1513
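/// Return true if this instruction may access memory that \p Other accesses
/// in a conflicting way. Two instructions that only read never alias in this
/// sense; calls and memory operations without memoperands are treated
/// conservatively.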
1514bool MachineInstr::mayAlias(BatchAAResults *AA, const MachineInstr &Other,
1515 bool UseTBAA) const {
1516 const MachineFunction *MF = getMF();
1517 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1518 const MachineFrameInfo &MFI = MF->getFrameInfo();
1519
  // Exclude call instructions, which may alter memory but cannot be handled
  // by this function.
1522 if (isCall() || Other.isCall())
1523 return true;
1524
1525 // If neither instruction stores to memory, they can't alias in any
1526 // meaningful way, even if they read from the same address.
1527 if (!mayStore() && !Other.mayStore())
1528 return false;
1529
1530 // Both instructions must be memory operations to be able to alias.
1531 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1532 return false;
1533
1534 // Let the target decide if memory accesses cannot possibly overlap.
1535 if (TII->areMemAccessesTriviallyDisjoint(MIa: *this, MIb: Other))
1536 return false;
1537
1538 // Memory operations without memory operands may access anything. Be
1539 // conservative and assume `MayAlias`.
1540 if (memoperands_empty() || Other.memoperands_empty())
1541 return true;
1542
  // Conservatively assume aliasing if there are too many memory operand
  // pairs to check.
1544 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1545 if (NumChecks > TII->getMemOperandAACheckLimit())
1546 return true;
1547
  // Check each pair of memory operands from both instructions: the two
  // instructions are known not to alias only if no pair may alias.
1550 for (auto *MMOa : memoperands())
1551 for (auto *MMOb : Other.memoperands())
1552 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1553 return true;
1554
1555 return false;
1556}
1557
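/// Convenience overload: wraps \p AA in a BatchAAResults, when one is
/// provided, and forwards to the batched variant above.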
1558bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1559 bool UseTBAA) const {
1560 if (AA) {
1561 BatchAAResults BAA(*AA);
1562 return mayAlias(AA: &BAA, Other, UseTBAA);
1563 }
1564 return mayAlias(AA: static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1565}
1566
1567/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1568/// or volatile memory reference, or if the information describing the memory
1569/// reference is not available. Return false if it is known to have no ordered
1570/// memory references.
1571bool MachineInstr::hasOrderedMemoryRef() const {
1572 // An instruction known never to access memory won't have a volatile access.
1573 if (!mayStore() &&
1574 !mayLoad() &&
1575 !isCall() &&
1576 !hasUnmodeledSideEffects())
1577 return false;
1578
1579 // Otherwise, if the instruction has no memory reference information,
1580 // conservatively assume it wasn't preserved.
1581 if (memoperands_empty())
1582 return true;
1583
1584 // Check if any of our memory operands are ordered.
1585 return llvm::any_of(Range: memoperands(), P: [](const MachineMemOperand *MMO) {
1586 return !MMO->isUnordered();
1587 });
1588}
1589
1590/// isDereferenceableInvariantLoad - Return true if this instruction will never
1591/// trap and is loading from a location whose value is invariant across a run of
1592/// this function.
1593bool MachineInstr::isDereferenceableInvariantLoad() const {
1594 // If the instruction doesn't load at all, it isn't an invariant load.
1595 if (!mayLoad())
1596 return false;
1597
1598 // If the instruction has lost its memoperands, conservatively assume that
1599 // it may not be an invariant load.
1600 if (memoperands_empty())
1601 return false;
1602
1603 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1604
1605 for (MachineMemOperand *MMO : memoperands()) {
1606 if (!MMO->isUnordered())
      // If the memory operand has ordering side effects, we can't move the
      // instruction. Such an instruction is technically an invariant load,
      // but the caller code would need to be updated to expect that.
1610 return false;
    if (MMO->isStore())
      return false;
1612 if (MMO->isInvariant() && MMO->isDereferenceable())
1613 continue;
1614
1615 // A load from a constant PseudoSourceValue is invariant.
1616 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1617 if (PSV->isConstant(&MFI))
1618 continue;
1619 }
1620
    // Otherwise, conservatively assume the load is not invariant.
1622 return false;
1623 }
1624
1625 // Everything checks out.
1626 return true;
1627}
1628
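/// If every source of this PHI is the same register, return that register.
/// PHI operands are laid out as (def, reg0, bb0, reg1, bb1, ...), so the
/// source registers sit at odd operand indices.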
1629Register MachineInstr::isConstantValuePHI() const {
1630 if (!isPHI())
1631 return {};
1632 assert(getNumOperands() >= 3 &&
1633 "It's illegal to have a PHI without source operands");
1634
1635 Register Reg = getOperand(i: 1).getReg();
1636 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1637 if (getOperand(i).getReg() != Reg)
1638 return {};
1639 return Reg;
1640}
1641
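/// Return true if this instruction has side effects that are not captured by
/// its operands: either the MCInstrDesc carries the UnmodeledSideEffects flag
/// or the instruction is inline asm marked sideeffect.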
1642bool MachineInstr::hasUnmodeledSideEffects() const {
1643 if (hasProperty(MCFlag: MCID::UnmodeledSideEffects))
1644 return true;
1645 if (isInlineAsm()) {
1646 unsigned ExtraInfo = getOperand(i: InlineAsm::MIOp_ExtraInfo).getImm();
1647 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1648 return true;
1649 }
1650
1651 return false;
1652}
1653
1654bool MachineInstr::isLoadFoldBarrier() const {
1655 return mayStore() || isCall() ||
1656 (hasUnmodeledSideEffects() && !isPseudoProbe());
1657}
1658
1659/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1660///
1661bool MachineInstr::allDefsAreDead() const {
1662 for (const MachineOperand &MO : operands()) {
1663 if (!MO.isReg() || MO.isUse())
1664 continue;
1665 if (!MO.isDead())
1666 return false;
1667 }
1668 return true;
1669}
1670
1671bool MachineInstr::allImplicitDefsAreDead() const {
1672 for (const MachineOperand &MO : implicit_operands()) {
1673 if (!MO.isReg() || MO.isUse())
1674 continue;
1675 if (!MO.isDead())
1676 return false;
1677 }
1678 return true;
1679}
1680
1681/// copyImplicitOps - Copy implicit register operands from specified
1682/// instruction to this instruction.
1683void MachineInstr::copyImplicitOps(MachineFunction &MF,
1684 const MachineInstr &MI) {
1685 for (const MachineOperand &MO :
1686 llvm::drop_begin(RangeOrContainer: MI.operands(), N: MI.getDesc().getNumOperands()))
1687 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1688 addOperand(MF, Op: MO);
1689}
1690
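/// Return true if the tied-operand pairs on this instruction cannot be
/// reconstructed from the MCInstrDesc TIED_TO constraints alone (for example
/// on STATEPOINT), in which case the ties must be printed explicitly.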
1691bool MachineInstr::hasComplexRegisterTies() const {
1692 const MCInstrDesc &MCID = getDesc();
1693 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1694 return true;
1695 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1696 const auto &Operand = getOperand(i: I);
1697 if (!Operand.isReg() || Operand.isDef())
1698 // Ignore the defined registers as MCID marks only the uses as tied.
1699 continue;
1700 int ExpectedTiedIdx = MCID.getOperandConstraint(OpNum: I, Constraint: MCOI::TIED_TO);
1701 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(OpIdx: I)) : -1;
1702 if (ExpectedTiedIdx != TiedIdx)
1703 return true;
1704 }
1705 return false;
1706}
1707
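/// Compute the LLT to print for operand \p OpIdx. Returns an invalid LLT when
/// no type needs printing, e.g. for non-register operands or when the type
/// for this generic type index was already printed.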
1708LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1709 const MachineRegisterInfo &MRI) const {
1710 const MachineOperand &Op = getOperand(i: OpIdx);
1711 if (!Op.isReg())
1712 return LLT{};
1713
1714 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1715 return MRI.getType(Reg: Op.getReg());
1716
1717 auto &OpInfo = getDesc().operands()[OpIdx];
1718 if (!OpInfo.isGenericType())
1719 return MRI.getType(Reg: Op.getReg());
1720
1721 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1722 return LLT{};
1723
1724 LLT TypeToPrint = MRI.getType(Reg: Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached.
1727 if (TypeToPrint.isValid())
1728 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1729 return TypeToPrint;
1730}
1731
1732#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1733LLVM_DUMP_METHOD void MachineInstr::dump() const {
1734 dbgs() << " ";
1735 print(dbgs());
1736}
1737
1738LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1739 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1740 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1741 if (Depth >= MaxDepth)
1742 return;
1743 if (!AlreadySeenInstrs.insert(this).second)
1744 return;
1745 // PadToColumn always inserts at least one space.
1746 // Don't mess up the alignment if we don't want any space.
1747 if (Depth)
1748 fdbgs().PadToColumn(Depth * 2);
1749 print(fdbgs());
1750 for (const MachineOperand &MO : operands()) {
1751 if (!MO.isReg() || MO.isDef())
1752 continue;
1753 Register Reg = MO.getReg();
1754 if (Reg.isPhysical())
1755 continue;
1756 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1757 if (NewMI == nullptr)
1758 continue;
1759 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1760 }
1761}
1762
1763LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1764 unsigned MaxDepth) const {
1765 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1766 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1767}
1768#endif
1769
1770void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1771 bool SkipDebugLoc, bool AddNewLine,
1772 const TargetInstrInfo *TII) const {
1773 const Module *M = nullptr;
1774 const Function *F = nullptr;
1775 if (const MachineFunction *MF = getMFIfAvailable(MI: *this)) {
1776 F = &MF->getFunction();
1777 M = F->getParent();
1778 if (!TII)
1779 TII = MF->getSubtarget().getInstrInfo();
1780 }
1781
1782 ModuleSlotTracker MST(M);
1783 if (F)
1784 MST.incorporateFunction(F: *F);
1785 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1786}
1787
1788void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1789 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1790 bool AddNewLine, const TargetInstrInfo *TII) const {
1791 // We can be a bit tidier if we know the MachineFunction.
1792 const TargetRegisterInfo *TRI = nullptr;
1793 const MachineRegisterInfo *MRI = nullptr;
1794 tryToGetTargetInfo(MI: *this, TRI, MRI, TII);
1795
1796 if (isCFIInstruction())
1797 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1798
1799 SmallBitVector PrintedTypes(8);
1800 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1801 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1802 if (!ShouldPrintRegisterTies)
1803 return 0U;
1804 const MachineOperand &MO = getOperand(i: OpIdx);
1805 if (MO.isReg() && MO.isTied() && !MO.isDef())
1806 return findTiedOperandIdx(OpIdx);
1807 return 0U;
1808 };
1809 unsigned StartOp = 0;
1810 unsigned e = getNumOperands();
1811
1812 // Print explicitly defined operands on the left of an assignment syntax.
1813 while (StartOp < e) {
1814 const MachineOperand &MO = getOperand(i: StartOp);
1815 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1816 break;
1817
1818 if (StartOp != 0)
1819 OS << ", ";
1820
1821 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: StartOp, PrintedTypes, MRI: *MRI) : LLT{};
1822 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1823 MO.print(os&: OS, MST, TypeToPrint, OpIdx: StartOp, /*PrintDef=*/false, IsStandalone,
1824 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1825 ++StartOp;
1826 }
1827
1828 if (StartOp != 0)
1829 OS << " = ";
1830
1831 if (getFlag(Flag: MachineInstr::FrameSetup))
1832 OS << "frame-setup ";
1833 if (getFlag(Flag: MachineInstr::FrameDestroy))
1834 OS << "frame-destroy ";
1835 if (getFlag(Flag: MachineInstr::FmNoNans))
1836 OS << "nnan ";
1837 if (getFlag(Flag: MachineInstr::FmNoInfs))
1838 OS << "ninf ";
1839 if (getFlag(Flag: MachineInstr::FmNsz))
1840 OS << "nsz ";
1841 if (getFlag(Flag: MachineInstr::FmArcp))
1842 OS << "arcp ";
1843 if (getFlag(Flag: MachineInstr::FmContract))
1844 OS << "contract ";
1845 if (getFlag(Flag: MachineInstr::FmAfn))
1846 OS << "afn ";
1847 if (getFlag(Flag: MachineInstr::FmReassoc))
1848 OS << "reassoc ";
1849 if (getFlag(Flag: MachineInstr::NoUWrap))
1850 OS << "nuw ";
1851 if (getFlag(Flag: MachineInstr::NoSWrap))
1852 OS << "nsw ";
1853 if (getFlag(Flag: MachineInstr::IsExact))
1854 OS << "exact ";
1855 if (getFlag(Flag: MachineInstr::NoFPExcept))
1856 OS << "nofpexcept ";
1857 if (getFlag(Flag: MachineInstr::NoMerge))
1858 OS << "nomerge ";
1859 if (getFlag(Flag: MachineInstr::NonNeg))
1860 OS << "nneg ";
1861 if (getFlag(Flag: MachineInstr::Disjoint))
1862 OS << "disjoint ";
1863 if (getFlag(Flag: MachineInstr::SameSign))
1864 OS << "samesign ";
1865
1866 // Print the opcode name.
1867 if (TII)
1868 OS << TII->getName(Opcode: getOpcode());
1869 else
1870 OS << "UNKNOWN";
1871
1872 if (SkipOpers)
1873 return;
1874
1875 // Print the rest of the operands.
1876 bool FirstOp = true;
1877 unsigned AsmDescOp = ~0u;
1878 unsigned AsmOpCount = 0;
1879
1880 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1881 // Print asm string.
1882 OS << " ";
1883 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1884 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, MRI: *MRI) : LLT{};
1885 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1886 getOperand(i: OpIdx).print(os&: OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true,
1887 IsStandalone, ShouldPrintRegisterTies,
1888 TiedOperandIdx, TRI);
1889
    // Print HasSideEffects, MayLoad, MayStore, IsConvergent, IsAlignStack,
    // and the asm dialect.
1891 unsigned ExtraInfo = getOperand(i: InlineAsm::MIOp_ExtraInfo).getImm();
1892 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1893 OS << " [sideeffect]";
1894 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1895 OS << " [mayload]";
1896 if (ExtraInfo & InlineAsm::Extra_MayStore)
1897 OS << " [maystore]";
1898 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1899 OS << " [isconvergent]";
1900 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1901 OS << " [alignstack]";
1902 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1903 OS << " [attdialect]";
1904 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1905 OS << " [inteldialect]";
1906
1907 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1908 FirstOp = false;
1909 }
1910
1911 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1912 const MachineOperand &MO = getOperand(i);
1913
    if (FirstOp)
      FirstOp = false;
    else
      OS << ",";
1915 OS << " ";
1916
1917 if (isDebugValueLike() && MO.isMetadata()) {
1918 // Pretty print DBG_VALUE* instructions.
1919 auto *DIV = dyn_cast<DILocalVariable>(Val: MO.getMetadata());
1920 if (DIV && !DIV->getName().empty())
1921 OS << "!\"" << DIV->getName() << '\"';
1922 else {
1923 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: i, PrintedTypes, MRI: *MRI) : LLT{};
1924 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1925 MO.print(os&: OS, MST, TypeToPrint, OpIdx: i, /*PrintDef=*/true, IsStandalone,
1926 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1927 }
1928 } else if (isDebugLabel() && MO.isMetadata()) {
1929 // Pretty print DBG_LABEL instructions.
1930 auto *DIL = dyn_cast<DILabel>(Val: MO.getMetadata());
1931 if (DIL && !DIL->getName().empty())
1932 OS << "\"" << DIL->getName() << '\"';
1933 else {
1934 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: i, PrintedTypes, MRI: *MRI) : LLT{};
1935 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1936 MO.print(os&: OS, MST, TypeToPrint, OpIdx: i, /*PrintDef=*/true, IsStandalone,
1937 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1938 }
1939 } else if (i == AsmDescOp && MO.isImm()) {
1940 // Pretty print the inline asm operand descriptor.
1941 OS << '$' << AsmOpCount++;
1942 unsigned Flag = MO.getImm();
1943 const InlineAsm::Flag F(Flag);
1944 OS << ":[";
1945 OS << F.getKindName();
1946
1947 unsigned RCID;
1948 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RC&: RCID)) {
1949 if (TRI) {
1950 OS << ':' << TRI->getRegClassName(Class: TRI->getRegClass(i: RCID));
1951 } else
1952 OS << ":RC" << RCID;
1953 }
1954
1955 if (F.isMemKind()) {
1956 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1957 OS << ":" << InlineAsm::getMemConstraintName(C: MCID);
1958 }
1959
1960 unsigned TiedTo;
1961 if (F.isUseOperandTiedToDef(Idx&: TiedTo))
1962 OS << " tiedto:$" << TiedTo;
1963
1964 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
1965 F.isRegUseKind()) &&
1966 F.getRegMayBeFolded()) {
1967 OS << " foldable";
1968 }
1969
1970 OS << ']';
1971
1972 // Compute the index of the next operand descriptor.
1973 AsmDescOp += 1 + F.getNumOperandRegisters();
1974 } else {
1975 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: i, PrintedTypes, MRI: *MRI) : LLT{};
1976 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1977 if (MO.isImm() && isOperandSubregIdx(OpIdx: i))
1978 MachineOperand::printSubRegIdx(OS, Index: MO.getImm(), TRI);
1979 else
1980 MO.print(os&: OS, MST, TypeToPrint, OpIdx: i, /*PrintDef=*/true, IsStandalone,
1981 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1982 }
1983 }
1984
  // Print any optional symbols attached to this instruction as if they were
  // operands.
1987 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
    if (!FirstOp)
      OS << ',';
    FirstOp = false;
1992 OS << " pre-instr-symbol ";
1993 MachineOperand::printSymbol(OS, Sym&: *PreInstrSymbol);
1994 }
1995 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
    if (!FirstOp)
      OS << ',';
    FirstOp = false;
2000 OS << " post-instr-symbol ";
2001 MachineOperand::printSymbol(OS, Sym&: *PostInstrSymbol);
2002 }
2003 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
    if (!FirstOp)
      OS << ',';
    FirstOp = false;
2008 OS << " heap-alloc-marker ";
2009 HeapAllocMarker->printAsOperand(OS, MST);
2010 }
2011 if (MDNode *PCSections = getPCSections()) {
    if (!FirstOp)
      OS << ',';
    FirstOp = false;
2016 OS << " pcsections ";
2017 PCSections->printAsOperand(OS, MST);
2018 }
2019 if (MDNode *MMRA = getMMRAMetadata()) {
    if (!FirstOp)
      OS << ',';
    FirstOp = false;
2024 OS << " mmra ";
2025 MMRA->printAsOperand(OS, MST);
2026 }
2027 if (uint32_t CFIType = getCFIType()) {
    if (!FirstOp)
      OS << ',';
    FirstOp = false;
2030 OS << " cfi-type " << CFIType;
2031 }
2032
2033 if (DebugInstrNum) {
    if (!FirstOp)
      OS << ',';
    FirstOp = false;
2036 OS << " debug-instr-number " << DebugInstrNum;
2037 }
2038
2039 if (!SkipDebugLoc) {
2040 if (const DebugLoc &DL = getDebugLoc()) {
2041 if (!FirstOp)
2042 OS << ',';
2043 OS << " debug-location ";
2044 DL->printAsOperand(OS, MST);
2045 }
2046 }
2047
2048 if (!memoperands_empty()) {
2049 SmallVector<StringRef, 0> SSNs;
2050 const LLVMContext *Context = nullptr;
2051 std::unique_ptr<LLVMContext> CtxPtr;
2052 const MachineFrameInfo *MFI = nullptr;
2053 if (const MachineFunction *MF = getMFIfAvailable(MI: *this)) {
2054 MFI = &MF->getFrameInfo();
2055 Context = &MF->getFunction().getContext();
2056 } else {
2057 CtxPtr = std::make_unique<LLVMContext>();
2058 Context = CtxPtr.get();
2059 }
2060
2061 OS << " :: ";
2062 bool NeedComma = false;
2063 for (const MachineMemOperand *Op : memoperands()) {
2064 if (NeedComma)
2065 OS << ", ";
2066 Op->print(OS, MST, SSNs, Context: *Context, MFI, TII);
2067 NeedComma = true;
2068 }
2069 }
2070
2071 if (SkipDebugLoc)
2072 return;
2073
2074 bool HaveSemi = false;
2075
2076 // Print debug location information.
2077 if (const DebugLoc &DL = getDebugLoc()) {
2078 if (!HaveSemi) {
2079 OS << ';';
2080 HaveSemi = true;
2081 }
2082 OS << ' ';
2083 DL.print(OS);
2084 }
2085
2086 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2087 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2088 (isDebugValueList() && getNumOperands() >= 2) ||
2089 (isDebugRef() && getNumOperands() >= 3)) {
2090 if (getDebugVariableOp().isMetadata()) {
2091 if (!HaveSemi) {
2092 OS << ";";
2093 HaveSemi = true;
2094 }
2095 auto *DV = getDebugVariable();
2096 OS << " line no:" << DV->getLine();
2097 if (isIndirectDebugValue())
2098 OS << " indirect";
2099 }
2100 }
2101 // TODO: DBG_LABEL
2102
2103 if (PrintMIAddrs)
2104 OS << " ; " << this;
2105
2106 if (AddNewLine)
2107 OS << '\n';
2108}
2109
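/// Mark uses of \p IncomingReg in this instruction as killed, trimming
/// now-redundant kill flags on its sub-registers. If no matching use exists
/// and \p AddIfNotFound is set, add an implicit killed use instead.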
2110bool MachineInstr::addRegisterKilled(Register IncomingReg,
2111 const TargetRegisterInfo *RegInfo,
2112 bool AddIfNotFound) {
2113 bool isPhysReg = IncomingReg.isPhysical();
2114 bool hasAliases = isPhysReg &&
2115 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2116 bool Found = false;
2117 SmallVector<unsigned,4> DeadOps;
2118 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2119 MachineOperand &MO = getOperand(i);
2120 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2121 continue;
2122
2123 // DEBUG_VALUE nodes do not contribute to code generation and should
2124 // always be ignored. Failure to do so may result in trying to modify
2125 // KILL flags on DEBUG_VALUE nodes.
2126 if (MO.isDebug())
2127 continue;
2128
2129 Register Reg = MO.getReg();
2130 if (!Reg)
2131 continue;
2132
2133 if (Reg == IncomingReg) {
2134 if (!Found) {
2135 if (MO.isKill())
2136 // The register is already marked kill.
2137 return true;
2138 if (isPhysReg && isRegTiedToDefOperand(UseOpIdx: i))
2139 // Two-address uses of physregs must not be marked kill.
2140 return true;
2141 MO.setIsKill();
2142 Found = true;
2143 }
2144 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2145 // A super-register kill already exists.
2146 if (RegInfo->isSuperRegister(RegA: IncomingReg, RegB: Reg))
2147 return true;
2148 if (RegInfo->isSubRegister(RegA: IncomingReg, RegB: Reg))
2149 DeadOps.push_back(Elt: i);
2150 }
2151 }
2152
2153 // Trim unneeded kill operands.
2154 while (!DeadOps.empty()) {
2155 unsigned OpIdx = DeadOps.back();
2156 if (getOperand(i: OpIdx).isImplicit() &&
2157 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2158 removeOperand(OpNo: OpIdx);
2159 else
2160 getOperand(i: OpIdx).setIsKill(false);
2161 DeadOps.pop_back();
2162 }
2163
2164 // If not found, this means an alias of one of the operands is killed. Add a
2165 // new implicit operand if required.
2166 if (!Found && AddIfNotFound) {
2167 addOperand(Op: MachineOperand::CreateReg(Reg: IncomingReg,
2168 isDef: false /*IsDef*/,
2169 isImp: true /*IsImp*/,
2170 isKill: true /*IsKill*/));
2171 return true;
2172 }
2173 return Found;
2174}
2175
2176void MachineInstr::clearRegisterKills(Register Reg,
2177 const TargetRegisterInfo *RegInfo) {
2178 if (!Reg.isPhysical())
2179 RegInfo = nullptr;
2180 for (MachineOperand &MO : operands()) {
2181 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2182 continue;
2183 Register OpReg = MO.getReg();
2184 if ((RegInfo && RegInfo->regsOverlap(RegA: Reg, RegB: OpReg)) || Reg == OpReg)
2185 MO.setIsKill(false);
2186 }
2187}
2188
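/// Mark defs of \p Reg in this instruction as dead, trimming now-redundant
/// dead flags on its sub-registers. If no matching def exists and
/// \p AddIfNotFound is set, add an implicit dead def instead.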
2189bool MachineInstr::addRegisterDead(Register Reg,
2190 const TargetRegisterInfo *RegInfo,
2191 bool AddIfNotFound) {
2192 bool isPhysReg = Reg.isPhysical();
2193 bool hasAliases = isPhysReg &&
2194 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2195 bool Found = false;
2196 SmallVector<unsigned,4> DeadOps;
2197 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2198 MachineOperand &MO = getOperand(i);
2199 if (!MO.isReg() || !MO.isDef())
2200 continue;
2201 Register MOReg = MO.getReg();
2202 if (!MOReg)
2203 continue;
2204
2205 if (MOReg == Reg) {
2206 MO.setIsDead();
2207 Found = true;
2208 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2209 // There exists a super-register that's marked dead.
2210 if (RegInfo->isSuperRegister(RegA: Reg, RegB: MOReg))
2211 return true;
2212 if (RegInfo->isSubRegister(RegA: Reg, RegB: MOReg))
2213 DeadOps.push_back(Elt: i);
2214 }
2215 }
2216
2217 // Trim unneeded dead operands.
2218 while (!DeadOps.empty()) {
2219 unsigned OpIdx = DeadOps.back();
2220 if (getOperand(i: OpIdx).isImplicit() &&
2221 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2222 removeOperand(OpNo: OpIdx);
2223 else
2224 getOperand(i: OpIdx).setIsDead(false);
2225 DeadOps.pop_back();
2226 }
2227
2228 // If not found, this means an alias of one of the operands is dead. Add a
2229 // new implicit operand if required.
2230 if (Found || !AddIfNotFound)
2231 return Found;
2232
2233 addOperand(Op: MachineOperand::CreateReg(Reg,
2234 isDef: true /*IsDef*/,
2235 isImp: true /*IsImp*/,
2236 isKill: false /*IsKill*/,
2237 isDead: true /*IsDead*/));
2238 return true;
2239}
2240
2241void MachineInstr::clearRegisterDeads(Register Reg) {
2242 for (MachineOperand &MO : all_defs())
2243 if (MO.getReg() == Reg)
2244 MO.setIsDead(false);
2245}
2246
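/// Update the undef flag to \p IsUndef on every sub-register def of \p Reg;
/// full-register defs are left untouched.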
2247void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2248 for (MachineOperand &MO : all_defs())
2249 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2250 MO.setIsUndef(IsUndef);
2251}
2252
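/// Ensure this instruction defines \p Reg, adding an implicit def only when
/// no existing operand already defines it.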
2253void MachineInstr::addRegisterDefined(Register Reg,
2254 const TargetRegisterInfo *RegInfo) {
2255 if (Reg.isPhysical()) {
2256 MachineOperand *MO = findRegisterDefOperand(Reg, TRI: RegInfo, isDead: false, Overlap: false);
2257 if (MO)
2258 return;
2259 } else {
2260 for (const MachineOperand &MO : all_defs()) {
2261 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2262 return;
2263 }
2264 }
2265 addOperand(Op: MachineOperand::CreateReg(Reg,
2266 isDef: true /*IsDef*/,
2267 isImp: true /*IsImp*/));
2268}
2269
2270void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2271 const TargetRegisterInfo &TRI) {
2272 bool HasRegMask = false;
2273 for (MachineOperand &MO : operands()) {
2274 if (MO.isRegMask()) {
2275 HasRegMask = true;
2276 continue;
2277 }
2278 if (!MO.isReg() || !MO.isDef()) continue;
2279 Register Reg = MO.getReg();
2280 if (!Reg.isPhysical())
2281 continue;
2282 // If there are no uses, including partial uses, the def is dead.
2283 if (llvm::none_of(Range&: UsedRegs,
2284 P: [&](MCRegister Use) { return TRI.regsOverlap(RegA: Use, RegB: Reg); }))
2285 MO.setIsDead();
2286 }
2287
2288 // This is a call with a register mask operand.
2289 // Mask clobbers are always dead, so add defs for the non-dead defines.
2290 if (HasRegMask)
2291 for (const Register &UsedReg : UsedRegs)
2292 addRegisterDefined(Reg: UsedReg, RegInfo: &TRI);
2293}
2294
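/// Hash a MachineInstr by its opcode and operands, skipping virtual register
/// defs so that otherwise-identical instructions with different result
/// registers hash alike.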
2295unsigned
2296MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
2297 // Build up a buffer of hash code components.
2298 SmallVector<size_t, 16> HashComponents;
2299 HashComponents.reserve(N: MI->getNumOperands() + 1);
2300 HashComponents.push_back(Elt: MI->getOpcode());
2301 for (const MachineOperand &MO : MI->operands()) {
2302 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2303 continue; // Skip virtual register defs.
2304
2305 HashComponents.push_back(Elt: hash_value(MO));
2306 }
2307 return hash_combine_range(R&: HashComponents);
2308}
2309
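/// Scan operands from the end for a metadata node whose first operand is a
/// ConstantInt; this is how inline asm carries its !srcloc cookie. Returns
/// null if no such node is found.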
2310const MDNode *MachineInstr::getLocCookieMD() const {
2311 // Find the source location cookie.
2312 const MDNode *LocMD = nullptr;
2313 for (unsigned i = getNumOperands(); i != 0; --i) {
2314 if (getOperand(i: i-1).isMetadata() &&
2315 (LocMD = getOperand(i: i-1).getMetadata()) &&
2316 LocMD->getNumOperands() != 0) {
2317 if (mdconst::hasa<ConstantInt>(MD: LocMD->getOperand(I: 0)))
2318 return LocMD;
2319 }
2320 }
2321
2322 return nullptr;
2323}
2324
2325void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2326 assert(isInlineAsm());
2327 const MDNode *LocMD = getLocCookieMD();
2328 uint64_t LocCookie =
2329 LocMD
2330 ? mdconst::extract<ConstantInt>(MD: LocMD->getOperand(I: 0))->getZExtValue()
2331 : 0;
2332 LLVMContext &Ctx = getMF()->getFunction().getContext();
2333 Ctx.diagnose(DI: DiagnosticInfoInlineAsm(LocCookie, Msg));
2334}
2335
2336void MachineInstr::emitGenericError(const Twine &Msg) const {
2337 const Function &Fn = getMF()->getFunction();
2338 Fn.getContext().diagnose(
2339 DI: DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));
2340}
2341
2342MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2343 const MCInstrDesc &MCID, bool IsIndirect,
2344 Register Reg, const MDNode *Variable,
2345 const MDNode *Expr) {
2346 assert(isa<DILocalVariable>(Variable) && "not a variable");
2347 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2348 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2349 "Expected inlined-at fields to agree");
2350 auto MIB = BuildMI(MF, MIMD: DL, MCID).addReg(RegNo: Reg);
2351 if (IsIndirect)
2352 MIB.addImm(Val: 0U);
2353 else
2354 MIB.addReg(RegNo: 0U);
2355 return MIB.addMetadata(MD: Variable).addMetadata(MD: Expr);
2356}
2357
2358MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2359 const MCInstrDesc &MCID, bool IsIndirect,
2360 ArrayRef<MachineOperand> DebugOps,
2361 const MDNode *Variable, const MDNode *Expr) {
2362 assert(isa<DILocalVariable>(Variable) && "not a variable");
2363 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2364 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2365 "Expected inlined-at fields to agree");
2366 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2367 assert(DebugOps.size() == 1 &&
2368 "DBG_VALUE must contain exactly one debug operand");
2369 MachineOperand DebugOp = DebugOps[0];
2370 if (DebugOp.isReg())
2371 return BuildMI(MF, DL, MCID, IsIndirect, Reg: DebugOp.getReg(), Variable,
2372 Expr);
2373
2374 auto MIB = BuildMI(MF, MIMD: DL, MCID).add(MO: DebugOp);
2375 if (IsIndirect)
2376 MIB.addImm(Val: 0U);
2377 else
2378 MIB.addReg(RegNo: 0U);
2379 return MIB.addMetadata(MD: Variable).addMetadata(MD: Expr);
2380 }
2381
2382 auto MIB = BuildMI(MF, MIMD: DL, MCID);
2383 MIB.addMetadata(MD: Variable).addMetadata(MD: Expr);
2384 for (const MachineOperand &DebugOp : DebugOps)
2385 if (DebugOp.isReg())
2386 MIB.addReg(RegNo: DebugOp.getReg());
2387 else
2388 MIB.add(MO: DebugOp);
2389 return MIB;
2390}
2391
2392MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2393 MachineBasicBlock::iterator I,
2394 const DebugLoc &DL, const MCInstrDesc &MCID,
2395 bool IsIndirect, Register Reg,
2396 const MDNode *Variable, const MDNode *Expr) {
2397 MachineFunction &MF = *BB.getParent();
2398 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2399 BB.insert(I, MI);
2400 return MachineInstrBuilder(MF, MI);
2401}
2402
2403MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2404 MachineBasicBlock::iterator I,
2405 const DebugLoc &DL, const MCInstrDesc &MCID,
2406 bool IsIndirect,
2407 ArrayRef<MachineOperand> DebugOps,
2408 const MDNode *Variable, const MDNode *Expr) {
2409 MachineFunction &MF = *BB.getParent();
2410 MachineInstr *MI =
2411 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2412 BB.insert(I, MI);
2413 return MachineInstrBuilder(MF, *MI);
2414}
2415
2416/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2417/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2418static const DIExpression *computeExprForSpill(
2419 const MachineInstr &MI,
2420 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2421 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2422 "Expected inlined-at fields to agree");
2423
2424 const DIExpression *Expr = MI.getDebugExpression();
2425 if (MI.isIndirectDebugValue()) {
2426 assert(MI.getDebugOffset().getImm() == 0 &&
2427 "DBG_VALUE with nonzero offset");
2428 Expr = DIExpression::prepend(Expr, Flags: DIExpression::DerefBefore);
2429 } else if (MI.isDebugValueList()) {
2430 // We will replace the spilled register with a frame index, so
2431 // immediately deref all references to the spilled register.
    std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2433 for (const MachineOperand *Op : SpilledOperands) {
2434 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2435 Expr = DIExpression::appendOpsToArg(Expr, Ops, ArgNo: OpIdx);
2436 }
2437 }
2438 return Expr;
2439}
2440static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2441 Register SpillReg) {
2442 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2443 SmallVector<const MachineOperand *> SpillOperands(
2444 llvm::make_pointer_range(Range: MI.getDebugOperandsForReg(Reg: SpillReg)));
2445 return computeExprForSpill(MI, SpilledOperands: SpillOperands);
2446}
2447
2448MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2449 MachineBasicBlock::iterator I,
2450 const MachineInstr &Orig,
2451 int FrameIndex, Register SpillReg) {
2452 assert(!Orig.isDebugRef() &&
2453 "DBG_INSTR_REF should not reference a virtual register.");
2454 const DIExpression *Expr = computeExprForSpill(MI: Orig, SpillReg);
2455 MachineInstrBuilder NewMI =
2456 BuildMI(BB, I, MIMD: Orig.getDebugLoc(), MCID: Orig.getDesc());
2457 // Non-Variadic Operands: Location, Offset, Variable, Expression
2458 // Variadic Operands: Variable, Expression, Locations...
2459 if (Orig.isNonListDebugValue())
2460 NewMI.addFrameIndex(Idx: FrameIndex).addImm(Val: 0U);
2461 NewMI.addMetadata(MD: Orig.getDebugVariable()).addMetadata(MD: Expr);
2462 if (Orig.isDebugValueList()) {
2463 for (const MachineOperand &Op : Orig.debug_operands())
2464 if (Op.isReg() && Op.getReg() == SpillReg)
2465 NewMI.addFrameIndex(Idx: FrameIndex);
2466 else
2467 NewMI.add(MO: MachineOperand(Op));
2468 }
2469 return NewMI;
2470}
2471MachineInstr *llvm::buildDbgValueForSpill(
2472 MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2473 const MachineInstr &Orig, int FrameIndex,
2474 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2475 const DIExpression *Expr = computeExprForSpill(MI: Orig, SpilledOperands);
2476 MachineInstrBuilder NewMI =
2477 BuildMI(BB, I, MIMD: Orig.getDebugLoc(), MCID: Orig.getDesc());
2478 // Non-Variadic Operands: Location, Offset, Variable, Expression
2479 // Variadic Operands: Variable, Expression, Locations...
2480 if (Orig.isNonListDebugValue())
2481 NewMI.addFrameIndex(Idx: FrameIndex).addImm(Val: 0U);
2482 NewMI.addMetadata(MD: Orig.getDebugVariable()).addMetadata(MD: Expr);
2483 if (Orig.isDebugValueList()) {
2484 for (const MachineOperand &Op : Orig.debug_operands())
2485 if (is_contained(Range: SpilledOperands, Element: &Op))
2486 NewMI.addFrameIndex(Idx: FrameIndex);
2487 else
2488 NewMI.add(MO: MachineOperand(Op));
2489 }
2490 return NewMI;
2491}
2492
2493void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2494 Register Reg) {
2495 const DIExpression *Expr = computeExprForSpill(MI: Orig, SpillReg: Reg);
2496 if (Orig.isNonListDebugValue())
2497 Orig.getDebugOffset().ChangeToImmediate(ImmVal: 0U);
2498 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2499 Op.ChangeToFrameIndex(Idx: FrameIndex);
2500 Orig.getDebugExpressionOp().setMetadata(Expr);
2501}
2502
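/// Collect the DBG_VALUE instructions immediately following this one that
/// describe the register defined by operand 0.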
2503void MachineInstr::collectDebugValues(
2504 SmallVectorImpl<MachineInstr *> &DbgValues) {
2505 MachineInstr &MI = *this;
2506 if (!MI.getOperand(i: 0).isReg())
2507 return;
2508
  MachineBasicBlock::iterator DI = MI;
  ++DI;
2510 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2511 DI != DE; ++DI) {
2512 if (!DI->isDebugValue())
2513 return;
2514 if (DI->hasDebugOperandForReg(Reg: MI.getOperand(i: 0).getReg()))
2515 DbgValues.push_back(Elt: &*DI);
2516 }
2517}
2518
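/// Rewrite all DBG_VALUE users of this instruction's first def to refer to
/// \p Reg instead.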
2519void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2520 // Collect matching debug values.
2521 SmallVector<MachineInstr *, 2> DbgValues;
2522
2523 if (!getOperand(i: 0).isReg())
2524 return;
2525
2526 Register DefReg = getOperand(i: 0).getReg();
2527 auto *MRI = getRegInfo();
2528 for (auto &MO : MRI->use_operands(Reg: DefReg)) {
2529 auto *DI = MO.getParent();
2530 if (!DI->isDebugValue())
2531 continue;
2532 if (DI->hasDebugOperandForReg(Reg: DefReg)) {
2533 DbgValues.push_back(Elt: DI);
2534 }
2535 }
2536
2537 // Propagate Reg to debug value instructions.
2538 for (auto *DBI : DbgValues)
2539 for (MachineOperand &Op : DBI->getDebugOperandsForReg(Reg: DefReg))
2540 Op.setReg(Reg);
2541}
2542
2543using MMOList = SmallVector<const MachineMemOperand *, 2>;
2544
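/// Sum the sizes of the spill-slot accesses in \p Accesses. Returns an
/// imprecise size if any individual access size is unknown, and a precise
/// zero if none of the accesses touches a spill slot.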
2545static LocationSize getSpillSlotSize(const MMOList &Accesses,
2546 const MachineFrameInfo &MFI) {
2547 std::optional<TypeSize> Size;
2548 for (const auto *A : Accesses) {
2549 if (MFI.isSpillSlotObjectIndex(
2550 ObjectIdx: cast<FixedStackPseudoSourceValue>(Val: A->getPseudoValue())
2551 ->getFrameIndex())) {
2552 LocationSize S = A->getSize();
2553 if (!S.hasValue())
2554 return LocationSize::beforeOrAfterPointer();
2555 if (!Size)
2556 Size = S.getValue();
2557 else
2558 Size = *Size + S.getValue();
2559 }
2560 }
2561 if (!Size)
2562 return LocationSize::precise(Value: 0);
2563 return LocationSize::precise(Value: *Size);
2564}
2565
2566std::optional<LocationSize>
2567MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2568 int FI;
2569 if (TII->isStoreToStackSlotPostFE(MI: *this, FrameIndex&: FI)) {
2570 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2571 if (MFI.isSpillSlotObjectIndex(ObjectIdx: FI))
2572 return (*memoperands_begin())->getSize();
2573 }
2574 return std::nullopt;
2575}
2576
2577std::optional<LocationSize>
2578MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2579 MMOList Accesses;
2580 if (TII->hasStoreToStackSlot(MI: *this, Accesses))
2581 return getSpillSlotSize(Accesses, MFI: getMF()->getFrameInfo());
2582 return std::nullopt;
2583}
2584
2585std::optional<LocationSize>
2586MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2587 int FI;
2588 if (TII->isLoadFromStackSlotPostFE(MI: *this, FrameIndex&: FI)) {
2589 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2590 if (MFI.isSpillSlotObjectIndex(ObjectIdx: FI))
2591 return (*memoperands_begin())->getSize();
2592 }
2593 return std::nullopt;
2594}
2595
2596std::optional<LocationSize>
2597MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2598 MMOList Accesses;
2599 if (TII->hasLoadFromStackSlot(MI: *this, Accesses))
2600 return getSpillSlotSize(Accesses, MFI: getMF()->getFrameInfo());
2601 return std::nullopt;
2602}
2603
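/// Lazily assign and return this instruction's unique number for
/// instruction-referencing debug info.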
2604unsigned MachineInstr::getDebugInstrNum() {
2605 if (DebugInstrNum == 0)
2606 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2607 return DebugInstrNum;
2608}
2609
2610unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2611 if (DebugInstrNum == 0)
2612 DebugInstrNum = MF.getNewDebugInstrNum();
2613 return DebugInstrNum;
2614}
2615
2616std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2617 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2618 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()));
2619}
2620
2621std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2622 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2623 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()),
2624 getRegInfo()->getType(Reg: getOperand(i: 2).getReg()));
2625}
2626
2627std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2628 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2629 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()),
2630 getRegInfo()->getType(Reg: getOperand(i: 2).getReg()),
2631 getRegInfo()->getType(Reg: getOperand(i: 3).getReg()));
2632}
2633
2634std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2635 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2636 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()),
2637 getRegInfo()->getType(Reg: getOperand(i: 2).getReg()),
2638 getRegInfo()->getType(Reg: getOperand(i: 3).getReg()),
2639 getRegInfo()->getType(Reg: getOperand(i: 4).getReg()));
2640}
2641
2642std::tuple<Register, LLT, Register, LLT>
2643MachineInstr::getFirst2RegLLTs() const {
2644 Register Reg0 = getOperand(i: 0).getReg();
2645 Register Reg1 = getOperand(i: 1).getReg();
2646 return std::tuple(Reg0, getRegInfo()->getType(Reg: Reg0), Reg1,
2647 getRegInfo()->getType(Reg: Reg1));
2648}
2649
2650std::tuple<Register, LLT, Register, LLT, Register, LLT>
2651MachineInstr::getFirst3RegLLTs() const {
2652 Register Reg0 = getOperand(i: 0).getReg();
2653 Register Reg1 = getOperand(i: 1).getReg();
2654 Register Reg2 = getOperand(i: 2).getReg();
2655 return std::tuple(Reg0, getRegInfo()->getType(Reg: Reg0), Reg1,
2656 getRegInfo()->getType(Reg: Reg1), Reg2,
2657 getRegInfo()->getType(Reg: Reg2));
2658}
2659
2660std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2661MachineInstr::getFirst4RegLLTs() const {
2662 Register Reg0 = getOperand(i: 0).getReg();
2663 Register Reg1 = getOperand(i: 1).getReg();
2664 Register Reg2 = getOperand(i: 2).getReg();
2665 Register Reg3 = getOperand(i: 3).getReg();
2666 return std::tuple(
2667 Reg0, getRegInfo()->getType(Reg: Reg0), Reg1, getRegInfo()->getType(Reg: Reg1),
2668 Reg2, getRegInfo()->getType(Reg: Reg2), Reg3, getRegInfo()->getType(Reg: Reg3));
2669}
2670
2671std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2672 LLT>
2673MachineInstr::getFirst5RegLLTs() const {
2674 Register Reg0 = getOperand(i: 0).getReg();
2675 Register Reg1 = getOperand(i: 1).getReg();
2676 Register Reg2 = getOperand(i: 2).getReg();
2677 Register Reg3 = getOperand(i: 3).getReg();
2678 Register Reg4 = getOperand(i: 4).getReg();
2679 return std::tuple(
2680 Reg0, getRegInfo()->getType(Reg: Reg0), Reg1, getRegInfo()->getType(Reg: Reg1),
2681 Reg2, getRegInfo()->getType(Reg: Reg2), Reg3, getRegInfo()->getType(Reg: Reg3),
2682 Reg4, getRegInfo()->getType(Reg: Reg4));
2683}
2684
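/// Insert \p Ops before \p InsertBefore, temporarily untying any tied
/// operands and re-tying them at their shifted indices afterwards.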
2685void MachineInstr::insert(mop_iterator InsertBefore,
2686 ArrayRef<MachineOperand> Ops) {
2687 assert(InsertBefore != nullptr && "invalid iterator");
2688 assert(InsertBefore->getParent() == this &&
2689 "iterator points to operand of other inst");
2690 if (Ops.empty())
2691 return;
2692
2693 // Do one pass to untie operands.
2694 SmallDenseMap<unsigned, unsigned> TiedOpIndices;
2695 for (const MachineOperand &MO : operands()) {
2696 if (MO.isReg() && MO.isTied()) {
2697 unsigned OpNo = getOperandNo(I: &MO);
2698 unsigned TiedTo = findTiedOperandIdx(OpIdx: OpNo);
2699 TiedOpIndices[OpNo] = TiedTo;
2700 untieRegOperand(OpIdx: OpNo);
2701 }
2702 }
2703
2704 unsigned OpIdx = getOperandNo(I: InsertBefore);
2705 unsigned NumOperands = getNumOperands();
2706 unsigned OpsToMove = NumOperands - OpIdx;
2707
2708 SmallVector<MachineOperand> MovingOps;
2709 MovingOps.reserve(N: OpsToMove);
2710
2711 for (unsigned I = 0; I < OpsToMove; ++I) {
2712 MovingOps.emplace_back(Args&: getOperand(i: OpIdx));
2713 removeOperand(OpNo: OpIdx);
2714 }
2715 for (const MachineOperand &MO : Ops)
2716 addOperand(Op: MO);
2717 for (const MachineOperand &OpMoved : MovingOps)
2718 addOperand(Op: OpMoved);
2719
2720 // Re-tie operands.
2721 for (auto [Tie1, Tie2] : TiedOpIndices) {
2722 if (Tie1 >= OpIdx)
2723 Tie1 += Ops.size();
2724 if (Tie2 >= OpIdx)
2725 Tie2 += Ops.size();
2726 tieOperands(DefIdx: Tie1, UseIdx: Tie2);
2727 }
2728}
2729
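/// Return true if the inline asm register operand at \p OpId is marked as
/// foldable in the operand-descriptor flag word that precedes it.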
2730bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2731 assert(OpId && "expected non-zero operand id");
2732 assert(isInlineAsm() && "should only be used on inline asm");
2733
2734 if (!getOperand(i: OpId).isReg())
2735 return false;
2736
2737 const MachineOperand &MD = getOperand(i: OpId - 1);
2738 if (!MD.isImm())
2739 return false;
2740
2741 InlineAsm::Flag F(MD.getImm());
2742 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2743 return F.getRegMayBeFolded();
2744 return false;
2745}
2746