//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Methods common to all machine instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

using namespace llvm;

static cl::opt<bool>
    PrintMIAddrs("print-mi-addrs", cl::Hidden,
                 cl::desc("Print addresses of MachineInstrs when dumping"));

static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
  if (const MachineBasicBlock *MBB = MI.getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF;
  return nullptr;
}

// Try to crawl up to the machine function and get TRI/MRI/TII from it.
static void tryToGetTargetInfo(const MachineInstr &MI,
                               const TargetRegisterInfo *&TRI,
                               const MachineRegisterInfo *&MRI,
                               const TargetInstrInfo *&TII) {
  if (const MachineFunction *MF = getMFIfAvailable(MI)) {
    TRI = MF->getSubtarget().getRegisterInfo();
    MRI = &MF->getRegInfo();
    TII = MF->getSubtarget().getInstrInfo();
  }
}

void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
  for (MCPhysReg ImpDef : MCID->implicit_defs())
    addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
  for (MCPhysReg ImpUse : MCID->implicit_uses())
    addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
}

/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified
/// by the MCInstrDesc.
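//
// Illustrative sketch (not part of the original file): clients normally do
// not call this constructor directly; they go through
// MachineFunction::CreateMachineInstr or the BuildMI helpers. Assuming a
// MachineFunction &MF, a TargetInstrInfo *TII, a DebugLoc DL, and registers
// DstReg/SrcReg:
//
//   MachineInstr *MI =
//       MF.CreateMachineInstr(TII->get(TargetOpcode::COPY), DL);
//   // More commonly, build and insert in one step:
//   // BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::COPY), DstReg)
//   //     .addReg(SrcReg);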
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
                           DebugLoc DL, bool NoImp)
    : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      Opcode(TID.Opcode), DebugInstrNum(0), DbgLoc(std::move(DL)) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands.
  if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
                        MCID->implicit_uses().size()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  if (!NoImp)
    addImplicitDefUseOperands(MF);
}

/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Does not copy the number from debug instruction numbering, to preserve
/// uniqueness.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      Opcode(MI.getOpcode()), DebugInstrNum(0), Info(MI.Info),
      DbgLoc(MI.getDebugLoc()) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Replicate ties between the operands, which addOperand was not
  // able to do reliably.
  for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
    MachineOperand &NewMO = getOperand(i);
    const MachineOperand &OrigMO = MI.getOperand(i);
    NewMO.TiedTo = OrigMO.TiedTo;
  }

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}

void MachineInstr::setDesc(const MCInstrDesc &TID) {
  if (getParent())
    getMF()->handleChangeDesc(*this, TID);
  MCID = &TID;
  Opcode = TID.Opcode;
}

void MachineInstr::moveBefore(MachineInstr *MovePos) {
  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
}

/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
  if (MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

const MachineRegisterInfo *MachineInstr::getRegInfo() const {
  if (const MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.removeRegOperandFromUseList(&MO);
}

void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.addRegOperandToUseList(&MO);
}

void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  addOperand(*MF, Op);
}

/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);
  // MachineOperand is a trivially copyable type so we can just use memmove.
  assert(Dst && Src && "Unknown operands");
  std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}

/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
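//
// Illustrative sketch (not part of the original file): because explicit
// operands are inserted before the implicit block, adding an explicit use
// after an implicit one keeps the implicit operand last. Assuming a
// MachineFunction &MF, a MachineInstr *MI, a virtual register VReg, and a
// physical register PhysReg:
//
//   MI->addOperand(MF, MachineOperand::CreateReg(PhysReg, /*isDef=*/false,
//                                                /*isImp=*/true));
//   MI->addOperand(MF, MachineOperand::CreateReg(VReg, /*isDef=*/false));
//   // Operand order is now: ...explicit operands..., VReg, PhysReg.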
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
         "Cannot add more operands.");
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand. Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around. See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

  // OpNo now points to the desired insertion point. Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
    // Ensure debug instructions set debug flag on register uses.
    if (NewMO->isUse() && isDebugInstr())
      NewMO->setIsDebug();
  }
}

void MachineInstr::removeOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}

void MachineInstr::setExtraInfo(MachineFunction &MF,
                                ArrayRef<MachineMemOperand *> MMOs,
                                MCSymbol *PreInstrSymbol,
                                MCSymbol *PostInstrSymbol,
                                MDNode *HeapAllocMarker, MDNode *PCSections,
                                uint32_t CFIType, MDNode *MMRAs, Value *DS) {
  bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
  bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
  bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
  bool HasPCSections = PCSections != nullptr;
  bool HasCFIType = CFIType != 0;
  bool HasMMRAs = MMRAs != nullptr;
  bool HasDS = DS != nullptr;
  int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
                    HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs +
                    HasDS;

  // Drop all extra info if there is none.
  if (NumPointers <= 0) {
    Info.clear();
    return;
  }

  // If more than one pointer, then store out of line. Store heap alloc markers
  // out of line because PointerSumType cannot hold more than 4 tag types with
  // 32-bit pointers.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
           HasCFIType || HasDS) {
    Info.set<EIIK_OutOfLine>(
        MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
                             HeapAllocMarker, PCSections, CFIType, MMRAs, DS));
    return;
  }

  // Otherwise store the single pointer inline.
  if (HasPreInstrSymbol)
    Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
  else if (HasPostInstrSymbol)
    Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
  else
    Info.set<EIIK_MMO>(MMOs[0]);
}

void MachineInstr::dropMemRefs(MachineFunction &MF) {
  if (memoperands_empty())
    return;

  setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata(), getDeactivationSymbol());
}

void MachineInstr::setMemRefs(MachineFunction &MF,
                              ArrayRef<MachineMemOperand *> MMOs) {
  if (MMOs.empty()) {
    dropMemRefs(MF);
    return;
  }

  setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata(), getDeactivationSymbol());
}

void MachineInstr::addMemOperand(MachineFunction &MF,
                                 MachineMemOperand *MO) {
  SmallVector<MachineMemOperand *, 2> MMOs;
  MMOs.append(memoperands_begin(), memoperands_end());
  MMOs.push_back(MO);
  setMemRefs(MF, MMOs);
}

void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning memory references!");
  // See if we can just steal the extra info already allocated for the
  // instruction. We can do this whenever the pre- and post-instruction symbols
  // are the same (including null).
  if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
      getPostInstrSymbol() == MI.getPostInstrSymbol() &&
      getHeapAllocMarker() == MI.getHeapAllocMarker() &&
      getPCSections() == MI.getPCSections() &&
      getMMRAMetadata() == MI.getMMRAMetadata()) {
    Info = MI.Info;
    return;
  }

  // Otherwise, fall back on a copy-based clone.
  setMemRefs(MF, MI.memoperands());
}

/// Check to see if the MMOs pointed to by the two MemRefs arrays are
/// identical.
static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
                             ArrayRef<MachineMemOperand *> RHS) {
  if (LHS.size() != RHS.size())
    return false;

  auto LHSPointees = make_pointee_range(LHS);
  auto RHSPointees = make_pointee_range(RHS);
  return std::equal(LHSPointees.begin(), LHSPointees.end(),
                    RHSPointees.begin());
}

void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the only
  // way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}

void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  // Do nothing if old and new symbols are the same.
  if (Symbol == getPreInstrSymbol())
    return;

  // If there was only one symbol and we're removing it, just clear info.
  if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
    Info.clear();
    return;
  }

  setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata(), getDeactivationSymbol());
}

void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  // Do nothing if old and new symbols are the same.
  if (Symbol == getPostInstrSymbol())
    return;

  // If there was only one symbol and we're removing it, just clear info.
  if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
    Info.clear();
    return;
  }

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata(), getDeactivationSymbol());
}

void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
  // Do nothing if old and new markers are the same.
  if (Marker == getHeapAllocMarker())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               Marker, getPCSections(), getCFIType(), getMMRAMetadata(),
               getDeactivationSymbol());
}

void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
  // Do nothing if old and new metadata are the same.
  if (PCSections == getPCSections())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), PCSections, getCFIType(),
               getMMRAMetadata(), getDeactivationSymbol());
}

void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
  // Do nothing if old and new types are the same.
  if (Type == getCFIType())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata(),
               getDeactivationSymbol());
}

void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) {
  // Do nothing if old and new metadata are the same.
  if (MMRAs == getMMRAMetadata())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs,
               getDeactivationSymbol());
}

void MachineInstr::setDeactivationSymbol(MachineFunction &MF, Value *DS) {
  // Do nothing if old and new symbols are the same.
  if (DS == getDeactivationSymbol())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata(), DS);
}

void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
                                     const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning instruction symbols!");

  setPreInstrSymbol(MF, MI.getPreInstrSymbol());
  setPostInstrSymbol(MF, MI.getPostInstrSymbol());
  setHeapAllocMarker(MF, MI.getHeapAllocMarker());
  setPCSections(MF, MI.getPCSections());
  setMMRAMetadata(MF, MI.getMMRAMetadata());
}

uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
  // For now, just return the union of the flags. If the flags get more
  // complicated over time, we might need more logic here.
  return getFlags() | Other.getFlags();
}

uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
  uint32_t MIFlags = 0;
  // Copy the wrapping flags.
  if (const OverflowingBinaryOperator *OB =
          dyn_cast<OverflowingBinaryOperator>(&I)) {
    if (OB->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (OB->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
    if (TI->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (TI->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (GEP->hasNoUnsignedSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUSWrap;
    if (GEP->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
    if (GEP->isInBounds())
      MIFlags |= MachineInstr::MIFlag::InBounds;
  }

  // Copy the nonneg flag.
  if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
    if (PNI->hasNonNeg())
      MIFlags |= MachineInstr::MIFlag::NonNeg;
    // Copy the disjoint flag.
  } else if (const PossiblyDisjointInst *PD =
                 dyn_cast<PossiblyDisjointInst>(&I)) {
    if (PD->isDisjoint())
      MIFlags |= MachineInstr::MIFlag::Disjoint;
  }

  // Copy the samesign flag.
  if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
    if (ICmp->hasSameSign())
      MIFlags |= MachineInstr::MIFlag::SameSign;

  // Copy the exact flag.
  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
    if (PE->isExact())
      MIFlags |= MachineInstr::MIFlag::IsExact;

  // Copy the fast-math flags.
  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
    const FastMathFlags Flags = FP->getFastMathFlags();
    if (Flags.noNaNs())
      MIFlags |= MachineInstr::MIFlag::FmNoNans;
    if (Flags.noInfs())
      MIFlags |= MachineInstr::MIFlag::FmNoInfs;
    if (Flags.noSignedZeros())
      MIFlags |= MachineInstr::MIFlag::FmNsz;
    if (Flags.allowReciprocal())
      MIFlags |= MachineInstr::MIFlag::FmArcp;
    if (Flags.allowContract())
      MIFlags |= MachineInstr::MIFlag::FmContract;
    if (Flags.approxFunc())
      MIFlags |= MachineInstr::MIFlag::FmAfn;
    if (Flags.allowReassoc())
      MIFlags |= MachineInstr::MIFlag::FmReassoc;
  }

  if (I.getMetadata(LLVMContext::MD_unpredictable))
    MIFlags |= MachineInstr::MIFlag::Unpredictable;

  return MIFlags;
}

void MachineInstr::copyIRFlags(const Instruction &I) {
  Flags = copyFlagsFromInstruction(I);
}

bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}

bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we have analyzed the last instruction inside at least one of
    // the bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, the machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  // If pre- or post-instruction symbols do not match then the two instructions
  // are not identical.
  if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
      getPostInstrSymbol() != Other.getPostInstrSymbol())
    return false;
  if (isCall()) {
    // Call instructions with different CFI types are not identical.
    if (getCFIType() != Other.getCFIType())
      return false;
    // Even if the call instructions have the same ops, they are not identical
    // if they are for different globals (this may happen with indirect calls).
    if (isCandidateForAdditionalCallInfo()) {
      MachineFunction::CalledGlobalInfo ThisCGI =
          getParent()->getParent()->tryGetCalledGlobal(this);
      MachineFunction::CalledGlobalInfo OtherCGI =
          Other.getParent()->getParent()->tryGetCalledGlobal(&Other);
      if (ThisCGI.Callee != OtherCGI.Callee ||
          ThisCGI.TargetFlags != OtherCGI.TargetFlags)
        return false;
    }
  }
  if (getDeactivationSymbol() != Other.getDeactivationSymbol())
    return false;

  return true;
}

bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
  if (!isDebugValueLike() || !Other.isDebugValueLike())
    return false;
  if (getDebugLoc() != Other.getDebugLoc())
    return false;
  if (getDebugVariable() != Other.getDebugVariable())
    return false;
  if (getNumDebugOperands() != Other.getNumDebugOperands())
    return false;
  for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
    if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
      return false;
  if (!DIExpression::isEqualExpression(
          getDebugExpression(), isIndirectDebugValue(),
          Other.getDebugExpression(), Other.isIndirectDebugValue()))
    return false;
  return true;
}

const MachineFunction *MachineInstr::getMF() const {
  return getParent()->getParent();
}

MachineInstr *MachineInstr::removeFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove(this);
}

MachineInstr *MachineInstr::removeFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove_instr(this);
}

void MachineInstr::eraseFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase(this);
}

void MachineInstr::eraseFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase_instr(this);
}

bool MachineInstr::isCandidateForAdditionalCallInfo(QueryType Type) const {
  if (!isCall(Type))
    return false;
  switch (getOpcode()) {
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STACKMAP:
  case TargetOpcode::STATEPOINT:
  case TargetOpcode::FENTRY_CALL:
    return false;
  }
  return true;
}

bool MachineInstr::shouldUpdateAdditionalCallInfo() const {
  if (isBundle())
    return isCandidateForAdditionalCallInfo(MachineInstr::AnyInBundle);
  return isCandidateForAdditionalCallInfo();
}

template <typename Operand, typename Instruction>
static iterator_range<
    filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
getDebugOperandsForRegHelper(Instruction *MI, Register Reg) {
  std::function<bool(Operand & Op)> OpUsesReg(
      [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
  return make_filter_range(MI->debug_operands(), OpUsesReg);
}

iterator_range<filter_iterator<const MachineOperand *,
                               std::function<bool(const MachineOperand &Op)>>>
MachineInstr::getDebugOperandsForReg(Register Reg) const {
  return getDebugOperandsForRegHelper<const MachineOperand,
                                      const MachineInstr>(this, Reg);
}

iterator_range<
    filter_iterator<MachineOperand *, std::function<bool(MachineOperand &Op)>>>
MachineInstr::getDebugOperandsForReg(Register Reg) {
  return getDebugOperandsForRegHelper<MachineOperand, MachineInstr>(this, Reg);
}

unsigned MachineInstr::getNumExplicitOperands() const {
  unsigned NumOperands = MCID->getNumOperands();
  if (!MCID->isVariadic())
    return NumOperands;

  for (const MachineOperand &MO : operands_impl().drop_front(NumOperands)) {
    // The operands must always be in the following order:
    // - explicit reg defs,
    // - other explicit operands (reg uses, immediates, etc.),
    // - implicit reg defs
    // - implicit reg uses
    if (MO.isReg() && MO.isImplicit())
      break;
    ++NumOperands;
  }
  return NumOperands;
}

unsigned MachineInstr::getNumExplicitDefs() const {
  unsigned NumDefs = MCID->getNumDefs();
  if (!MCID->isVariadic())
    return NumDefs;

  for (const MachineOperand &MO : operands_impl().drop_front(NumDefs)) {
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;
    ++NumDefs;
  }
  return NumDefs;
}

void MachineInstr::bundleWithPred() {
  assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
  setFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = getIterator();
  --Pred;
  assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->setFlag(BundledSucc);
}

void MachineInstr::bundleWithSucc() {
  assert(!isBundledWithSucc() && "MI is already bundled with its successor");
  setFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = getIterator();
  ++Succ;
  assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->setFlag(BundledPred);
}

void MachineInstr::unbundleFromPred() {
  assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
  clearFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = getIterator();
  --Pred;
  assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->clearFlag(BundledSucc);
}

void MachineInstr::unbundleFromSucc() {
  assert(isBundledWithSucc() && "MI isn't bundled with its successor");
  clearFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = getIterator();
  ++Succ;
  assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->clearFlag(BundledPred);
}

bool MachineInstr::isStackAligningInlineAsm() const {
  if (isInlineAsm()) {
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      return true;
  }
  return false;
}

InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}

int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}

const DILabel *MachineInstr::getDebugLabel() const {
  assert(isDebugLabel() && "not a DBG_LABEL");
  return cast<DILabel>(getOperand(0).getMetadata());
}

const MachineOperand &MachineInstr::getDebugVariableOp() const {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
  return getOperand(VariableOp);
}

MachineOperand &MachineInstr::getDebugVariableOp() {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
  return getOperand(VariableOp);
}

const DILocalVariable *MachineInstr::getDebugVariable() const {
  return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
}

const MachineOperand &MachineInstr::getDebugExpressionOp() const {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
  return getOperand(ExpressionOp);
}

MachineOperand &MachineInstr::getDebugExpressionOp() {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
  return getOperand(ExpressionOp);
}

const DIExpression *MachineInstr::getDebugExpression() const {
  return cast<DIExpression>(getDebugExpressionOp().getMetadata());
}

bool MachineInstr::isDebugEntryValue() const {
  return isDebugValue() && getDebugExpression()->isEntryValue();
}

const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
  unsigned RCID;
  if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
      F.hasRegClassConstraint(RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (F.isMemKind())
    return TRI->getPointerRegClass();

  return nullptr;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
    unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  assert(CurRC && "Invalid initial register class");
  // Check if Reg is constrained by some of its use/def from MI.
  const MachineOperand &MO = getOperand(OpIdx);
  if (!MO.isReg() || MO.getReg() != Reg)
    return CurRC;
  // If yes, accumulate the constraints through the operand.
  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}

/// Return the number of instructions inside the MI bundle, not counting the
/// header instruction.
unsigned MachineInstr::getBundleSize() const {
  MachineBasicBlock::const_instr_iterator I = getIterator();
  unsigned Size = 0;
  while (I->isBundledWithSucc()) {
    ++Size;
    ++I;
  }
  return Size;
}

/// Returns true if the MachineInstr has an implicit-use operand of exactly
/// the given register (not considering sub/super-registers).
bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
  for (const MachineOperand &MO : implicit_operands()) {
    if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
      return true;
  }
  return false;
}

/// findRegisterUseOperandIdx() - Returns the index of the operand that is a
/// use of the specified register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
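//
// Illustrative sketch (not part of the original file): a pass clearing a
// kill flag on a physical register use, assuming a MachineInstr *MI, a
// Register PhysReg, and a TargetRegisterInfo *TRI:
//
//   int UseIdx = MI->findRegisterUseOperandIdx(PhysReg, TRI, /*isKill=*/true);
//   if (UseIdx != -1)
//     MI->getOperand(UseIdx).setIsKill(false);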
int MachineInstr::findRegisterUseOperandIdx(Register Reg,
                                            const TargetRegisterInfo *TRI,
                                            bool isKill) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    Register MOReg = MO.getReg();
    if (!MOReg)
      continue;
    if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
      if (!isKill || MO.isKill())
        return i;
  }
  return -1;
}

/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(Register Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}

/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int MachineInstr::findRegisterDefOperandIdx(Register Reg,
                                            const TargetRegisterInfo *TRI,
                                            bool isDead, bool Overlap) const {
  bool isPhys = Reg.isPhysical();
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    if (!Found && TRI && isPhys && MOReg.isPhysical()) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}

/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
  // Don't call MCID.findFirstPredOperandIdx() because this variant
  // is sometimes called on an instruction that's not yet complete, and
  // so the number of operands is less than the MCID indicates. In
  // particular, the PTX target does this.
  const MCInstrDesc &MCID = getDesc();
  if (MCID.isPredicable()) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (MCID.operands()[i].isPredicate())
        return i;
  }

  return -1;
}

// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
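// Illustrative sketch (not part of the original file): for a two-address
// instruction "%0 = ADD %0, %1" whose def at index 0 is tied to the use at
// index 1, tieOperands(0, 1) stores each partner's index plus one:
//
//   MI.tieOperands(/*DefIdx=*/0, /*UseIdx=*/1);
//   // MI.getOperand(0).TiedTo == 2  (tied to operand 1)
//   // MI.getOperand(1).TiedTo == 1  (tied to operand 0)
//   assert(MI.findTiedOperandIdx(0) == 1 && MI.findTiedOperandIdx(1) == 0);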
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax) {
    UseMO.TiedTo = DefIdx + 1;
  } else {
    // Inline asm can use the group descriptors to find tied operands, and
    // statepoint tied operands are trivial to match (1-1 reg def with reg
    // use), but on normal instructions the tied def must be within the first
    // TiedMax operands.
    assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
           "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}

/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  if (getOpcode() == TargetOpcode::STATEPOINT) {
    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
    // on registers.
    StatepointOpers SO(this);
    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
    assert(CurUseIdx != -1U &&
           "only gc pointer statepoint operands can be tied");
    unsigned NumDefs = getNumDefs();
    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
      while (!getOperand(CurUseIdx).isReg())
        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
      if (OpIdx == CurDefIdx)
        return CurUseIdx;
      if (OpIdx == CurUseIdx)
        return CurDefIdx;
      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!F.isUseOperandTiedToDef(TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}

/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
  for (MachineOperand &MO : operands()) {
    if (MO.isReg() && MO.isUse())
      MO.setIsKill(false);
  }
}

void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
                                      unsigned SubIdx,
                                      const TargetRegisterInfo &RegInfo) {
  if (ToReg.isPhysical()) {
    if (SubIdx)
      ToReg = RegInfo.getSubReg(ToReg, SubIdx);
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substPhysReg(ToReg, RegInfo);
    }
  } else {
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substVirtReg(ToReg, SubIdx, RegInfo);
    }
  }
}

/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
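//
// Illustrative sketch (not part of the original file): callers typically
// thread SawStore through a forward scan of a block so that a load is never
// moved past an earlier store; "canHoist"/"hoist" below are hypothetical:
//
//   bool SawStore = false;
//   for (MachineInstr &MI : llvm::make_early_inc_range(MBB))
//     if (MI.isSafeToMove(SawStore) && canHoist(MI))
//       hoist(MI);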
bool MachineInstr::isSafeToMove(bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  // Don't touch instructions that have non-trivial invariants. For example,
  // terminators have to be at the end of a basic block.
  if (isPosition() || isDebugInstr() || isTerminator() ||
      isJumpTableDebugInfo())
    return false;

  // Don't touch instructions which can have non-load/store effects.
  //
  // Inline asm has a "sideeffect" marker to indicate whether the asm has
  // intentional side-effects. Even if an inline asm is not "sideeffect",
  // though, it still can't be speculatively executed: the operation might
  // not be valid on the current target, or for some combinations of operands.
  // (Some transforms that move an instruction don't speculatively execute it;
  // we currently don't try to handle that distinction here.)
  //
  // Other instructions handled here include those that can raise FP
  // exceptions, x86 "DIV" instructions which trap on divide by zero, and
  // stack adjustments.
  if (mayRaiseFPException() || hasProperty(MCID::UnmodeledSideEffects) ||
      isInlineAsm())
    return false;

  // See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and its intended
  // destination. The check for isInvariantLoad gives the target the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  if (mayLoad() && !isDereferenceableInvariantLoad())
    // Otherwise, this is a real load. If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}

bool MachineInstr::wouldBeTriviallyDead() const {
  // Don't delete frame allocation labels.
  // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
  if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // Don't delete FAKE_USE.
  // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
  if (isFakeUse())
    return false;

  // LIFETIME markers should be preserved.
  // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
  if (isLifetimeMarker())
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  return isPHI() || isSafeToMove(SawStore);
}

bool MachineInstr::isDead(const MachineRegisterInfo &MRI,
                          LiveRegUnits *LivePhysRegs) const {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const MachineOperand &MO : all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical()) {
      // Don't delete live physreg defs, or any reserved register defs.
      if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
        return false;
    } else {
      if (MO.isDead())
        continue;
      for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
        if (&Use != this)
          // This def has a non-debug use. Don't delete the instruction!
          return false;
      }
    }
  }

  // Technically speaking, inline asm without side effects and no defs can
  // still be deleted. But there is so much bad inline asm code out there, we
  // should let it be.
  if (isInlineAsm())
    return false;

  // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
  if (isLifetimeMarker())
    return true;

  // If there are no defs with uses, then we call the instruction dead so long
  // as we do not suspect it may have side effects.
  return wouldBeTriviallyDead();
}

1459static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI,
1460 BatchAAResults *AA, bool UseTBAA,
1461 const MachineMemOperand *MMOa,
1462 const MachineMemOperand *MMOb) {
  // The following interface to AA is fashioned after DAGCombiner::isAlias and
  // operates with MachineMemOperand offsets under some important assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineMemOperand offsets can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap checking.
  //   - These offsets never wrap and never step outside of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify the API to hide this math from the "user".
  //
  // Even before we go to AA, we can reason locally about some memory objects.
  // This can save compile time, and possibly catch some corner cases not
  // currently covered.
1475
1476 int64_t OffsetA = MMOa->getOffset();
1477 int64_t OffsetB = MMOb->getOffset();
1478 int64_t MinOffset = std::min(a: OffsetA, b: OffsetB);
1479
1480 LocationSize WidthA = MMOa->getSize();
1481 LocationSize WidthB = MMOb->getSize();
1482 bool KnownWidthA = WidthA.hasValue();
1483 bool KnownWidthB = WidthB.hasValue();
1484 bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
1485
1486 const Value *ValA = MMOa->getValue();
1487 const Value *ValB = MMOb->getValue();
1488 bool SameVal = (ValA && ValB && (ValA == ValB));
1489 if (!SameVal) {
1490 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1491 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1492 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1493 return false;
1494 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1495 return false;
1496 if (PSVa && PSVb && (PSVa == PSVb))
1497 SameVal = true;
1498 }
1499
1500 if (SameVal && BothMMONonScalable) {
1501 if (!KnownWidthA || !KnownWidthB)
1502 return true;
1503 int64_t MaxOffset = std::max(a: OffsetA, b: OffsetB);
1504 int64_t LowWidth = (MinOffset == OffsetA)
1505 ? WidthA.getValue().getKnownMinValue()
1506 : WidthB.getValue().getKnownMinValue();
1507 return (MinOffset + LowWidth > MaxOffset);
1508 }
1509
1510 if (!AA)
1511 return true;
1512
1513 if (!ValA || !ValB)
1514 return true;
1515
1516 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1517 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1518
  // If a scalable LocationSize has a non-zero offset, "Width + Offset" does
  // not currently work, so conservatively answer that the accesses may alias.
  if ((WidthA.isScalable() && OffsetA > 0) ||
      (WidthB.isScalable() && OffsetB > 0))
    return true;
1524
1525 int64_t OverlapA =
1526 KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
1527 : MemoryLocation::UnknownSize;
1528 int64_t OverlapB =
1529 KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
1530 : MemoryLocation::UnknownSize;
1531
1532 LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
1533 ? WidthA
1534 : LocationSize::precise(Value: OverlapA);
1535 LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
1536 ? WidthB
1537 : LocationSize::precise(Value: OverlapB);
1538
1539 return !AA->isNoAlias(
1540 LocA: MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1541 LocB: MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1542}
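
// Worked example for the same-value fast path above: two accesses off the
// same base value with (OffsetA, WidthA) = (8, 4) and (OffsetB, WidthB) =
// (0, 8) give MinOffset = 0, MaxOffset = 8, and LowWidth = 8; since
// 0 + 8 > 8 is false, the accesses are disjoint. With WidthB = 12 instead,
// 0 + 12 > 8 holds and the ranges overlap.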
1543
1544bool MachineInstr::mayAlias(BatchAAResults *AA, const MachineInstr &Other,
1545 bool UseTBAA) const {
1546 const MachineFunction *MF = getMF();
1547 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1548 const MachineFrameInfo &MFI = MF->getFrameInfo();
1549
  // Exclude call instructions, which may alter memory but cannot be handled
  // by this function.
1552 if (isCall() || Other.isCall())
1553 return true;
1554
1555 // If neither instruction stores to memory, they can't alias in any
1556 // meaningful way, even if they read from the same address.
1557 if (!mayStore() && !Other.mayStore())
1558 return false;
1559
1560 // Both instructions must be memory operations to be able to alias.
1561 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1562 return false;
1563
1564 // Let the target decide if memory accesses cannot possibly overlap.
1565 if (TII->areMemAccessesTriviallyDisjoint(MIa: *this, MIb: Other))
1566 return false;
1567
1568 // Memory operations without memory operands may access anything. Be
1569 // conservative and assume `MayAlias`.
1570 if (memoperands_empty() || Other.memoperands_empty())
1571 return true;
1572
1573 // Skip if there are too many memory operands.
1574 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1575 if (NumChecks > TII->getMemOperandAACheckLimit())
1576 return true;
1577
  // Check each pair of memory operands from both instructions. The two
  // instructions are known not to alias only if every such pair is known not
  // to alias.
1580 for (auto *MMOa : memoperands()) {
1581 for (auto *MMOb : Other.memoperands()) {
1582 if (!MMOa->isStore() && !MMOb->isStore())
1583 continue;
1584 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1585 return true;
1586 }
1587 }
1588
1589 return false;
1590}
1591
1592bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1593 bool UseTBAA) const {
1594 if (AA) {
1595 BatchAAResults BAA(*AA);
1596 return mayAlias(AA: &BAA, Other, UseTBAA);
1597 }
1598 return mayAlias(AA: static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1599}
1600
1601/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1602/// or volatile memory reference, or if the information describing the memory
1603/// reference is not available. Return false if it is known to have no ordered
1604/// memory references.
1605bool MachineInstr::hasOrderedMemoryRef() const {
1606 // An instruction known never to access memory won't have a volatile access.
1607 if (!mayStore() &&
1608 !mayLoad() &&
1609 !isCall() &&
1610 !hasUnmodeledSideEffects())
1611 return false;
1612
  // Otherwise, if the instruction has no memory reference information,
  // conservatively assume that the information was dropped and the reference
  // may be ordered.
1615 if (memoperands_empty())
1616 return true;
1617
1618 // Check if any of our memory operands are ordered.
1619 return llvm::any_of(Range: memoperands(), P: [](const MachineMemOperand *MMO) {
1620 return !MMO->isUnordered();
1621 });
1622}
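
// Example: load/store optimizers use this as a bail-out before merging or
// reordering accesses (a minimal sketch):
//
//   if (MI.hasOrderedMemoryRef())
//     return false; // Volatile/atomic access; don't merge or reorder it.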
1623
1624/// isDereferenceableInvariantLoad - Return true if this instruction will never
1625/// trap and is loading from a location whose value is invariant across a run of
1626/// this function.
1627bool MachineInstr::isDereferenceableInvariantLoad() const {
1628 // If the instruction doesn't load at all, it isn't an invariant load.
1629 if (!mayLoad())
1630 return false;
1631
1632 // If the instruction has lost its memoperands, conservatively assume that
1633 // it may not be an invariant load.
1634 if (memoperands_empty())
1635 return false;
1636
1637 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1638
1639 for (MachineMemOperand *MMO : memoperands()) {
    if (!MMO->isUnordered())
      // If the memory operand has ordering side effects, we can't move the
      // instruction. Such an instruction is technically an invariant load,
      // but the caller code would need to be updated to expect that.
      return false;
    if (MMO->isStore())
      return false;
1646 if (MMO->isInvariant() && MMO->isDereferenceable())
1647 continue;
1648
1649 // A load from a constant PseudoSourceValue is invariant.
1650 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1651 if (PSV->isConstant(&MFI))
1652 continue;
1653 }
1654
    // Otherwise, be conservative.
1656 return false;
1657 }
1658
1659 // Everything checks out.
1660 return true;
1661}
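
// Example: a constant-pool load is the canonical instruction accepted here.
// In printed MIR it would look something like (illustrative only):
//
//   %0:gpr64 = LDRXui %const.0, 0 :: (load (s64) from constant-pool)
//
// Such a load can be hoisted or sunk freely: the pool entry never changes
// and its address is always dereferenceable.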
1662
1663Register MachineInstr::isConstantValuePHI() const {
1664 if (!isPHI())
1665 return {};
1666 assert(getNumOperands() >= 3 &&
1667 "It's illegal to have a PHI without source operands");
1668
1669 Register Reg = getOperand(i: 1).getReg();
1670 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1671 if (getOperand(i).getReg() != Reg)
1672 return {};
1673 return Reg;
1674}
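
// Example: for a PHI whose incoming values are all the same register, e.g.
//
//   %2:gpr32 = PHI %0, %bb.1, %0, %bb.2
//
// isConstantValuePHI() returns %0; if any incoming register differed, it
// would return an invalid Register instead.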
1675
1676bool MachineInstr::hasUnmodeledSideEffects() const {
1677 if (hasProperty(MCFlag: MCID::UnmodeledSideEffects))
1678 return true;
1679 if (isInlineAsm()) {
1680 unsigned ExtraInfo = getOperand(i: InlineAsm::MIOp_ExtraInfo).getImm();
1681 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1682 return true;
1683 }
1684
1685 return false;
1686}
1687
1688bool MachineInstr::isLoadFoldBarrier() const {
1689 return mayStore() || isCall() ||
1690 (hasUnmodeledSideEffects() && !isPseudoProbe());
1691}
1692
1693/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1694///
1695bool MachineInstr::allDefsAreDead() const {
1696 for (const MachineOperand &MO : operands()) {
1697 if (!MO.isReg() || MO.isUse())
1698 continue;
1699 if (!MO.isDead())
1700 return false;
1701 }
1702 return true;
1703}
1704
1705bool MachineInstr::allImplicitDefsAreDead() const {
1706 for (const MachineOperand &MO : implicit_operands()) {
1707 if (!MO.isReg() || MO.isUse())
1708 continue;
1709 if (!MO.isDead())
1710 return false;
1711 }
1712 return true;
1713}
1714
1715/// copyImplicitOps - Copy implicit register operands from specified
1716/// instruction to this instruction.
1717void MachineInstr::copyImplicitOps(MachineFunction &MF,
1718 const MachineInstr &MI) {
1719 for (const MachineOperand &MO :
1720 llvm::drop_begin(RangeOrContainer: MI.operands(), N: MI.getDesc().getNumOperands()))
1721 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1722 addOperand(MF, Op: MO);
1723}
1724
1725bool MachineInstr::hasComplexRegisterTies() const {
1726 const MCInstrDesc &MCID = getDesc();
1727 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1728 return true;
1729 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1730 const auto &Operand = getOperand(i: I);
1731 if (!Operand.isReg() || Operand.isDef())
1732 // Ignore the defined registers as MCID marks only the uses as tied.
1733 continue;
1734 int ExpectedTiedIdx = MCID.getOperandConstraint(OpNum: I, Constraint: MCOI::TIED_TO);
1735 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(OpIdx: I)) : -1;
1736 if (ExpectedTiedIdx != TiedIdx)
1737 return true;
1738 }
1739 return false;
1740}
1741
1742LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1743 const MachineRegisterInfo &MRI) const {
1744 const MachineOperand &Op = getOperand(i: OpIdx);
1745 if (!Op.isReg())
1746 return LLT{};
1747
1748 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1749 return MRI.getType(Reg: Op.getReg());
1750
1751 auto &OpInfo = getDesc().operands()[OpIdx];
1752 if (!OpInfo.isGenericType())
1753 return MRI.getType(Reg: Op.getReg());
1754
1755 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1756 return LLT{};
1757
1758 LLT TypeToPrint = MRI.getType(Reg: Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached.
1761 if (TypeToPrint.isValid())
1762 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1763 return TypeToPrint;
1764}
1765
1766#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1767LLVM_DUMP_METHOD void MachineInstr::dump() const {
1768 dbgs() << " ";
1769 print(dbgs());
1770}
1771
1772LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1773 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1774 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1775 if (Depth >= MaxDepth)
1776 return;
1777 if (!AlreadySeenInstrs.insert(this).second)
1778 return;
1779 // PadToColumn always inserts at least one space.
1780 // Don't mess up the alignment if we don't want any space.
1781 if (Depth)
1782 fdbgs().PadToColumn(Depth * 2);
1783 print(fdbgs());
1784 for (const MachineOperand &MO : operands()) {
1785 if (!MO.isReg() || MO.isDef())
1786 continue;
1787 Register Reg = MO.getReg();
1788 if (Reg.isPhysical())
1789 continue;
1790 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1791 if (NewMI == nullptr)
1792 continue;
1793 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1794 }
1795}
1796
1797LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1798 unsigned MaxDepth) const {
1799 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1800 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1801}
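
// Example: from a debugger, something like `p MI->dumpr(MF->getRegInfo(), 3)`
// prints MI followed by the unique defining instructions of its virtual
// register uses, indented by depth and limited here to three levels.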
1802#endif
1803
1804void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1805 bool SkipDebugLoc, bool AddNewLine,
1806 const TargetInstrInfo *TII) const {
1807 const Module *M = nullptr;
1808 const Function *F = nullptr;
1809 if (const MachineFunction *MF = getMFIfAvailable(MI: *this)) {
1810 F = &MF->getFunction();
1811 M = F->getParent();
1812 if (!TII)
1813 TII = MF->getSubtarget().getInstrInfo();
1814 }
1815
1816 ModuleSlotTracker MST(M);
1817 if (F)
1818 MST.incorporateFunction(F: *F);
1819 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1820}
1821
1822void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1823 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1824 bool AddNewLine, const TargetInstrInfo *TII) const {
1825 // We can be a bit tidier if we know the MachineFunction.
1826 const TargetRegisterInfo *TRI = nullptr;
1827 const MachineRegisterInfo *MRI = nullptr;
1828 tryToGetTargetInfo(MI: *this, TRI, MRI, TII);
1829
1830 if (isCFIInstruction())
1831 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1832
1833 SmallBitVector PrintedTypes(8);
1834 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1835 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1836 if (!ShouldPrintRegisterTies)
1837 return 0U;
1838 const MachineOperand &MO = getOperand(i: OpIdx);
1839 if (MO.isReg() && MO.isTied() && !MO.isDef())
1840 return findTiedOperandIdx(OpIdx);
1841 return 0U;
1842 };
1843 unsigned StartOp = 0;
1844 unsigned e = getNumOperands();
1845
1846 // Print explicitly defined operands on the left of an assignment syntax.
1847 while (StartOp < e) {
1848 const MachineOperand &MO = getOperand(i: StartOp);
1849 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1850 break;
1851
1852 if (StartOp != 0)
1853 OS << ", ";
1854
1855 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: StartOp, PrintedTypes, MRI: *MRI) : LLT{};
1856 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1857 MO.print(os&: OS, MST, TypeToPrint, OpIdx: StartOp, /*PrintDef=*/false, IsStandalone,
1858 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1859 ++StartOp;
1860 }
1861
1862 if (StartOp != 0)
1863 OS << " = ";
1864
1865 if (getFlag(Flag: MachineInstr::FrameSetup))
1866 OS << "frame-setup ";
1867 if (getFlag(Flag: MachineInstr::FrameDestroy))
1868 OS << "frame-destroy ";
1869 if (getFlag(Flag: MachineInstr::FmNoNans))
1870 OS << "nnan ";
1871 if (getFlag(Flag: MachineInstr::FmNoInfs))
1872 OS << "ninf ";
1873 if (getFlag(Flag: MachineInstr::FmNsz))
1874 OS << "nsz ";
1875 if (getFlag(Flag: MachineInstr::FmArcp))
1876 OS << "arcp ";
1877 if (getFlag(Flag: MachineInstr::FmContract))
1878 OS << "contract ";
1879 if (getFlag(Flag: MachineInstr::FmAfn))
1880 OS << "afn ";
1881 if (getFlag(Flag: MachineInstr::FmReassoc))
1882 OS << "reassoc ";
1883 if (getFlag(Flag: MachineInstr::NoUWrap))
1884 OS << "nuw ";
1885 if (getFlag(Flag: MachineInstr::NoSWrap))
1886 OS << "nsw ";
1887 if (getFlag(Flag: MachineInstr::IsExact))
1888 OS << "exact ";
1889 if (getFlag(Flag: MachineInstr::NoFPExcept))
1890 OS << "nofpexcept ";
1891 if (getFlag(Flag: MachineInstr::NoMerge))
1892 OS << "nomerge ";
1893 if (getFlag(Flag: MachineInstr::NonNeg))
1894 OS << "nneg ";
1895 if (getFlag(Flag: MachineInstr::Disjoint))
1896 OS << "disjoint ";
1897 if (getFlag(Flag: MachineInstr::NoUSWrap))
1898 OS << "nusw ";
1899 if (getFlag(Flag: MachineInstr::SameSign))
1900 OS << "samesign ";
1901 if (getFlag(Flag: MachineInstr::InBounds))
1902 OS << "inbounds ";
1903
1904 // Print the opcode name.
1905 if (TII)
1906 OS << TII->getName(Opcode: getOpcode());
1907 else
1908 OS << "UNKNOWN";
1909
1910 if (SkipOpers)
1911 return;
1912
1913 // Print the rest of the operands.
1914 bool FirstOp = true;
1915 unsigned AsmDescOp = ~0u;
1916 unsigned AsmOpCount = 0;
1917
1918 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1919 // Print asm string.
1920 OS << " ";
1921 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1922 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, MRI: *MRI) : LLT{};
1923 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1924 getOperand(i: OpIdx).print(os&: OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true,
1925 IsStandalone, ShouldPrintRegisterTies,
1926 TiedOperandIdx, TRI);
1927
1928 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1929 unsigned ExtraInfo = getOperand(i: InlineAsm::MIOp_ExtraInfo).getImm();
1930 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1931 OS << " [sideeffect]";
1932 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1933 OS << " [mayload]";
1934 if (ExtraInfo & InlineAsm::Extra_MayStore)
1935 OS << " [maystore]";
1936 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1937 OS << " [isconvergent]";
1938 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1939 OS << " [alignstack]";
1940 if (ExtraInfo & InlineAsm::Extra_MayUnwind)
1941 OS << " [unwind]";
1942 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1943 OS << " [attdialect]";
1944 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1945 OS << " [inteldialect]";
1946
1947 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1948 FirstOp = false;
1949 }
1950
1951 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1952 const MachineOperand &MO = getOperand(i);
1953
1954 if (FirstOp) FirstOp = false; else OS << ",";
1955 OS << " ";
1956
1957 if (isDebugValueLike() && MO.isMetadata()) {
1958 // Pretty print DBG_VALUE* instructions.
1959 auto *DIV = dyn_cast<DILocalVariable>(Val: MO.getMetadata());
1960 if (DIV && !DIV->getName().empty())
1961 OS << "!\"" << DIV->getName() << '\"';
1962 else {
1963 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: i, PrintedTypes, MRI: *MRI) : LLT{};
1964 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1965 MO.print(os&: OS, MST, TypeToPrint, OpIdx: i, /*PrintDef=*/true, IsStandalone,
1966 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1967 }
1968 } else if (isDebugLabel() && MO.isMetadata()) {
1969 // Pretty print DBG_LABEL instructions.
1970 auto *DIL = dyn_cast<DILabel>(Val: MO.getMetadata());
1971 if (DIL && !DIL->getName().empty())
1972 OS << "\"" << DIL->getName() << '\"';
1973 else {
1974 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: i, PrintedTypes, MRI: *MRI) : LLT{};
1975 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1976 MO.print(os&: OS, MST, TypeToPrint, OpIdx: i, /*PrintDef=*/true, IsStandalone,
1977 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1978 }
1979 } else if (i == AsmDescOp && MO.isImm()) {
1980 // Pretty print the inline asm operand descriptor.
1981 OS << '$' << AsmOpCount++;
1982 unsigned Flag = MO.getImm();
1983 const InlineAsm::Flag F(Flag);
1984 OS << ":[";
1985 OS << F.getKindName();
1986
1987 unsigned RCID;
1988 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RC&: RCID)) {
1989 if (TRI) {
1990 OS << ':' << TRI->getRegClassName(Class: TRI->getRegClass(i: RCID));
1991 } else
1992 OS << ":RC" << RCID;
1993 }
1994
1995 if (F.isMemKind()) {
1996 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1997 OS << ":" << InlineAsm::getMemConstraintName(C: MCID);
1998 }
1999
2000 unsigned TiedTo;
2001 if (F.isUseOperandTiedToDef(Idx&: TiedTo))
2002 OS << " tiedto:$" << TiedTo;
2003
2004 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
2005 F.isRegUseKind()) &&
2006 F.getRegMayBeFolded()) {
2007 OS << " foldable";
2008 }
2009
2010 OS << ']';
2011
2012 // Compute the index of the next operand descriptor.
2013 AsmDescOp += 1 + F.getNumOperandRegisters();
2014 } else {
2015 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx: i, PrintedTypes, MRI: *MRI) : LLT{};
2016 unsigned TiedOperandIdx = getTiedOperandIdx(i);
2017 if (MO.isImm() && isOperandSubregIdx(OpIdx: i))
2018 MachineOperand::printSubRegIdx(OS, Index: MO.getImm(), TRI);
2019 else
2020 MO.print(os&: OS, MST, TypeToPrint, OpIdx: i, /*PrintDef=*/true, IsStandalone,
2021 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
2022 }
2023 }
2024
  // Print any optional symbols attached to this instruction as if they were
  // operands.
2027 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
2028 if (!FirstOp) {
2029 OS << ',';
2030 }
2031 OS << " pre-instr-symbol ";
2032 MachineOperand::printSymbol(OS, Sym&: *PreInstrSymbol);
2033 }
2034 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
2035 if (!FirstOp) {
2036 OS << ',';
2037 }
2038 OS << " post-instr-symbol ";
2039 MachineOperand::printSymbol(OS, Sym&: *PostInstrSymbol);
2040 }
2041 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
2042 if (!FirstOp) {
2043 OS << ',';
2044 }
2045 OS << " heap-alloc-marker ";
2046 HeapAllocMarker->printAsOperand(OS, MST);
2047 }
2048 if (MDNode *PCSections = getPCSections()) {
2049 if (!FirstOp) {
2050 OS << ',';
2051 }
2052 OS << " pcsections ";
2053 PCSections->printAsOperand(OS, MST);
2054 }
2055 if (MDNode *MMRA = getMMRAMetadata()) {
2056 if (!FirstOp) {
2057 OS << ',';
2058 }
2059 OS << " mmra ";
2060 MMRA->printAsOperand(OS, MST);
2061 }
2062 if (uint32_t CFIType = getCFIType()) {
2063 if (!FirstOp)
2064 OS << ',';
2065 OS << " cfi-type " << CFIType;
2066 }
2067 if (getDeactivationSymbol())
2068 OS << ", deactivation-symbol " << getDeactivationSymbol()->getName();
2069
2070 if (DebugInstrNum) {
2071 if (!FirstOp)
2072 OS << ",";
2073 OS << " debug-instr-number " << DebugInstrNum;
2074 }
2075
2076 if (!SkipDebugLoc) {
2077 if (const DebugLoc &DL = getDebugLoc()) {
2078 if (!FirstOp)
2079 OS << ',';
2080 OS << " debug-location ";
2081 DL->printAsOperand(OS, MST);
2082 }
2083 }
2084
2085 if (!memoperands_empty()) {
2086 SmallVector<StringRef, 0> SSNs;
2087 const LLVMContext *Context = nullptr;
2088 std::unique_ptr<LLVMContext> CtxPtr;
2089 const MachineFrameInfo *MFI = nullptr;
2090 if (const MachineFunction *MF = getMFIfAvailable(MI: *this)) {
2091 MFI = &MF->getFrameInfo();
2092 Context = &MF->getFunction().getContext();
2093 } else {
2094 CtxPtr = std::make_unique<LLVMContext>();
2095 Context = CtxPtr.get();
2096 }
2097
2098 OS << " :: ";
2099 bool NeedComma = false;
2100 for (const MachineMemOperand *Op : memoperands()) {
2101 if (NeedComma)
2102 OS << ", ";
2103 Op->print(OS, MST, SSNs, Context: *Context, MFI, TII);
2104 NeedComma = true;
2105 }
2106 }
2107
2108 if (SkipDebugLoc)
2109 return;
2110
2111 bool HaveSemi = false;
2112
2113 // Print debug location information.
2114 if (const DebugLoc &DL = getDebugLoc()) {
2115 if (!HaveSemi) {
2116 OS << ';';
2117 HaveSemi = true;
2118 }
2119 OS << ' ';
2120 DL.print(OS);
2121 }
2122
2123 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2124 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2125 (isDebugValueList() && getNumOperands() >= 2) ||
2126 (isDebugRef() && getNumOperands() >= 3)) {
2127 if (getDebugVariableOp().isMetadata()) {
2128 if (!HaveSemi) {
2129 OS << ";";
2130 HaveSemi = true;
2131 }
2132 auto *DV = getDebugVariable();
2133 OS << " line no:" << DV->getLine();
2134 if (isIndirectDebugValue())
2135 OS << " indirect";
2136 }
2137 }
2138 // TODO: DBG_LABEL
2139
2140 if (PrintMIAddrs)
2141 OS << " ; " << this;
2142
2143 if (AddNewLine)
2144 OS << '\n';
2145}
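
// Example output from the printer above (illustrative only):
//
//   %2:gpr32 = nsw ADDWrr %0:gpr32, %1:gpr32, debug-location !16
//
// Explicit defs appear left of the '=', flags such as "nsw" precede the
// opcode, and optional symbols, metadata, memory operands, and the debug
// location follow the operand list.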
2146
2147bool MachineInstr::addRegisterKilled(Register IncomingReg,
2148 const TargetRegisterInfo *RegInfo,
2149 bool AddIfNotFound) {
2150 bool isPhysReg = IncomingReg.isPhysical();
2151 bool hasAliases = isPhysReg &&
2152 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2153 bool Found = false;
2154 SmallVector<unsigned,4> DeadOps;
2155 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2156 MachineOperand &MO = getOperand(i);
2157 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2158 continue;
2159
2160 // DEBUG_VALUE nodes do not contribute to code generation and should
2161 // always be ignored. Failure to do so may result in trying to modify
2162 // KILL flags on DEBUG_VALUE nodes.
2163 if (MO.isDebug())
2164 continue;
2165
2166 Register Reg = MO.getReg();
2167 if (!Reg)
2168 continue;
2169
2170 if (Reg == IncomingReg) {
2171 if (!Found) {
2172 if (MO.isKill())
2173 // The register is already marked kill.
2174 return true;
2175 if (isPhysReg && isRegTiedToDefOperand(UseOpIdx: i))
2176 // Two-address uses of physregs must not be marked kill.
2177 return true;
2178 MO.setIsKill();
2179 Found = true;
2180 }
2181 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2182 // A super-register kill already exists.
2183 if (RegInfo->isSuperRegister(RegA: IncomingReg, RegB: Reg))
2184 return true;
2185 if (RegInfo->isSubRegister(RegA: IncomingReg, RegB: Reg))
2186 DeadOps.push_back(Elt: i);
2187 }
2188 }
2189
2190 // Trim unneeded kill operands.
2191 while (!DeadOps.empty()) {
2192 unsigned OpIdx = DeadOps.back();
2193 if (getOperand(i: OpIdx).isImplicit() &&
2194 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2195 removeOperand(OpNo: OpIdx);
2196 else
2197 getOperand(i: OpIdx).setIsKill(false);
2198 DeadOps.pop_back();
2199 }
2200
2201 // If not found, this means an alias of one of the operands is killed. Add a
2202 // new implicit operand if required.
2203 if (!Found && AddIfNotFound) {
2204 addOperand(Op: MachineOperand::CreateReg(Reg: IncomingReg,
2205 isDef: false /*IsDef*/,
2206 isImp: true /*IsImp*/,
2207 isKill: true /*IsKill*/));
2208 return true;
2209 }
2210 return Found;
2211}
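
// Example: after moving the last use of a physical register, a pass might
// re-establish its kill flag (a minimal sketch; SomePhysReg and TRI are
// placeholders):
//
//   MI.addRegisterKilled(SomePhysReg, TRI, /*AddIfNotFound=*/true);
//
// This either sets the kill flag on an existing use of SomePhysReg or
// appends an implicit killed use of it.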
2212
2213void MachineInstr::clearRegisterKills(Register Reg,
2214 const TargetRegisterInfo *RegInfo) {
2215 if (!Reg.isPhysical())
2216 RegInfo = nullptr;
2217 for (MachineOperand &MO : operands()) {
2218 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2219 continue;
2220 Register OpReg = MO.getReg();
2221 if ((RegInfo && RegInfo->regsOverlap(RegA: Reg, RegB: OpReg)) || Reg == OpReg)
2222 MO.setIsKill(false);
2223 }
2224}
2225
2226bool MachineInstr::addRegisterDead(Register Reg,
2227 const TargetRegisterInfo *RegInfo,
2228 bool AddIfNotFound) {
2229 bool isPhysReg = Reg.isPhysical();
2230 bool hasAliases = isPhysReg &&
2231 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2232 bool Found = false;
2233 SmallVector<unsigned,4> DeadOps;
2234 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2235 MachineOperand &MO = getOperand(i);
2236 if (!MO.isReg() || !MO.isDef())
2237 continue;
2238 Register MOReg = MO.getReg();
2239 if (!MOReg)
2240 continue;
2241
2242 if (MOReg == Reg) {
2243 MO.setIsDead();
2244 Found = true;
2245 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2246 // There exists a super-register that's marked dead.
2247 if (RegInfo->isSuperRegister(RegA: Reg, RegB: MOReg))
2248 return true;
2249 if (RegInfo->isSubRegister(RegA: Reg, RegB: MOReg))
2250 DeadOps.push_back(Elt: i);
2251 }
2252 }
2253
2254 // Trim unneeded dead operands.
2255 while (!DeadOps.empty()) {
2256 unsigned OpIdx = DeadOps.back();
2257 if (getOperand(i: OpIdx).isImplicit() &&
2258 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2259 removeOperand(OpNo: OpIdx);
2260 else
2261 getOperand(i: OpIdx).setIsDead(false);
2262 DeadOps.pop_back();
2263 }
2264
2265 // If not found, this means an alias of one of the operands is dead. Add a
2266 // new implicit operand if required.
2267 if (Found || !AddIfNotFound)
2268 return Found;
2269
2270 addOperand(Op: MachineOperand::CreateReg(Reg,
2271 isDef: true /*IsDef*/,
2272 isImp: true /*IsImp*/,
2273 isKill: false /*IsKill*/,
2274 isDead: true /*IsDead*/));
2275 return true;
2276}
2277
2278void MachineInstr::clearRegisterDeads(Register Reg) {
2279 for (MachineOperand &MO : all_defs())
2280 if (MO.getReg() == Reg)
2281 MO.setIsDead(false);
2282}
2283
2284void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2285 for (MachineOperand &MO : all_defs())
2286 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2287 MO.setIsUndef(IsUndef);
2288}
2289
2290void MachineInstr::addRegisterDefined(Register Reg,
2291 const TargetRegisterInfo *RegInfo) {
2292 if (Reg.isPhysical()) {
2293 MachineOperand *MO = findRegisterDefOperand(Reg, TRI: RegInfo, isDead: false, Overlap: false);
2294 if (MO)
2295 return;
2296 } else {
2297 for (const MachineOperand &MO : all_defs()) {
2298 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2299 return;
2300 }
2301 }
2302 addOperand(Op: MachineOperand::CreateReg(Reg,
2303 isDef: true /*IsDef*/,
2304 isImp: true /*IsImp*/));
2305}
2306
2307void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2308 const TargetRegisterInfo &TRI) {
2309 bool HasRegMask = false;
2310 for (MachineOperand &MO : operands()) {
2311 if (MO.isRegMask()) {
2312 HasRegMask = true;
2313 continue;
2314 }
2315 if (!MO.isReg() || !MO.isDef()) continue;
2316 Register Reg = MO.getReg();
2317 if (!Reg.isPhysical())
2318 continue;
2319 // If there are no uses, including partial uses, the def is dead.
2320 if (llvm::none_of(Range&: UsedRegs,
2321 P: [&](MCRegister Use) { return TRI.regsOverlap(RegA: Use, RegB: Reg); }))
2322 MO.setIsDead();
2323 }
2324
2325 // This is a call with a register mask operand.
2326 // Mask clobbers are always dead, so add defs for the non-dead defines.
2327 if (HasRegMask)
2328 for (const Register &UsedReg : UsedRegs)
2329 addRegisterDefined(Reg: UsedReg, RegInfo: &TRI);
2330}
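
// Example: fast-path call lowering uses this to mark every register defined
// by a call dead except the registers that actually carry return values (a
// minimal sketch; RetRegs is hypothetical):
//
//   CallMI->setPhysRegsDeadExcept(RetRegs, *TRI);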
2331
2332unsigned
2333MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
2334 // Build up a buffer of hash code components.
2335 SmallVector<size_t, 16> HashComponents;
2336 HashComponents.reserve(N: MI->getNumOperands() + 1);
2337 HashComponents.push_back(Elt: MI->getOpcode());
2338 for (const MachineOperand &MO : MI->operands()) {
2339 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2340 continue; // Skip virtual register defs.
2341
2342 HashComponents.push_back(Elt: hash_value(MO));
2343 }
2344 return hash_combine_range(R&: HashComponents);
2345}
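
// Example: MachineCSE keys its scoped hash tables on this trait. Because
// virtual register defs are skipped above, two instructions such as
//
//   %1:gpr32 = ADDWrr %0, %0
//   %5:gpr32 = ADDWrr %0, %0
//
// hash identically, letting the second be replaced by a reuse of the first.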
2346
2347const MDNode *MachineInstr::getLocCookieMD() const {
2348 // Find the source location cookie.
2349 const MDNode *LocMD = nullptr;
2350 for (unsigned i = getNumOperands(); i != 0; --i) {
2351 if (getOperand(i: i-1).isMetadata() &&
2352 (LocMD = getOperand(i: i-1).getMetadata()) &&
2353 LocMD->getNumOperands() != 0) {
2354 if (mdconst::hasa<ConstantInt>(MD: LocMD->getOperand(I: 0)))
2355 return LocMD;
2356 }
2357 }
2358
2359 return nullptr;
2360}
2361
2362void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2363 assert(isInlineAsm());
2364 const MDNode *LocMD = getLocCookieMD();
2365 uint64_t LocCookie =
2366 LocMD
2367 ? mdconst::extract<ConstantInt>(MD: LocMD->getOperand(I: 0))->getZExtValue()
2368 : 0;
2369 LLVMContext &Ctx = getMF()->getFunction().getContext();
2370 Ctx.diagnose(DI: DiagnosticInfoInlineAsm(LocCookie, Msg));
2371}
2372
2373void MachineInstr::emitGenericError(const Twine &Msg) const {
2374 const Function &Fn = getMF()->getFunction();
2375 Fn.getContext().diagnose(
2376 DI: DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));
2377}
2378
2379MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2380 const MCInstrDesc &MCID, bool IsIndirect,
2381 Register Reg, const MDNode *Variable,
2382 const MDNode *Expr) {
2383 assert(isa<DILocalVariable>(Variable) && "not a variable");
2384 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2385 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2386 "Expected inlined-at fields to agree");
2387 auto MIB = BuildMI(MF, MIMD: DL, MCID).addReg(RegNo: Reg);
2388 if (IsIndirect)
2389 MIB.addImm(Val: 0U);
2390 else
2391 MIB.addReg(RegNo: 0U);
2392 return MIB.addMetadata(MD: Variable).addMetadata(MD: Expr);
2393}
2394
2395MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2396 const MCInstrDesc &MCID, bool IsIndirect,
2397 ArrayRef<MachineOperand> DebugOps,
2398 const MDNode *Variable, const MDNode *Expr) {
2399 assert(isa<DILocalVariable>(Variable) && "not a variable");
2400 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2401 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2402 "Expected inlined-at fields to agree");
2403 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2404 assert(DebugOps.size() == 1 &&
2405 "DBG_VALUE must contain exactly one debug operand");
2406 MachineOperand DebugOp = DebugOps[0];
2407 if (DebugOp.isReg())
2408 return BuildMI(MF, DL, MCID, IsIndirect, Reg: DebugOp.getReg(), Variable,
2409 Expr);
2410
2411 auto MIB = BuildMI(MF, MIMD: DL, MCID).add(MO: DebugOp);
2412 if (IsIndirect)
2413 MIB.addImm(Val: 0U);
2414 else
2415 MIB.addReg(RegNo: 0U);
2416 return MIB.addMetadata(MD: Variable).addMetadata(MD: Expr);
2417 }
2418
2419 auto MIB = BuildMI(MF, MIMD: DL, MCID);
2420 MIB.addMetadata(MD: Variable).addMetadata(MD: Expr);
2421 for (const MachineOperand &DebugOp : DebugOps)
2422 if (DebugOp.isReg())
2423 MIB.addReg(RegNo: DebugOp.getReg());
2424 else
2425 MIB.add(MO: DebugOp);
2426 return MIB;
2427}
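
// Example: building a variadic DBG_VALUE_LIST over two registers (a minimal
// sketch; the registers and metadata nodes are hypothetical):
//
//   MachineOperand DebugOps[] = {MachineOperand::CreateReg(Reg0, false),
//                                MachineOperand::CreateReg(Reg1, false)};
//   BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE_LIST),
//           /*IsIndirect=*/false, DebugOps, Var, Expr);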
2428
2429MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2430 MachineBasicBlock::iterator I,
2431 const DebugLoc &DL, const MCInstrDesc &MCID,
2432 bool IsIndirect, Register Reg,
2433 const MDNode *Variable, const MDNode *Expr) {
2434 MachineFunction &MF = *BB.getParent();
2435 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2436 BB.insert(I, MI);
2437 return MachineInstrBuilder(MF, MI);
2438}
2439
2440MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2441 MachineBasicBlock::iterator I,
2442 const DebugLoc &DL, const MCInstrDesc &MCID,
2443 bool IsIndirect,
2444 ArrayRef<MachineOperand> DebugOps,
2445 const MDNode *Variable, const MDNode *Expr) {
2446 MachineFunction &MF = *BB.getParent();
2447 MachineInstr *MI =
2448 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2449 BB.insert(I, MI);
2450 return MachineInstrBuilder(MF, *MI);
2451}
2452
2453/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2454/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2455static const DIExpression *computeExprForSpill(
2456 const MachineInstr &MI,
2457 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2458 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2459 "Expected inlined-at fields to agree");
2460
2461 const DIExpression *Expr = MI.getDebugExpression();
2462 if (MI.isIndirectDebugValue()) {
2463 assert(MI.getDebugOffset().getImm() == 0 &&
2464 "DBG_VALUE with nonzero offset");
2465 Expr = DIExpression::prepend(Expr, Flags: DIExpression::DerefBefore);
2466 } else if (MI.isDebugValueList()) {
2467 // We will replace the spilled register with a frame index, so
2468 // immediately deref all references to the spilled register.
    std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2470 for (const MachineOperand *Op : SpilledOperands) {
2471 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2472 Expr = DIExpression::appendOpsToArg(Expr, Ops, ArgNo: OpIdx);
2473 }
2474 }
2475 return Expr;
2476}
2477static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2478 Register SpillReg) {
2479 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2480 SmallVector<const MachineOperand *> SpillOperands(
2481 llvm::make_pointer_range(Range: MI.getDebugOperandsForReg(Reg: SpillReg)));
2482 return computeExprForSpill(MI, SpilledOperands: SpillOperands);
2483}
2484
2485MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2486 MachineBasicBlock::iterator I,
2487 const MachineInstr &Orig,
2488 int FrameIndex, Register SpillReg) {
2489 assert(!Orig.isDebugRef() &&
2490 "DBG_INSTR_REF should not reference a virtual register.");
2491 const DIExpression *Expr = computeExprForSpill(MI: Orig, SpillReg);
2492 MachineInstrBuilder NewMI =
2493 BuildMI(BB, I, MIMD: Orig.getDebugLoc(), MCID: Orig.getDesc());
2494 // Non-Variadic Operands: Location, Offset, Variable, Expression
2495 // Variadic Operands: Variable, Expression, Locations...
2496 if (Orig.isNonListDebugValue())
2497 NewMI.addFrameIndex(Idx: FrameIndex).addImm(Val: 0U);
2498 NewMI.addMetadata(MD: Orig.getDebugVariable()).addMetadata(MD: Expr);
2499 if (Orig.isDebugValueList()) {
2500 for (const MachineOperand &Op : Orig.debug_operands())
2501 if (Op.isReg() && Op.getReg() == SpillReg)
2502 NewMI.addFrameIndex(Idx: FrameIndex);
2503 else
2504 NewMI.add(MO: MachineOperand(Op));
2505 }
2506 return NewMI;
2507}
2508MachineInstr *llvm::buildDbgValueForSpill(
2509 MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2510 const MachineInstr &Orig, int FrameIndex,
2511 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2512 const DIExpression *Expr = computeExprForSpill(MI: Orig, SpilledOperands);
2513 MachineInstrBuilder NewMI =
2514 BuildMI(BB, I, MIMD: Orig.getDebugLoc(), MCID: Orig.getDesc());
2515 // Non-Variadic Operands: Location, Offset, Variable, Expression
2516 // Variadic Operands: Variable, Expression, Locations...
2517 if (Orig.isNonListDebugValue())
2518 NewMI.addFrameIndex(Idx: FrameIndex).addImm(Val: 0U);
2519 NewMI.addMetadata(MD: Orig.getDebugVariable()).addMetadata(MD: Expr);
2520 if (Orig.isDebugValueList()) {
2521 for (const MachineOperand &Op : Orig.debug_operands())
2522 if (is_contained(Range: SpilledOperands, Element: &Op))
2523 NewMI.addFrameIndex(Idx: FrameIndex);
2524 else
2525 NewMI.add(MO: MachineOperand(Op));
2526 }
2527 return NewMI;
2528}
2529
2530void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2531 Register Reg) {
2532 const DIExpression *Expr = computeExprForSpill(MI: Orig, SpillReg: Reg);
2533 if (Orig.isNonListDebugValue())
2534 Orig.getDebugOffset().ChangeToImmediate(ImmVal: 0U);
2535 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2536 Op.ChangeToFrameIndex(Idx: FrameIndex);
2537 Orig.getDebugExpressionOp().setMetadata(Expr);
2538}
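
// Example: spilling the register of an indirect DBG_VALUE rewrites it in
// place, prepending a deref so the location now goes through the stack slot
// (illustrative MIR; variable and expression nodes are placeholders):
//
//   before: DBG_VALUE $x0, 0, !10, !DIExpression()
//   after:  DBG_VALUE %stack.0, 0, !10, !DIExpression(DW_OP_deref)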
2539
2540void MachineInstr::collectDebugValues(
2541 SmallVectorImpl<MachineInstr *> &DbgValues) {
2542 MachineInstr &MI = *this;
2543 if (!MI.getOperand(i: 0).isReg())
2544 return;
2545
2546 MachineBasicBlock::iterator DI = MI; ++DI;
2547 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2548 DI != DE; ++DI) {
2549 if (!DI->isDebugValue())
2550 return;
2551 if (DI->hasDebugOperandForReg(Reg: MI.getOperand(i: 0).getReg()))
2552 DbgValues.push_back(Elt: &*DI);
2553 }
2554}
2555
2556void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2557 // Collect matching debug values.
2558 SmallVector<MachineInstr *, 2> DbgValues;
2559
2560 if (!getOperand(i: 0).isReg())
2561 return;
2562
2563 Register DefReg = getOperand(i: 0).getReg();
2564 auto *MRI = getRegInfo();
2565 for (auto &MO : MRI->use_operands(Reg: DefReg)) {
2566 auto *DI = MO.getParent();
2567 if (!DI->isDebugValue())
2568 continue;
2569 if (DI->hasDebugOperandForReg(Reg: DefReg)) {
2570 DbgValues.push_back(Elt: DI);
2571 }
2572 }
2573
2574 // Propagate Reg to debug value instructions.
2575 for (auto *DBI : DbgValues)
2576 for (MachineOperand &Op : DBI->getDebugOperandsForReg(Reg: DefReg))
2577 Op.setReg(Reg);
2578}
2579
2580using MMOList = SmallVector<const MachineMemOperand *, 2>;
2581
2582static LocationSize getSpillSlotSize(const MMOList &Accesses,
2583 const MachineFrameInfo &MFI) {
2584 std::optional<TypeSize> Size;
2585 for (const auto *A : Accesses) {
2586 if (MFI.isSpillSlotObjectIndex(
2587 ObjectIdx: cast<FixedStackPseudoSourceValue>(Val: A->getPseudoValue())
2588 ->getFrameIndex())) {
2589 LocationSize S = A->getSize();
2590 if (!S.hasValue())
2591 return LocationSize::beforeOrAfterPointer();
2592 if (!Size)
2593 Size = S.getValue();
2594 else
2595 Size = *Size + S.getValue();
2596 }
2597 }
2598 if (!Size)
2599 return LocationSize::precise(Value: 0);
2600 return LocationSize::precise(Value: *Size);
2601}
2602
2603std::optional<LocationSize>
2604MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2605 int FI;
2606 if (TII->isStoreToStackSlotPostFE(MI: *this, FrameIndex&: FI)) {
2607 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2608 if (MFI.isSpillSlotObjectIndex(ObjectIdx: FI))
2609 return (*memoperands_begin())->getSize();
2610 }
2611 return std::nullopt;
2612}
2613
2614std::optional<LocationSize>
2615MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2616 MMOList Accesses;
2617 if (TII->hasStoreToStackSlot(MI: *this, Accesses))
2618 return getSpillSlotSize(Accesses, MFI: getMF()->getFrameInfo());
2619 return std::nullopt;
2620}
2621
2622std::optional<LocationSize>
2623MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2624 int FI;
2625 if (TII->isLoadFromStackSlotPostFE(MI: *this, FrameIndex&: FI)) {
2626 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2627 if (MFI.isSpillSlotObjectIndex(ObjectIdx: FI))
2628 return (*memoperands_begin())->getSize();
2629 }
2630 return std::nullopt;
2631}
2632
2633std::optional<LocationSize>
2634MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2635 MMOList Accesses;
2636 if (TII->hasLoadFromStackSlot(MI: *this, Accesses))
2637 return getSpillSlotSize(Accesses, MFI: getMF()->getFrameInfo());
2638 return std::nullopt;
2639}
2640
2641unsigned MachineInstr::getDebugInstrNum() {
2642 if (DebugInstrNum == 0)
2643 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2644 return DebugInstrNum;
2645}
2646
2647unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2648 if (DebugInstrNum == 0)
2649 DebugInstrNum = MF.getNewDebugInstrNum();
2650 return DebugInstrNum;
2651}
2652
2653std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2654 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2655 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()));
2656}
2657
2658std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2659 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2660 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()),
2661 getRegInfo()->getType(Reg: getOperand(i: 2).getReg()));
2662}
2663
2664std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2665 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2666 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()),
2667 getRegInfo()->getType(Reg: getOperand(i: 2).getReg()),
2668 getRegInfo()->getType(Reg: getOperand(i: 3).getReg()));
2669}
2670
2671std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2672 return std::tuple(getRegInfo()->getType(Reg: getOperand(i: 0).getReg()),
2673 getRegInfo()->getType(Reg: getOperand(i: 1).getReg()),
2674 getRegInfo()->getType(Reg: getOperand(i: 2).getReg()),
2675 getRegInfo()->getType(Reg: getOperand(i: 3).getReg()),
2676 getRegInfo()->getType(Reg: getOperand(i: 4).getReg()));
2677}
2678
2679std::tuple<Register, LLT, Register, LLT>
2680MachineInstr::getFirst2RegLLTs() const {
2681 Register Reg0 = getOperand(i: 0).getReg();
2682 Register Reg1 = getOperand(i: 1).getReg();
2683 return std::tuple(Reg0, getRegInfo()->getType(Reg: Reg0), Reg1,
2684 getRegInfo()->getType(Reg: Reg1));
2685}
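
// Example: GlobalISel combines and legalization rules unpack these tuples
// with structured bindings (a minimal sketch):
//
//   auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
//   if (DstTy == SrcTy)
//     ; // e.g., fold a no-op conversion from Src to Dst.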
2686
2687std::tuple<Register, LLT, Register, LLT, Register, LLT>
2688MachineInstr::getFirst3RegLLTs() const {
2689 Register Reg0 = getOperand(i: 0).getReg();
2690 Register Reg1 = getOperand(i: 1).getReg();
2691 Register Reg2 = getOperand(i: 2).getReg();
2692 return std::tuple(Reg0, getRegInfo()->getType(Reg: Reg0), Reg1,
2693 getRegInfo()->getType(Reg: Reg1), Reg2,
2694 getRegInfo()->getType(Reg: Reg2));
2695}
2696
2697std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2698MachineInstr::getFirst4RegLLTs() const {
2699 Register Reg0 = getOperand(i: 0).getReg();
2700 Register Reg1 = getOperand(i: 1).getReg();
2701 Register Reg2 = getOperand(i: 2).getReg();
2702 Register Reg3 = getOperand(i: 3).getReg();
2703 return std::tuple(
2704 Reg0, getRegInfo()->getType(Reg: Reg0), Reg1, getRegInfo()->getType(Reg: Reg1),
2705 Reg2, getRegInfo()->getType(Reg: Reg2), Reg3, getRegInfo()->getType(Reg: Reg3));
2706}
2707
2708std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2709 LLT>
2710MachineInstr::getFirst5RegLLTs() const {
2711 Register Reg0 = getOperand(i: 0).getReg();
2712 Register Reg1 = getOperand(i: 1).getReg();
2713 Register Reg2 = getOperand(i: 2).getReg();
2714 Register Reg3 = getOperand(i: 3).getReg();
2715 Register Reg4 = getOperand(i: 4).getReg();
2716 return std::tuple(
2717 Reg0, getRegInfo()->getType(Reg: Reg0), Reg1, getRegInfo()->getType(Reg: Reg1),
2718 Reg2, getRegInfo()->getType(Reg: Reg2), Reg3, getRegInfo()->getType(Reg: Reg3),
2719 Reg4, getRegInfo()->getType(Reg: Reg4));
2720}
2721
2722void MachineInstr::insert(mop_iterator InsertBefore,
2723 ArrayRef<MachineOperand> Ops) {
2724 assert(InsertBefore != nullptr && "invalid iterator");
2725 assert(InsertBefore->getParent() == this &&
2726 "iterator points to operand of other inst");
2727 if (Ops.empty())
2728 return;
2729
2730 // Do one pass to untie operands.
2731 SmallDenseMap<unsigned, unsigned> TiedOpIndices;
2732 for (const MachineOperand &MO : operands()) {
2733 if (MO.isReg() && MO.isTied()) {
2734 unsigned OpNo = getOperandNo(I: &MO);
2735 unsigned TiedTo = findTiedOperandIdx(OpIdx: OpNo);
2736 TiedOpIndices[OpNo] = TiedTo;
2737 untieRegOperand(OpIdx: OpNo);
2738 }
2739 }
2740
2741 unsigned OpIdx = getOperandNo(I: InsertBefore);
2742 unsigned NumOperands = getNumOperands();
2743 unsigned OpsToMove = NumOperands - OpIdx;
2744
2745 SmallVector<MachineOperand> MovingOps;
2746 MovingOps.reserve(N: OpsToMove);
2747
2748 for (unsigned I = 0; I < OpsToMove; ++I) {
2749 MovingOps.emplace_back(Args&: getOperand(i: OpIdx));
2750 removeOperand(OpNo: OpIdx);
2751 }
2752 for (const MachineOperand &MO : Ops)
2753 addOperand(Op: MO);
2754 for (const MachineOperand &OpMoved : MovingOps)
2755 addOperand(Op: OpMoved);
2756
2757 // Re-tie operands.
2758 for (auto [Tie1, Tie2] : TiedOpIndices) {
2759 if (Tie1 >= OpIdx)
2760 Tie1 += Ops.size();
2761 if (Tie2 >= OpIdx)
2762 Tie2 += Ops.size();
2763 tieOperands(DefIdx: Tie1, UseIdx: Tie2);
2764 }
2765}
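
// Example: splicing two immediate operands in before the third operand while
// preserving any register ties (a minimal sketch):
//
//   MachineOperand NewOps[] = {MachineOperand::CreateImm(0),
//                              MachineOperand::CreateImm(1)};
//   MI.insert(MI.operands_begin() + 2, NewOps);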
2766
2767bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2768 assert(OpId && "expected non-zero operand id");
2769 assert(isInlineAsm() && "should only be used on inline asm");
2770
2771 if (!getOperand(i: OpId).isReg())
2772 return false;
2773
2774 const MachineOperand &MD = getOperand(i: OpId - 1);
2775 if (!MD.isImm())
2776 return false;
2777
2778 InlineAsm::Flag F(MD.getImm());
2779 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2780 return F.getRegMayBeFolded();
2781 return false;
2782}
2783
2784unsigned MachineInstr::removePHIIncomingValueFor(const MachineBasicBlock &MBB) {
2785 assert(isPHI());
2786
2787 // Phi might have multiple entries for MBB. Need to remove them all.
2788 unsigned RemovedCount = 0;
2789 for (unsigned N = getNumOperands(); N > 2; N -= 2) {
2790 if (getOperand(i: N - 1).getMBB() == &MBB) {
2791 removeOperand(OpNo: N - 1);
2792 removeOperand(OpNo: N - 2);
2793 RemovedCount += 2;
2794 }
2795 }
2796 return RemovedCount;
2797}
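
// Example: given the PHI
//
//   %2:gpr32 = PHI %0, %bb.1, %1, %bb.2, %0, %bb.1
//
// removePHIIncomingValueFor(%bb.1) removes both (%0, %bb.1) pairs and
// returns 4, leaving `%2:gpr32 = PHI %1, %bb.2`.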
2798