//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//
12
13#include "llvm/IR/Instruction.h"
14#include "llvm/ADT/DenseSet.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/IR/AttributeMask.h"
17#include "llvm/IR/Attributes.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/InstrTypes.h"
20#include "llvm/IR/Instructions.h"
21#include "llvm/IR/IntrinsicInst.h"
22#include "llvm/IR/Intrinsics.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
25#include "llvm/IR/Module.h"
26#include "llvm/IR/Operator.h"
27#include "llvm/IR/ProfDataUtils.h"
28#include "llvm/IR/Type.h"
29#include "llvm/Support/CommandLine.h"
30#include "llvm/Support/Compiler.h"
31using namespace llvm;
32
33namespace llvm {
34
35// FIXME: Flag used for an ablation performance test, Issue #147390. Placing it
36// here because referencing IR should be feasible from anywhere. Will be
37// removed after the ablation test.
38cl::opt<bool> ProfcheckDisableMetadataFixes(
39 "profcheck-disable-metadata-fixes", cl::Hidden, cl::init(Val: false),
40 cl::desc(
41 "Disable metadata propagation fixes discovered through Issue #147390"));
42
43} // end namespace llvm
44
45InsertPosition::InsertPosition(Instruction *InsertBefore)
46 : InsertAt(InsertBefore ? InsertBefore->getIterator()
47 : InstListType::iterator()) {}
48InsertPosition::InsertPosition(BasicBlock *InsertAtEnd)
49 : InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {}
50
51Instruction::Instruction(Type *ty, unsigned it, AllocInfo AllocInfo,
52 InsertPosition InsertBefore)
53 : User(ty, Value::InstructionVal + it, AllocInfo) {
54 // When called with an iterator, there must be a block to insert into.
55 if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) {
56 BasicBlock *BB = InsertIt.getNodeParent();
57 assert(BB && "Instruction to insert before is not in a basic block!");
58 insertInto(ParentBB: BB, It: InsertBefore);
59 }
60}
61
62Instruction::~Instruction() {
63 assert(!getParent() && "Instruction still linked in the program!");
64
65 // Replace any extant metadata uses of this instruction with poison to
66 // preserve debug info accuracy. Some alternatives include:
67 // - Treat Instruction like any other Value, and point its extant metadata
68 // uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
69 // trivially dead (i.e. fair game for deletion in many passes), leading to
70 // stale dbg.values being in effect for too long.
71 // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
72 // correct. OTOH results in wasted work in some common cases (e.g. when all
73 // instructions in a BasicBlock are deleted).
74 if (isUsedByMetadata())
75 ValueAsMetadata::handleRAUW(From: this, To: PoisonValue::get(T: getType()));
76
77 // Remove associated metadata from context.
78 if (hasMetadata()) {
79 // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
80 // mapping in LLVMContext.
81 updateDIAssignIDMapping(ID: nullptr);
82 clearMetadata();
83 }
84}
85
86const Module *Instruction::getModule() const {
87 return getParent()->getModule();
88}
89
90const Function *Instruction::getFunction() const {
91 return getParent()->getParent();
92}
93
94const DataLayout &Instruction::getDataLayout() const {
95 return getModule()->getDataLayout();
96}
97
98void Instruction::removeFromParent() {
99 // Perform any debug-info maintenence required.
100 handleMarkerRemoval();
101
102 getParent()->getInstList().remove(IT: getIterator());
103}
104
105void Instruction::handleMarkerRemoval() {
106 if (!DebugMarker)
107 return;
108
109 DebugMarker->removeMarker();
110}
111
112BasicBlock::iterator Instruction::eraseFromParent() {
113 handleMarkerRemoval();
114 return getParent()->getInstList().erase(where: getIterator());
115}
116
117void Instruction::insertBefore(Instruction *InsertPos) {
118 insertBefore(InsertPos: InsertPos->getIterator());
119}
120
121/// Insert an unlinked instruction into a basic block immediately before the
122/// specified instruction.
123void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
124 insertBefore(BB&: *InsertPos->getParent(), InsertPos);
125}
126
127/// Insert an unlinked instruction into a basic block immediately after the
128/// specified instruction.
129void Instruction::insertAfter(Instruction *InsertPos) {
130 BasicBlock *DestParent = InsertPos->getParent();
131
132 DestParent->getInstList().insertAfter(where: InsertPos->getIterator(), New: this);
133}
134
135void Instruction::insertAfter(BasicBlock::iterator InsertPos) {
136 BasicBlock *DestParent = InsertPos->getParent();
137
138 DestParent->getInstList().insertAfter(where: InsertPos, New: this);
139}
140
141BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
142 BasicBlock::iterator It) {
143 assert(getParent() == nullptr && "Expected detached instruction");
144 assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
145 "It not in ParentBB");
146 insertBefore(BB&: *ParentBB, InsertPos: It);
147 return getIterator();
148}
149
150void Instruction::insertBefore(BasicBlock &BB,
151 InstListType::iterator InsertPos) {
152 assert(!DebugMarker);
153
154 BB.getInstList().insert(where: InsertPos, New: this);
155
156 // We've inserted "this": if InsertAtHead is set then it comes before any
157 // DbgVariableRecords attached to InsertPos. But if it's not set, then any
158 // DbgRecords should now come before "this".
159 bool InsertAtHead = InsertPos.getHeadBit();
160 if (!InsertAtHead) {
161 DbgMarker *SrcMarker = BB.getMarker(It: InsertPos);
162 if (SrcMarker && !SrcMarker->empty()) {
163 // If this assertion fires, the calling code is about to insert a PHI
164 // after debug-records, which would form a sequence like:
165 // %0 = PHI
166 // #dbg_value
167 // %1 = PHI
168 // Which is de-normalised and undesired -- hence the assertion. To avoid
169 // this, you must insert at that position using an iterator, and it must
170 // be aquired by calling getFirstNonPHIIt / begin or similar methods on
171 // the block. This will signal to this behind-the-scenes debug-info
172 // maintenence code that you intend the PHI to be ahead of everything,
173 // including any debug-info.
174 assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
175 adoptDbgRecords(BB: &BB, It: InsertPos, InsertAtHead: false);
176 }
177 }
178
179 // If we're inserting a terminator, check if we need to flush out
180 // TrailingDbgRecords. Inserting instructions at the end of an incomplete
181 // block is handled by the code block above.
182 if (isTerminator())
183 getParent()->flushTerminatorDbgRecords();
184}
185
186/// Unlink this instruction from its current basic block and insert it into the
187/// basic block that MovePos lives in, right before MovePos.
188void Instruction::moveBefore(Instruction *MovePos) {
189 moveBeforeImpl(BB&: *MovePos->getParent(), I: MovePos->getIterator(), Preserve: false);
190}
191
192void Instruction::moveBefore(BasicBlock::iterator MovePos) {
193 moveBeforeImpl(BB&: *MovePos->getParent(), I: MovePos, Preserve: false);
194}
195
196void Instruction::moveBeforePreserving(Instruction *MovePos) {
197 moveBeforeImpl(BB&: *MovePos->getParent(), I: MovePos->getIterator(), Preserve: true);
198}
199
200void Instruction::moveBeforePreserving(BasicBlock::iterator MovePos) {
201 moveBeforeImpl(BB&: *MovePos->getParent(), I: MovePos, Preserve: true);
202}
203
204void Instruction::moveAfter(Instruction *MovePos) {
205 auto NextIt = std::next(x: MovePos->getIterator());
206 // We want this instruction to be moved to after NextIt in the instruction
207 // list, but before NextIt's debug value range.
208 NextIt.setHeadBit(true);
209 moveBeforeImpl(BB&: *MovePos->getParent(), I: NextIt, Preserve: false);
210}
211
212void Instruction::moveAfter(InstListType::iterator MovePos) {
213 // We want this instruction to be moved to after NextIt in the instruction
214 // list, but before NextIt's debug value range.
215 MovePos.setHeadBit(true);
216 moveBeforeImpl(BB&: *MovePos->getParent(), I: MovePos, Preserve: false);
217}
218
219void Instruction::moveAfterPreserving(Instruction *MovePos) {
220 auto NextIt = std::next(x: MovePos->getIterator());
221 // We want this instruction and its debug range to be moved to after NextIt
222 // in the instruction list, but before NextIt's debug value range.
223 NextIt.setHeadBit(true);
224 moveBeforeImpl(BB&: *MovePos->getParent(), I: NextIt, Preserve: true);
225}
226
227void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
228 moveBeforeImpl(BB, I, Preserve: false);
229}
230
231void Instruction::moveBeforePreserving(BasicBlock &BB,
232 InstListType::iterator I) {
233 moveBeforeImpl(BB, I, Preserve: true);
234}
235
236void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
237 bool Preserve) {
238 assert(I == BB.end() || I->getParent() == &BB);
239 bool InsertAtHead = I.getHeadBit();
240
241 // If we've been given the "Preserve" flag, then just move the DbgRecords with
242 // the instruction, no more special handling needed.
243 if (DebugMarker && !Preserve) {
244 if (I != this->getIterator() || InsertAtHead) {
245 // "this" is definitely moving in the list, or it's moving ahead of its
246 // attached DbgVariableRecords. Detach any existing DbgRecords.
247 handleMarkerRemoval();
248 }
249 }
250
251 // Move this single instruction. Use the list splice method directly, not
252 // the block splicer, which will do more debug-info things.
253 BB.getInstList().splice(where: I, L2&: getParent()->getInstList(), first: getIterator());
254
255 if (!Preserve) {
256 DbgMarker *NextMarker = getParent()->getNextMarker(I: this);
257
258 // If we're inserting at point I, and not in front of the DbgRecords
259 // attached there, then we should absorb the DbgRecords attached to I.
260 if (!InsertAtHead && NextMarker && !NextMarker->empty()) {
261 adoptDbgRecords(BB: &BB, It: I, InsertAtHead: false);
262 }
263 }
264
265 if (isTerminator())
266 getParent()->flushTerminatorDbgRecords();
267}
268
269iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom(
270 const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere,
271 bool InsertAtHead) {
272 if (!From->DebugMarker)
273 return DbgMarker::getEmptyDbgRecordRange();
274
275 if (!DebugMarker)
276 getParent()->createMarker(I: this);
277
278 return DebugMarker->cloneDebugInfoFrom(From: From->DebugMarker, FromHere,
279 InsertAtHead);
280}
281
282std::optional<DbgRecord::self_iterator>
283Instruction::getDbgReinsertionPosition() {
284 // Is there a marker on the next instruction?
285 DbgMarker *NextMarker = getParent()->getNextMarker(I: this);
286 if (!NextMarker)
287 return std::nullopt;
288
289 // Are there any DbgRecords in the next marker?
290 if (NextMarker->StoredDbgRecords.empty())
291 return std::nullopt;
292
293 return NextMarker->StoredDbgRecords.begin();
294}
295
296bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); }
297
298void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It,
299 bool InsertAtHead) {
300 DbgMarker *SrcMarker = BB->getMarker(It);
301 auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() {
302 if (BB->end() == It) {
303 SrcMarker->eraseFromParent();
304 BB->deleteTrailingDbgRecords();
305 }
306 };
307
308 if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) {
309 ReleaseTrailingDbgRecords();
310 return;
311 }
312
313 // If we have DbgMarkers attached to this instruction, we have to honour the
314 // ordering of DbgRecords between this and the other marker. Fall back to just
315 // absorbing from the source.
316 if (DebugMarker || It == BB->end()) {
317 // Ensure we _do_ have a marker.
318 getParent()->createMarker(I: this);
319 DebugMarker->absorbDebugValues(Src&: *SrcMarker, InsertAtHead);
320
321 // Having transferred everything out of SrcMarker, we _could_ clean it up
322 // and free the marker now. However, that's a lot of heap-accounting for a
323 // small amount of memory with a good chance of re-use. Leave it for the
324 // moment. It will be released when the Instruction is freed in the worst
325 // case.
326 // However: if we transferred from a trailing marker off the end of the
327 // block, it's important to not leave the empty marker trailing. It will
328 // give a misleading impression that some debug records have been left
329 // trailing.
330 ReleaseTrailingDbgRecords();
331 } else {
332 // Optimisation: we're transferring all the DbgRecords from the source
333 // marker onto this empty location: just adopt the other instructions
334 // marker.
335 DebugMarker = SrcMarker;
336 DebugMarker->MarkedInstr = this;
337 It->DebugMarker = nullptr;
338 }
339}
340
341void Instruction::dropDbgRecords() {
342 if (DebugMarker)
343 DebugMarker->dropDbgRecords();
344}
345
346void Instruction::dropOneDbgRecord(DbgRecord *DVR) {
347 DebugMarker->dropOneDbgRecord(DR: DVR);
348}
349
350bool Instruction::comesBefore(const Instruction *Other) const {
351 assert(getParent() && Other->getParent() &&
352 "instructions without BB parents have no order");
353 assert(getParent() == Other->getParent() &&
354 "cross-BB instruction order comparison");
355 if (!getParent()->isInstrOrderValid())
356 const_cast<BasicBlock *>(getParent())->renumberInstructions();
357 return Order < Other->Order;
358}
359
360std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
361 assert(!getType()->isVoidTy() && "Instruction must define result");
362 BasicBlock *InsertBB;
363 BasicBlock::iterator InsertPt;
364 if (auto *PN = dyn_cast<PHINode>(Val: this)) {
365 InsertBB = PN->getParent();
366 InsertPt = InsertBB->getFirstInsertionPt();
367 } else if (auto *II = dyn_cast<InvokeInst>(Val: this)) {
368 InsertBB = II->getNormalDest();
369 InsertPt = InsertBB->getFirstInsertionPt();
370 } else if (isa<CallBrInst>(Val: this)) {
371 // Def is available in multiple successors, there's no single dominating
372 // insertion point.
373 return std::nullopt;
374 } else {
375 assert(!isTerminator() && "Only invoke/callbr terminators return value");
376 InsertBB = getParent();
377 InsertPt = std::next(x: getIterator());
378 // Any instruction inserted immediately after "this" will come before any
379 // debug-info records take effect -- thus, set the head bit indicating that
380 // to debug-info-transfer code.
381 InsertPt.setHeadBit(true);
382 }
383
384 // catchswitch blocks don't have any legal insertion point (because they
385 // are both an exception pad and a terminator).
386 if (InsertPt == InsertBB->end())
387 return std::nullopt;
388 return InsertPt;
389}
390
391bool Instruction::isOnlyUserOfAnyOperand() {
392 return any_of(Range: operands(), P: [](const Value *V) { return V->hasOneUser(); });
393}
394
395void Instruction::setHasNoUnsignedWrap(bool b) {
396 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(Val: this))
397 Inst->setHasNoUnsignedWrap(b);
398 else
399 cast<TruncInst>(Val: this)->setHasNoUnsignedWrap(b);
400}
401
402void Instruction::setHasNoSignedWrap(bool b) {
403 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(Val: this))
404 Inst->setHasNoSignedWrap(b);
405 else
406 cast<TruncInst>(Val: this)->setHasNoSignedWrap(b);
407}
408
409void Instruction::setIsExact(bool b) {
410 cast<PossiblyExactOperator>(Val: this)->setIsExact(b);
411}
412
413void Instruction::setNonNeg(bool b) {
414 assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
415 SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
416 (b * PossiblyNonNegInst::NonNeg);
417}
418
419bool Instruction::hasNoUnsignedWrap() const {
420 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(Val: this))
421 return Inst->hasNoUnsignedWrap();
422
423 return cast<TruncInst>(Val: this)->hasNoUnsignedWrap();
424}
425
426bool Instruction::hasNoSignedWrap() const {
427 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(Val: this))
428 return Inst->hasNoSignedWrap();
429
430 return cast<TruncInst>(Val: this)->hasNoSignedWrap();
431}
432
433bool Instruction::hasNonNeg() const {
434 assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
435 return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
436}
437
438bool Instruction::hasPoisonGeneratingFlags() const {
439 return cast<Operator>(Val: this)->hasPoisonGeneratingFlags();
440}
441
442void Instruction::dropPoisonGeneratingFlags() {
443 switch (getOpcode()) {
444 case Instruction::Add:
445 case Instruction::Sub:
446 case Instruction::Mul:
447 case Instruction::Shl:
448 cast<OverflowingBinaryOperator>(Val: this)->setHasNoUnsignedWrap(false);
449 cast<OverflowingBinaryOperator>(Val: this)->setHasNoSignedWrap(false);
450 break;
451
452 case Instruction::UDiv:
453 case Instruction::SDiv:
454 case Instruction::AShr:
455 case Instruction::LShr:
456 cast<PossiblyExactOperator>(Val: this)->setIsExact(false);
457 break;
458
459 case Instruction::Or:
460 cast<PossiblyDisjointInst>(Val: this)->setIsDisjoint(false);
461 break;
462
463 case Instruction::GetElementPtr:
464 cast<GetElementPtrInst>(Val: this)->setNoWrapFlags(GEPNoWrapFlags::none());
465 break;
466
467 case Instruction::UIToFP:
468 case Instruction::ZExt:
469 setNonNeg(false);
470 break;
471
472 case Instruction::Trunc:
473 cast<TruncInst>(Val: this)->setHasNoUnsignedWrap(false);
474 cast<TruncInst>(Val: this)->setHasNoSignedWrap(false);
475 break;
476
477 case Instruction::ICmp:
478 cast<ICmpInst>(Val: this)->setSameSign(false);
479 break;
480
481 case Instruction::Call: {
482 if (auto *II = dyn_cast<IntrinsicInst>(Val: this)) {
483 switch (II->getIntrinsicID()) {
484 case Intrinsic::ctlz:
485 case Intrinsic::cttz:
486 case Intrinsic::abs:
487 II->setOperand(i_nocapture: 1, Val_nocapture: ConstantInt::getFalse(Context&: getContext()));
488 break;
489 }
490 }
491 break;
492 }
493 }
494
495 if (isa<FPMathOperator>(Val: this)) {
496 setHasNoNaNs(false);
497 setHasNoInfs(false);
498 }
499
500 assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
501}
502
503bool Instruction::hasPoisonGeneratingMetadata() const {
504 return any_of(Range: Metadata::PoisonGeneratingIDs,
505 P: [this](unsigned ID) { return hasMetadata(KindID: ID); });
506}
507
508bool Instruction::hasNonDebugLocLoopMetadata() const {
509 // If there is no loop metadata at all, we also don't have
510 // non-debug loop metadata, obviously.
511 if (!hasMetadata(KindID: LLVMContext::MD_loop))
512 return false;
513
514 // If we do have loop metadata, retrieve it.
515 MDNode *LoopMD = getMetadata(KindID: LLVMContext::MD_loop);
516
517 // Check if the existing operands are debug locations. This loop
518 // should terminate after at most three iterations. Skip
519 // the first item because it is a self-reference.
520 for (const MDOperand &Op : llvm::drop_begin(RangeOrContainer: LoopMD->operands())) {
521 // check for debug location type by attempting a cast.
522 if (!isa<DILocation>(Val: Op)) {
523 return true;
524 }
525 }
526
527 // If we get here, then all we have is debug locations in the loop metadata.
528 return false;
529}
530
531void Instruction::dropPoisonGeneratingMetadata() {
532 for (unsigned ID : Metadata::PoisonGeneratingIDs)
533 eraseMetadata(KindID: ID);
534}
535
536bool Instruction::hasPoisonGeneratingReturnAttributes() const {
537 if (const auto *CB = dyn_cast<CallBase>(Val: this)) {
538 AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
539 return RetAttrs.hasAttribute(Kind: Attribute::Range) ||
540 RetAttrs.hasAttribute(Kind: Attribute::Alignment) ||
541 RetAttrs.hasAttribute(Kind: Attribute::NonNull);
542 }
543 return false;
544}
545
546void Instruction::dropPoisonGeneratingReturnAttributes() {
547 if (auto *CB = dyn_cast<CallBase>(Val: this)) {
548 AttributeMask AM;
549 AM.addAttribute(Val: Attribute::Range);
550 AM.addAttribute(Val: Attribute::Alignment);
551 AM.addAttribute(Val: Attribute::NonNull);
552 CB->removeRetAttrs(AttrsToRemove: AM);
553 }
554 assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
555}
556
557void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
558 ArrayRef<unsigned> KnownIDs) {
559 dropUnknownNonDebugMetadata(KnownIDs);
560 auto *CB = dyn_cast<CallBase>(Val: this);
561 if (!CB)
562 return;
563 // For call instructions, we also need to drop parameter and return attributes
564 // that can cause UB if the call is moved to a location where the attribute is
565 // not valid.
566 AttributeList AL = CB->getAttributes();
567 if (AL.isEmpty())
568 return;
569 AttributeMask UBImplyingAttributes =
570 AttributeFuncs::getUBImplyingAttributes();
571 for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
572 CB->removeParamAttrs(ArgNo, AttrsToRemove: UBImplyingAttributes);
573 CB->removeRetAttrs(AttrsToRemove: UBImplyingAttributes);
574}
575
576void Instruction::dropUBImplyingAttrsAndMetadata(ArrayRef<unsigned> Keep) {
577 // !annotation and !prof metadata does not impact semantics.
578 // !range, !nonnull and !align produce poison, so they are safe to speculate.
579 // !fpmath specifies floating-point precision and does not imply UB.
580 // !noundef and various AA metadata must be dropped, as it generally produces
581 // immediate undefined behavior.
582 static const unsigned KnownIDs[] = {
583 LLVMContext::MD_annotation, LLVMContext::MD_range,
584 LLVMContext::MD_nonnull, LLVMContext::MD_align,
585 LLVMContext::MD_fpmath, LLVMContext::MD_prof};
586 SmallVector<unsigned> KeepIDs;
587 KeepIDs.reserve(N: Keep.size() + std::size(KnownIDs));
588 append_range(C&: KeepIDs, R: (!ProfcheckDisableMetadataFixes ? KnownIDs
589 : drop_end(RangeOrContainer: KnownIDs)));
590 append_range(C&: KeepIDs, R&: Keep);
591 dropUBImplyingAttrsAndUnknownMetadata(KnownIDs: KeepIDs);
592}
593
594bool Instruction::hasUBImplyingAttrs() const {
595 auto *CB = dyn_cast<CallBase>(Val: this);
596 if (!CB)
597 return false;
598 // For call instructions, we also need to check parameter and return
599 // attributes that can cause UB.
600 for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
601 if (CB->isPassingUndefUB(ArgNo))
602 return true;
603 return CB->hasRetAttr(Kind: Attribute::NoUndef) ||
604 CB->hasRetAttr(Kind: Attribute::Dereferenceable) ||
605 CB->hasRetAttr(Kind: Attribute::DereferenceableOrNull);
606}
607
608bool Instruction::isExact() const {
609 return cast<PossiblyExactOperator>(Val: this)->isExact();
610}
611
612void Instruction::setFast(bool B) {
613 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
614 cast<FPMathOperator>(Val: this)->setFast(B);
615}
616
617void Instruction::setHasAllowReassoc(bool B) {
618 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
619 cast<FPMathOperator>(Val: this)->setHasAllowReassoc(B);
620}
621
622void Instruction::setHasNoNaNs(bool B) {
623 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
624 cast<FPMathOperator>(Val: this)->setHasNoNaNs(B);
625}
626
627void Instruction::setHasNoInfs(bool B) {
628 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
629 cast<FPMathOperator>(Val: this)->setHasNoInfs(B);
630}
631
632void Instruction::setHasNoSignedZeros(bool B) {
633 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
634 cast<FPMathOperator>(Val: this)->setHasNoSignedZeros(B);
635}
636
637void Instruction::setHasAllowReciprocal(bool B) {
638 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
639 cast<FPMathOperator>(Val: this)->setHasAllowReciprocal(B);
640}
641
642void Instruction::setHasAllowContract(bool B) {
643 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
644 cast<FPMathOperator>(Val: this)->setHasAllowContract(B);
645}
646
647void Instruction::setHasApproxFunc(bool B) {
648 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
649 cast<FPMathOperator>(Val: this)->setHasApproxFunc(B);
650}
651
652void Instruction::setFastMathFlags(FastMathFlags FMF) {
653 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
654 cast<FPMathOperator>(Val: this)->setFastMathFlags(FMF);
655}
656
657void Instruction::copyFastMathFlags(FastMathFlags FMF) {
658 assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
659 cast<FPMathOperator>(Val: this)->copyFastMathFlags(FMF);
660}
661
662bool Instruction::isFast() const {
663 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
664 return cast<FPMathOperator>(Val: this)->isFast();
665}
666
667bool Instruction::hasAllowReassoc() const {
668 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
669 return cast<FPMathOperator>(Val: this)->hasAllowReassoc();
670}
671
672bool Instruction::hasNoNaNs() const {
673 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
674 return cast<FPMathOperator>(Val: this)->hasNoNaNs();
675}
676
677bool Instruction::hasNoInfs() const {
678 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
679 return cast<FPMathOperator>(Val: this)->hasNoInfs();
680}
681
682bool Instruction::hasNoSignedZeros() const {
683 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
684 return cast<FPMathOperator>(Val: this)->hasNoSignedZeros();
685}
686
687bool Instruction::hasAllowReciprocal() const {
688 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
689 return cast<FPMathOperator>(Val: this)->hasAllowReciprocal();
690}
691
692bool Instruction::hasAllowContract() const {
693 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
694 return cast<FPMathOperator>(Val: this)->hasAllowContract();
695}
696
697bool Instruction::hasApproxFunc() const {
698 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
699 return cast<FPMathOperator>(Val: this)->hasApproxFunc();
700}
701
702FastMathFlags Instruction::getFastMathFlags() const {
703 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
704 return cast<FPMathOperator>(Val: this)->getFastMathFlags();
705}
706
707void Instruction::copyFastMathFlags(const Instruction *I) {
708 copyFastMathFlags(FMF: I->getFastMathFlags());
709}
710
711void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
712 // Copy the wrapping flags.
713 if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(Val: this)) {
714 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(Val: V)) {
715 setHasNoSignedWrap(OB->hasNoSignedWrap());
716 setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
717 }
718 }
719
720 if (auto *TI = dyn_cast<TruncInst>(Val: V)) {
721 if (isa<TruncInst>(Val: this)) {
722 setHasNoSignedWrap(TI->hasNoSignedWrap());
723 setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
724 }
725 }
726
727 // Copy the exact flag.
728 if (auto *PE = dyn_cast<PossiblyExactOperator>(Val: V))
729 if (isa<PossiblyExactOperator>(Val: this))
730 setIsExact(PE->isExact());
731
732 if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(Val: V))
733 if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(Val: this))
734 DestPD->setIsDisjoint(SrcPD->isDisjoint());
735
736 // Copy the fast-math flags.
737 if (auto *FP = dyn_cast<FPMathOperator>(Val: V))
738 if (isa<FPMathOperator>(Val: this))
739 copyFastMathFlags(FMF: FP->getFastMathFlags());
740
741 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(Val: V))
742 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(Val: this))
743 DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
744 DestGEP->getNoWrapFlags());
745
746 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(Val: V))
747 if (isa<PossiblyNonNegInst>(Val: this))
748 setNonNeg(NNI->hasNonNeg());
749
750 if (auto *SrcICmp = dyn_cast<ICmpInst>(Val: V))
751 if (auto *DestICmp = dyn_cast<ICmpInst>(Val: this))
752 DestICmp->setSameSign(SrcICmp->hasSameSign());
753}
754
755void Instruction::andIRFlags(const Value *V) {
756 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(Val: V)) {
757 if (isa<OverflowingBinaryOperator>(Val: this)) {
758 setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
759 setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
760 }
761 }
762
763 if (auto *TI = dyn_cast<TruncInst>(Val: V)) {
764 if (isa<TruncInst>(Val: this)) {
765 setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
766 setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
767 }
768 }
769
770 if (auto *PE = dyn_cast<PossiblyExactOperator>(Val: V))
771 if (isa<PossiblyExactOperator>(Val: this))
772 setIsExact(isExact() && PE->isExact());
773
774 if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(Val: V))
775 if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(Val: this))
776 DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());
777
778 if (auto *FP = dyn_cast<FPMathOperator>(Val: V)) {
779 if (isa<FPMathOperator>(Val: this)) {
780 FastMathFlags FM = getFastMathFlags();
781 FM &= FP->getFastMathFlags();
782 copyFastMathFlags(FMF: FM);
783 }
784 }
785
786 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(Val: V))
787 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(Val: this))
788 DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
789 DestGEP->getNoWrapFlags());
790
791 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(Val: V))
792 if (isa<PossiblyNonNegInst>(Val: this))
793 setNonNeg(hasNonNeg() && NNI->hasNonNeg());
794
795 if (auto *SrcICmp = dyn_cast<ICmpInst>(Val: V))
796 if (auto *DestICmp = dyn_cast<ICmpInst>(Val: this))
797 DestICmp->setSameSign(DestICmp->hasSameSign() && SrcICmp->hasSameSign());
798}
799
800const char *Instruction::getOpcodeName(unsigned OpCode) {
801 switch (OpCode) {
802 // Terminators
803 case Ret: return "ret";
804 case UncondBr: return "br";
805 case CondBr: return "br";
806 case Switch: return "switch";
807 case IndirectBr: return "indirectbr";
808 case Invoke: return "invoke";
809 case Resume: return "resume";
810 case Unreachable: return "unreachable";
811 case CleanupRet: return "cleanupret";
812 case CatchRet: return "catchret";
813 case CatchPad: return "catchpad";
814 case CatchSwitch: return "catchswitch";
815 case CallBr: return "callbr";
816
817 // Standard unary operators...
818 case FNeg: return "fneg";
819
820 // Standard binary operators...
821 case Add: return "add";
822 case FAdd: return "fadd";
823 case Sub: return "sub";
824 case FSub: return "fsub";
825 case Mul: return "mul";
826 case FMul: return "fmul";
827 case UDiv: return "udiv";
828 case SDiv: return "sdiv";
829 case FDiv: return "fdiv";
830 case URem: return "urem";
831 case SRem: return "srem";
832 case FRem: return "frem";
833
834 // Logical operators...
835 case And: return "and";
836 case Or : return "or";
837 case Xor: return "xor";
838
839 // Memory instructions...
840 case Alloca: return "alloca";
841 case Load: return "load";
842 case Store: return "store";
843 case AtomicCmpXchg: return "cmpxchg";
844 case AtomicRMW: return "atomicrmw";
845 case Fence: return "fence";
846 case GetElementPtr: return "getelementptr";
847
848 // Convert instructions...
849 case Trunc: return "trunc";
850 case ZExt: return "zext";
851 case SExt: return "sext";
852 case FPTrunc: return "fptrunc";
853 case FPExt: return "fpext";
854 case FPToUI: return "fptoui";
855 case FPToSI: return "fptosi";
856 case UIToFP: return "uitofp";
857 case SIToFP: return "sitofp";
858 case IntToPtr: return "inttoptr";
859 case PtrToAddr: return "ptrtoaddr";
860 case PtrToInt: return "ptrtoint";
861 case BitCast: return "bitcast";
862 case AddrSpaceCast: return "addrspacecast";
863
864 // Other instructions...
865 case ICmp: return "icmp";
866 case FCmp: return "fcmp";
867 case PHI: return "phi";
868 case Select: return "select";
869 case Call: return "call";
870 case Shl: return "shl";
871 case LShr: return "lshr";
872 case AShr: return "ashr";
873 case VAArg: return "va_arg";
874 case ExtractElement: return "extractelement";
875 case InsertElement: return "insertelement";
876 case ShuffleVector: return "shufflevector";
877 case ExtractValue: return "extractvalue";
878 case InsertValue: return "insertvalue";
879 case LandingPad: return "landingpad";
880 case CleanupPad: return "cleanuppad";
881 case Freeze: return "freeze";
882
883 default: return "<Invalid operator> ";
884 }
885}
886
/// This must be kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/Utils/FunctionComparator.cpp.
///
/// Compare the opcode-specific, non-operand state of this instruction and
/// \p I2: alignment, volatility, atomic orderings/sync scopes, compare
/// predicates, call attributes/conventions/bundle schemas, aggregate indices,
/// switch case values, shuffle masks, and GEP source element types. Operand
/// values and result types are deliberately NOT compared here; callers are
/// expected to check those separately (both instructions must already have
/// the same opcode, as asserted below).
///
/// \param IgnoreAlignment  if set, differing alignments are treated as equal.
/// \param IntersectAttrs   for call-like instructions, accept attribute lists
///                         whose intersection is well-defined instead of
///                         requiring exact equality.
bool Instruction::hasSameSpecialState(const Instruction *I2,
                                      bool IgnoreAlignment,
                                      bool IntersectAttrs) const {
  const auto *I1 = this;
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  // Attribute comparison for call-like instructions: exact equality, or,
  // under IntersectAttrs, merely "an intersection exists".
  auto CheckAttrsSame = [IntersectAttrs](const CallBase *CB0,
                                         const CallBase *CB1) {
    return IntersectAttrs
               ? CB0->getAttributes()
                     .intersectWith(C&: CB0->getContext(), Other: CB1->getAttributes())
                     .has_value()
               : CB0->getAttributes() == CB1->getAttributes();
  };

  // Memory definition/access instructions: type, alignment (unless ignored),
  // volatility, ordering and sync scope as applicable to each opcode.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: I1))
    return AI->getAllocatedType() == cast<AllocaInst>(Val: I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(Val: I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(Val: I1))
    return LI->isVolatile() == cast<LoadInst>(Val: I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(Val: I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(Val: I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(Val: I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Val: I1))
    return SI->isVolatile() == cast<StoreInst>(Val: I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(Val: I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(Val: I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(Val: I2)->getSyncScopeID();
  // icmp/fcmp: only the predicate is extra state.
  if (const CmpInst *CI = dyn_cast<CmpInst>(Val: I1))
    return CI->getPredicate() == cast<CmpInst>(Val: I2)->getPredicate();
  // Call-like instructions: tail-call flag (calls only), calling convention,
  // attributes (per CheckAttrsSame), and operand bundle schema.
  if (const CallInst *CI = dyn_cast<CallInst>(Val: I1))
    return CI->isTailCall() == cast<CallInst>(Val: I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(Val: I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallInst>(Val: I2)) &&
           CI->hasIdenticalOperandBundleSchema(Other: *cast<CallInst>(Val: I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(Val: I1))
    return CI->getCallingConv() == cast<InvokeInst>(Val: I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<InvokeInst>(Val: I2)) &&
           CI->hasIdenticalOperandBundleSchema(Other: *cast<InvokeInst>(Val: I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(Val: I1))
    return CI->getCallingConv() == cast<CallBrInst>(Val: I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallBrInst>(Val: I2)) &&
           CI->hasIdenticalOperandBundleSchema(Other: *cast<CallBrInst>(Val: I2));
  // Switch: all case values must match pairwise. Equal case counts are
  // implied by the operand-count equality check done by callers.
  if (const SwitchInst *SI = dyn_cast<SwitchInst>(Val: I1)) {
    for (auto [Case1, Case2] : zip(t: SI->cases(), u: cast<SwitchInst>(Val: I2)->cases()))
      if (Case1.getCaseValue() != Case2.getCaseValue())
        return false;
    return true;
  }
  // Aggregate access: index lists must match exactly.
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Val: I1))
    return IVI->getIndices() == cast<InsertValueInst>(Val: I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val: I1))
    return EVI->getIndices() == cast<ExtractValueInst>(Val: I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(Val: I1))
    return FI->getOrdering() == cast<FenceInst>(Val: I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(Val: I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Val: I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(Val: I2)->isVolatile() &&
           (CXI->getAlign() == cast<AtomicCmpXchgInst>(Val: I2)->getAlign() ||
            IgnoreAlignment) &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(Val: I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(Val: I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(Val: I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(Val: I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Val: I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(Val: I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(Val: I2)->isVolatile() &&
           (RMWI->getAlign() == cast<AtomicRMWInst>(Val: I2)->getAlign() ||
            IgnoreAlignment) &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(Val: I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(Val: I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Val: I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(Val: I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Val: I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(Val: I2)->getSourceElementType();

  // Opcodes with no extra state (e.g. binary operators) compare equal here.
  return true;
}
976
977bool Instruction::isIdenticalTo(const Instruction *I) const {
978 return isIdenticalToWhenDefined(I) &&
979 SubclassOptionalData == I->SubclassOptionalData;
980}
981
982bool Instruction::isIdenticalToWhenDefined(const Instruction *I,
983 bool IntersectAttrs) const {
984 if (getOpcode() != I->getOpcode() ||
985 getNumOperands() != I->getNumOperands() || getType() != I->getType())
986 return false;
987
988 // If both instructions have no operands, they are identical.
989 if (getNumOperands() == 0 && I->getNumOperands() == 0)
990 return this->hasSameSpecialState(I2: I, /*IgnoreAlignment=*/false,
991 IntersectAttrs);
992
993 // We have two instructions of identical opcode and #operands. Check to see
994 // if all operands are the same.
995 if (!equal(LRange: operands(), RRange: I->operands()))
996 return false;
997
998 // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
999 if (const PHINode *Phi = dyn_cast<PHINode>(Val: this)) {
1000 const PHINode *OtherPhi = cast<PHINode>(Val: I);
1001 return equal(LRange: Phi->blocks(), RRange: OtherPhi->blocks());
1002 }
1003
1004 return this->hasSameSpecialState(I2: I, /*IgnoreAlignment=*/false,
1005 IntersectAttrs);
1006}
1007
1008// Keep this in sync with FunctionComparator::cmpOperations in
1009// lib/Transforms/IPO/MergeFunctions.cpp.
1010bool Instruction::isSameOperationAs(const Instruction *I,
1011 unsigned flags) const {
1012 bool IgnoreAlignment = flags & CompareIgnoringAlignment;
1013 bool UseScalarTypes = flags & CompareUsingScalarTypes;
1014 bool IntersectAttrs = flags & CompareUsingIntersectedAttrs;
1015
1016 if (getOpcode() != I->getOpcode() ||
1017 getNumOperands() != I->getNumOperands() ||
1018 (UseScalarTypes ?
1019 getType()->getScalarType() != I->getType()->getScalarType() :
1020 getType() != I->getType()))
1021 return false;
1022
1023 // We have two instructions of identical opcode and #operands. Check to see
1024 // if all operands are the same type
1025 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1026 if (UseScalarTypes ?
1027 getOperand(i)->getType()->getScalarType() !=
1028 I->getOperand(i)->getType()->getScalarType() :
1029 getOperand(i)->getType() != I->getOperand(i)->getType())
1030 return false;
1031
1032 return this->hasSameSpecialState(I2: I, IgnoreAlignment, IntersectAttrs);
1033}
1034
1035bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
1036 for (const Use &U : uses()) {
1037 // PHI nodes uses values in the corresponding predecessor block. For other
1038 // instructions, just check to see whether the parent of the use matches up.
1039 const Instruction *I = cast<Instruction>(Val: U.getUser());
1040 const PHINode *PN = dyn_cast<PHINode>(Val: I);
1041 if (!PN) {
1042 if (I->getParent() != BB)
1043 return true;
1044 continue;
1045 }
1046
1047 if (PN->getIncomingBlock(U) != BB)
1048 return true;
1049 }
1050 return false;
1051}
1052
1053bool Instruction::mayReadFromMemory() const {
1054 switch (getOpcode()) {
1055 default: return false;
1056 case Instruction::VAArg:
1057 case Instruction::Load:
1058 case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
1059 case Instruction::AtomicCmpXchg:
1060 case Instruction::AtomicRMW:
1061 case Instruction::CatchPad:
1062 case Instruction::CatchRet:
1063 return true;
1064 case Instruction::Call:
1065 case Instruction::Invoke:
1066 case Instruction::CallBr:
1067 return !cast<CallBase>(Val: this)->onlyWritesMemory();
1068 case Instruction::Store:
1069 return !cast<StoreInst>(Val: this)->isUnordered();
1070 }
1071}
1072
1073bool Instruction::mayWriteToMemory() const {
1074 switch (getOpcode()) {
1075 default: return false;
1076 case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
1077 case Instruction::Store:
1078 case Instruction::VAArg:
1079 case Instruction::AtomicCmpXchg:
1080 case Instruction::AtomicRMW:
1081 case Instruction::CatchPad:
1082 case Instruction::CatchRet:
1083 return true;
1084 case Instruction::Call:
1085 case Instruction::Invoke:
1086 case Instruction::CallBr:
1087 return !cast<CallBase>(Val: this)->onlyReadsMemory();
1088 case Instruction::Load:
1089 return !cast<LoadInst>(Val: this)->isUnordered();
1090 }
1091}
1092
1093bool Instruction::isAtomic() const {
1094 switch (getOpcode()) {
1095 default:
1096 return false;
1097 case Instruction::AtomicCmpXchg:
1098 case Instruction::AtomicRMW:
1099 case Instruction::Fence:
1100 return true;
1101 case Instruction::Load:
1102 return cast<LoadInst>(Val: this)->getOrdering() != AtomicOrdering::NotAtomic;
1103 case Instruction::Store:
1104 return cast<StoreInst>(Val: this)->getOrdering() != AtomicOrdering::NotAtomic;
1105 }
1106}
1107
1108bool Instruction::hasAtomicLoad() const {
1109 assert(isAtomic());
1110 switch (getOpcode()) {
1111 default:
1112 return false;
1113 case Instruction::AtomicCmpXchg:
1114 case Instruction::AtomicRMW:
1115 case Instruction::Load:
1116 return true;
1117 }
1118}
1119
1120bool Instruction::hasAtomicStore() const {
1121 assert(isAtomic());
1122 switch (getOpcode()) {
1123 default:
1124 return false;
1125 case Instruction::AtomicCmpXchg:
1126 case Instruction::AtomicRMW:
1127 case Instruction::Store:
1128 return true;
1129 }
1130}
1131
1132bool Instruction::isVolatile() const {
1133 switch (getOpcode()) {
1134 default:
1135 return false;
1136 case Instruction::AtomicRMW:
1137 return cast<AtomicRMWInst>(Val: this)->isVolatile();
1138 case Instruction::Store:
1139 return cast<StoreInst>(Val: this)->isVolatile();
1140 case Instruction::Load:
1141 return cast<LoadInst>(Val: this)->isVolatile();
1142 case Instruction::AtomicCmpXchg:
1143 return cast<AtomicCmpXchgInst>(Val: this)->isVolatile();
1144 case Instruction::Call:
1145 case Instruction::Invoke:
1146 // There are a very limited number of intrinsics with volatile flags.
1147 if (auto *II = dyn_cast<IntrinsicInst>(Val: this)) {
1148 if (auto *MI = dyn_cast<MemIntrinsic>(Val: II))
1149 return MI->isVolatile();
1150 switch (II->getIntrinsicID()) {
1151 default: break;
1152 case Intrinsic::matrix_column_major_load:
1153 return cast<ConstantInt>(Val: II->getArgOperand(i: 2))->isOne();
1154 case Intrinsic::matrix_column_major_store:
1155 return cast<ConstantInt>(Val: II->getArgOperand(i: 3))->isOne();
1156 }
1157 }
1158 return false;
1159 }
1160}
1161
1162Type *Instruction::getAccessType() const {
1163 switch (getOpcode()) {
1164 case Instruction::Store:
1165 return cast<StoreInst>(Val: this)->getValueOperand()->getType();
1166 case Instruction::Load:
1167 case Instruction::AtomicRMW:
1168 return getType();
1169 case Instruction::AtomicCmpXchg:
1170 return cast<AtomicCmpXchgInst>(Val: this)->getNewValOperand()->getType();
1171 case Instruction::Call:
1172 case Instruction::Invoke:
1173 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: this)) {
1174 switch (II->getIntrinsicID()) {
1175 case Intrinsic::masked_load:
1176 case Intrinsic::masked_gather:
1177 case Intrinsic::masked_expandload:
1178 case Intrinsic::vp_load:
1179 case Intrinsic::vp_gather:
1180 case Intrinsic::experimental_vp_strided_load:
1181 return II->getType();
1182 case Intrinsic::masked_store:
1183 case Intrinsic::masked_scatter:
1184 case Intrinsic::masked_compressstore:
1185 case Intrinsic::vp_store:
1186 case Intrinsic::vp_scatter:
1187 case Intrinsic::experimental_vp_strided_store:
1188 return II->getOperand(i_nocapture: 0)->getType();
1189 default:
1190 break;
1191 }
1192 }
1193 }
1194
1195 return nullptr;
1196}
1197
1198static bool canUnwindPastLandingPad(const LandingPadInst *LP,
1199 bool IncludePhaseOneUnwind) {
1200 // Because phase one unwinding skips cleanup landingpads, we effectively
1201 // unwind past this frame, and callers need to have valid unwind info.
1202 if (LP->isCleanup())
1203 return IncludePhaseOneUnwind;
1204
1205 for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
1206 Constant *Clause = LP->getClause(Idx: I);
1207 // catch ptr null catches all exceptions.
1208 if (LP->isCatch(Idx: I) && isa<ConstantPointerNull>(Val: Clause))
1209 return false;
1210 // filter [0 x ptr] catches all exceptions.
1211 if (LP->isFilter(Idx: I) && Clause->getType()->getArrayNumElements() == 0)
1212 return false;
1213 }
1214
1215 // May catch only some subset of exceptions, in which case other exceptions
1216 // will continue unwinding.
1217 return true;
1218}
1219
1220bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
1221 switch (getOpcode()) {
1222 case Instruction::Call:
1223 return !cast<CallInst>(Val: this)->doesNotThrow();
1224 case Instruction::CleanupRet:
1225 return cast<CleanupReturnInst>(Val: this)->unwindsToCaller();
1226 case Instruction::CatchSwitch:
1227 return cast<CatchSwitchInst>(Val: this)->unwindsToCaller();
1228 case Instruction::Resume:
1229 return true;
1230 case Instruction::Invoke: {
1231 // Landingpads themselves don't unwind -- however, an invoke of a skipped
1232 // landingpad may continue unwinding.
1233 BasicBlock *UnwindDest = cast<InvokeInst>(Val: this)->getUnwindDest();
1234 BasicBlock::iterator Pad = UnwindDest->getFirstNonPHIIt();
1235 if (auto *LP = dyn_cast<LandingPadInst>(Val&: Pad))
1236 return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
1237 return false;
1238 }
1239 case Instruction::CleanupPad:
1240 // Treat the same as cleanup landingpad.
1241 return IncludePhaseOneUnwind;
1242 default:
1243 return false;
1244 }
1245}
1246
1247bool Instruction::mayHaveSideEffects() const {
1248 return mayWriteToMemory() || mayThrow() || !willReturn();
1249}
1250
1251bool Instruction::isSafeToRemove() const {
1252 return (!isa<CallInst>(Val: this) || !this->mayHaveSideEffects()) &&
1253 !this->isTerminator() && !this->isEHPad();
1254}
1255
1256bool Instruction::willReturn() const {
1257 // Volatile store isn't guaranteed to return; see LangRef.
1258 if (auto *SI = dyn_cast<StoreInst>(Val: this))
1259 return !SI->isVolatile();
1260
1261 if (const auto *CB = dyn_cast<CallBase>(Val: this))
1262 return CB->hasFnAttr(Kind: Attribute::WillReturn);
1263 return true;
1264}
1265
1266bool Instruction::isLifetimeStartOrEnd() const {
1267 auto *II = dyn_cast<IntrinsicInst>(Val: this);
1268 if (!II)
1269 return false;
1270 Intrinsic::ID ID = II->getIntrinsicID();
1271 return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
1272}
1273
1274bool Instruction::isLaunderOrStripInvariantGroup() const {
1275 auto *II = dyn_cast<IntrinsicInst>(Val: this);
1276 if (!II)
1277 return false;
1278 Intrinsic::ID ID = II->getIntrinsicID();
1279 return ID == Intrinsic::launder_invariant_group ||
1280 ID == Intrinsic::strip_invariant_group;
1281}
1282
1283bool Instruction::isDebugOrPseudoInst() const {
1284 return isa<DbgInfoIntrinsic>(Val: this) || isa<PseudoProbeInst>(Val: this);
1285}
1286
/// Return the debug location to attribute to this instruction. The base
/// implementation simply forwards to getDebugLoc(); NOTE(review): presumably
/// subclasses or future changes provide a more "stable" location here —
/// confirm against callers before relying on a difference.
const DebugLoc &Instruction::getStableDebugLoc() const {
  return getDebugLoc();
}
1290
1291bool Instruction::isAssociative() const {
1292 if (auto *II = dyn_cast<IntrinsicInst>(Val: this))
1293 return II->isAssociative();
1294 unsigned Opcode = getOpcode();
1295 if (isAssociative(Opcode))
1296 return true;
1297
1298 switch (Opcode) {
1299 case FMul:
1300 return cast<FPMathOperator>(Val: this)->hasAllowReassoc();
1301 case FAdd:
1302 return cast<FPMathOperator>(Val: this)->hasAllowReassoc() &&
1303 cast<FPMathOperator>(Val: this)->hasNoSignedZeros();
1304 default:
1305 return false;
1306 }
1307}
1308
1309bool Instruction::isCommutative() const {
1310 if (auto *II = dyn_cast<IntrinsicInst>(Val: this))
1311 return II->isCommutative();
1312 // TODO: Should allow icmp/fcmp?
1313 return isCommutative(Opcode: getOpcode());
1314}
1315
1316bool Instruction::isCommutableOperand(unsigned Op) const {
1317 if (auto *II = dyn_cast<IntrinsicInst>(Val: this))
1318 return II->isCommutableOperand(Op);
1319 // TODO: Should allow icmp/fcmp?
1320 return isCommutative(Opcode: getOpcode());
1321}
1322
/// Return the number of successors of this terminator, dispatching to the
/// concrete terminator class via the Instruction.def X-macro. Asserts (via
/// llvm_unreachable) on non-terminators.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1334
/// Return successor \p idx of this terminator, dispatching to the concrete
/// terminator class via the Instruction.def X-macro. Asserts (via
/// llvm_unreachable) on non-terminators.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1346
/// Replace successor \p idx of this terminator with \p B, dispatching to the
/// concrete terminator class via the Instruction.def X-macro. Asserts (via
/// llvm_unreachable) on non-terminators.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1358
/// Return an iterator range over this terminator's successors, dispatching to
/// the concrete terminator class via the Instruction.def X-macro. Asserts
/// (via llvm_unreachable) on non-terminators.
iterator_range<Instruction::const_succ_iterator>
Instruction::successors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->successors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1371
1372void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
1373 auto Succs = successors();
1374 for (auto I = Succs.begin(), E = Succs.end(); I != E; ++I)
1375 if (*I == OldBB)
1376 I.getUse()->set(NewBB);
1377}
1378
/// Fallback implementation for subclasses that fail to provide their own
/// cloneImpl; reaching this at runtime is a programming error.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
1382
1383void Instruction::swapProfMetadata() {
1384 MDNode *ProfileData = getBranchWeightMDNode(I: *this);
1385 if (!ProfileData)
1386 return;
1387 unsigned FirstIdx = getBranchWeightOffset(ProfileData);
1388 if (ProfileData->getNumOperands() != 2 + FirstIdx)
1389 return;
1390
1391 unsigned SecondIdx = FirstIdx + 1;
1392 SmallVector<Metadata *, 4> Ops;
1393 // If there are more weights past the second, we can't swap them
1394 if (ProfileData->getNumOperands() > SecondIdx + 1)
1395 return;
1396 for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) {
1397 Ops.push_back(Elt: ProfileData->getOperand(I: Idx));
1398 }
1399 // Switch the order of the weights
1400 Ops.push_back(Elt: ProfileData->getOperand(I: SecondIdx));
1401 Ops.push_back(Elt: ProfileData->getOperand(I: FirstIdx));
1402 setMetadata(KindID: LLVMContext::MD_prof,
1403 Node: MDNode::get(Context&: ProfileData->getContext(), MDs: Ops));
1404}
1405
1406void Instruction::copyMetadata(const Instruction &SrcInst,
1407 ArrayRef<unsigned> WL) {
1408 if (WL.empty() || is_contained(Range&: WL, Element: LLVMContext::MD_dbg))
1409 setDebugLoc(SrcInst.getDebugLoc().orElse(Other: getDebugLoc()));
1410
1411 if (!SrcInst.hasMetadata())
1412 return;
1413
1414 SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end());
1415
1416 // Otherwise, enumerate and copy over metadata from the old instruction to the
1417 // new one.
1418 SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
1419 SrcInst.getAllMetadataOtherThanDebugLoc(MDs&: TheMDs);
1420 for (const auto &MD : TheMDs) {
1421 if (WL.empty() || WLS.count(V: MD.first))
1422 setMetadata(KindID: MD.first, Node: MD.second);
1423 }
1424}
1425
/// Create a copy of this instruction: same operands, optional flags, and
/// metadata (including debug location). The clone has no parent block.
/// Dispatches to the concrete subclass's cloneImpl via the Instruction.def
/// X-macro.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  // Carry over the optional-flag bits (nsw/nuw/exact/...) and all metadata.
  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(SrcInst: *this);
  return New;
}
1443