//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
using namespace llvm;

namespace llvm {

// FIXME: Flag used for an ablation performance test, Issue #147390. Placing it
// here because referencing IR should be feasible from anywhere. Will be
// removed after the ablation test.
cl::opt<bool> ProfcheckDisableMetadataFixes(
    "profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false),
    cl::desc(
        "Disable metadata propagation fixes discovered through Issue #147390"));

} // end namespace llvm

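// InsertPosition lets the Instruction constructor accept either an
// instruction to insert before or a block to append to. A null Instruction*
// or BasicBlock* converts to an invalid iterator, meaning "don't insert
// anywhere yet".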
InsertPosition::InsertPosition(Instruction *InsertBefore)
    : InsertAt(InsertBefore ? InsertBefore->getIterator()
                            : InstListType::iterator()) {}
InsertPosition::InsertPosition(BasicBlock *InsertAtEnd)
    : InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {}

Instruction::Instruction(Type *ty, unsigned it, AllocInfo AllocInfo,
                         InsertPosition InsertBefore)
    : User(ty, Value::InstructionVal + it, AllocInfo) {
  // When called with an iterator, there must be a block to insert into.
  if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) {
    BasicBlock *BB = InsertIt.getNodeParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    insertInto(BB, InsertBefore);
  }
}

Instruction::~Instruction() {
  assert(!getParent() && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with poison to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, PoisonValue::get(getType()));

  // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
  // mapping in LLVMContext.
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

const DataLayout &Instruction::getDataLayout() const {
  return getModule()->getDataLayout();
}

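// Note the difference between the next two methods: removeFromParent() only
// unlinks the instruction from its containing block (the caller still owns
// it), while eraseFromParent() unlinks *and* deletes it, returning an
// iterator to the following instruction.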
void Instruction::removeFromParent() {
  // Perform any debug-info maintenance required.
  handleMarkerRemoval();

  getParent()->getInstList().remove(getIterator());
}

void Instruction::handleMarkerRemoval() {
  if (!DebugMarker)
    return;

  DebugMarker->removeMarker();
}

BasicBlock::iterator Instruction::eraseFromParent() {
  handleMarkerRemoval();
  return getParent()->getInstList().erase(getIterator());
}

void Instruction::insertBefore(Instruction *InsertPos) {
  insertBefore(InsertPos->getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
  insertBefore(*InsertPos->getParent(), InsertPos);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  BasicBlock *DestParent = InsertPos->getParent();

  DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
}

void Instruction::insertAfter(BasicBlock::iterator InsertPos) {
  BasicBlock *DestParent = InsertPos->getParent();

  DestParent->getInstList().insertAfter(InsertPos, this);
}

BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
                                             BasicBlock::iterator It) {
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  insertBefore(*ParentBB, It);
  return getIterator();
}

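// This overload performs the actual insertion plus the associated debug-info
// bookkeeping. The iterator's "head bit" records the caller's intent: if set,
// "this" is placed ahead of any DbgRecords attached at InsertPos; if clear,
// those records are transferred so that they still come before "this".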
void Instruction::insertBefore(BasicBlock &BB,
                               InstListType::iterator InsertPos) {
  assert(!DebugMarker);

  BB.getInstList().insert(InsertPos, this);

  // We've inserted "this": if InsertAtHead is set then it comes before any
  // DbgVariableRecords attached to InsertPos. But if it's not set, then any
  // DbgRecords should now come before "this".
  bool InsertAtHead = InsertPos.getHeadBit();
  if (!InsertAtHead) {
    DbgMarker *SrcMarker = BB.getMarker(InsertPos);
    if (SrcMarker && !SrcMarker->empty()) {
      // If this assertion fires, the calling code is about to insert a PHI
      // after debug-records, which would form a sequence like:
      //     %0 = PHI
      //     #dbg_value
      //     %1 = PHI
      // Which is de-normalised and undesired -- hence the assertion. To avoid
      // this, you must insert at that position using an iterator, and it must
      // be acquired by calling getFirstNonPHIIt / begin or similar methods on
      // the block. This will signal to this behind-the-scenes debug-info
      // maintenance code that you intend the PHI to be ahead of everything,
      // including any debug-info.
      assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
      adoptDbgRecords(&BB, InsertPos, false);
    }
  }

  // If we're inserting a terminator, check if we need to flush out
  // TrailingDbgRecords. Inserting instructions at the end of an incomplete
  // block is handled by the code block above.
  if (isTerminator())
    getParent()->flushTerminatorDbgRecords();
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
}

void Instruction::moveBefore(BasicBlock::iterator MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos, false);
}

void Instruction::moveBeforePreserving(Instruction *MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
}

void Instruction::moveBeforePreserving(BasicBlock::iterator MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos, true);
}

void Instruction::moveAfter(Instruction *MovePos) {
  auto NextIt = std::next(MovePos->getIterator());
  // We want this instruction to be moved to before NextIt in the instruction
  // list, but also before NextIt's debug value range.
  NextIt.setHeadBit(true);
  moveBeforeImpl(*MovePos->getParent(), NextIt, false);
}

void Instruction::moveAfter(InstListType::iterator MovePos) {
  // We want this instruction to be moved to before MovePos in the instruction
  // list, but also before MovePos's debug value range.
  MovePos.setHeadBit(true);
  moveBeforeImpl(*MovePos->getParent(), MovePos, false);
}

void Instruction::moveAfterPreserving(Instruction *MovePos) {
  auto NextIt = std::next(MovePos->getIterator());
  // We want this instruction and its debug range to be moved to before NextIt
  // in the instruction list, but also before NextIt's debug value range.
  NextIt.setHeadBit(true);
  moveBeforeImpl(*MovePos->getParent(), NextIt, true);
}
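
// A note on the "head bit" used above: a BasicBlock::iterator identifies a
// position, and any DbgRecords attached there conceptually sit between the
// previous instruction and that position. Setting the head bit asks the move
// machinery to place the moved instruction ahead of those records. For
// example, moveAfter(%a) in the fragment below puts the moved instruction
// between %a and the #dbg_value attached to %b:
//     %a = ...
//     <moved instruction lands here>
//     #dbg_value(...)
//     %b = ...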
222
223void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
224 moveBeforeImpl(BB, I, Preserve: false);
225}
226
227void Instruction::moveBeforePreserving(BasicBlock &BB,
228 InstListType::iterator I) {
229 moveBeforeImpl(BB, I, Preserve: true);
230}
231
232void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
233 bool Preserve) {
234 assert(I == BB.end() || I->getParent() == &BB);
235 bool InsertAtHead = I.getHeadBit();
236
237 // If we've been given the "Preserve" flag, then just move the DbgRecords with
238 // the instruction, no more special handling needed.
239 if (DebugMarker && !Preserve) {
240 if (I != this->getIterator() || InsertAtHead) {
241 // "this" is definitely moving in the list, or it's moving ahead of its
242 // attached DbgVariableRecords. Detach any existing DbgRecords.
243 handleMarkerRemoval();
244 }
245 }
246
247 // Move this single instruction. Use the list splice method directly, not
248 // the block splicer, which will do more debug-info things.
249 BB.getInstList().splice(where: I, L2&: getParent()->getInstList(), first: getIterator());
250
251 if (!Preserve) {
252 DbgMarker *NextMarker = getParent()->getNextMarker(I: this);
253
254 // If we're inserting at point I, and not in front of the DbgRecords
255 // attached there, then we should absorb the DbgRecords attached to I.
256 if (!InsertAtHead && NextMarker && !NextMarker->empty()) {
257 adoptDbgRecords(BB: &BB, It: I, InsertAtHead: false);
258 }
259 }
260
261 if (isTerminator())
262 getParent()->flushTerminatorDbgRecords();
263}
264
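/// Clone any DbgRecords attached to \p From onto this instruction, optionally
/// starting from \p FromHere and optionally placing the clones at the head of
/// this instruction's marker. A marker is created on demand if this
/// instruction doesn't have one yet.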
iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom(
    const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere,
    bool InsertAtHead) {
  if (!From->DebugMarker)
    return DbgMarker::getEmptyDbgRecordRange();

  if (!DebugMarker)
    getParent()->createMarker(this);

  return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
                                         InsertAtHead);
}

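/// Return the position at which DbgRecords should be re-inserted to land in
/// their original place: the head of the next instruction's record list, or
/// std::nullopt if the next instruction has no attached records.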
std::optional<DbgRecord::self_iterator>
Instruction::getDbgReinsertionPosition() {
  // Is there a marker on the next instruction?
  DbgMarker *NextMarker = getParent()->getNextMarker(this);
  if (!NextMarker)
    return std::nullopt;

  // Are there any DbgRecords in the next marker?
  if (NextMarker->StoredDbgRecords.empty())
    return std::nullopt;

  return NextMarker->StoredDbgRecords.begin();
}

bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); }

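/// Transfer any DbgRecords attached at position \p It in \p BB onto this
/// instruction. If this instruction has no marker of its own and \p It isn't
/// the block end, the source marker is simply re-pointed here as an
/// optimization; otherwise the records are absorbed into this instruction's
/// marker, and any now-empty trailing marker at the end of the block is
/// released.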
void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It,
                                  bool InsertAtHead) {
  DbgMarker *SrcMarker = BB->getMarker(It);
  auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() {
    if (BB->end() == It) {
      SrcMarker->eraseFromParent();
      BB->deleteTrailingDbgRecords();
    }
  };

  if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) {
    ReleaseTrailingDbgRecords();
    return;
  }

  // If we have DbgMarkers attached to this instruction, we have to honour the
  // ordering of DbgRecords between this and the other marker. Fall back to just
  // absorbing from the source.
  if (DebugMarker || It == BB->end()) {
    // Ensure we _do_ have a marker.
    getParent()->createMarker(this);
    DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);

    // Having transferred everything out of SrcMarker, we _could_ clean it up
    // and free the marker now. However, that's a lot of heap-accounting for a
    // small amount of memory with a good chance of re-use. Leave it for the
    // moment. It will be released when the Instruction is freed in the worst
    // case.
    // However: if we transferred from a trailing marker off the end of the
    // block, it's important to not leave the empty marker trailing. It will
    // give a misleading impression that some debug records have been left
    // trailing.
    ReleaseTrailingDbgRecords();
  } else {
    // Optimisation: we're transferring all the DbgRecords from the source
    // marker onto this empty location: just adopt the other instruction's
    // marker.
    DebugMarker = SrcMarker;
    DebugMarker->MarkedInstr = this;
    It->DebugMarker = nullptr;
  }
}

void Instruction::dropDbgRecords() {
  if (DebugMarker)
    DebugMarker->dropDbgRecords();
}

void Instruction::dropOneDbgRecord(DbgRecord *DVR) {
  DebugMarker->dropOneDbgRecord(DVR);
}

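// Instruction positions ("Order") are cached on each instruction and
// invalidated by insertions and removals; comesBefore() lazily renumbers the
// whole block before comparing when the cache is stale.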
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(getParent() && Other->getParent() &&
         "instructions without BB parents have no order");
  assert(getParent() == Other->getParent() &&
         "cross-BB instruction order comparison");
  if (!getParent()->isInstrOrderValid())
    const_cast<BasicBlock *>(getParent())->renumberInstructions();
  return Order < Other->Order;
}

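/// Find the position after this instruction's definition at which a user of
/// the value could legally be inserted. For most instructions that is simply
/// the next position in the block; PHIs defer to the block's first insertion
/// point, invokes to their normal destination. callbr (def available in
/// several successors) and catchswitch (no legal position at all) yield
/// std::nullopt.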
std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
  assert(!getType()->isVoidTy() && "Instruction must define result");
  BasicBlock *InsertBB;
  BasicBlock::iterator InsertPt;
  if (auto *PN = dyn_cast<PHINode>(this)) {
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (isa<CallBrInst>(this)) {
    // Def is available in multiple successors, there's no single dominating
    // insertion point.
    return std::nullopt;
  } else {
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
    InsertBB = getParent();
    InsertPt = std::next(getIterator());
    // Any instruction inserted immediately after "this" will come before any
    // debug-info records take effect -- thus, set the head bit indicating that
    // to debug-info-transfer code.
    InsertPt.setHeadBit(true);
  }

  // catchswitch blocks don't have any legal insertion point (because they
  // are both an exception pad and a terminator).
  if (InsertPt == InsertBB->end())
    return std::nullopt;
  return InsertPt;
}

bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](const Value *V) { return V->hasOneUser(); });
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoUnsignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoSignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

void Instruction::setNonNeg(bool b) {
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
  SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
                         (b * PossiblyNonNegInst::NonNeg);
}

bool Instruction::hasNoUnsignedWrap() const {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoUnsignedWrap();

  return cast<TruncInst>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoSignedWrap();

  return cast<TruncInst>(this)->hasNoSignedWrap();
}

bool Instruction::hasNonNeg() const {
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
  return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}

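/// Strip every flag whose presence can turn a well-defined result into
/// poison: nuw/nsw on add/sub/mul/shl and trunc, exact on division and
/// shifts, disjoint on or, all GEP no-wrap flags, nneg on zext/uitofp,
/// samesign on icmp, the "poison on zero/INT_MIN input" arguments of
/// ctlz/cttz/abs, and the nnan/ninf fast-math flags.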
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::Or:
    cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
    break;

  case Instruction::UIToFP:
  case Instruction::ZExt:
    setNonNeg(false);
    break;

  case Instruction::Trunc:
    cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
    cast<TruncInst>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::ICmp:
    cast<ICmpInst>(this)->setSameSign(false);
    break;

  case Instruction::Call: {
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::ctlz:
      case Intrinsic::cttz:
      case Intrinsic::abs:
        II->setOperand(1, ConstantInt::getFalse(getContext()));
        break;
      }
    }
    break;
  }
  }

  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}

bool Instruction::hasPoisonGeneratingMetadata() const {
  return any_of(Metadata::PoisonGeneratingIDs,
                [this](unsigned ID) { return hasMetadata(ID); });
}

bool Instruction::hasNonDebugLocLoopMetadata() const {
  // If there is no loop metadata at all, we also don't have
  // non-debug loop metadata, obviously.
  if (!hasMetadata(LLVMContext::MD_loop))
    return false;

  // If we do have loop metadata, retrieve it.
  MDNode *LoopMD = getMetadata(LLVMContext::MD_loop);

  // Check if the existing operands are debug locations. This loop
  // should terminate after at most three iterations. Skip
  // the first item because it is a self-reference.
  for (const MDOperand &Op : llvm::drop_begin(LoopMD->operands())) {
    // Check for debug location type by attempting a cast.
    if (!isa<DILocation>(Op)) {
      return true;
    }
  }

  // If we get here, then all we have is debug locations in the loop metadata.
  return false;
}

void Instruction::dropPoisonGeneratingMetadata() {
  for (unsigned ID : Metadata::PoisonGeneratingIDs)
    eraseMetadata(ID);
}

bool Instruction::hasPoisonGeneratingReturnAttributes() const {
  if (const auto *CB = dyn_cast<CallBase>(this)) {
    AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
    return RetAttrs.hasAttribute(Attribute::Range) ||
           RetAttrs.hasAttribute(Attribute::Alignment) ||
           RetAttrs.hasAttribute(Attribute::NonNull);
  }
  return false;
}

void Instruction::dropPoisonGeneratingReturnAttributes() {
  if (auto *CB = dyn_cast<CallBase>(this)) {
    AttributeMask AM;
    AM.addAttribute(Attribute::Range);
    AM.addAttribute(Attribute::Alignment);
    AM.addAttribute(Attribute::NonNull);
    CB->removeRetAttrs(AM);
  }
  assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
}

552
553void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
554 ArrayRef<unsigned> KnownIDs) {
555 dropUnknownNonDebugMetadata(KnownIDs);
556 auto *CB = dyn_cast<CallBase>(Val: this);
557 if (!CB)
558 return;
559 // For call instructions, we also need to drop parameter and return attributes
560 // that can cause UB if the call is moved to a location where the attribute is
561 // not valid.
562 AttributeList AL = CB->getAttributes();
563 if (AL.isEmpty())
564 return;
565 AttributeMask UBImplyingAttributes =
566 AttributeFuncs::getUBImplyingAttributes();
567 for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
568 CB->removeParamAttrs(ArgNo, AttrsToRemove: UBImplyingAttributes);
569 CB->removeRetAttrs(AttrsToRemove: UBImplyingAttributes);
570}
571
572void Instruction::dropUBImplyingAttrsAndMetadata(ArrayRef<unsigned> Keep) {
573 // !annotation and !prof metadata does not impact semantics.
574 // !range, !nonnull and !align produce poison, so they are safe to speculate.
575 // !fpmath specifies floating-point precision and does not imply UB.
576 // !noundef and various AA metadata must be dropped, as it generally produces
577 // immediate undefined behavior.
578 static const unsigned KnownIDs[] = {
579 LLVMContext::MD_annotation, LLVMContext::MD_range,
580 LLVMContext::MD_nonnull, LLVMContext::MD_align,
581 LLVMContext::MD_fpmath, LLVMContext::MD_prof};
582 SmallVector<unsigned> KeepIDs;
583 KeepIDs.reserve(N: Keep.size() + std::size(KnownIDs));
584 append_range(C&: KeepIDs, R: (!ProfcheckDisableMetadataFixes ? KnownIDs
585 : drop_end(RangeOrContainer: KnownIDs)));
586 append_range(C&: KeepIDs, R&: Keep);
587 dropUBImplyingAttrsAndUnknownMetadata(KnownIDs: KeepIDs);
588}
589
590bool Instruction::hasUBImplyingAttrs() const {
591 auto *CB = dyn_cast<CallBase>(Val: this);
592 if (!CB)
593 return false;
594 // For call instructions, we also need to check parameter and return
595 // attributes that can cause UB.
596 for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
597 if (CB->isPassingUndefUB(ArgNo))
598 return true;
599 return CB->hasRetAttr(Kind: Attribute::NoUndef) ||
600 CB->hasRetAttr(Kind: Attribute::Dereferenceable) ||
601 CB->hasRetAttr(Kind: Attribute::DereferenceableOrNull);
602}
603
604bool Instruction::isExact() const {
605 return cast<PossiblyExactOperator>(Val: this)->isExact();
606}
607
608void Instruction::setFast(bool B) {
609 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
610 cast<FPMathOperator>(Val: this)->setFast(B);
611}
612
613void Instruction::setHasAllowReassoc(bool B) {
614 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
615 cast<FPMathOperator>(Val: this)->setHasAllowReassoc(B);
616}
617
618void Instruction::setHasNoNaNs(bool B) {
619 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
620 cast<FPMathOperator>(Val: this)->setHasNoNaNs(B);
621}
622
623void Instruction::setHasNoInfs(bool B) {
624 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
625 cast<FPMathOperator>(Val: this)->setHasNoInfs(B);
626}
627
628void Instruction::setHasNoSignedZeros(bool B) {
629 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
630 cast<FPMathOperator>(Val: this)->setHasNoSignedZeros(B);
631}
632
633void Instruction::setHasAllowReciprocal(bool B) {
634 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
635 cast<FPMathOperator>(Val: this)->setHasAllowReciprocal(B);
636}
637
638void Instruction::setHasAllowContract(bool B) {
639 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
640 cast<FPMathOperator>(Val: this)->setHasAllowContract(B);
641}
642
643void Instruction::setHasApproxFunc(bool B) {
644 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
645 cast<FPMathOperator>(Val: this)->setHasApproxFunc(B);
646}
647
648void Instruction::setFastMathFlags(FastMathFlags FMF) {
649 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
650 cast<FPMathOperator>(Val: this)->setFastMathFlags(FMF);
651}
652
653void Instruction::copyFastMathFlags(FastMathFlags FMF) {
654 assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
655 cast<FPMathOperator>(Val: this)->copyFastMathFlags(FMF);
656}
657
658bool Instruction::isFast() const {
659 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
660 return cast<FPMathOperator>(Val: this)->isFast();
661}
662
663bool Instruction::hasAllowReassoc() const {
664 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
665 return cast<FPMathOperator>(Val: this)->hasAllowReassoc();
666}
667
668bool Instruction::hasNoNaNs() const {
669 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
670 return cast<FPMathOperator>(Val: this)->hasNoNaNs();
671}
672
673bool Instruction::hasNoInfs() const {
674 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
675 return cast<FPMathOperator>(Val: this)->hasNoInfs();
676}
677
678bool Instruction::hasNoSignedZeros() const {
679 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
680 return cast<FPMathOperator>(Val: this)->hasNoSignedZeros();
681}
682
683bool Instruction::hasAllowReciprocal() const {
684 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
685 return cast<FPMathOperator>(Val: this)->hasAllowReciprocal();
686}
687
688bool Instruction::hasAllowContract() const {
689 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
690 return cast<FPMathOperator>(Val: this)->hasAllowContract();
691}
692
693bool Instruction::hasApproxFunc() const {
694 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
695 return cast<FPMathOperator>(Val: this)->hasApproxFunc();
696}
697
698FastMathFlags Instruction::getFastMathFlags() const {
699 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
700 return cast<FPMathOperator>(Val: this)->getFastMathFlags();
701}
702
703void Instruction::copyFastMathFlags(const Instruction *I) {
704 copyFastMathFlags(FMF: I->getFastMathFlags());
705}
706
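/// Copy all applicable IR-level flags from \p V onto this instruction:
/// nuw/nsw (optionally), exact, disjoint, fast-math flags, nneg and samesign,
/// each guarded by a check that both instructions actually carry the flag in
/// question. GEP no-wrap flags are unioned with any flags already present.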
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(SrcPD->isDisjoint());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(NNI->hasNonNeg());

  if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
    if (auto *DestICmp = dyn_cast<ICmpInst>(this))
      DestICmp->setSameSign(SrcICmp->hasSameSign());
}

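/// Logically AND each IR-level flag with the corresponding flag on \p V.
/// This is the right operation when two instructions are being combined into
/// one (e.g. when deduplicating), since the merged instruction may only keep
/// a flag if it held for both originals.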
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(hasNonNeg() && NNI->hasNonNeg());

  if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
    if (auto *DestICmp = dyn_cast<ICmpInst>(this))
      DestICmp->setSameSign(DestICmp->hasSameSign() && SrcICmp->hasSameSign());
}

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToAddr:     return "ptrtoaddr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}

/// This must be kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/Utils/FunctionComparator.cpp.
bool Instruction::hasSameSpecialState(const Instruction *I2,
                                      bool IgnoreAlignment,
                                      bool IntersectAttrs) const {
  const auto *I1 = this;
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  auto CheckAttrsSame = [IntersectAttrs](const CallBase *CB0,
                                         const CallBase *CB1) {
    return IntersectAttrs
               ? CB0->getAttributes()
                     .intersectWith(CB0->getContext(), CB1->getAttributes())
                     .has_value()
               : CB0->getAttributes() == CB1->getAttributes();
  };

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<InvokeInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallBrInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const SwitchInst *SI = dyn_cast<SwitchInst>(I1)) {
    for (auto [Case1, Case2] : zip(SI->cases(), cast<SwitchInst>(I2)->cases()))
      if (Case1.getCaseValue() != Case2.getCaseValue())
        return false;
    return true;
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           (CXI->getAlign() == cast<AtomicCmpXchgInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           (RMWI->getAlign() == cast<AtomicRMWInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  return true;
}

bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I,
                                           bool IntersectAttrs) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() || getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
                                     IntersectAttrs);

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same.
  if (!equal(operands(), I->operands()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *Phi = dyn_cast<PHINode>(this)) {
    const PHINode *OtherPhi = cast<PHINode>(I);
    return equal(Phi->blocks(), OtherPhi->blocks());
  }

  return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
                                   IntersectAttrs);
}

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/Utils/FunctionComparator.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;
  bool IntersectAttrs = flags & CompareUsingIntersectedAttrs;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return this->hasSameSpecialState(I, IgnoreAlignment, IntersectAttrs);
}

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // A PHI node uses the value in its corresponding predecessor block. For
    // other instructions, just check whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}

Type *Instruction::getAccessType() const {
  switch (getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(this)->getValueOperand()->getType();
  case Instruction::Load:
  case Instruction::AtomicRMW:
    return getType();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
  case Instruction::Call:
  case Instruction::Invoke:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
      case Intrinsic::masked_gather:
      case Intrinsic::masked_expandload:
      case Intrinsic::vp_load:
      case Intrinsic::vp_gather:
      case Intrinsic::experimental_vp_strided_load:
        return II->getType();
      case Intrinsic::masked_store:
      case Intrinsic::masked_scatter:
      case Intrinsic::masked_compressstore:
      case Intrinsic::vp_store:
      case Intrinsic::vp_scatter:
      case Intrinsic::experimental_vp_strided_store:
        return II->getOperand(0)->getType();
      default:
        break;
      }
    }
  }

  return nullptr;
}

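/// Return true if an exception unwinding into \p LP may continue past it,
/// i.e. the landingpad does not catch every possible exception. Cleanup pads
/// count as unwinding past only for phase-one unwinding, which skips them.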
static bool canUnwindPastLandingPad(const LandingPadInst *LP,
                                    bool IncludePhaseOneUnwind) {
  // Because phase one unwinding skips cleanup landingpads, we effectively
  // unwind past this frame, and callers need to have valid unwind info.
  if (LP->isCleanup())
    return IncludePhaseOneUnwind;

  for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
    Constant *Clause = LP->getClause(I);
    // catch ptr null catches all exceptions.
    if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
      return false;
    // filter [0 x ptr] catches all exceptions.
    if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
      return false;
  }

  // May catch only some subset of exceptions, in which case other exceptions
  // will continue unwinding.
  return true;
}

bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
  switch (getOpcode()) {
  case Instruction::Call:
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
  case Instruction::Resume:
    return true;
  case Instruction::Invoke: {
    // Landingpads themselves don't unwind -- however, an invoke of a skipped
    // landingpad may continue unwinding.
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
    BasicBlock::iterator Pad = UnwindDest->getFirstNonPHIIt();
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
      return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
    return false;
  }
  case Instruction::CleanupPad:
    // Treat the same as cleanup landingpad.
    return IncludePhaseOneUnwind;
  default:
    return false;
  }
}

bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator() && !this->isEHPad();
}

bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    return CB->hasFnAttr(Attribute::WillReturn);
  return true;
}

bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

const DebugLoc &Instruction::getStableDebugLoc() const {
  return getDebugLoc();
}

bool Instruction::isAssociative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isAssociative();
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
    return cast<FPMathOperator>(this)->hasAllowReassoc();
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}

bool Instruction::isCommutableOperand(unsigned Op) const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutableOperand(Op);
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

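/// Swap the order of the two branch weights in this instruction's !prof
/// metadata, leaving any leading operands (the "branch_weights" tag, plus an
/// optional "expected" marker accounted for by getBranchWeightOffset) in
/// place. Bails out unless there are exactly two weights, since swapping is
/// only well-defined for two-successor branches.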
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getBranchWeightMDNode(*this);
  if (!ProfileData)
    return;
  unsigned FirstIdx = getBranchWeightOffset(ProfileData);
  if (ProfileData->getNumOperands() != 2 + FirstIdx)
    return;

  unsigned SecondIdx = FirstIdx + 1;
  SmallVector<Metadata *, 4> Ops;
  // If there are more weights past the second, we can't swap them.
  if (ProfileData->getNumOperands() > SecondIdx + 1)
    return;
  for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) {
    Ops.push_back(ProfileData->getOperand(Idx));
  }
  // Switch the order of the weights.
  Ops.push_back(ProfileData->getOperand(SecondIdx));
  Ops.push_back(ProfileData->getOperand(FirstIdx));
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}

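/// Copy metadata (including the debug location) from \p SrcInst onto this
/// instruction. \p WL acts as a whitelist of metadata kinds to copy; an
/// empty list means "copy everything".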
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (WL.empty() || is_contained(WL, LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc().orElse(getDebugLoc()));

  if (!SrcInst.hasMetadata())
    return;

  SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end());

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
}

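// The instruction returned by clone() is a detached copy: it has no parent
// block and no name, but it carries over SubclassOptionalData (the IR-level
// flags) and all metadata.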
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}