1 | //===-- Instruction.cpp - Implement the Instruction class -----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the Instruction class for the IR library. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "llvm/IR/Instruction.h" |
14 | #include "llvm/ADT/DenseSet.h" |
15 | #include "llvm/ADT/STLExtras.h" |
16 | #include "llvm/IR/AttributeMask.h" |
17 | #include "llvm/IR/Attributes.h" |
18 | #include "llvm/IR/Constants.h" |
19 | #include "llvm/IR/InstrTypes.h" |
20 | #include "llvm/IR/Instructions.h" |
21 | #include "llvm/IR/IntrinsicInst.h" |
22 | #include "llvm/IR/Intrinsics.h" |
23 | #include "llvm/IR/LLVMContext.h" |
24 | #include "llvm/IR/MemoryModelRelaxationAnnotations.h" |
25 | #include "llvm/IR/Module.h" |
26 | #include "llvm/IR/Operator.h" |
27 | #include "llvm/IR/ProfDataUtils.h" |
28 | #include "llvm/IR/Type.h" |
29 | #include "llvm/Support/Compiler.h" |
30 | using namespace llvm; |
31 | |
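// InsertPosition unifies the two classic insertion idioms -- "insert before
// this instruction" and "insert at the end of this block" -- into a single
// iterator-based position; a null pointer yields an invalid iterator,
// meaning "do not insert".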
32 | InsertPosition::InsertPosition(Instruction *InsertBefore) |
33 | : InsertAt(InsertBefore ? InsertBefore->getIterator() |
34 | : InstListType::iterator()) {} |
35 | InsertPosition::InsertPosition(BasicBlock *InsertAtEnd) |
36 | : InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {} |
37 | |
38 | Instruction::Instruction(Type *ty, unsigned it, AllocInfo AllocInfo, |
39 | InsertPosition InsertBefore) |
40 | : User(ty, Value::InstructionVal + it, AllocInfo) { |
41 | // When called with an iterator, there must be a block to insert into. |
42 | if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) { |
43 | BasicBlock *BB = InsertIt.getNodeParent(); |
    assert(BB && "Instruction to insert before is not in a basic block!");
    insertInto(BB, InsertBefore);
46 | } |
47 | } |
48 | |
49 | Instruction::~Instruction() { |
  assert(!getParent() && "Instruction still linked in the program!");
51 | |
52 | // Replace any extant metadata uses of this instruction with poison to |
53 | // preserve debug info accuracy. Some alternatives include: |
54 | // - Treat Instruction like any other Value, and point its extant metadata |
55 | // uses to an empty ValueAsMetadata node. This makes extant dbg.value uses |
56 | // trivially dead (i.e. fair game for deletion in many passes), leading to |
57 | // stale dbg.values being in effect for too long. |
58 | // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal |
59 | // correct. OTOH results in wasted work in some common cases (e.g. when all |
60 | // instructions in a BasicBlock are deleted). |
61 | if (isUsedByMetadata()) |
    ValueAsMetadata::handleRAUW(this, PoisonValue::get(getType()));
63 | |
64 | // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s) |
65 | // mapping in LLVMContext. |
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
67 | } |
68 | |
69 | const Module *Instruction::getModule() const { |
70 | return getParent()->getModule(); |
71 | } |
72 | |
73 | const Function *Instruction::getFunction() const { |
74 | return getParent()->getParent(); |
75 | } |
76 | |
77 | const DataLayout &Instruction::getDataLayout() const { |
78 | return getModule()->getDataLayout(); |
79 | } |
80 | |
81 | void Instruction::removeFromParent() { |
  // Perform any debug-info maintenance required.
83 | handleMarkerRemoval(); |
84 | |
  getParent()->getInstList().remove(getIterator());
86 | } |
87 | |
88 | void Instruction::handleMarkerRemoval() { |
89 | if (!DebugMarker) |
90 | return; |
91 | |
92 | DebugMarker->removeMarker(); |
93 | } |
94 | |
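/// Unlink this instruction from its basic block, delete it, and return an
/// iterator to the instruction that followed it.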
95 | BasicBlock::iterator Instruction::eraseFromParent() { |
96 | handleMarkerRemoval(); |
  return getParent()->getInstList().erase(getIterator());
98 | } |
99 | |
100 | void Instruction::insertBefore(Instruction *InsertPos) { |
  insertBefore(InsertPos->getIterator());
102 | } |
103 | |
104 | /// Insert an unlinked instruction into a basic block immediately before the |
105 | /// specified instruction. |
106 | void Instruction::insertBefore(BasicBlock::iterator InsertPos) { |
  insertBefore(*InsertPos->getParent(), InsertPos);
108 | } |
109 | |
110 | /// Insert an unlinked instruction into a basic block immediately after the |
111 | /// specified instruction. |
112 | void Instruction::insertAfter(Instruction *InsertPos) { |
113 | BasicBlock *DestParent = InsertPos->getParent(); |
114 | |
  DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
116 | } |
117 | |
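/// Insert an unlinked instruction into a basic block immediately after the
/// specified position.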
118 | void Instruction::insertAfter(BasicBlock::iterator InsertPos) { |
119 | BasicBlock *DestParent = InsertPos->getParent(); |
120 | |
  DestParent->getInstList().insertAfter(InsertPos, this);
122 | } |
123 | |
124 | BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB, |
125 | BasicBlock::iterator It) { |
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  insertBefore(*ParentBB, It);
130 | return getIterator(); |
131 | } |
132 | |
133 | void Instruction::insertBefore(BasicBlock &BB, |
134 | InstListType::iterator InsertPos) { |
135 | assert(!DebugMarker); |
136 | |
  BB.getInstList().insert(InsertPos, this);
138 | |
139 | // We've inserted "this": if InsertAtHead is set then it comes before any |
140 | // DbgVariableRecords attached to InsertPos. But if it's not set, then any |
141 | // DbgRecords should now come before "this". |
142 | bool InsertAtHead = InsertPos.getHeadBit(); |
143 | if (!InsertAtHead) { |
    DbgMarker *SrcMarker = BB.getMarker(InsertPos);
145 | if (SrcMarker && !SrcMarker->empty()) { |
146 | // If this assertion fires, the calling code is about to insert a PHI |
147 | // after debug-records, which would form a sequence like: |
148 | // %0 = PHI |
149 | // #dbg_value |
150 | // %1 = PHI |
      // Which is de-normalised and undesired -- hence the assertion. To avoid
      // this, you must insert at that position using an iterator, and it must
      // be acquired by calling getFirstNonPHIIt / begin or similar methods on
      // the block. This will signal to this behind-the-scenes debug-info
      // maintenance code that you intend the PHI to be ahead of everything,
      // including any debug-info.
      assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
      adoptDbgRecords(&BB, InsertPos, false);
159 | } |
160 | } |
161 | |
162 | // If we're inserting a terminator, check if we need to flush out |
163 | // TrailingDbgRecords. Inserting instructions at the end of an incomplete |
164 | // block is handled by the code block above. |
165 | if (isTerminator()) |
166 | getParent()->flushTerminatorDbgRecords(); |
167 | } |
168 | |
169 | /// Unlink this instruction from its current basic block and insert it into the |
170 | /// basic block that MovePos lives in, right before MovePos. |
171 | void Instruction::moveBefore(Instruction *MovePos) { |
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
173 | } |
174 | |
175 | void Instruction::moveBefore(BasicBlock::iterator MovePos) { |
  moveBeforeImpl(*MovePos->getParent(), MovePos, false);
177 | } |
178 | |
179 | void Instruction::moveBeforePreserving(Instruction *MovePos) { |
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
181 | } |
182 | |
183 | void Instruction::moveBeforePreserving(BasicBlock::iterator MovePos) { |
  moveBeforeImpl(*MovePos->getParent(), MovePos, true);
185 | } |
186 | |
187 | void Instruction::moveAfter(Instruction *MovePos) { |
  auto NextIt = std::next(MovePos->getIterator());
189 | // We want this instruction to be moved to after NextIt in the instruction |
190 | // list, but before NextIt's debug value range. |
191 | NextIt.setHeadBit(true); |
  moveBeforeImpl(*MovePos->getParent(), NextIt, false);
193 | } |
194 | |
195 | void Instruction::moveAfter(InstListType::iterator MovePos) { |
196 | // We want this instruction to be moved to after NextIt in the instruction |
197 | // list, but before NextIt's debug value range. |
198 | MovePos.setHeadBit(true); |
  moveBeforeImpl(*MovePos->getParent(), MovePos, false);
200 | } |
201 | |
202 | void Instruction::moveAfterPreserving(Instruction *MovePos) { |
  auto NextIt = std::next(MovePos->getIterator());
204 | // We want this instruction and its debug range to be moved to after NextIt |
205 | // in the instruction list, but before NextIt's debug value range. |
206 | NextIt.setHeadBit(true); |
  moveBeforeImpl(*MovePos->getParent(), NextIt, true);
208 | } |
209 | |
210 | void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) { |
  moveBeforeImpl(BB, I, false);
212 | } |
213 | |
214 | void Instruction::moveBeforePreserving(BasicBlock &BB, |
215 | InstListType::iterator I) { |
  moveBeforeImpl(BB, I, true);
217 | } |
218 | |
219 | void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I, |
220 | bool Preserve) { |
221 | assert(I == BB.end() || I->getParent() == &BB); |
222 | bool InsertAtHead = I.getHeadBit(); |
223 | |
224 | // If we've been given the "Preserve" flag, then just move the DbgRecords with |
225 | // the instruction, no more special handling needed. |
226 | if (DebugMarker && !Preserve) { |
227 | if (I != this->getIterator() || InsertAtHead) { |
228 | // "this" is definitely moving in the list, or it's moving ahead of its |
229 | // attached DbgVariableRecords. Detach any existing DbgRecords. |
230 | handleMarkerRemoval(); |
231 | } |
232 | } |
233 | |
234 | // Move this single instruction. Use the list splice method directly, not |
235 | // the block splicer, which will do more debug-info things. |
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
237 | |
238 | if (!Preserve) { |
    DbgMarker *NextMarker = getParent()->getNextMarker(this);
240 | |
241 | // If we're inserting at point I, and not in front of the DbgRecords |
242 | // attached there, then we should absorb the DbgRecords attached to I. |
243 | if (!InsertAtHead && NextMarker && !NextMarker->empty()) { |
      adoptDbgRecords(&BB, I, false);
245 | } |
246 | } |
247 | |
248 | if (isTerminator()) |
249 | getParent()->flushTerminatorDbgRecords(); |
250 | } |
251 | |
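/// Clone any DbgRecords attached to \p From (optionally starting at
/// \p FromHere) onto this instruction, creating a marker here if one does
/// not exist yet, and return the range of newly cloned records.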
252 | iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom( |
253 | const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere, |
254 | bool InsertAtHead) { |
255 | if (!From->DebugMarker) |
256 | return DbgMarker::getEmptyDbgRecordRange(); |
257 | |
258 | if (!DebugMarker) |
    getParent()->createMarker(this);
260 | |
  return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
                                         InsertAtHead);
263 | } |
264 | |
265 | std::optional<DbgRecord::self_iterator> |
266 | Instruction::getDbgReinsertionPosition() { |
267 | // Is there a marker on the next instruction? |
  DbgMarker *NextMarker = getParent()->getNextMarker(this);
269 | if (!NextMarker) |
270 | return std::nullopt; |
271 | |
272 | // Are there any DbgRecords in the next marker? |
273 | if (NextMarker->StoredDbgRecords.empty()) |
274 | return std::nullopt; |
275 | |
276 | return NextMarker->StoredDbgRecords.begin(); |
277 | } |
278 | |
279 | bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); } |
280 | |
281 | void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It, |
282 | bool InsertAtHead) { |
283 | DbgMarker *SrcMarker = BB->getMarker(It); |
284 | auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() { |
285 | if (BB->end() == It) { |
286 | SrcMarker->eraseFromParent(); |
287 | BB->deleteTrailingDbgRecords(); |
288 | } |
289 | }; |
290 | |
291 | if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) { |
292 | ReleaseTrailingDbgRecords(); |
293 | return; |
294 | } |
295 | |
296 | // If we have DbgMarkers attached to this instruction, we have to honour the |
297 | // ordering of DbgRecords between this and the other marker. Fall back to just |
298 | // absorbing from the source. |
299 | if (DebugMarker || It == BB->end()) { |
300 | // Ensure we _do_ have a marker. |
    getParent()->createMarker(this);
    DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);
303 | |
304 | // Having transferred everything out of SrcMarker, we _could_ clean it up |
305 | // and free the marker now. However, that's a lot of heap-accounting for a |
306 | // small amount of memory with a good chance of re-use. Leave it for the |
307 | // moment. It will be released when the Instruction is freed in the worst |
308 | // case. |
309 | // However: if we transferred from a trailing marker off the end of the |
310 | // block, it's important to not leave the empty marker trailing. It will |
311 | // give a misleading impression that some debug records have been left |
312 | // trailing. |
313 | ReleaseTrailingDbgRecords(); |
314 | } else { |
    // Optimisation: we're transferring all the DbgRecords from the source
    // marker onto this empty location: just adopt the other instruction's
    // marker.
318 | DebugMarker = SrcMarker; |
319 | DebugMarker->MarkedInstr = this; |
320 | It->DebugMarker = nullptr; |
321 | } |
322 | } |
323 | |
324 | void Instruction::dropDbgRecords() { |
325 | if (DebugMarker) |
326 | DebugMarker->dropDbgRecords(); |
327 | } |
328 | |
329 | void Instruction::dropOneDbgRecord(DbgRecord *DVR) { |
  DebugMarker->dropOneDbgRecord(DVR);
331 | } |
332 | |
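// Ordering queries below are amortised O(1): if the cached ordering is
// stale, a single pass renumbers every instruction in the block.
// Illustrative (hypothetical) use, with A and B in the same block:
//   if (A->comesBefore(B))
//     ...; // A precedes B in program order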
333 | bool Instruction::comesBefore(const Instruction *Other) const { |
  assert(getParent() && Other->getParent() &&
         "instructions without BB parents have no order");
  assert(getParent() == Other->getParent() &&
         "cross-BB instruction order comparison");
338 | if (!getParent()->isInstrOrderValid()) |
339 | const_cast<BasicBlock *>(getParent())->renumberInstructions(); |
340 | return Order < Other->Order; |
341 | } |
342 | |
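// Illustrative (hypothetical) use of getInsertionPointAfterDef: freeze a
// value right after its definition, when a single legal insertion point
// exists:
//   if (auto IP = I->getInsertionPointAfterDef())
//     new FreezeInst(I, I->getName() + ".fr", *IP);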
343 | std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() { |
  assert(!getType()->isVoidTy() && "Instruction must define result");
345 | BasicBlock *InsertBB; |
346 | BasicBlock::iterator InsertPt; |
  if (auto *PN = dyn_cast<PHINode>(this)) {
348 | InsertBB = PN->getParent(); |
349 | InsertPt = InsertBB->getFirstInsertionPt(); |
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
351 | InsertBB = II->getNormalDest(); |
352 | InsertPt = InsertBB->getFirstInsertionPt(); |
  } else if (isa<CallBrInst>(this)) {
354 | // Def is available in multiple successors, there's no single dominating |
355 | // insertion point. |
356 | return std::nullopt; |
357 | } else { |
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
359 | InsertBB = getParent(); |
    InsertPt = std::next(getIterator());
361 | // Any instruction inserted immediately after "this" will come before any |
362 | // debug-info records take effect -- thus, set the head bit indicating that |
363 | // to debug-info-transfer code. |
364 | InsertPt.setHeadBit(true); |
365 | } |
366 | |
367 | // catchswitch blocks don't have any legal insertion point (because they |
368 | // are both an exception pad and a terminator). |
369 | if (InsertPt == InsertBB->end()) |
370 | return std::nullopt; |
371 | return InsertPt; |
372 | } |
373 | |
374 | bool Instruction::isOnlyUserOfAnyOperand() { |
  return any_of(operands(), [](const Value *V) { return V->hasOneUser(); });
376 | } |
377 | |
378 | void Instruction::setHasNoUnsignedWrap(bool b) { |
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoUnsignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
383 | } |
384 | |
385 | void Instruction::setHasNoSignedWrap(bool b) { |
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoSignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoSignedWrap(b);
390 | } |
391 | |
392 | void Instruction::setIsExact(bool b) { |
  cast<PossiblyExactOperator>(this)->setIsExact(b);
394 | } |
395 | |
396 | void Instruction::setNonNeg(bool b) { |
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
398 | SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) | |
399 | (b * PossiblyNonNegInst::NonNeg); |
400 | } |
401 | |
402 | bool Instruction::hasNoUnsignedWrap() const { |
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoUnsignedWrap();

  return cast<TruncInst>(this)->hasNoUnsignedWrap();
407 | } |
408 | |
409 | bool Instruction::hasNoSignedWrap() const { |
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoSignedWrap();

  return cast<TruncInst>(this)->hasNoSignedWrap();
414 | } |
415 | |
416 | bool Instruction::hasNonNeg() const { |
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
418 | return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0; |
419 | } |
420 | |
421 | bool Instruction::hasPoisonGeneratingFlags() const { |
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
423 | } |
424 | |
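/// For example, dropping poison-generating flags rewrites
///   %x = add nuw nsw i32 %a, %b
/// into a plain 'add', and resets a GEP's no-wrap flags (inbounds, nusw,
/// nuw) to none.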
425 | void Instruction::dropPoisonGeneratingFlags() { |
426 | switch (getOpcode()) { |
427 | case Instruction::Add: |
428 | case Instruction::Sub: |
429 | case Instruction::Mul: |
430 | case Instruction::Shl: |
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
433 | break; |
434 | |
435 | case Instruction::UDiv: |
436 | case Instruction::SDiv: |
437 | case Instruction::AShr: |
438 | case Instruction::LShr: |
    cast<PossiblyExactOperator>(this)->setIsExact(false);
440 | break; |
441 | |
442 | case Instruction::Or: |
    cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
444 | break; |
445 | |
446 | case Instruction::GetElementPtr: |
    cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
448 | break; |
449 | |
450 | case Instruction::UIToFP: |
451 | case Instruction::ZExt: |
452 | setNonNeg(false); |
453 | break; |
454 | |
455 | case Instruction::Trunc: |
    cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
    cast<TruncInst>(this)->setHasNoSignedWrap(false);
458 | break; |
459 | |
460 | case Instruction::ICmp: |
    cast<ICmpInst>(this)->setSameSign(false);
462 | break; |
463 | } |
464 | |
  if (isa<FPMathOperator>(this)) {
466 | setHasNoNaNs(false); |
467 | setHasNoInfs(false); |
468 | } |
469 | |
  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
471 | } |
472 | |
473 | bool Instruction::hasPoisonGeneratingMetadata() const { |
  return any_of(Metadata::PoisonGeneratingIDs,
                [this](unsigned ID) { return hasMetadata(ID); });
476 | } |
477 | |
478 | bool Instruction::hasNonDebugLocLoopMetadata() const { |
479 | // If there is no loop metadata at all, we also don't have |
480 | // non-debug loop metadata, obviously. |
  if (!hasMetadata(LLVMContext::MD_loop))
482 | return false; |
483 | |
484 | // If we do have loop metadata, retrieve it. |
  MDNode *LoopMD = getMetadata(LLVMContext::MD_loop);
486 | |
487 | // Check if the existing operands are debug locations. This loop |
488 | // should terminate after at most three iterations. Skip |
489 | // the first item because it is a self-reference. |
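  // For example, with loop metadata of the form:
  //   !0 = distinct !{!0, !1, !2}        ; self-reference, then operands
  //   !1 = !DILocation(...)
  //   !2 = !{!"llvm.loop.mustprogress"}
  // the !2 operand is non-debug-location loop metadata, so we return true.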
  for (const MDOperand &Op : llvm::drop_begin(LoopMD->operands())) {
    // Check for a debug location type by attempting a cast.
    if (!isa<DILocation>(Op)) {
493 | return true; |
494 | } |
495 | } |
496 | |
497 | // If we get here, then all we have is debug locations in the loop metadata. |
498 | return false; |
499 | } |
500 | |
501 | void Instruction::dropPoisonGeneratingMetadata() { |
502 | for (unsigned ID : Metadata::PoisonGeneratingIDs) |
    eraseMetadata(ID);
504 | } |
505 | |
506 | bool Instruction::hasPoisonGeneratingReturnAttributes() const { |
  if (const auto *CB = dyn_cast<CallBase>(this)) {
508 | AttributeSet RetAttrs = CB->getAttributes().getRetAttrs(); |
    return RetAttrs.hasAttribute(Attribute::Range) ||
           RetAttrs.hasAttribute(Attribute::Alignment) ||
           RetAttrs.hasAttribute(Attribute::NonNull);
512 | } |
513 | return false; |
514 | } |
515 | |
516 | void Instruction::dropPoisonGeneratingReturnAttributes() { |
  if (auto *CB = dyn_cast<CallBase>(this)) {
518 | AttributeMask AM; |
    AM.addAttribute(Attribute::Range);
    AM.addAttribute(Attribute::Alignment);
    AM.addAttribute(Attribute::NonNull);
    CB->removeRetAttrs(AM);
523 | } |
  assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
525 | } |
526 | |
527 | void Instruction::dropUBImplyingAttrsAndUnknownMetadata( |
528 | ArrayRef<unsigned> KnownIDs) { |
529 | dropUnknownNonDebugMetadata(KnownIDs); |
  auto *CB = dyn_cast<CallBase>(this);
531 | if (!CB) |
532 | return; |
533 | // For call instructions, we also need to drop parameter and return attributes |
534 | // that can cause UB if the call is moved to a location where the attribute is |
535 | // not valid. |
536 | AttributeList AL = CB->getAttributes(); |
537 | if (AL.isEmpty()) |
538 | return; |
539 | AttributeMask UBImplyingAttributes = |
540 | AttributeFuncs::getUBImplyingAttributes(); |
541 | for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++) |
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
544 | } |
545 | |
546 | void Instruction::dropUBImplyingAttrsAndMetadata() { |
547 | // !annotation metadata does not impact semantics. |
548 | // !range, !nonnull and !align produce poison, so they are safe to speculate. |
  // !noundef and various AA metadata must be dropped, as they generally
  // produce immediate undefined behavior.
551 | unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range, |
552 | LLVMContext::MD_nonnull, LLVMContext::MD_align}; |
553 | dropUBImplyingAttrsAndUnknownMetadata(KnownIDs); |
554 | } |
555 | |
556 | bool Instruction::hasUBImplyingAttrs() const { |
  auto *CB = dyn_cast<CallBase>(this);
558 | if (!CB) |
559 | return false; |
560 | // For call instructions, we also need to check parameter and return |
561 | // attributes that can cause UB. |
562 | for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++) |
563 | if (CB->isPassingUndefUB(ArgNo)) |
564 | return true; |
  return CB->hasRetAttr(Attribute::NoUndef) ||
         CB->hasRetAttr(Attribute::Dereferenceable) ||
         CB->hasRetAttr(Attribute::DereferenceableOrNull);
568 | } |
569 | |
570 | bool Instruction::isExact() const { |
  return cast<PossiblyExactOperator>(this)->isExact();
572 | } |
573 | |
void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}
618 | |
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
672 | |
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(SrcPD->isDisjoint());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(NNI->hasNonNeg());

  if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
    if (auto *DestICmp = dyn_cast<ICmpInst>(this))
      DestICmp->setSameSign(SrcICmp->hasSameSign());
}
716 | |
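// Intersect ("and") the optional flags of this instruction with those of V,
// keeping only flags that are valid for both; used, e.g., when two
// instructions are about to be merged into one. Illustratively, combining
// 'add nuw nsw' with 'add nuw' leaves just 'add nuw'.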
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(hasNonNeg() && NNI->hasNonNeg());

  if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
    if (auto *DestICmp = dyn_cast<ICmpInst>(this))
      DestICmp->setSameSign(DestICmp->hasSameSign() && SrcICmp->hasSameSign());
}
761 | |
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:         return "ret";
  case Br:          return "br";
  case Switch:      return "switch";
  case IndirectBr:  return "indirectbr";
  case Invoke:      return "invoke";
  case Resume:      return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet:  return "cleanupret";
  case CatchRet:    return "catchret";
  case CatchPad:    return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr:      return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add:  return "add";
  case FAdd: return "fadd";
  case Sub:  return "sub";
  case FSub: return "fsub";
  case Mul:  return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
846 | |
847 | /// This must be kept in sync with FunctionComparator::cmpOperations in |
848 | /// lib/Transforms/IPO/MergeFunctions.cpp. |
849 | bool Instruction::hasSameSpecialState(const Instruction *I2, |
850 | bool IgnoreAlignment, |
851 | bool IntersectAttrs) const { |
852 | auto I1 = this; |
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  auto CheckAttrsSame = [IntersectAttrs](const CallBase *CB0,
                                         const CallBase *CB1) {
    return IntersectAttrs
               ? CB0->getAttributes()
                     .intersectWith(CB0->getContext(), CB1->getAttributes())
                     .has_value()
               : CB0->getAttributes() == CB1->getAttributes();
  };
864 | |
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<InvokeInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallBrInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();
923 | |
924 | return true; |
925 | } |
926 | |
927 | bool Instruction::isIdenticalTo(const Instruction *I) const { |
928 | return isIdenticalToWhenDefined(I) && |
929 | SubclassOptionalData == I->SubclassOptionalData; |
930 | } |
931 | |
932 | bool Instruction::isIdenticalToWhenDefined(const Instruction *I, |
933 | bool IntersectAttrs) const { |
934 | if (getOpcode() != I->getOpcode() || |
935 | getNumOperands() != I->getNumOperands() || getType() != I->getType()) |
936 | return false; |
937 | |
938 | // If both instructions have no operands, they are identical. |
939 | if (getNumOperands() == 0 && I->getNumOperands() == 0) |
    return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
                                     IntersectAttrs);
942 | |
943 | // We have two instructions of identical opcode and #operands. Check to see |
944 | // if all operands are the same. |
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
946 | return false; |
947 | |
948 | // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()! |
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
953 | } |
954 | |
  return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
                                   IntersectAttrs);
957 | } |
958 | |
959 | // Keep this in sync with FunctionComparator::cmpOperations in |
960 | // lib/Transforms/IPO/MergeFunctions.cpp. |
961 | bool Instruction::isSameOperationAs(const Instruction *I, |
962 | unsigned flags) const { |
963 | bool IgnoreAlignment = flags & CompareIgnoringAlignment; |
964 | bool UseScalarTypes = flags & CompareUsingScalarTypes; |
965 | bool IntersectAttrs = flags & CompareUsingIntersectedAttrs; |
966 | |
967 | if (getOpcode() != I->getOpcode() || |
968 | getNumOperands() != I->getNumOperands() || |
969 | (UseScalarTypes ? |
970 | getType()->getScalarType() != I->getType()->getScalarType() : |
971 | getType() != I->getType())) |
972 | return false; |
973 | |
974 | // We have two instructions of identical opcode and #operands. Check to see |
975 | // if all operands are the same type |
976 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) |
977 | if (UseScalarTypes ? |
978 | getOperand(i)->getType()->getScalarType() != |
979 | I->getOperand(i)->getType()->getScalarType() : |
980 | getOperand(i)->getType() != I->getOperand(i)->getType()) |
981 | return false; |
982 | |
  return this->hasSameSpecialState(I, IgnoreAlignment, IntersectAttrs);
984 | } |
985 | |
986 | bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const { |
987 | for (const Use &U : uses()) { |
    // PHI nodes use values in their corresponding predecessor block. For
    // other instructions, just check whether the parent of the use matches.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
992 | if (!PN) { |
993 | if (I->getParent() != BB) |
994 | return true; |
995 | continue; |
996 | } |
997 | |
998 | if (PN->getIncomingBlock(U) != BB) |
999 | return true; |
1000 | } |
1001 | return false; |
1002 | } |
1003 | |
1004 | bool Instruction::mayReadFromMemory() const { |
1005 | switch (getOpcode()) { |
1006 | default: return false; |
1007 | case Instruction::VAArg: |
1008 | case Instruction::Load: |
1009 | case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory |
1010 | case Instruction::AtomicCmpXchg: |
1011 | case Instruction::AtomicRMW: |
1012 | case Instruction::CatchPad: |
1013 | case Instruction::CatchRet: |
1014 | return true; |
1015 | case Instruction::Call: |
1016 | case Instruction::Invoke: |
1017 | case Instruction::CallBr: |
    return !cast<CallBase>(this)->onlyWritesMemory();
1019 | case Instruction::Store: |
    return !cast<StoreInst>(this)->isUnordered();
1021 | } |
1022 | } |
1023 | |
1024 | bool Instruction::mayWriteToMemory() const { |
1025 | switch (getOpcode()) { |
1026 | default: return false; |
1027 | case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory |
1028 | case Instruction::Store: |
1029 | case Instruction::VAArg: |
1030 | case Instruction::AtomicCmpXchg: |
1031 | case Instruction::AtomicRMW: |
1032 | case Instruction::CatchPad: |
1033 | case Instruction::CatchRet: |
1034 | return true; |
1035 | case Instruction::Call: |
1036 | case Instruction::Invoke: |
1037 | case Instruction::CallBr: |
    return !cast<CallBase>(this)->onlyReadsMemory();
1039 | case Instruction::Load: |
    return !cast<LoadInst>(this)->isUnordered();
1041 | } |
1042 | } |
1043 | |
1044 | bool Instruction::isAtomic() const { |
1045 | switch (getOpcode()) { |
1046 | default: |
1047 | return false; |
1048 | case Instruction::AtomicCmpXchg: |
1049 | case Instruction::AtomicRMW: |
1050 | case Instruction::Fence: |
1051 | return true; |
1052 | case Instruction::Load: |
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
1054 | case Instruction::Store: |
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
1056 | } |
1057 | } |
1058 | |
1059 | bool Instruction::hasAtomicLoad() const { |
1060 | assert(isAtomic()); |
1061 | switch (getOpcode()) { |
1062 | default: |
1063 | return false; |
1064 | case Instruction::AtomicCmpXchg: |
1065 | case Instruction::AtomicRMW: |
1066 | case Instruction::Load: |
1067 | return true; |
1068 | } |
1069 | } |
1070 | |
1071 | bool Instruction::hasAtomicStore() const { |
1072 | assert(isAtomic()); |
1073 | switch (getOpcode()) { |
1074 | default: |
1075 | return false; |
1076 | case Instruction::AtomicCmpXchg: |
1077 | case Instruction::AtomicRMW: |
1078 | case Instruction::Store: |
1079 | return true; |
1080 | } |
1081 | } |
1082 | |
1083 | bool Instruction::isVolatile() const { |
1084 | switch (getOpcode()) { |
1085 | default: |
1086 | return false; |
1087 | case Instruction::AtomicRMW: |
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
1095 | case Instruction::Call: |
1096 | case Instruction::Invoke: |
1097 | // There are a very limited number of intrinsics with volatile flags. |
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
1107 | } |
1108 | } |
1109 | return false; |
1110 | } |
1111 | } |
1112 | |
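/// Return the type of the value loaded or stored by this memory-accessing
/// instruction, including masked and vector-predicated load/store
/// intrinsics, or nullptr if this instruction does not access memory that
/// way.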
1113 | Type *Instruction::getAccessType() const { |
1114 | switch (getOpcode()) { |
1115 | case Instruction::Store: |
    return cast<StoreInst>(this)->getValueOperand()->getType();
1117 | case Instruction::Load: |
1118 | case Instruction::AtomicRMW: |
1119 | return getType(); |
1120 | case Instruction::AtomicCmpXchg: |
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
1122 | case Instruction::Call: |
1123 | case Instruction::Invoke: |
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
1125 | switch (II->getIntrinsicID()) { |
1126 | case Intrinsic::masked_load: |
1127 | case Intrinsic::masked_gather: |
1128 | case Intrinsic::masked_expandload: |
1129 | case Intrinsic::vp_load: |
1130 | case Intrinsic::vp_gather: |
1131 | case Intrinsic::experimental_vp_strided_load: |
1132 | return II->getType(); |
1133 | case Intrinsic::masked_store: |
1134 | case Intrinsic::masked_scatter: |
1135 | case Intrinsic::masked_compressstore: |
1136 | case Intrinsic::vp_store: |
1137 | case Intrinsic::vp_scatter: |
1138 | case Intrinsic::experimental_vp_strided_store: |
        return II->getOperand(0)->getType();
1140 | default: |
1141 | break; |
1142 | } |
1143 | } |
1144 | } |
1145 | |
1146 | return nullptr; |
1147 | } |
1148 | |
1149 | static bool canUnwindPastLandingPad(const LandingPadInst *LP, |
1150 | bool IncludePhaseOneUnwind) { |
1151 | // Because phase one unwinding skips cleanup landingpads, we effectively |
1152 | // unwind past this frame, and callers need to have valid unwind info. |
1153 | if (LP->isCleanup()) |
1154 | return IncludePhaseOneUnwind; |
1155 | |
1156 | for (unsigned I = 0; I < LP->getNumClauses(); ++I) { |
    Constant *Clause = LP->getClause(I);
    // catch ptr null catches all exceptions.
    if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
      return false;
    // filter [0 x ptr] catches all exceptions.
    if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
1163 | return false; |
1164 | } |
1165 | |
1166 | // May catch only some subset of exceptions, in which case other exceptions |
1167 | // will continue unwinding. |
1168 | return true; |
1169 | } |
1170 | |
1171 | bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const { |
1172 | switch (getOpcode()) { |
1173 | case Instruction::Call: |
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
1179 | case Instruction::Resume: |
1180 | return true; |
1181 | case Instruction::Invoke: { |
1182 | // Landingpads themselves don't unwind -- however, an invoke of a skipped |
1183 | // landingpad may continue unwinding. |
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
1185 | BasicBlock::iterator Pad = UnwindDest->getFirstNonPHIIt(); |
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
1187 | return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind); |
1188 | return false; |
1189 | } |
1190 | case Instruction::CleanupPad: |
1191 | // Treat the same as cleanup landingpad. |
1192 | return IncludePhaseOneUnwind; |
1193 | default: |
1194 | return false; |
1195 | } |
1196 | } |
1197 | |
1198 | bool Instruction::mayHaveSideEffects() const { |
1199 | return mayWriteToMemory() || mayThrow() || !willReturn(); |
1200 | } |
1201 | |
1202 | bool Instruction::isSafeToRemove() const { |
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
1204 | !this->isTerminator() && !this->isEHPad(); |
1205 | } |
1206 | |
1207 | bool Instruction::willReturn() const { |
1208 | // Volatile store isn't guaranteed to return; see LangRef. |
  if (auto *SI = dyn_cast<StoreInst>(this))
1210 | return !SI->isVolatile(); |
1211 | |
  if (const auto *CB = dyn_cast<CallBase>(this))
    return CB->hasFnAttr(Attribute::WillReturn);
1214 | return true; |
1215 | } |
1216 | |
1217 | bool Instruction::isLifetimeStartOrEnd() const { |
  auto *II = dyn_cast<IntrinsicInst>(this);
1219 | if (!II) |
1220 | return false; |
1221 | Intrinsic::ID ID = II->getIntrinsicID(); |
1222 | return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end; |
1223 | } |
1224 | |
1225 | bool Instruction::isLaunderOrStripInvariantGroup() const { |
  auto *II = dyn_cast<IntrinsicInst>(this);
1227 | if (!II) |
1228 | return false; |
1229 | Intrinsic::ID ID = II->getIntrinsicID(); |
1230 | return ID == Intrinsic::launder_invariant_group || |
1231 | ID == Intrinsic::strip_invariant_group; |
1232 | } |
1233 | |
1234 | bool Instruction::isDebugOrPseudoInst() const { |
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
1236 | } |
1237 | |
1238 | const Instruction * |
1239 | Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const { |
1240 | for (const Instruction *I = getNextNode(); I; I = I->getNextNode()) |
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
1242 | return I; |
1243 | return nullptr; |
1244 | } |
1245 | |
1246 | const Instruction * |
1247 | Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const { |
1248 | for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode()) |
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
1250 | return I; |
1251 | return nullptr; |
1252 | } |
1253 | |
1254 | const DebugLoc &Instruction::getStableDebugLoc() const { |
  if (isa<DbgInfoIntrinsic>(this))
1256 | if (const Instruction *Next = getNextNonDebugInstruction()) |
1257 | return Next->getDebugLoc(); |
1258 | return getDebugLoc(); |
1259 | } |
1260 | |
1261 | bool Instruction::isAssociative() const { |
  if (auto *II = dyn_cast<IntrinsicInst>(this))
1263 | return II->isAssociative(); |
1264 | unsigned Opcode = getOpcode(); |
1265 | if (isAssociative(Opcode)) |
1266 | return true; |
1267 | |
1268 | switch (Opcode) { |
1269 | case FMul: |
1270 | case FAdd: |
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
1273 | default: |
1274 | return false; |
1275 | } |
1276 | } |
1277 | |
1278 | bool Instruction::isCommutative() const { |
  if (auto *II = dyn_cast<IntrinsicInst>(this))
1280 | return II->isCommutative(); |
1281 | // TODO: Should allow icmp/fcmp? |
  return isCommutative(getOpcode());
1283 | } |
1284 | |
1285 | unsigned Instruction::getNumSuccessors() const { |
1286 | switch (getOpcode()) { |
1287 | #define HANDLE_TERM_INST(N, OPC, CLASS) \ |
1288 | case Instruction::OPC: \ |
1289 | return static_cast<const CLASS *>(this)->getNumSuccessors(); |
1290 | #include "llvm/IR/Instruction.def" |
1291 | default: |
1292 | break; |
1293 | } |
1294 | llvm_unreachable("not a terminator" ); |
1295 | } |
1296 | |
1297 | BasicBlock *Instruction::getSuccessor(unsigned idx) const { |
1298 | switch (getOpcode()) { |
1299 | #define HANDLE_TERM_INST(N, OPC, CLASS) \ |
1300 | case Instruction::OPC: \ |
1301 | return static_cast<const CLASS *>(this)->getSuccessor(idx); |
1302 | #include "llvm/IR/Instruction.def" |
1303 | default: |
1304 | break; |
1305 | } |
1306 | llvm_unreachable("not a terminator" ); |
1307 | } |
1308 | |
1309 | void Instruction::setSuccessor(unsigned idx, BasicBlock *B) { |
1310 | switch (getOpcode()) { |
1311 | #define HANDLE_TERM_INST(N, OPC, CLASS) \ |
1312 | case Instruction::OPC: \ |
1313 | return static_cast<CLASS *>(this)->setSuccessor(idx, B); |
1314 | #include "llvm/IR/Instruction.def" |
1315 | default: |
1316 | break; |
1317 | } |
1318 | llvm_unreachable("not a terminator" ); |
1319 | } |
1320 | |
1321 | void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) { |
1322 | for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors(); |
1323 | Idx != NumSuccessors; ++Idx) |
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
1326 | } |
1327 | |
1328 | Instruction *Instruction::cloneImpl() const { |
1329 | llvm_unreachable("Subclass of Instruction failed to implement cloneImpl" ); |
1330 | } |
1331 | |
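// Swap the order of the two branch weights in this instruction's !prof
// metadata, e.g. after the successors of a conditional branch have been
// swapped; leaves the metadata untouched unless there are exactly two
// weights.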
1332 | void Instruction::swapProfMetadata() { |
  MDNode *ProfileData = getBranchWeightMDNode(*this);
1334 | if (!ProfileData) |
1335 | return; |
1336 | unsigned FirstIdx = getBranchWeightOffset(ProfileData); |
1337 | if (ProfileData->getNumOperands() != 2 + FirstIdx) |
1338 | return; |
1339 | |
1340 | unsigned SecondIdx = FirstIdx + 1; |
1341 | SmallVector<Metadata *, 4> Ops; |
1342 | // If there are more weights past the second, we can't swap them |
1343 | if (ProfileData->getNumOperands() > SecondIdx + 1) |
1344 | return; |
1345 | for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) { |
    Ops.push_back(ProfileData->getOperand(Idx));
1347 | } |
1348 | // Switch the order of the weights |
  Ops.push_back(ProfileData->getOperand(SecondIdx));
  Ops.push_back(ProfileData->getOperand(FirstIdx));
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
1353 | } |
1354 | |
1355 | void Instruction::copyMetadata(const Instruction &SrcInst, |
1356 | ArrayRef<unsigned> WL) { |
  if (WL.empty() || is_contained(WL, LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc().orElse(getDebugLoc()));
1359 | |
1360 | if (!SrcInst.hasMetadata()) |
1361 | return; |
1362 | |
1363 | SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end()); |
1364 | |
1365 | // Otherwise, enumerate and copy over metadata from the old instruction to the |
1366 | // new one. |
1367 | SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs; |
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
1369 | for (const auto &MD : TheMDs) { |
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
1372 | } |
1373 | } |
1374 | |
1375 | Instruction *Instruction::clone() const { |
1376 | Instruction *New = nullptr; |
1377 | switch (getOpcode()) { |
1378 | default: |
1379 | llvm_unreachable("Unhandled Opcode." ); |
1380 | #define HANDLE_INST(num, opc, clas) \ |
1381 | case Instruction::opc: \ |
1382 | New = cast<clas>(this)->cloneImpl(); \ |
1383 | break; |
1384 | #include "llvm/IR/Instruction.def" |
1385 | #undef HANDLE_INST |
1386 | } |
1387 | |
1388 | New->SubclassOptionalData = SubclassOptionalData; |
  New->copyMetadata(*this);
1390 | return New; |
1391 | } |
1392 | |