1 | //===-- Instruction.cpp - Implement the Instruction class -----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the Instruction class for the IR library. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "llvm/IR/Instruction.h" |
14 | #include "llvm/ADT/DenseSet.h" |
15 | #include "llvm/IR/AttributeMask.h" |
16 | #include "llvm/IR/Attributes.h" |
17 | #include "llvm/IR/Constants.h" |
18 | #include "llvm/IR/InstrTypes.h" |
19 | #include "llvm/IR/Instructions.h" |
20 | #include "llvm/IR/IntrinsicInst.h" |
21 | #include "llvm/IR/Intrinsics.h" |
22 | #include "llvm/IR/MemoryModelRelaxationAnnotations.h" |
23 | #include "llvm/IR/Module.h" |
24 | #include "llvm/IR/Operator.h" |
25 | #include "llvm/IR/ProfDataUtils.h" |
26 | #include "llvm/IR/Type.h" |
27 | using namespace llvm; |
28 | |
29 | InsertPosition::InsertPosition(Instruction *InsertBefore) |
30 | : InsertAt(InsertBefore ? InsertBefore->getIterator() |
31 | : InstListType::iterator()) {} |
32 | InsertPosition::InsertPosition(BasicBlock *InsertAtEnd) |
33 | : InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {} |
34 | |
35 | Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps, |
36 | InsertPosition InsertBefore) |
37 | : User(ty, Value::InstructionVal + it, Ops, NumOps) { |
38 | // When called with an iterator, there must be a block to insert into. |
39 | if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) { |
40 | BasicBlock *BB = InsertIt.getNodeParent(); |
41 | assert(BB && "Instruction to insert before is not in a basic block!" ); |
42 | insertInto(ParentBB: BB, It: InsertBefore); |
43 | } |
44 | } |
45 | |
46 | Instruction::~Instruction() { |
  assert(!getParent() && "Instruction still linked in the program!");
48 | |
49 | // Replace any extant metadata uses of this instruction with undef to |
50 | // preserve debug info accuracy. Some alternatives include: |
51 | // - Treat Instruction like any other Value, and point its extant metadata |
52 | // uses to an empty ValueAsMetadata node. This makes extant dbg.value uses |
53 | // trivially dead (i.e. fair game for deletion in many passes), leading to |
54 | // stale dbg.values being in effect for too long. |
55 | // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal |
56 | // correct. OTOH results in wasted work in some common cases (e.g. when all |
57 | // instructions in a BasicBlock are deleted). |
58 | if (isUsedByMetadata()) |
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
60 | |
61 | // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s) |
62 | // mapping in LLVMContext. |
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
64 | } |
65 | |
66 | const Module *Instruction::getModule() const { |
67 | return getParent()->getModule(); |
68 | } |
69 | |
70 | const Function *Instruction::getFunction() const { |
71 | return getParent()->getParent(); |
72 | } |
73 | |
74 | const DataLayout &Instruction::getDataLayout() const { |
75 | return getModule()->getDataLayout(); |
76 | } |
77 | |
78 | void Instruction::removeFromParent() { |
  // Perform any debug-info maintenance required.
80 | handleMarkerRemoval(); |
81 | |
  getParent()->getInstList().remove(getIterator());
83 | } |
84 | |
85 | void Instruction::handleMarkerRemoval() { |
86 | if (!getParent()->IsNewDbgInfoFormat || !DebugMarker) |
87 | return; |
88 | |
89 | DebugMarker->removeMarker(); |
90 | } |
91 | |
92 | BasicBlock::iterator Instruction::eraseFromParent() { |
93 | handleMarkerRemoval(); |
  return getParent()->getInstList().erase(getIterator());
95 | } |
96 | |
97 | void Instruction::insertBefore(Instruction *InsertPos) { |
  insertBefore(InsertPos->getIterator());
99 | } |
100 | |
101 | /// Insert an unlinked instruction into a basic block immediately before the |
102 | /// specified instruction. |
103 | void Instruction::insertBefore(BasicBlock::iterator InsertPos) { |
  insertBefore(*InsertPos->getParent(), InsertPos);
105 | } |
106 | |
107 | /// Insert an unlinked instruction into a basic block immediately after the |
108 | /// specified instruction. |
109 | void Instruction::insertAfter(Instruction *InsertPos) { |
110 | BasicBlock *DestParent = InsertPos->getParent(); |
111 | |
  DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
113 | } |
114 | |
115 | BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB, |
116 | BasicBlock::iterator It) { |
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  insertBefore(*ParentBB, It);
121 | return getIterator(); |
122 | } |
123 | |
124 | extern cl::opt<bool> UseNewDbgInfoFormat; |
125 | |
126 | void Instruction::insertBefore(BasicBlock &BB, |
127 | InstListType::iterator InsertPos) { |
128 | assert(!DebugMarker); |
129 | |
  BB.getInstList().insert(InsertPos, this);
131 | |
132 | if (!BB.IsNewDbgInfoFormat) |
133 | return; |
134 | |
135 | // We've inserted "this": if InsertAtHead is set then it comes before any |
136 | // DbgVariableRecords attached to InsertPos. But if it's not set, then any |
137 | // DbgRecords should now come before "this". |
138 | bool InsertAtHead = InsertPos.getHeadBit(); |
139 | if (!InsertAtHead) { |
    DbgMarker *SrcMarker = BB.getMarker(InsertPos);
141 | if (SrcMarker && !SrcMarker->empty()) { |
142 | // If this assertion fires, the calling code is about to insert a PHI |
143 | // after debug-records, which would form a sequence like: |
144 | // %0 = PHI |
145 | // #dbg_value |
146 | // %1 = PHI |
147 | // Which is de-normalised and undesired -- hence the assertion. To avoid |
148 | // this, you must insert at that position using an iterator, and it must |
      // this, you must insert at that position using an iterator, and it must
      // be acquired by calling getFirstNonPHIIt / begin or similar methods on
      // the block. This will signal to this behind-the-scenes debug-info
      // maintenance code that you intend the PHI to be ahead of everything,
      // including any debug-info.
      assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
      adoptDbgRecords(&BB, InsertPos, false);
155 | } |
156 | } |
157 | |
158 | // If we're inserting a terminator, check if we need to flush out |
159 | // TrailingDbgRecords. Inserting instructions at the end of an incomplete |
160 | // block is handled by the code block above. |
161 | if (isTerminator()) |
162 | getParent()->flushTerminatorDbgRecords(); |
163 | } |
164 | |
165 | /// Unlink this instruction from its current basic block and insert it into the |
166 | /// basic block that MovePos lives in, right before MovePos. |
167 | void Instruction::moveBefore(Instruction *MovePos) { |
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
169 | } |
170 | |
171 | void Instruction::moveBeforePreserving(Instruction *MovePos) { |
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
173 | } |
174 | |
175 | void Instruction::moveAfter(Instruction *MovePos) { |
  auto NextIt = std::next(MovePos->getIterator());
177 | // We want this instruction to be moved to before NextIt in the instruction |
178 | // list, but before NextIt's debug value range. |
179 | NextIt.setHeadBit(true); |
  moveBeforeImpl(*MovePos->getParent(), NextIt, false);
181 | } |
182 | |
183 | void Instruction::moveAfterPreserving(Instruction *MovePos) { |
  auto NextIt = std::next(MovePos->getIterator());
185 | // We want this instruction and its debug range to be moved to before NextIt |
186 | // in the instruction list, but before NextIt's debug value range. |
187 | NextIt.setHeadBit(true); |
  moveBeforeImpl(*MovePos->getParent(), NextIt, true);
189 | } |
190 | |
191 | void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) { |
  moveBeforeImpl(BB, I, false);
193 | } |
194 | |
195 | void Instruction::moveBeforePreserving(BasicBlock &BB, |
196 | InstListType::iterator I) { |
  moveBeforeImpl(BB, I, true);
198 | } |
199 | |
200 | void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I, |
201 | bool Preserve) { |
202 | assert(I == BB.end() || I->getParent() == &BB); |
203 | bool InsertAtHead = I.getHeadBit(); |
204 | |
205 | // If we've been given the "Preserve" flag, then just move the DbgRecords with |
206 | // the instruction, no more special handling needed. |
207 | if (BB.IsNewDbgInfoFormat && DebugMarker && !Preserve) { |
208 | if (I != this->getIterator() || InsertAtHead) { |
209 | // "this" is definitely moving in the list, or it's moving ahead of its |
210 | // attached DbgVariableRecords. Detach any existing DbgRecords. |
211 | handleMarkerRemoval(); |
212 | } |
213 | } |
214 | |
215 | // Move this single instruction. Use the list splice method directly, not |
216 | // the block splicer, which will do more debug-info things. |
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
218 | |
219 | if (BB.IsNewDbgInfoFormat && !Preserve) { |
    DbgMarker *NextMarker = getParent()->getNextMarker(this);
221 | |
222 | // If we're inserting at point I, and not in front of the DbgRecords |
223 | // attached there, then we should absorb the DbgRecords attached to I. |
224 | if (!InsertAtHead && NextMarker && !NextMarker->empty()) { |
      adoptDbgRecords(&BB, I, false);
226 | } |
227 | } |
228 | |
229 | if (isTerminator()) |
230 | getParent()->flushTerminatorDbgRecords(); |
231 | } |
232 | |
233 | iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom( |
234 | const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere, |
235 | bool InsertAtHead) { |
236 | if (!From->DebugMarker) |
237 | return DbgMarker::getEmptyDbgRecordRange(); |
238 | |
239 | assert(getParent()->IsNewDbgInfoFormat); |
240 | assert(getParent()->IsNewDbgInfoFormat == |
241 | From->getParent()->IsNewDbgInfoFormat); |
242 | |
243 | if (!DebugMarker) |
    getParent()->createMarker(this);
245 | |
  return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
247 | InsertAtHead); |
248 | } |
249 | |
250 | std::optional<DbgRecord::self_iterator> |
251 | Instruction::getDbgReinsertionPosition() { |
252 | // Is there a marker on the next instruction? |
  DbgMarker *NextMarker = getParent()->getNextMarker(this);
254 | if (!NextMarker) |
255 | return std::nullopt; |
256 | |
257 | // Are there any DbgRecords in the next marker? |
258 | if (NextMarker->StoredDbgRecords.empty()) |
259 | return std::nullopt; |
260 | |
261 | return NextMarker->StoredDbgRecords.begin(); |
262 | } |
263 | |
264 | bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); } |
265 | |
266 | void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It, |
267 | bool InsertAtHead) { |
268 | DbgMarker *SrcMarker = BB->getMarker(It); |
269 | auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() { |
270 | if (BB->end() == It) { |
271 | SrcMarker->eraseFromParent(); |
272 | BB->deleteTrailingDbgRecords(); |
273 | } |
274 | }; |
275 | |
276 | if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) { |
277 | ReleaseTrailingDbgRecords(); |
278 | return; |
279 | } |
280 | |
281 | // If we have DbgMarkers attached to this instruction, we have to honour the |
282 | // ordering of DbgRecords between this and the other marker. Fall back to just |
283 | // absorbing from the source. |
284 | if (DebugMarker || It == BB->end()) { |
285 | // Ensure we _do_ have a marker. |
    getParent()->createMarker(this);
    DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);
288 | |
289 | // Having transferred everything out of SrcMarker, we _could_ clean it up |
290 | // and free the marker now. However, that's a lot of heap-accounting for a |
291 | // small amount of memory with a good chance of re-use. Leave it for the |
292 | // moment. It will be released when the Instruction is freed in the worst |
293 | // case. |
294 | // However: if we transferred from a trailing marker off the end of the |
295 | // block, it's important to not leave the empty marker trailing. It will |
296 | // give a misleading impression that some debug records have been left |
297 | // trailing. |
298 | ReleaseTrailingDbgRecords(); |
299 | } else { |
    // Optimisation: we're transferring all the DbgRecords from the source
    // marker onto this empty location: just adopt the other instruction's
    // marker.
303 | DebugMarker = SrcMarker; |
304 | DebugMarker->MarkedInstr = this; |
305 | It->DebugMarker = nullptr; |
306 | } |
307 | } |
308 | |
309 | void Instruction::dropDbgRecords() { |
310 | if (DebugMarker) |
311 | DebugMarker->dropDbgRecords(); |
312 | } |
313 | |
314 | void Instruction::dropOneDbgRecord(DbgRecord *DVR) { |
  DebugMarker->dropOneDbgRecord(DVR);
316 | } |
317 | |
318 | bool Instruction::comesBefore(const Instruction *Other) const { |
  assert(getParent() && Other->getParent() &&
         "instructions without BB parents have no order");
  assert(getParent() == Other->getParent() &&
         "cross-BB instruction order comparison");
323 | if (!getParent()->isInstrOrderValid()) |
324 | const_cast<BasicBlock *>(getParent())->renumberInstructions(); |
325 | return Order < Other->Order; |
326 | } |
327 | |
328 | std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() { |
  assert(!getType()->isVoidTy() && "Instruction must define result");
330 | BasicBlock *InsertBB; |
331 | BasicBlock::iterator InsertPt; |
  if (auto *PN = dyn_cast<PHINode>(this)) {
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (isa<CallBrInst>(this)) {
339 | // Def is available in multiple successors, there's no single dominating |
340 | // insertion point. |
341 | return std::nullopt; |
342 | } else { |
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
344 | InsertBB = getParent(); |
    InsertPt = std::next(getIterator());
346 | // Any instruction inserted immediately after "this" will come before any |
347 | // debug-info records take effect -- thus, set the head bit indicating that |
348 | // to debug-info-transfer code. |
349 | InsertPt.setHeadBit(true); |
350 | } |
351 | |
352 | // catchswitch blocks don't have any legal insertion point (because they |
353 | // are both an exception pad and a terminator). |
354 | if (InsertPt == InsertBB->end()) |
355 | return std::nullopt; |
356 | return InsertPt; |
357 | } |
358 | |
359 | bool Instruction::isOnlyUserOfAnyOperand() { |
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
361 | } |
362 | |
363 | void Instruction::setHasNoUnsignedWrap(bool b) { |
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoUnsignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoSignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
379 | } |
380 | |
381 | void Instruction::setNonNeg(bool b) { |
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
383 | SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) | |
384 | (b * PossiblyNonNegInst::NonNeg); |
385 | } |
386 | |
387 | bool Instruction::hasNoUnsignedWrap() const { |
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoUnsignedWrap();

  return cast<TruncInst>(this)->hasNoUnsignedWrap();
392 | } |
393 | |
394 | bool Instruction::hasNoSignedWrap() const { |
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoSignedWrap();

  return cast<TruncInst>(this)->hasNoSignedWrap();
399 | } |
400 | |
401 | bool Instruction::hasNonNeg() const { |
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
403 | return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0; |
404 | } |
405 | |
406 | bool Instruction::hasPoisonGeneratingFlags() const { |
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
408 | } |
409 | |
410 | void Instruction::dropPoisonGeneratingFlags() { |
411 | switch (getOpcode()) { |
412 | case Instruction::Add: |
413 | case Instruction::Sub: |
414 | case Instruction::Mul: |
415 | case Instruction::Shl: |
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
418 | break; |
419 | |
420 | case Instruction::UDiv: |
421 | case Instruction::SDiv: |
422 | case Instruction::AShr: |
423 | case Instruction::LShr: |
    cast<PossiblyExactOperator>(this)->setIsExact(false);
425 | break; |
426 | |
427 | case Instruction::Or: |
    cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
429 | break; |
430 | |
431 | case Instruction::GetElementPtr: |
    cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
433 | break; |
434 | |
435 | case Instruction::UIToFP: |
436 | case Instruction::ZExt: |
437 | setNonNeg(false); |
438 | break; |
439 | |
440 | case Instruction::Trunc: |
    cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
    cast<TruncInst>(this)->setHasNoSignedWrap(false);
443 | break; |
444 | } |
445 | |
  if (isa<FPMathOperator>(this)) {
447 | setHasNoNaNs(false); |
448 | setHasNoInfs(false); |
449 | } |
450 | |
  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
452 | } |
453 | |
454 | bool Instruction::hasPoisonGeneratingMetadata() const { |
  return hasMetadata(LLVMContext::MD_range) ||
         hasMetadata(LLVMContext::MD_nonnull) ||
         hasMetadata(LLVMContext::MD_align);
458 | } |
459 | |
460 | void Instruction::dropPoisonGeneratingMetadata() { |
  eraseMetadata(LLVMContext::MD_range);
  eraseMetadata(LLVMContext::MD_nonnull);
  eraseMetadata(LLVMContext::MD_align);
464 | } |
465 | |
466 | bool Instruction::hasPoisonGeneratingReturnAttributes() const { |
  if (const auto *CB = dyn_cast<CallBase>(this)) {
    AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
    return RetAttrs.hasAttribute(Attribute::Range) ||
           RetAttrs.hasAttribute(Attribute::Alignment) ||
           RetAttrs.hasAttribute(Attribute::NonNull);
472 | } |
473 | return false; |
474 | } |
475 | |
476 | void Instruction::dropPoisonGeneratingReturnAttributes() { |
  if (auto *CB = dyn_cast<CallBase>(this)) {
    AttributeMask AM;
    AM.addAttribute(Attribute::Range);
    AM.addAttribute(Attribute::Alignment);
    AM.addAttribute(Attribute::NonNull);
    CB->removeRetAttrs(AM);
483 | } |
  assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
485 | } |
486 | |
487 | void Instruction::dropUBImplyingAttrsAndUnknownMetadata( |
488 | ArrayRef<unsigned> KnownIDs) { |
489 | dropUnknownNonDebugMetadata(KnownIDs); |
  auto *CB = dyn_cast<CallBase>(this);
491 | if (!CB) |
492 | return; |
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
496 | AttributeList AL = CB->getAttributes(); |
497 | if (AL.isEmpty()) |
498 | return; |
499 | AttributeMask UBImplyingAttributes = |
500 | AttributeFuncs::getUBImplyingAttributes(); |
501 | for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++) |
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
504 | } |
505 | |
506 | void Instruction::dropUBImplyingAttrsAndMetadata() { |
507 | // !annotation metadata does not impact semantics. |
508 | // !range, !nonnull and !align produce poison, so they are safe to speculate. |
509 | // !noundef and various AA metadata must be dropped, as it generally produces |
510 | // immediate undefined behavior. |
511 | unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range, |
512 | LLVMContext::MD_nonnull, LLVMContext::MD_align}; |
513 | dropUBImplyingAttrsAndUnknownMetadata(KnownIDs); |
514 | } |
515 | |
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
618 | |
619 | void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) { |
620 | // Copy the wrapping flags. |
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(SrcPD->isDisjoint());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
656 | setNonNeg(NNI->hasNonNeg()); |
657 | } |
658 | |
659 | void Instruction::andIRFlags(const Value *V) { |
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
697 | setNonNeg(hasNonNeg() && NNI->hasNonNeg()); |
698 | } |
699 | |
700 | const char *Instruction::getOpcodeName(unsigned OpCode) { |
701 | switch (OpCode) { |
702 | // Terminators |
703 | case Ret: return "ret" ; |
704 | case Br: return "br" ; |
705 | case Switch: return "switch" ; |
706 | case IndirectBr: return "indirectbr" ; |
707 | case Invoke: return "invoke" ; |
708 | case Resume: return "resume" ; |
709 | case Unreachable: return "unreachable" ; |
710 | case CleanupRet: return "cleanupret" ; |
711 | case CatchRet: return "catchret" ; |
712 | case CatchPad: return "catchpad" ; |
713 | case CatchSwitch: return "catchswitch" ; |
714 | case CallBr: return "callbr" ; |
715 | |
716 | // Standard unary operators... |
717 | case FNeg: return "fneg" ; |
718 | |
719 | // Standard binary operators... |
720 | case Add: return "add" ; |
721 | case FAdd: return "fadd" ; |
722 | case Sub: return "sub" ; |
723 | case FSub: return "fsub" ; |
724 | case Mul: return "mul" ; |
725 | case FMul: return "fmul" ; |
726 | case UDiv: return "udiv" ; |
727 | case SDiv: return "sdiv" ; |
728 | case FDiv: return "fdiv" ; |
729 | case URem: return "urem" ; |
730 | case SRem: return "srem" ; |
731 | case FRem: return "frem" ; |
732 | |
733 | // Logical operators... |
734 | case And: return "and" ; |
735 | case Or : return "or" ; |
736 | case Xor: return "xor" ; |
737 | |
738 | // Memory instructions... |
739 | case Alloca: return "alloca" ; |
740 | case Load: return "load" ; |
741 | case Store: return "store" ; |
742 | case AtomicCmpXchg: return "cmpxchg" ; |
743 | case AtomicRMW: return "atomicrmw" ; |
744 | case Fence: return "fence" ; |
745 | case GetElementPtr: return "getelementptr" ; |
746 | |
747 | // Convert instructions... |
748 | case Trunc: return "trunc" ; |
749 | case ZExt: return "zext" ; |
750 | case SExt: return "sext" ; |
751 | case FPTrunc: return "fptrunc" ; |
752 | case FPExt: return "fpext" ; |
753 | case FPToUI: return "fptoui" ; |
754 | case FPToSI: return "fptosi" ; |
755 | case UIToFP: return "uitofp" ; |
756 | case SIToFP: return "sitofp" ; |
757 | case IntToPtr: return "inttoptr" ; |
758 | case PtrToInt: return "ptrtoint" ; |
759 | case BitCast: return "bitcast" ; |
760 | case AddrSpaceCast: return "addrspacecast" ; |
761 | |
762 | // Other instructions... |
763 | case ICmp: return "icmp" ; |
764 | case FCmp: return "fcmp" ; |
765 | case PHI: return "phi" ; |
766 | case Select: return "select" ; |
767 | case Call: return "call" ; |
768 | case Shl: return "shl" ; |
769 | case LShr: return "lshr" ; |
770 | case AShr: return "ashr" ; |
771 | case VAArg: return "va_arg" ; |
772 | case ExtractElement: return "extractelement" ; |
773 | case InsertElement: return "insertelement" ; |
774 | case ShuffleVector: return "shufflevector" ; |
775 | case ExtractValue: return "extractvalue" ; |
776 | case InsertValue: return "insertvalue" ; |
777 | case LandingPad: return "landingpad" ; |
778 | case CleanupPad: return "cleanuppad" ; |
779 | case Freeze: return "freeze" ; |
780 | |
781 | default: return "<Invalid operator> " ; |
782 | } |
783 | } |
784 | |
785 | /// This must be kept in sync with FunctionComparator::cmpOperations in |
786 | /// lib/Transforms/IPO/MergeFunctions.cpp. |
787 | bool Instruction::hasSameSpecialState(const Instruction *I2, |
788 | bool IgnoreAlignment) const { |
789 | auto I1 = this; |
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();
851 | |
852 | return true; |
853 | } |
854 | |
855 | bool Instruction::isIdenticalTo(const Instruction *I) const { |
856 | return isIdenticalToWhenDefined(I) && |
857 | SubclassOptionalData == I->SubclassOptionalData; |
858 | } |
859 | |
860 | bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const { |
861 | if (getOpcode() != I->getOpcode() || |
862 | getNumOperands() != I->getNumOperands() || |
863 | getType() != I->getType()) |
864 | return false; |
865 | |
866 | // If both instructions have no operands, they are identical. |
867 | if (getNumOperands() == 0 && I->getNumOperands() == 0) |
    return this->hasSameSpecialState(I);
869 | |
870 | // We have two instructions of identical opcode and #operands. Check to see |
871 | // if all operands are the same. |
872 | if (!std::equal(op_begin(), op_end(), I->op_begin())) |
873 | return false; |
874 | |
875 | // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()! |
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
878 | return std::equal(thisPHI->block_begin(), thisPHI->block_end(), |
879 | otherPHI->block_begin()); |
880 | } |
881 | |
  return this->hasSameSpecialState(I);
883 | } |
884 | |
885 | // Keep this in sync with FunctionComparator::cmpOperations in |
886 | // lib/Transforms/IPO/MergeFunctions.cpp. |
887 | bool Instruction::isSameOperationAs(const Instruction *I, |
888 | unsigned flags) const { |
889 | bool IgnoreAlignment = flags & CompareIgnoringAlignment; |
890 | bool UseScalarTypes = flags & CompareUsingScalarTypes; |
891 | |
892 | if (getOpcode() != I->getOpcode() || |
893 | getNumOperands() != I->getNumOperands() || |
894 | (UseScalarTypes ? |
895 | getType()->getScalarType() != I->getType()->getScalarType() : |
896 | getType() != I->getType())) |
897 | return false; |
898 | |
899 | // We have two instructions of identical opcode and #operands. Check to see |
900 | // if all operands are the same type |
901 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) |
902 | if (UseScalarTypes ? |
903 | getOperand(i)->getType()->getScalarType() != |
904 | I->getOperand(i)->getType()->getScalarType() : |
905 | getOperand(i)->getType() != I->getOperand(i)->getType()) |
906 | return false; |
907 | |
  return this->hasSameSpecialState(I, IgnoreAlignment);
909 | } |
910 | |
911 | bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const { |
912 | for (const Use &U : uses()) { |
    // PHI nodes use values in their corresponding predecessor blocks. For
    // other instructions, just check whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
917 | if (!PN) { |
918 | if (I->getParent() != BB) |
919 | return true; |
920 | continue; |
921 | } |
922 | |
923 | if (PN->getIncomingBlock(U) != BB) |
924 | return true; |
925 | } |
926 | return false; |
927 | } |
928 | |
929 | bool Instruction::mayReadFromMemory() const { |
930 | switch (getOpcode()) { |
931 | default: return false; |
932 | case Instruction::VAArg: |
933 | case Instruction::Load: |
934 | case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory |
935 | case Instruction::AtomicCmpXchg: |
936 | case Instruction::AtomicRMW: |
937 | case Instruction::CatchPad: |
938 | case Instruction::CatchRet: |
939 | return true; |
940 | case Instruction::Call: |
941 | case Instruction::Invoke: |
942 | case Instruction::CallBr: |
    return !cast<CallBase>(this)->onlyWritesMemory();
944 | case Instruction::Store: |
    return !cast<StoreInst>(this)->isUnordered();
946 | } |
947 | } |
948 | |
949 | bool Instruction::mayWriteToMemory() const { |
950 | switch (getOpcode()) { |
951 | default: return false; |
952 | case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory |
953 | case Instruction::Store: |
954 | case Instruction::VAArg: |
955 | case Instruction::AtomicCmpXchg: |
956 | case Instruction::AtomicRMW: |
957 | case Instruction::CatchPad: |
958 | case Instruction::CatchRet: |
959 | return true; |
960 | case Instruction::Call: |
961 | case Instruction::Invoke: |
962 | case Instruction::CallBr: |
    return !cast<CallBase>(this)->onlyReadsMemory();
964 | case Instruction::Load: |
    return !cast<LoadInst>(this)->isUnordered();
966 | } |
967 | } |
968 | |
969 | bool Instruction::isAtomic() const { |
970 | switch (getOpcode()) { |
971 | default: |
972 | return false; |
973 | case Instruction::AtomicCmpXchg: |
974 | case Instruction::AtomicRMW: |
975 | case Instruction::Fence: |
976 | return true; |
977 | case Instruction::Load: |
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
979 | case Instruction::Store: |
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
981 | } |
982 | } |
983 | |
984 | bool Instruction::hasAtomicLoad() const { |
985 | assert(isAtomic()); |
986 | switch (getOpcode()) { |
987 | default: |
988 | return false; |
989 | case Instruction::AtomicCmpXchg: |
990 | case Instruction::AtomicRMW: |
991 | case Instruction::Load: |
992 | return true; |
993 | } |
994 | } |
995 | |
996 | bool Instruction::hasAtomicStore() const { |
997 | assert(isAtomic()); |
998 | switch (getOpcode()) { |
999 | default: |
1000 | return false; |
1001 | case Instruction::AtomicCmpXchg: |
1002 | case Instruction::AtomicRMW: |
1003 | case Instruction::Store: |
1004 | return true; |
1005 | } |
1006 | } |
1007 | |
1008 | bool Instruction::isVolatile() const { |
1009 | switch (getOpcode()) { |
1010 | default: |
1011 | return false; |
1012 | case Instruction::AtomicRMW: |
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
1020 | case Instruction::Call: |
1021 | case Instruction::Invoke: |
1022 | // There are a very limited number of intrinsics with volatile flags. |
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
1032 | } |
1033 | } |
1034 | return false; |
1035 | } |
1036 | } |
1037 | |
1038 | Type *Instruction::getAccessType() const { |
1039 | switch (getOpcode()) { |
1040 | case Instruction::Store: |
    return cast<StoreInst>(this)->getValueOperand()->getType();
1042 | case Instruction::Load: |
1043 | case Instruction::AtomicRMW: |
1044 | return getType(); |
1045 | case Instruction::AtomicCmpXchg: |
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
1047 | case Instruction::Call: |
1048 | case Instruction::Invoke: |
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
1050 | switch (II->getIntrinsicID()) { |
1051 | case Intrinsic::masked_load: |
1052 | case Intrinsic::masked_gather: |
1053 | case Intrinsic::masked_expandload: |
1054 | case Intrinsic::vp_load: |
1055 | case Intrinsic::vp_gather: |
1056 | case Intrinsic::experimental_vp_strided_load: |
1057 | return II->getType(); |
1058 | case Intrinsic::masked_store: |
1059 | case Intrinsic::masked_scatter: |
1060 | case Intrinsic::masked_compressstore: |
1061 | case Intrinsic::vp_store: |
1062 | case Intrinsic::vp_scatter: |
1063 | case Intrinsic::experimental_vp_strided_store: |
        return II->getOperand(0)->getType();
1065 | default: |
1066 | break; |
1067 | } |
1068 | } |
1069 | } |
1070 | |
1071 | return nullptr; |
1072 | } |
1073 | |
1074 | static bool canUnwindPastLandingPad(const LandingPadInst *LP, |
1075 | bool IncludePhaseOneUnwind) { |
1076 | // Because phase one unwinding skips cleanup landingpads, we effectively |
1077 | // unwind past this frame, and callers need to have valid unwind info. |
1078 | if (LP->isCleanup()) |
1079 | return IncludePhaseOneUnwind; |
1080 | |
1081 | for (unsigned I = 0; I < LP->getNumClauses(); ++I) { |
    Constant *Clause = LP->getClause(I);
    // catch ptr null catches all exceptions.
    if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
      return false;
    // filter [0 x ptr] catches all exceptions.
    if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
1088 | return false; |
1089 | } |
1090 | |
1091 | // May catch only some subset of exceptions, in which case other exceptions |
1092 | // will continue unwinding. |
1093 | return true; |
1094 | } |
1095 | |
1096 | bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const { |
1097 | switch (getOpcode()) { |
1098 | case Instruction::Call: |
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
  case Instruction::Resume:
    return true;
  case Instruction::Invoke: {
    // Landingpads themselves don't unwind -- however, an invoke of a skipped
    // landingpad may continue unwinding.
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
    Instruction *Pad = UnwindDest->getFirstNonPHI();
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
1112 | return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind); |
1113 | return false; |
1114 | } |
1115 | case Instruction::CleanupPad: |
1116 | // Treat the same as cleanup landingpad. |
1117 | return IncludePhaseOneUnwind; |
1118 | default: |
1119 | return false; |
1120 | } |
1121 | } |
1122 | |
1123 | bool Instruction::mayHaveSideEffects() const { |
1124 | return mayWriteToMemory() || mayThrow() || !willReturn(); |
1125 | } |
1126 | |
1127 | bool Instruction::isSafeToRemove() const { |
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
1129 | !this->isTerminator() && !this->isEHPad(); |
1130 | } |
1131 | |
1132 | bool Instruction::willReturn() const { |
1133 | // Volatile store isn't guaranteed to return; see LangRef. |
  if (auto *SI = dyn_cast<StoreInst>(this))
1135 | return !SI->isVolatile(); |
1136 | |
  if (const auto *CB = dyn_cast<CallBase>(this))
    return CB->hasFnAttr(Attribute::WillReturn);
1139 | return true; |
1140 | } |
1141 | |
1142 | bool Instruction::isLifetimeStartOrEnd() const { |
  auto *II = dyn_cast<IntrinsicInst>(this);
1144 | if (!II) |
1145 | return false; |
1146 | Intrinsic::ID ID = II->getIntrinsicID(); |
1147 | return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end; |
1148 | } |
1149 | |
1150 | bool Instruction::isLaunderOrStripInvariantGroup() const { |
  auto *II = dyn_cast<IntrinsicInst>(this);
1152 | if (!II) |
1153 | return false; |
1154 | Intrinsic::ID ID = II->getIntrinsicID(); |
1155 | return ID == Intrinsic::launder_invariant_group || |
1156 | ID == Intrinsic::strip_invariant_group; |
1157 | } |
1158 | |
1159 | bool Instruction::isDebugOrPseudoInst() const { |
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
1161 | } |
1162 | |
1163 | const Instruction * |
1164 | Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const { |
1165 | for (const Instruction *I = getNextNode(); I; I = I->getNextNode()) |
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
1167 | return I; |
1168 | return nullptr; |
1169 | } |
1170 | |
1171 | const Instruction * |
1172 | Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const { |
1173 | for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode()) |
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
1175 | return I; |
1176 | return nullptr; |
1177 | } |
1178 | |
1179 | const DebugLoc &Instruction::getStableDebugLoc() const { |
  if (isa<DbgInfoIntrinsic>(this))
1181 | if (const Instruction *Next = getNextNonDebugInstruction()) |
1182 | return Next->getDebugLoc(); |
1183 | return getDebugLoc(); |
1184 | } |
1185 | |
1186 | bool Instruction::isAssociative() const { |
  if (auto *II = dyn_cast<IntrinsicInst>(this))
1188 | return II->isAssociative(); |
1189 | unsigned Opcode = getOpcode(); |
1190 | if (isAssociative(Opcode)) |
1191 | return true; |
1192 | |
1193 | switch (Opcode) { |
1194 | case FMul: |
1195 | case FAdd: |
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
1198 | default: |
1199 | return false; |
1200 | } |
1201 | } |
1202 | |
1203 | bool Instruction::isCommutative() const { |
  if (auto *II = dyn_cast<IntrinsicInst>(this))
1205 | return II->isCommutative(); |
1206 | // TODO: Should allow icmp/fcmp? |
  return isCommutative(getOpcode());
1208 | } |
1209 | |
1210 | unsigned Instruction::getNumSuccessors() const { |
1211 | switch (getOpcode()) { |
1212 | #define HANDLE_TERM_INST(N, OPC, CLASS) \ |
1213 | case Instruction::OPC: \ |
1214 | return static_cast<const CLASS *>(this)->getNumSuccessors(); |
1215 | #include "llvm/IR/Instruction.def" |
1216 | default: |
1217 | break; |
1218 | } |
1219 | llvm_unreachable("not a terminator" ); |
1220 | } |
1221 | |
1222 | BasicBlock *Instruction::getSuccessor(unsigned idx) const { |
1223 | switch (getOpcode()) { |
1224 | #define HANDLE_TERM_INST(N, OPC, CLASS) \ |
1225 | case Instruction::OPC: \ |
1226 | return static_cast<const CLASS *>(this)->getSuccessor(idx); |
1227 | #include "llvm/IR/Instruction.def" |
1228 | default: |
1229 | break; |
1230 | } |
1231 | llvm_unreachable("not a terminator" ); |
1232 | } |
1233 | |
1234 | void Instruction::setSuccessor(unsigned idx, BasicBlock *B) { |
1235 | switch (getOpcode()) { |
1236 | #define HANDLE_TERM_INST(N, OPC, CLASS) \ |
1237 | case Instruction::OPC: \ |
1238 | return static_cast<CLASS *>(this)->setSuccessor(idx, B); |
1239 | #include "llvm/IR/Instruction.def" |
1240 | default: |
1241 | break; |
1242 | } |
1243 | llvm_unreachable("not a terminator" ); |
1244 | } |
1245 | |
1246 | void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) { |
1247 | for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors(); |
1248 | Idx != NumSuccessors; ++Idx) |
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
1251 | } |
1252 | |
1253 | Instruction *Instruction::cloneImpl() const { |
1254 | llvm_unreachable("Subclass of Instruction failed to implement cloneImpl" ); |
1255 | } |
1256 | |
1257 | void Instruction::swapProfMetadata() { |
  MDNode *ProfileData = getBranchWeightMDNode(*this);
1259 | if (!ProfileData) |
1260 | return; |
1261 | unsigned FirstIdx = getBranchWeightOffset(ProfileData); |
1262 | if (ProfileData->getNumOperands() != 2 + FirstIdx) |
1263 | return; |
1264 | |
1265 | unsigned SecondIdx = FirstIdx + 1; |
1266 | SmallVector<Metadata *, 4> Ops; |
1267 | // If there are more weights past the second, we can't swap them |
1268 | if (ProfileData->getNumOperands() > SecondIdx + 1) |
1269 | return; |
1270 | for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) { |
    Ops.push_back(ProfileData->getOperand(Idx));
1272 | } |
1273 | // Switch the order of the weights |
  Ops.push_back(ProfileData->getOperand(SecondIdx));
  Ops.push_back(ProfileData->getOperand(FirstIdx));
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
1278 | } |
1279 | |
1280 | void Instruction::copyMetadata(const Instruction &SrcInst, |
1281 | ArrayRef<unsigned> WL) { |
1282 | if (!SrcInst.hasMetadata()) |
1283 | return; |
1284 | |
1285 | SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end()); |
1286 | |
1287 | // Otherwise, enumerate and copy over metadata from the old instruction to the |
1288 | // new one. |
1289 | SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs; |
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
1291 | for (const auto &MD : TheMDs) { |
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
1294 | } |
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
1296 | setDebugLoc(SrcInst.getDebugLoc()); |
1297 | } |
1298 | |
1299 | Instruction *Instruction::clone() const { |
1300 | Instruction *New = nullptr; |
1301 | switch (getOpcode()) { |
1302 | default: |
1303 | llvm_unreachable("Unhandled Opcode." ); |
1304 | #define HANDLE_INST(num, opc, clas) \ |
1305 | case Instruction::opc: \ |
1306 | New = cast<clas>(this)->cloneImpl(); \ |
1307 | break; |
1308 | #include "llvm/IR/Instruction.def" |
1309 | #undef HANDLE_INST |
1310 | } |
1311 | |
1312 | New->SubclassOptionalData = SubclassOptionalData; |
  New->copyMetadata(*this);
1314 | return New; |
1315 | } |
1316 | |