1//===-- Value.cpp - Implement the Value class -----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the Value, ValueHandle, and User classes.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/IR/Value.h"
14#include "LLVMContextImpl.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/SmallString.h"
17#include "llvm/IR/Constant.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/DataLayout.h"
20#include "llvm/IR/DebugInfo.h"
21#include "llvm/IR/DerivedTypes.h"
22#include "llvm/IR/DerivedUser.h"
23#include "llvm/IR/GetElementPtrTypeIterator.h"
24#include "llvm/IR/InstrTypes.h"
25#include "llvm/IR/Instructions.h"
26#include "llvm/IR/IntrinsicInst.h"
27#include "llvm/IR/Module.h"
28#include "llvm/IR/Operator.h"
29#include "llvm/IR/TypedPointerType.h"
30#include "llvm/IR/ValueHandle.h"
31#include "llvm/IR/ValueSymbolTable.h"
32#include "llvm/Support/CommandLine.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include <algorithm>
36
37using namespace llvm;
38
// Command-line toggle consulted by getPointerDereferenceableBytes(): when
// set, dereferenceability implied by attributes/metadata is only assumed to
// hold at the point of definition (so CanBeFreed must also be considered).
static cl::opt<bool> UseDerefAtPointSemantics(
    "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(Val: false),
    cl::desc("Deref attributes and metadata infer facts at definition only"));
42
43//===----------------------------------------------------------------------===//
44// Value Class
45//===----------------------------------------------------------------------===//
/// Sanity-check the type a Value is created with: it must be non-null and
/// must not have a typed-pointer scalar type. Returns \p Ty unchanged so it
/// can be used inline in a constructor initializer list.
static inline Type *checkType(Type *Ty) {
  assert(Ty && "Value defined with a null type: Error!");
  assert(!isa<TypedPointerType>(Ty->getScalarType()) &&
         "Cannot have values with typed pointer types");
  return Ty;
}
52
/// Construct a Value of type \p ty with subclass id \p scid. All bitfields
/// start cleared; the type is validated by checkType().
Value::Value(Type *ty, unsigned scid)
    : SubclassID(scid), HasValueHandle(0), SubclassOptionalData(0),
      SubclassData(0), NumUserOperands(0), IsUsedByMD(false), HasName(false),
      VTy(checkType(Ty: ty)) {
  static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
  // FIXME: Why isn't this in the subclass gunk??
  // Note, we cannot call isa<CallInst> before the CallInst has been
  // constructed.
  unsigned OpCode = 0;
  if (SubclassID >= InstructionVal)
    OpCode = SubclassID - InstructionVal;
  // Call-like instructions may additionally produce struct types; anything
  // else that is neither a basic block nor a constant must be a first-class
  // or void type.
  if (OpCode == Instruction::Call || OpCode == Instruction::Invoke ||
      OpCode == Instruction::CallBr)
    assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
           "invalid CallBase type!");
  else if (SubclassID != BasicBlockVal &&
           (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
    assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
           "Cannot create non-first-class values except for constants!");
  // Keep Value small: a size change here affects every IR object.
  static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
                "Value too big");
}
75
/// Destroy a Value: notify value handles and metadata trackers that the
/// value is going away, verify (in asserts builds) that no uses remain, and
/// free the name entry.
Value::~Value() {
  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsDeleted(V: this);
  if (isUsedByMetadata())
    ValueAsMetadata::handleDeletion(V: this);

#ifndef NDEBUG // Only in -g mode...
  // Check to make sure that there are no uses of this value that are still
  // around when the value is destroyed. If there are, then we have a dangling
  // reference and something is wrong. This code is here to print out where
  // the value is still being referenced.
  //
  // Note that use_empty() cannot be called here, as it eventually downcasts
  // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
  // been destructed, so accessing it is UB.
  //
  if (!materialized_use_empty()) {
    dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
    for (auto *U : users())
      dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";

    llvm_unreachable("Uses remain when a value is destroyed!");
  }
#endif

  // If this value is named, destroy the name. This should not be in a symtab
  // at this point.
  destroyValueName();
}
106
/// Delete this Value through the correct subclass destructor, dispatching on
/// the value ID. Constants must go through destroyConstant() instead, and
/// HANDLE_MEMORY_VALUE kinds are freed via their DerivedUser callback.
void Value::deleteValue() {
  switch (getValueID()) {
#define HANDLE_VALUE(Name)                                                     \
  case Value::Name##Val:                                                       \
    delete static_cast<Name *>(this);                                          \
    break;
#define HANDLE_MEMORY_VALUE(Name)                                              \
  case Value::Name##Val:                                                       \
    static_cast<DerivedUser *>(this)->DeleteValue(                             \
        static_cast<DerivedUser *>(this));                                     \
    break;
#define HANDLE_CONSTANT(Name)                                                  \
  case Value::Name##Val:                                                       \
    llvm_unreachable("constants should be destroyed with destroyConstant");    \
    break;
#define HANDLE_INSTRUCTION(Name) /* nothing */
#include "llvm/IR/Value.def"

#define HANDLE_INST(N, OPC, CLASS)                                             \
  case Value::InstructionVal + Instruction::OPC:                               \
    delete static_cast<CLASS *>(this);                                         \
    break;
#define HANDLE_USER_INST(N, OPC, CLASS)
#include "llvm/IR/Instruction.def"

  default:
    llvm_unreachable("attempting to delete unknown value kind");
  }
}
136
137void Value::destroyValueName() {
138 ValueName *Name = getValueName();
139 if (Name) {
140 MallocAllocator Allocator;
141 Name->Destroy(allocator&: Allocator);
142 }
143 setValueName(nullptr);
144}
145
146bool Value::hasNUses(unsigned N) const {
147 if (!UseList)
148 return N == 0;
149
150 // TODO: Disallow for ConstantData and remove !UseList check?
151 return hasNItems(Begin: use_begin(), End: use_end(), N);
152}
153
154bool Value::hasNUsesOrMore(unsigned N) const {
155 // TODO: Disallow for ConstantData and remove !UseList check?
156 if (!UseList)
157 return N == 0;
158
159 return hasNItemsOrMore(Begin: use_begin(), End: use_end(), N);
160}
161
162bool Value::hasOneUser() const {
163 if (use_empty())
164 return false;
165 if (hasOneUse())
166 return true;
167 return std::equal(first1: ++user_begin(), last1: user_end(), first2: user_begin());
168}
169
/// Counting predicate for hasNUndroppableUses*: count only non-droppable users.
static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }
171
172Use *Value::getSingleUndroppableUse() {
173 Use *Result = nullptr;
174 for (Use &U : uses()) {
175 if (!U.getUser()->isDroppable()) {
176 if (Result)
177 return nullptr;
178 Result = &U;
179 }
180 }
181 return Result;
182}
183
184User *Value::getUniqueUndroppableUser() {
185 User *Result = nullptr;
186 for (auto *U : users()) {
187 if (!U->isDroppable()) {
188 if (Result && Result != U)
189 return nullptr;
190 Result = U;
191 }
192 }
193 return Result;
194}
195
/// Return true iff exactly \p N users of this value are not droppable.
bool Value::hasNUndroppableUses(unsigned int N) const {
  return hasNItems(Begin: user_begin(), End: user_end(), N, ShouldBeCounted&: isUnDroppableUser);
}
199
/// Return true iff at least \p N users of this value are not droppable.
bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
  return hasNItemsOrMore(Begin: user_begin(), End: user_end(), N, ShouldBeCounted&: isUnDroppableUser);
}
203
204void Value::dropDroppableUses(
205 llvm::function_ref<bool(const Use *)> ShouldDrop) {
206 SmallVector<Use *, 8> ToBeEdited;
207 for (Use &U : uses())
208 if (U.getUser()->isDroppable() && ShouldDrop(&U))
209 ToBeEdited.push_back(Elt: &U);
210 for (Use *U : ToBeEdited)
211 dropDroppableUse(U&: *U);
212}
213
214void Value::dropDroppableUsesIn(User &Usr) {
215 assert(Usr.isDroppable() && "Expected a droppable user!");
216 for (Use &UsrOp : Usr.operands()) {
217 if (UsrOp.get() == this)
218 dropDroppableUse(U&: UsrOp);
219 }
220}
221
/// Neutralize the droppable use \p U. For llvm.assume, operand 0 (the
/// condition) becomes a trivially-true i1; any other operand is replaced by
/// poison and its operand bundle is retagged "ignore" so the assume stays
/// structurally valid while carrying no information.
void Value::dropDroppableUse(Use &U) {
  if (auto *Assume = dyn_cast<AssumeInst>(Val: U.getUser())) {
    unsigned OpNo = U.getOperandNo();
    if (OpNo == 0)
      U.set(ConstantInt::getTrue(Context&: Assume->getContext()));
    else {
      U.set(PoisonValue::get(T: U.get()->getType()));
      CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpIdx: OpNo);
      BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag(Tag: "ignore");
    }
    return;
  }

  // Assume instructions are currently the only droppable users.
  llvm_unreachable("unknown droppable use");
}
237
/// Return true if any instruction in \p BB uses this value.
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
  assert(hasUseList() && "ConstantData has no use-list");

  // This can be computed either by scanning the instructions in BB, or by
  // scanning the use list of this Value. Both lists can be very long, but
  // usually one is quite short.
  //
  // Scan both lists simultaneously until one is exhausted. This limits the
  // search to the shorter list.
  BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
  const_user_iterator UI = user_begin(), UE = user_end();
  for (; BI != BE && UI != UE; ++BI, ++UI) {
    // Scan basic block: Check if this Value is used by the instruction at BI.
    if (is_contained(Range: BI->operands(), Element: this))
      return true;
    // Scan use list: Check if the use at UI is in BB.
    const auto *User = dyn_cast<Instruction>(Val: *UI);
    if (User && User->getParent() == BB)
      return true;
  }
  // One of the two lists ran out without a match, so no use exists in BB.
  return false;
}
260
261unsigned Value::getNumUses() const {
262 // TODO: Disallow for ConstantData and remove !UseList check?
263 if (!UseList)
264 return 0;
265 return (unsigned)std::distance(first: use_begin(), last: use_end());
266}
267
/// Locate the symbol table that would hold \p V's name, storing it (possibly
/// null when V is detached from a function/module) in \p ST. Returns true
/// iff \p V is a kind of value (a non-global Constant) that can never carry
/// a name at all.
static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
  ST = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(Val: V)) {
    if (BasicBlock *P = I->getParent())
      if (Function *PP = P->getParent())
        ST = PP->getValueSymbolTable();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(Val: V)) {
    if (Function *P = BB->getParent())
      ST = P->getValueSymbolTable();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Val: V)) {
    if (Module *P = GV->getParent())
      ST = &P->getValueSymbolTable();
  } else if (Argument *A = dyn_cast<Argument>(Val: V)) {
    if (Function *P = A->getParent())
      ST = P->getValueSymbolTable();
  } else {
    assert(isa<Constant>(V) && "Unknown value type!");
    return true; // no name is setable for this.
  }
  return false;
}
289
290ValueName *Value::getValueName() const {
291 if (!HasName) return nullptr;
292
293 LLVMContext &Ctx = getContext();
294 auto I = Ctx.pImpl->ValueNames.find(Val: this);
295 assert(I != Ctx.pImpl->ValueNames.end() &&
296 "No name entry found!");
297
298 return I->second;
299}
300
/// Install \p VN as this value's name entry in the context-wide side map, or
/// erase the entry when \p VN is null; keeps the HasName bit in sync.
void Value::setValueName(ValueName *VN) {
  LLVMContext &Ctx = getContext();

  assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
         "HasName bit out of sync!");

  if (!VN) {
    if (HasName)
      Ctx.pImpl->ValueNames.erase(Val: this);
    HasName = false;
    return;
  }

  HasName = true;
  Ctx.pImpl->ValueNames[this] = VN;
}
317
318StringRef Value::getName() const {
319 // Make sure the empty string is still a C string. For historical reasons,
320 // some clients want to call .data() on the result and expect it to be null
321 // terminated.
322 if (!hasName())
323 return StringRef("", 0);
324 return getValueName()->getKey();
325}
326
/// Shared implementation of setName(): updates the name entry and, when the
/// value lives in a symbol table, keeps that table consistent (the table
/// handles uniquing on collision via createValueName).
void Value::setNameImpl(const Twine &NewName) {
  // Non-GlobalValue names may be discarded wholesale by the context.
  bool NeedNewName =
      !getContext().shouldDiscardValueNames() || isa<GlobalValue>(Val: this);

  // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
  // and there is no need to delete the old name.
  if (!NeedNewName && !hasName())
    return;

  // Fast path for common IRBuilder case of setName("") when there is no name.
  if (NewName.isTriviallyEmpty() && !hasName())
    return;

  SmallString<256> NameData;
  StringRef NameRef = NeedNewName ? NewName.toStringRef(Out&: NameData) : "";
  assert(!NameRef.contains(0) && "Null bytes are not allowed in names");

  // Name isn't changing?
  if (getName() == NameRef)
    return;

  assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");

  // Get the symbol table to update for this object.
  ValueSymbolTable *ST;
  if (getSymTab(V: this, ST))
    return; // Cannot set a name on this value (e.g. constant).

  ValueName *NewValueName = nullptr;
  if (!ST) { // No symbol table to update? Just do the change.
    if (!NameRef.empty()) {
      // Create the new name.
      MallocAllocator Allocator;
      NewValueName = ValueName::create(key: NameRef, allocator&: Allocator);
    }
    // NOTE: Could optimize for the case the name is shrinking to not deallocate
    // then reallocated.
    destroyValueName();

    if (NewValueName) {
      assert(NeedNewName);
      setValueName(NewValueName);
      // Link the entry back to this value so getKey()/getValue() agree.
      getValueName()->setValue(this);
    }
    return;
  }

  // In-table case: let the table create (and, on collision, uniquify) the
  // new entry before the old one is removed.
  if (!NameRef.empty())
    NewValueName = ST->createValueName(Name: NameRef, V: this);

  // NOTE: Could optimize for the case the name is shrinking to not deallocate
  // then reallocated.
  if (hasName()) {
    // Remove old name.
    ST->removeValueName(V: getValueName());
    destroyValueName();

    if (NameRef.empty())
      return;
  }

  // Name is changing to something new.
  assert(NeedNewName && NewValueName != nullptr);
  setValueName(NewValueName);
}
392
393void Value::setName(const Twine &NewName) {
394 setNameImpl(NewName);
395 if (Function *F = dyn_cast<Function>(Val: this))
396 F->updateAfterNameChange();
397}
398
/// Transfer the name from \p V to this value, leaving \p V unnamed. Handles
/// all combinations of the two values being in the same, different, or no
/// symbol table, and the cases where a name cannot be set at all.
void Value::takeName(Value *V) {
  assert(V != this && "Illegal call to this->takeName(this)!");
  ValueSymbolTable *ST = nullptr;
  // If this value has a name, drop it.
  if (hasName()) {
    // Get the symtab this is in.
    if (getSymTab(V: this, ST)) {
      // We can't set a name on this value, but we need to clear V's name if
      // it has one.
      if (V->hasName()) V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }

    // Remove old name.
    if (ST)
      ST->removeValueName(V: getValueName());
    destroyValueName();
  }

  // Now we know that this has no name.

  // If V has no name either, we're done.
  if (!V->hasName()) return;

  // Get this's symtab if we didn't before.
  if (!ST) {
    if (getSymTab(V: this, ST)) {
      // Clear V's name.
      V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }
  }

  // Get V's ST, this should always succeed, because V has a name.
  ValueSymbolTable *VST;
  bool Failure = getSymTab(V, ST&: VST);
  assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;

  // If these values are both in the same symtab, we can do this very fast.
  // This works even if both values have no symtab yet.
  if (ST == VST) {
    // Take the name!
    setValueName(V->getValueName());
    V->setValueName(nullptr);
    // Re-point the entry at its new owner.
    getValueName()->setValue(this);
    return;
  }

  // Otherwise, things are slightly more complex. Remove V's name from VST and
  // then reinsert it into ST.

  if (VST)
    VST->removeValueName(V: V->getValueName());
  setValueName(V->getValueName());
  V->setValueName(nullptr);
  getValueName()->setValue(this);

  // reinsertValue may rename on collision with an existing entry in ST.
  if (ST)
    ST->reinsertValue(V: this);
}
459
460std::string Value::getNameOrAsOperand() const {
461 if (!getName().empty())
462 return std::string(getName());
463
464 std::string BBName;
465 raw_string_ostream OS(BBName);
466 printAsOperand(O&: OS, PrintType: false);
467 return OS.str();
468}
469
470void Value::assertModuleIsMaterializedImpl() const {
471#ifndef NDEBUG
472 const GlobalValue *GV = dyn_cast<GlobalValue>(this);
473 if (!GV)
474 return;
475 const Module *M = GV->getParent();
476 if (!M)
477 return;
478 assert(M->isMaterialized());
479#endif
480}
481
482#ifndef NDEBUG
483static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
484 Constant *C) {
485 if (!Cache.insert(Expr).second)
486 return false;
487
488 for (auto &O : Expr->operands()) {
489 if (O == C)
490 return true;
491 auto *CE = dyn_cast<ConstantExpr>(O);
492 if (!CE)
493 continue;
494 if (contains(Cache, CE, C))
495 return true;
496 }
497 return false;
498}
499
500static bool contains(Value *Expr, Value *V) {
501 if (Expr == V)
502 return true;
503
504 auto *C = dyn_cast<Constant>(V);
505 if (!C)
506 return false;
507
508 auto *CE = dyn_cast<ConstantExpr>(Expr);
509 if (!CE)
510 return false;
511
512 SmallPtrSet<ConstantExpr *, 4> Cache;
513 return contains(Cache, CE, C);
514}
515#endif // NDEBUG
516
/// Core of replaceAllUsesWith: rewrite every materialized use of this value
/// to \p New, optionally also updating metadata uses, and notify any value
/// handles. Constant users (other than globals) must be rewritten through
/// handleOperandChange because constants are uniqued.
void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
  assert(hasUseList() && "Cannot replace constant data");
  assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceAllUsesWith(expr(this)) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceAllUses of value with new value of different type!");

  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsRAUWd(Old: this, New);
  if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
    ValueAsMetadata::handleRAUW(From: this, To: New);

  // Each iteration unlinks the head use, so the list shrinks to empty.
  while (!materialized_use_empty()) {
    Use &U = *UseList;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(Val: U.getUser())) {
      if (!isa<GlobalValue>(Val: C)) {
        C->handleOperandChange(this, New);
        continue;
      }
    }

    U.set(New);
  }

  // Replacing a block also rewires incoming-block references in successors'
  // PHI nodes.
  if (BasicBlock *BB = dyn_cast<BasicBlock>(Val: this))
    BB->replaceSuccessorsPhiUsesWith(New: cast<BasicBlock>(Val: New));
}
548
/// Replace all uses of this value, including metadata uses, with \p New.
void Value::replaceAllUsesWith(Value *New) {
  doRAUW(New, ReplaceMetaUses: ReplaceMetadataUses::Yes);
}
552
/// Replace all uses of this value with \p New, leaving metadata uses intact.
void Value::replaceNonMetadataUsesWith(Value *New) {
  doRAUW(New, ReplaceMetaUses: ReplaceMetadataUses::No);
}
556
/// Replace the uses selected by \p ShouldReplace with \p New; returns true
/// if anything changed. Constant users are gathered (deduplicated, tracked
/// through RAUW by TrackingVH) and rewritten afterwards, since uniqued
/// constants cannot have an individual Use mutated in place.
bool Value::replaceUsesWithIf(Value *New,
                              llvm::function_ref<bool(Use &U)> ShouldReplace) {
  assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");

  SmallVector<TrackingVH<Constant>, 8> Consts;
  SmallPtrSet<Constant *, 8> Visited;

  bool Changed = false;
  // Early-inc range: U.set() unlinks U from this value's use-list.
  for (Use &U : llvm::make_early_inc_range(Range: uses())) {
    if (!ShouldReplace(U))
      continue;
    Changed = true;

    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(Val: U.getUser())) {
      if (!isa<GlobalValue>(Val: C)) {
        if (Visited.insert(Ptr: C).second)
          Consts.push_back(Elt: TrackingVH<Constant>(C));
        continue;
      }
    }
    U.set(New);
  }

  while (!Consts.empty()) {
    // FIXME: handleOperandChange() updates all the uses in a given Constant,
    // not just the one passed to ShouldReplace
    Consts.pop_back_val()->handleOperandChange(this, New);
  }

  return Changed;
}
592
593/// Replace debug record uses of MetadataAsValue(ValueAsMetadata(V)) outside BB
594/// with New.
595static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB) {
596 SmallVector<DbgVariableRecord *> DPUsers;
597 findDbgUsers(V, DbgVariableRecords&: DPUsers);
598 for (auto *DVR : DPUsers) {
599 DbgMarker *Marker = DVR->getMarker();
600 if (Marker->getParent() != BB)
601 DVR->replaceVariableLocationOp(OldValue: V, NewValue: New);
602 }
603}
604
605// Like replaceAllUsesWith except it does not handle constants or basic blocks.
606// This routine leaves uses within BB.
607void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
608 assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
609 assert(!contains(New, this) &&
610 "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
611 assert(New->getType() == getType() &&
612 "replaceUses of value with new value of different type!");
613 assert(BB && "Basic block that may contain a use of 'New' must be defined\n");
614
615 replaceDbgUsesOutsideBlock(V: this, New, BB);
616 replaceUsesWithIf(New, ShouldReplace: [BB](Use &U) {
617 auto *I = dyn_cast<Instruction>(Val: U.getUser());
618 // Don't replace if it's an instruction in the BB basic block.
619 return !I || I->getParent() != BB;
620 });
621}
622
namespace {
// Various metrics for how much to strip off of pointers. Each kind selects
// which constructs stripPointerCastsAndOffsets() will look through.
enum PointerStripKind {
  PSK_ZeroIndices,                   // Casts + GEPs with all-zero indices.
  PSK_ZeroIndicesAndAliases,         // ...plus global aliases.
  PSK_ZeroIndicesSameRepresentation, // ...but not addrspacecasts.
  PSK_ForAliasAnalysis,              // ...plus 1-input PHIs/invariant.group.
  PSK_InBoundsConstantIndices,       // Inbounds GEPs w/ constant indices only.
  PSK_InBounds                       // Any inbounds GEP.
};
} // end anonymous namespace
634
/// Default do-nothing visitor for stripPointerCastsAndOffsets().
template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}
636
/// Walk up \p V's pointer operand chain, stripping the constructs selected
/// by \p StripKind and invoking \p Func on every value visited. The walk is
/// capped at 12 iterations to bound degenerate/cyclic IR in unreachable code.
template <PointerStripKind StripKind>
static const Value *stripPointerCastsAndOffsets(
    const Value *V,
    function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
  if (!V->getType()->isPointerTy())
    return V;

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle. E.g.:
  //   %gep = getelementptr inbounds nuw i8, ptr %gep, i64 32
  //
  // Bound against that case with a simple iteration counter. Practically, more
  // than 10 iterations are almost never needed.
  for (unsigned N = 0; N < 12; ++N) {
    Func(V);
    if (auto *GEP = dyn_cast<GEPOperator>(Val: V)) {
      // Decide whether this GEP is strippable under the requested kind.
      switch (StripKind) {
      case PSK_ZeroIndices:
      case PSK_ZeroIndicesAndAliases:
      case PSK_ZeroIndicesSameRepresentation:
      case PSK_ForAliasAnalysis:
        if (!GEP->hasAllZeroIndices())
          return V;
        break;
      case PSK_InBoundsConstantIndices:
        if (!GEP->hasAllConstantIndices())
          return V;
        [[fallthrough]];
      case PSK_InBounds:
        if (!GEP->isInBounds())
          return V;
        break;
      }
      const Value *NewV = GEP->getPointerOperand();
      // Quick exit for degenerate IR, which can happen in unreachable blocks.
      if (NewV == V)
        return V;
      V = NewV;
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      Value *NewV = cast<Operator>(Val: V)->getOperand(i: 0);
      if (!NewV->getType()->isPointerTy())
        return V;
      V = NewV;
    } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      // TODO: If we know an address space cast will not change the
      //       representation we could look through it here as well.
      V = cast<Operator>(Val: V)->getOperand(i: 0);
    } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(Val: V)) {
      V = cast<GlobalAlias>(Val: V)->getAliasee();
    } else if (StripKind == PSK_ForAliasAnalysis && isa<PHINode>(Val: V) &&
               cast<PHINode>(Val: V)->getNumIncomingValues() == 1) {
      // A single-incoming-value PHI is a pass-through.
      V = cast<PHINode>(Val: V)->getIncomingValue(i: 0);
    } else {
      if (const auto *Call = dyn_cast<CallBase>(Val: V)) {
        // A call whose result is a 'returned' argument aliases that argument.
        if (const Value *RV = Call->getReturnedArgOperand()) {
          V = RV;
          continue;
        }
        // The result of launder.invariant.group must alias it's argument,
        // but it can't be marked with returned attribute, that's why it needs
        // special case.
        if (StripKind == PSK_ForAliasAnalysis &&
            (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
             Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
          V = Call->getArgOperand(i: 0);
          continue;
        }
      }
      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }

  return V;
}
713
/// Strip bitcasts, addrspacecasts and all-zero-index GEPs.
const Value *Value::stripPointerCasts() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndices>(V: this);
}
717
/// Like stripPointerCasts(), but additionally looks through global aliases.
const Value *Value::stripPointerCastsAndAliases() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(V: this);
}
721
/// Like stripPointerCasts(), but does not look through addrspacecasts, which
/// may change the pointer's representation.
const Value *Value::stripPointerCastsSameRepresentation() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(V: this);
}
725
/// Strip casts and inbounds GEPs whose indices are all constant.
const Value *Value::stripInBoundsConstantOffsets() const {
  return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(V: this);
}
729
/// Like stripPointerCasts(), but additionally looks through single-incoming
/// PHIs and launder/strip.invariant.group calls (safe for alias analysis).
const Value *Value::stripPointerCastsForAliasAnalysis() const {
  return stripPointerCastsAndOffsets<PSK_ForAliasAnalysis>(V: this);
}
733
/// Strip casts/GEPs/aliases from this pointer while accumulating the total
/// constant byte offset into \p Offset. Stops (returning the current value)
/// at anything it cannot look through or whose offset it cannot represent.
const Value *Value::stripAndAccumulateConstantOffsets(
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool AllowInvariantGroup,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis,
    bool LookThroughIntToPtr) const {
  if (!getType()->isPtrOrPtrVectorTy())
    return this;

  unsigned BitWidth = Offset.getBitWidth();
  assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
         "The offset bit width does not match the DL specification.");

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;
  Visited.insert(Ptr: this);
  const Value *V = this;
  do {
    if (auto *GEP = dyn_cast<GEPOperator>(Val: V)) {
      // If in-bounds was requested, we do not strip non-in-bounds GEPs.
      if (!AllowNonInbounds && !GEP->isInBounds())
        return V;

      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getIndexTypeSizeInBits(Ty: V->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, Offset&: GEPOffset, ExternalAnalysis))
        return V;

      // Stop traversal if the pointer offset wouldn't fit in the bit-width
      // provided by the Offset argument. This can happen due to AddrSpaceCast
      // stripping.
      if (GEPOffset.getSignificantBits() > BitWidth)
        return V;

      // External Analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      APInt GEPOffsetST = GEPOffset.sextOrTrunc(width: BitWidth);
      if (!ExternalAnalysis) {
        Offset += GEPOffsetST;
      } else {
        bool Overflow = false;
        APInt OldOffset = Offset;
        Offset = Offset.sadd_ov(RHS: GEPOffsetST, Overflow);
        if (Overflow) {
          // Leave Offset untouched and stop at this GEP.
          Offset = std::move(OldOffset);
          return V;
        }
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(Val: V)->getOperand(i: 0);
    } else if (auto *GA = dyn_cast<GlobalAlias>(Val: V)) {
      // Interposable aliases may resolve to something else at link time.
      if (!GA->isInterposable())
        V = GA->getAliasee();
    } else if (const auto *Call = dyn_cast<CallBase>(Val: V)) {
      if (const Value *RV = Call->getReturnedArgOperand())
        V = RV;
      if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
        V = Call->getArgOperand(i: 0);
    } else if (auto *Int2Ptr = dyn_cast<Operator>(Val: V)) {
      // Try to accumulate across (inttoptr (add (ptrtoint p), off)).
      // NOTE(review): the `!Int2Ptr` test below is always false here — the
      // dyn_cast above already succeeded.
      if (!AllowNonInbounds || !LookThroughIntToPtr || !Int2Ptr ||
          Int2Ptr->getOpcode() != Instruction::IntToPtr ||
          Int2Ptr->getOperand(i: 0)->getType()->getScalarSizeInBits() != BitWidth)
        return V;

      auto *Add = dyn_cast<AddOperator>(Val: Int2Ptr->getOperand(i: 0));
      if (!Add)
        return V;

      auto *Ptr2Int = dyn_cast<PtrToIntOperator>(Val: Add->getOperand(i_nocapture: 0));
      auto *CI = dyn_cast<ConstantInt>(Val: Add->getOperand(i_nocapture: 1));
      if (!Ptr2Int || !CI)
        return V;

      Offset += CI->getValue();
      V = Ptr2Int->getOperand(i_nocapture: 0);
    }
    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
  } while (Visited.insert(Ptr: V).second); // Stop on the first repeated value.

  return V;
}
823
/// Strip inbounds GEPs and casts, invoking \p Func on each value visited.
const Value *
Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
  return stripPointerCastsAndOffsets<PSK_InBounds>(V: this, Func);
}
828
/// Return true if the memory this pointer refers to could be freed within
/// this value's lifetime. Conservatively answers true except for cases this
/// routine can prove immortal (constants, nofree+nosync function arguments,
/// nofree-annotated inttoptrs, and the statepoint-example GC protocol below).
bool Value::canBeFreed() const {
  assert(getType()->isPointerTy());

  // Cases that can simply never be deallocated
  // *) Constants aren't allocated per se, thus not deallocated either.
  if (isa<Constant>(Val: this))
    return false;

  // Handle byval/byref/sret/inalloca/preallocated arguments. The storage
  // lifetime is guaranteed to be longer than the callee's lifetime.
  if (auto *A = dyn_cast<Argument>(Val: this)) {
    if (A->hasPointeeInMemoryValueAttr())
      return false;
    // A pointer to an object in a function which neither frees, nor can arrange
    // for another thread to free on its behalf, can not be freed in the scope
    // of the function. Note that this logic is restricted to memory
    // allocations in existance before the call; a nofree function *is* allowed
    // to free memory it allocated.
    const Function *F = A->getParent();
    if (F->doesNotFreeMemory() && F->hasNoSync())
      return false;
  }

  // !nofree metadata on an inttoptr asserts the result is never freed.
  if (auto *ITP = dyn_cast<IntToPtrInst>(Val: this);
      ITP && ITP->hasMetadata(KindID: LLVMContext::MD_nofree))
    return false;

  const Function *F = nullptr;
  if (auto *I = dyn_cast<Instruction>(Val: this))
    F = I->getFunction();
  if (auto *A = dyn_cast<Argument>(Val: this))
    F = A->getParent();

  if (!F)
    return true;

  // With garbage collection, deallocation typically occurs solely at or after
  // safepoints. If we're compiling for a collector which uses the
  // gc.statepoint infrastructure, safepoints aren't explicitly present
  // in the IR until after lowering from abstract to physical machine model.
  // The collector could chose to mix explicit deallocation and gc'd objects
  // which is why we need the explicit opt in on a per collector basis.
  if (!F->hasGC())
    return true;

  const auto &GCName = F->getGC();
  if (GCName == "statepoint-example") {
    auto *PT = cast<PointerType>(Val: this->getType());
    if (PT->getAddressSpace() != 1)
      // For the sake of this example GC, we arbitrarily pick addrspace(1) as
      // our GC managed heap. This must match the same check in
      // RewriteStatepointsForGC (and probably needs better factored.)
      return true;

    // It is cheaper to scan for a declaration than to scan for a use in this
    // function. Note that gc.statepoint is a type overloaded function so the
    // usual trick of requesting declaration of the intrinsic from the module
    // doesn't work.
    for (auto &Fn : *F->getParent())
      if (Fn.getIntrinsicID() == Intrinsic::experimental_gc_statepoint)
        return true;
    // No statepoint use in the module: nothing frees before lowering.
    return false;
  }
  return true;
}
894
/// Compute how many bytes are known dereferenceable through this pointer,
/// derived from argument/call attributes, load/inttoptr metadata, alloca and
/// global sizes. \p CanBeNull and \p CanBeFreed report caveats on the fact:
/// whether the pointer may be null (dereferenceable_or_null sources), and
/// whether the pointee could be freed before every later use (only relevant
/// under -use-dereferenceable-at-point-semantics).
uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                               bool &CanBeNull,
                                               bool &CanBeFreed) const {
  assert(getType()->isPointerTy() && "must be pointer");

  uint64_t DerefBytes = 0;
  CanBeNull = false;
  CanBeFreed = UseDerefAtPointSemantics && canBeFreed();
  if (const Argument *A = dyn_cast<Argument>(Val: this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0) {
      // Handle byval/byref/inalloca/preallocated arguments
      if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
        if (ArgMemTy->isSized()) {
          // FIXME: Why isn't this the type alloc size?
          DerefBytes = DL.getTypeStoreSize(Ty: ArgMemTy).getKnownMinValue();
        }
      }
    }

    // Fall back to dereferenceable_or_null, which weakens the guarantee.
    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const auto *Call = dyn_cast<CallBase>(Val: this)) {
    DerefBytes = Call->getRetDereferenceableBytes();
    if (DerefBytes == 0) {
      DerefBytes = Call->getRetDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(Val: this)) {
    // !dereferenceable / !dereferenceable_or_null carry the byte count as
    // their sole i64 operand.
    if (MDNode *MD = LI->getMetadata(KindID: LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(KindID: LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *IP = dyn_cast<IntToPtrInst>(Val: this)) {
    if (MDNode *MD = IP->getMetadata(KindID: LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              IP->getMetadata(KindID: LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *AI = dyn_cast<AllocaInst>(Val: this)) {
    // Allocas of known size are non-null and live for the whole frame.
    if (std::optional<TypeSize> Size = AI->getAllocationSize(DL)) {
      DerefBytes = Size->getKnownMinValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
  } else if (auto *GV = dyn_cast<GlobalVariable>(Val: this)) {
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
      DerefBytes = DL.getTypeStoreSize(Ty: GV->getValueType()).getFixedValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
  }
  return DerefBytes;
}
968
// Determine a known minimum alignment for this pointer value, based on its
// kind: function-pointer alignment from the data layout, global/argument
// alignment attributes (with sret and preferred-align fallbacks), alloca
// alignment, call-site return attributes, !align metadata on loads, or the
// trailing zero bits of an inttoptr'd constant integer.  Returns Align(1)
// when nothing better is known.
Align Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer");
  if (const Function *F = dyn_cast<Function>(Val: this)) {
    Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
    switch (DL.getFunctionPtrAlignType()) {
    case DataLayout::FunctionPtrAlignType::Independent:
      return FunctionPtrAlign;
    case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
      // Function pointers are at least as aligned as the function itself.
      return std::max(a: FunctionPtrAlign, b: F->getAlign().valueOrOne());
    }
    llvm_unreachable("Unhandled FunctionPtrAlignType");
  } else if (auto *GVar = dyn_cast<GlobalVariable>(Val: this)) {
    const MaybeAlign Alignment(GVar->getAlign());
    if (!Alignment) {
      Type *ObjectType = GVar->getValueType();
      if (ObjectType->isSized()) {
        // If the object is defined in the current Module, we'll be giving
        // it the preferred alignment. Otherwise, we have to assume that it
        // may only have the minimum ABI alignment.
        if (GVar->isStrongDefinitionForLinker())
          return DL.getPreferredAlign(GV: GVar);
        else
          return DL.getABITypeAlign(Ty: ObjectType);
      }
    }
    return Alignment.valueOrOne();
  } else if (const Argument *A = dyn_cast<Argument>(Val: this)) {
    const MaybeAlign Alignment = A->getParamAlign();
    if (!Alignment && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = A->getParamStructRetType();
      if (EltTy->isSized())
        return DL.getABITypeAlign(Ty: EltTy);
    }
    return Alignment.valueOrOne();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: this)) {
    return AI->getAlign();
  } else if (const auto *Call = dyn_cast<CallBase>(Val: this)) {
    MaybeAlign Alignment = Call->getRetAlign();
    // If the call site carries no alignment, fall back to the alignment
    // attribute on the callee's declared return value, if any.
    if (!Alignment && Call->getCalledFunction())
      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
    return Alignment.valueOrOne();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(Val: this)) {
    if (MDNode *MD = LI->getMetadata(KindID: LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0));
      return Align(CI->getLimitedValue());
    }
  } else if (auto *CE = dyn_cast<ConstantExpr>(Val: this)) {
    // Determine the alignment of inttoptr(C).
    if (CE->getOpcode() == Instruction::IntToPtr &&
        isa<ConstantInt>(Val: CE->getOperand(i_nocapture: 0))) {
      ConstantInt *IntPtr = cast<ConstantInt>(Val: CE->getOperand(i_nocapture: 0));
      size_t TrailingZeros = IntPtr->getValue().countr_zero();
      // While the actual alignment may be large, elsewhere we have
      // an arbitrary upper alignment limit, so let's clamp to it.
      return Align(TrailingZeros < Value::MaxAlignmentExponent
                       ? uint64_t(1) << TrailingZeros
                       : Value::MaximumAlignment);
    }
  }
  return Align(1);
}
1031
1032static std::optional<int64_t>
1033getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
1034 // Skip over the first indices.
1035 gep_type_iterator GTI = gep_type_begin(GEP);
1036 for (unsigned i = 1; i != Idx; ++i, ++GTI)
1037 /*skip along*/;
1038
1039 // Compute the offset implied by the rest of the indices.
1040 int64_t Offset = 0;
1041 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
1042 ConstantInt *OpC = dyn_cast<ConstantInt>(Val: GEP->getOperand(i_nocapture: i));
1043 if (!OpC)
1044 return std::nullopt;
1045 if (OpC->isZero())
1046 continue; // No offset.
1047
1048 // Handle struct indices, which add their field offset to the pointer.
1049 if (StructType *STy = GTI.getStructTypeOrNull()) {
1050 Offset += DL.getStructLayout(Ty: STy)->getElementOffset(Idx: OpC->getZExtValue());
1051 continue;
1052 }
1053
1054 // Otherwise, we have a sequential type like an array or fixed-length
1055 // vector. Multiply the index by the ElementSize.
1056 TypeSize Size = GTI.getSequentialElementStride(DL);
1057 if (Size.isScalable())
1058 return std::nullopt;
1059 Offset += Size.getFixedValue() * OpC->getSExtValue();
1060 }
1061
1062 return Offset;
1063}
1064
1065std::optional<int64_t> Value::getPointerOffsetFrom(const Value *Other,
1066 const DataLayout &DL) const {
1067 const Value *Ptr1 = Other;
1068 const Value *Ptr2 = this;
1069 APInt Offset1(DL.getIndexTypeSizeInBits(Ty: Ptr1->getType()), 0);
1070 APInt Offset2(DL.getIndexTypeSizeInBits(Ty: Ptr2->getType()), 0);
1071 Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset&: Offset1, AllowNonInbounds: true);
1072 Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset&: Offset2, AllowNonInbounds: true);
1073
1074 // Handle the trivial case first.
1075 if (Ptr1 == Ptr2)
1076 return Offset2.getSExtValue() - Offset1.getSExtValue();
1077
1078 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Val: Ptr1);
1079 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Val: Ptr2);
1080
1081 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
1082 // base. After that base, they may have some number of common (and
1083 // potentially variable) indices. After that they handle some constant
1084 // offset, which determines their offset from each other. At this point, we
1085 // handle no other case.
1086 if (!GEP1 || !GEP2 || GEP1->getOperand(i_nocapture: 0) != GEP2->getOperand(i_nocapture: 0) ||
1087 GEP1->getSourceElementType() != GEP2->getSourceElementType())
1088 return std::nullopt;
1089
1090 // Skip any common indices and track the GEP types.
1091 unsigned Idx = 1;
1092 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
1093 if (GEP1->getOperand(i_nocapture: Idx) != GEP2->getOperand(i_nocapture: Idx))
1094 break;
1095
1096 auto IOffset1 = getOffsetFromIndex(GEP: GEP1, Idx, DL);
1097 auto IOffset2 = getOffsetFromIndex(GEP: GEP2, Idx, DL);
1098 if (!IOffset1 || !IOffset2)
1099 return std::nullopt;
1100 return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
1101 Offset1.getSExtValue();
1102}
1103
1104const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
1105 const BasicBlock *PredBB) const {
1106 auto *PN = dyn_cast<PHINode>(Val: this);
1107 if (PN && PN->getParent() == CurBB)
1108 return PN->getIncomingValueForBlock(BB: PredBB);
1109 return this;
1110}
1111
// Reverse the order of this value's use list in place, maintaining each
// Use's Prev back-pointer (which addresses the forward link that points at
// that Use).
void Value::reverseUseList() {
  if (!UseList || !UseList->Next)
    // No need to reverse 0 or 1 uses.
    return;

  // Classic singly-linked-list reversal: peel nodes off the old list and
  // push them onto the front of the reversed list, fixing Prev as we go.
  Use *Head = UseList;
  Use *Current = UseList->Next;
  Head->Next = nullptr; // The old head becomes the new tail.
  while (Current) {
    Use *Next = Current->Next;
    Current->Next = Head;
    Head->Prev = &Current->Next;
    Head = Current;
    Current = Next;
  }
  UseList = Head;
  Head->Prev = &UseList; // The new head is linked from the list root.
}
1130
1131bool Value::isSwiftError() const {
1132 auto *Arg = dyn_cast<Argument>(Val: this);
1133 if (Arg)
1134 return Arg->hasSwiftErrorAttr();
1135 auto *Alloca = dyn_cast<AllocaInst>(Val: this);
1136 if (!Alloca)
1137 return false;
1138 return Alloca->isSwiftError();
1139}
1140
1141//===----------------------------------------------------------------------===//
1142// ValueHandleBase Class
1143//===----------------------------------------------------------------------===//
1144
// Push this handle onto the front of the handle list rooted at *List,
// fixing up the displaced head's back-pointer.
void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
  assert(List && "Handle list is null?");

  // Splice ourselves into the list.
  Next = *List;
  *List = this;
  setPrevPtr(List);
  if (Next) {
    // The old head is now reached through our Next link.
    Next->setPrevPtr(&Next);
    assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
  }
}
1157
// Splice this handle into a handle list immediately after the given node,
// maintaining both nodes' back-pointers.
void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
  assert(List && "Must insert after existing node");

  Next = List->Next;
  setPrevPtr(&List->Next);
  List->Next = this;
  if (Next)
    Next->setPrevPtr(&Next);
}
1167
// Register this handle on the handle list of its current value, creating
// the context-wide ValueHandles map entry when this is the value's first
// handle and repairing stale back-pointers if the map reallocates.
void ValueHandleBase::AddToUseList() {
  assert(getValPtr() && "Null pointer doesn't have a use list!");

  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;

  if (getValPtr()->HasValueHandle) {
    // If this value already has a ValueHandle, then it must be in the
    // ValueHandles map already.
    ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
    assert(Entry && "Value doesn't have any handles?");
    AddToExistingUseList(List: &Entry);
    return;
  }

  // Ok, it doesn't have any handles yet, so we must insert it into the
  // DenseMap. However, doing this insertion could cause the DenseMap to
  // reallocate itself, which would invalidate all of the PrevP pointers that
  // point into the old table. Handle this by checking for reallocation and
  // updating the stale pointers only if needed.
  DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
  const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();

  ValueHandleBase *&Entry = Handles[getValPtr()];
  assert(!Entry && "Value really did already have handles?");
  AddToExistingUseList(List: &Entry);
  getValPtr()->HasValueHandle = true;

  // If reallocation didn't happen or if this was the first insertion, don't
  // walk the table.
  if (Handles.isPointerIntoBucketsArray(Ptr: OldBucketPtr) ||
      Handles.size() == 1) {
    return;
  }

  // Okay, reallocation did happen. Fix the Prev Pointers.
  for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
       E = Handles.end(); I != E; ++I) {
    assert(I->second && I->first == I->second->getValPtr() &&
           "List invariant broken!");
    // Repoint each list head's Prev at its (possibly moved) map slot.
    I->second->setPrevPtr(&I->second);
  }
}
1210
// Unlink this handle from its value's handle list; if it was the last
// handle watching the value, also erase the value's entry from the
// context's ValueHandles map and clear its HasValueHandle bit.
void ValueHandleBase::RemoveFromUseList() {
  assert(getValPtr() && getValPtr()->HasValueHandle &&
         "Pointer doesn't have a use list!");

  // Unlink this from its use list.
  ValueHandleBase **PrevPtr = getPrevPtr();
  assert(*PrevPtr == this && "List invariant broken");

  *PrevPtr = Next;
  if (Next) {
    assert(Next->getPrevPtr() == &Next && "List invariant broken");
    Next->setPrevPtr(PrevPtr);
    return;
  }

  // If the Next pointer was null, then it is possible that this was the last
  // ValueHandle watching VP. If so, delete its entry from the ValueHandles
  // map.
  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
  DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
  // PrevPtr points into the map's bucket array only for the list head, i.e.
  // only when this was the sole remaining handle for the value.
  if (Handles.isPointerIntoBucketsArray(Ptr: PrevPtr)) {
    Handles.erase(Val: getValPtr());
    getValPtr()->HasValueHandle = false;
  }
}
1236
// Notify every handle watching V that V is being deleted: Weak and
// WeakTracking handles are nulled out (unlinking them), CallbackVH
// subclasses get deleted() invoked, and any handle still present
// afterwards (notably an AssertingVH) triggers a fatal error.
void ValueHandleBase::ValueIsDeleted(Value *V) {
  assert(V->HasValueHandle && "Should only be called if ValueHandles present");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = V->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[V];
  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that ValueHandles can add
  // and remove themselves from the list without breaking our iteration. This
  // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
  // Note that we deliberately do not the support the case when dropping a value
  // handle results in a new value handle being permanently added to the list
  // (as might occur in theory for CallbackVH's): the new value handle will not
  // be processed and the checking code will mete out righteous punishment if
  // the handle is still present once we have finished processing all the other
  // value handles (it is fine to momentarily add then remove a value handle).
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    // Park the sentinel Iterator directly behind Entry so that whatever
    // Entry's notification does to the list, Iterator.Next still resumes
    // the walk at the right place.
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(List: Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
      break;
    case Weak:
    case WeakTracking:
      // WeakTracking and Weak just go to null, which unlinks them
      // from the list.
      Entry->operator=(RHS: nullptr);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->deleted();
      break;
    }
  }

  // All callbacks, weak references, and assertingVHs should be dropped by now.
  if (V->HasValueHandle) {
#ifndef NDEBUG // Only in +Asserts mode...
    dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
           << "\n";
    if (pImpl->ValueHandles[V]->getKind() == Assert)
      llvm_unreachable("An asserting value handle still pointed to this"
                       " value!");

#endif
    llvm_unreachable("All references to V were not removed?");
  }
}
1289
// Notify every handle watching Old that all uses of Old were replaced with
// New: WeakTracking handles are retargeted to New (unlinking them from
// Old's list) and CallbackVH subclasses get allUsesReplacedWith(New);
// Assert and Weak handles deliberately stay pointed at Old.
void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
  assert(Old->HasValueHandle &&"Should only be called if ValueHandles present");
  assert(Old != New && "Changing value into itself!");
  assert(Old->getType() == New->getType() &&
         "replaceAllUses of value with new value of different type!");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = Old->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[Old];

  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that
  // ValueHandles can add and remove themselves from the list without
  // breaking our iteration. This is not really an AssertingVH; we
  // just have to give ValueHandleBase some kind.
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    // Park the sentinel Iterator directly behind Entry so the walk survives
    // whatever list surgery Entry's notification performs.
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(List: Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
    case Weak:
      // Asserting and Weak handles do not follow RAUW implicitly.
      break;
    case WeakTracking:
      // Weak goes to the new value, which will unlink it from Old's list.
      Entry->operator=(RHS: New);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
      break;
    }
  }

#ifndef NDEBUG
  // If any new weak value handles were added while processing the
  // list, then complain about it now.
  if (Old->HasValueHandle)
    for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
      switch (Entry->getKind()) {
      case WeakTracking:
        dbgs() << "After RAUW from " << *Old->getType() << " %"
               << Old->getName() << " to " << *New->getType() << " %"
               << New->getName() << "\n";
        llvm_unreachable(
            "A weak tracking value handle still pointed to the old value!\n");
      default:
        break;
      }
#endif
}
1345
// Pin the vtable to this file: defining one virtual method out-of-line
// gives CallbackVH's vtable a single home translation unit.
void CallbackVH::anchor() {}
1348