| 1 | //===-- Value.cpp - Implement the Value class -----------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements the Value, ValueHandle, and User classes. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #include "llvm/IR/Value.h" |
| 14 | #include "LLVMContextImpl.h" |
| 15 | #include "llvm/ADT/DenseMap.h" |
| 16 | #include "llvm/ADT/SmallString.h" |
| 17 | #include "llvm/IR/Constant.h" |
| 18 | #include "llvm/IR/Constants.h" |
| 19 | #include "llvm/IR/DataLayout.h" |
| 20 | #include "llvm/IR/DebugInfo.h" |
| 21 | #include "llvm/IR/DerivedTypes.h" |
| 22 | #include "llvm/IR/DerivedUser.h" |
| 23 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
| 24 | #include "llvm/IR/InstrTypes.h" |
| 25 | #include "llvm/IR/Instructions.h" |
| 26 | #include "llvm/IR/IntrinsicInst.h" |
| 27 | #include "llvm/IR/Module.h" |
| 28 | #include "llvm/IR/Operator.h" |
| 29 | #include "llvm/IR/TypedPointerType.h" |
| 30 | #include "llvm/IR/ValueHandle.h" |
| 31 | #include "llvm/IR/ValueSymbolTable.h" |
| 32 | #include "llvm/Support/CommandLine.h" |
| 33 | #include "llvm/Support/ErrorHandling.h" |
| 34 | #include "llvm/Support/raw_ostream.h" |
| 35 | #include <algorithm> |
| 36 | |
| 37 | using namespace llvm; |
| 38 | |
// Command-line escape hatch: when enabled, dereferenceable attributes and
// metadata are treated as facts established only at the point of definition,
// rather than at every program point where the pointer is used.
cl::opt<bool> UseDerefAtPointSemantics(
    "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false),
    cl::desc("Deref attributes and metadata infer facts at definition only"));
| 42 | |
| 43 | //===----------------------------------------------------------------------===// |
| 44 | // Value Class |
| 45 | //===----------------------------------------------------------------------===// |
/// Validate a type used to define a Value.
///
/// Returns \p Ty unchanged so this can wrap a member initializer. Asserts
/// that the type is non-null and does not involve a TypedPointerType, which
/// is only legal in external representations, never for IR values.
static inline Type *checkType(Type *Ty) {
  assert(Ty && "Value defined with a null type: Error!");
  assert(!isa<TypedPointerType>(Ty->getScalarType()) &&
         "Cannot have values with typed pointer types");
  return Ty;
}
| 52 | |
Value::Value(Type *ty, unsigned scid)
    : SubclassID(scid), HasValueHandle(0), SubclassOptionalData(0),
      SubclassData(0), NumUserOperands(0), IsUsedByMD(false), HasName(false),
      HasMetadata(false), VTy(checkType(ty)) {
  static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
  // FIXME: Why isn't this in the subclass gunk??
  // Note, we cannot call isa<CallInst> before the CallInst has been
  // constructed.
  // Subclass IDs at or above InstructionVal encode InstructionVal + opcode;
  // recover the opcode (0 for non-instructions, which never collides with
  // Call/Invoke/CallBr below because those opcodes are nonzero).
  unsigned OpCode = 0;
  if (SubclassID >= InstructionVal)
    OpCode = SubclassID - InstructionVal;
  if (OpCode == Instruction::Call || OpCode == Instruction::Invoke ||
      OpCode == Instruction::CallBr)
    assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
           "invalid CallBase type!");
  else if (SubclassID != BasicBlockVal &&
           (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
    assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
           "Cannot create non-first-class values except for constants!");
  // Keep Value small: two pointers (VTy + use list) plus two words of bits.
  static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
                "Value too big");
}
| 75 | |
Value::~Value() {
  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsDeleted(this);
  // Likewise notify ValueAsMetadata wrappers tracking this value.
  if (isUsedByMetadata())
    ValueAsMetadata::handleDeletion(this);

  // Remove associated metadata from context.
  if (HasMetadata)
    clearMetadata();

#ifndef NDEBUG // Only in -g mode...
  // Check to make sure that there are no uses of this value that are still
  // around when the value is destroyed. If there are, then we have a dangling
  // reference and something is wrong. This code is here to print out where
  // the value is still being referenced.
  //
  // Note that use_empty() cannot be called here, as it eventually downcasts
  // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
  // been destructed, so accessing it is UB.
  //
  if (!materialized_use_empty()) {
    dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
    for (auto *U : users())
      dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";

    llvm_unreachable("Uses remain when a value is destroyed!");
  }
#endif

  // If this value is named, destroy the name. This should not be in a symtab
  // at this point.
  destroyValueName();
}
| 110 | |
/// Delete this value through the correct concrete type.
///
/// Value deliberately has no virtual destructor (to stay small), so deletion
/// switches on getValueID() and static_casts to the real subclass. The case
/// list is generated from Value.def / Instruction.def X-macros.
void Value::deleteValue() {
  switch (getValueID()) {
  // Ordinary values: cast to the concrete class and delete.
#define HANDLE_VALUE(Name) \
  case Value::Name##Val: \
    delete static_cast<Name *>(this); \
    break;
  // Memory values (MemorySSA) live outside libIR; route deletion through the
  // DeleteValue callback stored on DerivedUser.
#define HANDLE_MEMORY_VALUE(Name) \
  case Value::Name##Val: \
    static_cast<DerivedUser *>(this)->DeleteValue( \
        static_cast<DerivedUser *>(this)); \
    break;
  // Constants are uniqued and must be released via destroyConstant instead.
#define HANDLE_CONSTANT(Name) \
  case Value::Name##Val: \
    llvm_unreachable("constants should be destroyed with destroyConstant"); \
    break;
#define HANDLE_INSTRUCTION(Name) /* nothing */
#include "llvm/IR/Value.def"

  // Instructions: one case per opcode, offset from InstructionVal.
#define HANDLE_INST(N, OPC, CLASS) \
  case Value::InstructionVal + Instruction::OPC: \
    delete static_cast<CLASS *>(this); \
    break;
#define HANDLE_USER_INST(N, OPC, CLASS)
#include "llvm/IR/Instruction.def"

  default:
    llvm_unreachable("attempting to delete unknown value kind");
  }
}
| 140 | |
| 141 | void Value::destroyValueName() { |
| 142 | ValueName *Name = getValueName(); |
| 143 | if (Name) { |
| 144 | MallocAllocator Allocator; |
| 145 | Name->Destroy(allocator&: Allocator); |
| 146 | } |
| 147 | setValueName(nullptr); |
| 148 | } |
| 149 | |
| 150 | bool Value::hasNUses(unsigned N) const { |
| 151 | if (!UseList) |
| 152 | return N == 0; |
| 153 | |
| 154 | // TODO: Disallow for ConstantData and remove !UseList check? |
| 155 | return hasNItems(Begin: use_begin(), End: use_end(), N); |
| 156 | } |
| 157 | |
| 158 | bool Value::hasNUsesOrMore(unsigned N) const { |
| 159 | // TODO: Disallow for ConstantData and remove !UseList check? |
| 160 | if (!UseList) |
| 161 | return N == 0; |
| 162 | |
| 163 | return hasNItemsOrMore(Begin: use_begin(), End: use_end(), N); |
| 164 | } |
| 165 | |
| 166 | bool Value::hasOneUser() const { |
| 167 | if (use_empty()) |
| 168 | return false; |
| 169 | if (hasOneUse()) |
| 170 | return true; |
| 171 | return std::equal(first1: ++user_begin(), last1: user_end(), first2: user_begin()); |
| 172 | } |
| 173 | |
/// Predicate for hasNItems/hasNItemsOrMore: count only non-droppable users.
static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }
| 175 | |
| 176 | Use *Value::getSingleUndroppableUse() { |
| 177 | Use *Result = nullptr; |
| 178 | for (Use &U : uses()) { |
| 179 | if (!U.getUser()->isDroppable()) { |
| 180 | if (Result) |
| 181 | return nullptr; |
| 182 | Result = &U; |
| 183 | } |
| 184 | } |
| 185 | return Result; |
| 186 | } |
| 187 | |
| 188 | User *Value::getUniqueUndroppableUser() { |
| 189 | User *Result = nullptr; |
| 190 | for (auto *U : users()) { |
| 191 | if (!U->isDroppable()) { |
| 192 | if (Result && Result != U) |
| 193 | return nullptr; |
| 194 | Result = U; |
| 195 | } |
| 196 | } |
| 197 | return Result; |
| 198 | } |
| 199 | |
/// Return true if exactly \p N users of this value are not droppable.
bool Value::hasNUndroppableUses(unsigned int N) const {
  return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
}
| 203 | |
/// Return true if at least \p N users of this value are not droppable.
bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
  return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
}
| 207 | |
| 208 | void Value::dropDroppableUses( |
| 209 | llvm::function_ref<bool(const Use *)> ShouldDrop) { |
| 210 | SmallVector<Use *, 8> ToBeEdited; |
| 211 | for (Use &U : uses()) |
| 212 | if (U.getUser()->isDroppable() && ShouldDrop(&U)) |
| 213 | ToBeEdited.push_back(Elt: &U); |
| 214 | for (Use *U : ToBeEdited) |
| 215 | dropDroppableUse(U&: *U); |
| 216 | } |
| 217 | |
| 218 | void Value::dropDroppableUsesIn(User &Usr) { |
| 219 | assert(Usr.isDroppable() && "Expected a droppable user!" ); |
| 220 | for (Use &UsrOp : Usr.operands()) { |
| 221 | if (UsrOp.get() == this) |
| 222 | dropDroppableUse(U&: UsrOp); |
| 223 | } |
| 224 | } |
| 225 | |
/// Remove the droppable use \p U without deleting its user.
///
/// The only droppable users handled here are llvm.assume calls: dropping the
/// condition operand rewrites it to 'true' (a no-op assumption), while
/// dropping an operand-bundle operand replaces it with poison and retags the
/// bundle as "ignore" so later passes skip it.
void Value::dropDroppableUse(Use &U) {
  if (auto *Assume = dyn_cast<AssumeInst>(U.getUser())) {
    unsigned OpNo = U.getOperandNo();
    if (OpNo == 0)
      // Operand 0 is the assumed condition itself.
      U.set(ConstantInt::getTrue(Assume->getContext()));
    else {
      // Bundle operand: poison the value and mark its bundle ignored.
      U.set(PoisonValue::get(U.get()->getType()));
      CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
      BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag("ignore");
    }
    return;
  }

  llvm_unreachable("unknown droppable use");
}
| 241 | |
/// Return true if any instruction in \p BB uses this value.
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
  assert(hasUseList() && "ConstantData has no use-list");

  // This can be computed either by scanning the instructions in BB, or by
  // scanning the use list of this Value. Both lists can be very long, but
  // usually one is quite short.
  //
  // Scan both lists simultaneously until one is exhausted. This limits the
  // search to the shorter list.
  BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
  const_user_iterator UI = user_begin(), UE = user_end();
  for (; BI != BE && UI != UE; ++BI, ++UI) {
    // Scan basic block: Check if this Value is used by the instruction at BI.
    if (is_contained(BI->operands(), this))
      return true;
    // Scan use list: Check if the use at UI is in BB.
    const auto *User = dyn_cast<Instruction>(*UI);
    if (User && User->getParent() == BB)
      return true;
  }
  // One list ran out without a hit, so every remaining candidate on the other
  // list was already ruled out by the paired scan.
  return false;
}
| 264 | |
| 265 | unsigned Value::getNumUses() const { |
| 266 | // TODO: Disallow for ConstantData and remove !UseList check? |
| 267 | if (!UseList) |
| 268 | return 0; |
| 269 | return (unsigned)std::distance(first: use_begin(), last: use_end()); |
| 270 | } |
| 271 | |
/// Locate the symbol table that would hold \p V's name.
///
/// Sets \p ST to that table, or to nullptr when \p V is not inserted into a
/// parent yet. Returns true when \p V is a kind of value that can never have
/// a symbol-table name (a non-global Constant).
static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
  ST = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Instruction names live in the enclosing function's table.
    if (BasicBlock *P = I->getParent())
      if (Function *PP = P->getParent())
        ST = PP->getValueSymbolTable();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
    // Block names also live in the function's table.
    if (Function *P = BB->getParent())
      ST = P->getValueSymbolTable();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Global names live in the module's table.
    if (Module *P = GV->getParent())
      ST = &P->getValueSymbolTable();
  } else if (Argument *A = dyn_cast<Argument>(V)) {
    if (Function *P = A->getParent())
      ST = P->getValueSymbolTable();
  } else {
    assert(isa<Constant>(V) && "Unknown value type!");
    return true; // no name is setable for this.
  }
  return false;
}
| 293 | |
| 294 | ValueName *Value::getValueName() const { |
| 295 | if (!HasName) return nullptr; |
| 296 | |
| 297 | LLVMContext &Ctx = getContext(); |
| 298 | auto I = Ctx.pImpl->ValueNames.find(Val: this); |
| 299 | assert(I != Ctx.pImpl->ValueNames.end() && |
| 300 | "No name entry found!" ); |
| 301 | |
| 302 | return I->second; |
| 303 | } |
| 304 | |
| 305 | void Value::setValueName(ValueName *VN) { |
| 306 | LLVMContext &Ctx = getContext(); |
| 307 | |
| 308 | assert(HasName == Ctx.pImpl->ValueNames.count(this) && |
| 309 | "HasName bit out of sync!" ); |
| 310 | |
| 311 | if (!VN) { |
| 312 | if (HasName) |
| 313 | Ctx.pImpl->ValueNames.erase(Val: this); |
| 314 | HasName = false; |
| 315 | return; |
| 316 | } |
| 317 | |
| 318 | HasName = true; |
| 319 | Ctx.pImpl->ValueNames[this] = VN; |
| 320 | } |
| 321 | |
/// Return this value's name, or the empty string if it is unnamed.
StringRef Value::getName() const {
  // Make sure the empty string is still a C string. For historical reasons,
  // some clients want to call .data() on the result and expect it to be null
  // terminated.
  if (!hasName())
    return StringRef("", 0);
  return getValueName()->getKey();
}
| 330 | |
/// Shared implementation of setName(): update the name entry and whichever
/// symbol table (if any) tracks this value.
void Value::setNameImpl(const Twine &NewName) {
  // Non-global names may be discarded entirely when the context is configured
  // to save memory; globals always keep their names (they affect linkage).
  bool NeedNewName =
      !getContext().shouldDiscardValueNames() || isa<GlobalValue>(this);

  // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
  // and there is no need to delete the old name.
  if (!NeedNewName && !hasName())
    return;

  // Fast path for common IRBuilder case of setName("") when there is no name.
  if (NewName.isTriviallyEmpty() && !hasName())
    return;

  SmallString<256> NameData;
  StringRef NameRef = NeedNewName ? NewName.toStringRef(NameData) : "";
  assert(!NameRef.contains(0) && "Null bytes are not allowed in names");

  // Name isn't changing?
  if (getName() == NameRef)
    return;

  assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");

  // Get the symbol table to update for this object.
  ValueSymbolTable *ST;
  if (getSymTab(this, ST))
    return; // Cannot set a name on this value (e.g. constant).

  if (!ST) { // No symbol table to update? Just do the change.
    // NOTE: Could optimize for the case the name is shrinking to not deallocate
    // then reallocated.
    destroyValueName();

    if (!NameRef.empty()) {
      // Create the new name.
      assert(NeedNewName);
      MallocAllocator Allocator;
      setValueName(ValueName::create(NameRef, Allocator));
      // Back-link the entry to this value so symtab lookups can recover it.
      getValueName()->setValue(this);
    }
    return;
  }

  // NOTE: Could optimize for the case the name is shrinking to not deallocate
  // then reallocated.
  if (hasName()) {
    // Remove old name.
    ST->removeValueName(getValueName());
    destroyValueName();

    if (NameRef.empty())
      return;
  }

  // Name is changing to something new. The symbol table uniquifies it if it
  // collides with an existing name.
  assert(NeedNewName);
  setValueName(ST->createValueName(NameRef, this));
}
| 389 | |
| 390 | void Value::setName(const Twine &NewName) { |
| 391 | setNameImpl(NewName); |
| 392 | if (Function *F = dyn_cast<Function>(Val: this)) |
| 393 | F->updateAfterNameChange(); |
| 394 | } |
| 395 | |
/// Steal \p V's name for this value, leaving \p V unnamed.
///
/// Handles every combination of this/V being in the same, different, or no
/// symbol tables, and the cases where one of them cannot carry a name at all.
void Value::takeName(Value *V) {
  assert(V != this && "Illegal call to this->takeName(this)!");
  ValueSymbolTable *ST = nullptr;
  // If this value has a name, drop it.
  if (hasName()) {
    // Get the symtab this is in.
    if (getSymTab(this, ST)) {
      // We can't set a name on this value, but we need to clear V's name if
      // it has one.
      if (V->hasName()) V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }

    // Remove old name.
    if (ST)
      ST->removeValueName(getValueName());
    destroyValueName();
  }

  // Now we know that this has no name.

  // If V has no name either, we're done.
  if (!V->hasName()) return;

  // Get this's symtab if we didn't before.
  if (!ST) {
    if (getSymTab(this, ST)) {
      // Clear V's name.
      V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }
  }

  // Get V's ST, this should always succeed, because V has a name.
  ValueSymbolTable *VST;
  bool Failure = getSymTab(V, VST);
  assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;

  // If these values are both in the same symtab, we can do this very fast.
  // This works even if both values have no symtab yet.
  if (ST == VST) {
    // Take the name!
    setValueName(V->getValueName());
    V->setValueName(nullptr);
    // Re-point the stolen entry at its new owner.
    getValueName()->setValue(this);
    return;
  }

  // Otherwise, things are slightly more complex. Remove V's name from VST and
  // then reinsert it into ST.

  if (VST)
    VST->removeValueName(V->getValueName());
  setValueName(V->getValueName());
  V->setValueName(nullptr);
  getValueName()->setValue(this);

  // Reinsertion may uniquify the name if it collides in the new table.
  if (ST)
    ST->reinsertValue(this);
}
| 456 | |
| 457 | std::string Value::getNameOrAsOperand() const { |
| 458 | if (!getName().empty()) |
| 459 | return std::string(getName()); |
| 460 | |
| 461 | std::string BBName; |
| 462 | raw_string_ostream OS(BBName); |
| 463 | printAsOperand(O&: OS, PrintType: false); |
| 464 | return OS.str(); |
| 465 | } |
| 466 | |
/// Debug-only check that a GlobalValue's owning module is fully materialized.
///
/// A no-op for non-globals, for globals without a parent module, and in
/// NDEBUG builds.
void Value::assertModuleIsMaterializedImpl() const {
#ifndef NDEBUG
  const GlobalValue *GV = dyn_cast<GlobalValue>(this);
  if (!GV)
    return;
  const Module *M = GV->getParent();
  if (!M)
    return;
  assert(M->isMaterialized());
#endif
}
| 478 | |
| 479 | #ifndef NDEBUG |
| 480 | static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr, |
| 481 | Constant *C) { |
| 482 | if (!Cache.insert(Expr).second) |
| 483 | return false; |
| 484 | |
| 485 | for (auto &O : Expr->operands()) { |
| 486 | if (O == C) |
| 487 | return true; |
| 488 | auto *CE = dyn_cast<ConstantExpr>(O); |
| 489 | if (!CE) |
| 490 | continue; |
| 491 | if (contains(Cache, CE, C)) |
| 492 | return true; |
| 493 | } |
| 494 | return false; |
| 495 | } |
| 496 | |
| 497 | static bool contains(Value *Expr, Value *V) { |
| 498 | if (Expr == V) |
| 499 | return true; |
| 500 | |
| 501 | auto *C = dyn_cast<Constant>(V); |
| 502 | if (!C) |
| 503 | return false; |
| 504 | |
| 505 | auto *CE = dyn_cast<ConstantExpr>(Expr); |
| 506 | if (!CE) |
| 507 | return false; |
| 508 | |
| 509 | SmallPtrSet<ConstantExpr *, 4> Cache; |
| 510 | return contains(Cache, CE, C); |
| 511 | } |
| 512 | #endif // NDEBUG |
| 513 | |
/// Shared worker for replaceAllUsesWith / replaceNonMetadataUsesWith.
void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
  assert(hasUseList() && "Cannot replace constant data");
  assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceAllUsesWith(expr(this)) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceAllUses of value with new value of different type!");

  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsRAUWd(this, New);
  if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, New);

  // Peel uses off the head of the list; each iteration removes at least one
  // use from this value (U.set / handleOperandChange both unlink it).
  while (!materialized_use_empty()) {
    Use &U = *UseList;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        C->handleOperandChange(this, New);
        continue;
      }
    }

    U.set(New);
  }

  // Replacing a basic block also requires rewriting phi entries in its
  // successors, which refer to the block rather than through a Use of it.
  if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
    BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
}
| 545 | |
/// Replace every use of this value, including metadata uses, with \p New.
void Value::replaceAllUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::Yes);
}
| 549 | |
/// Replace every IR use of this value with \p New, leaving metadata
/// references (e.g. debug info) pointing at the old value.
void Value::replaceNonMetadataUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::No);
}
| 553 | |
/// Replace the uses of this value for which \p ShouldReplace returns true
/// with \p New.
void Value::replaceUsesWithIf(Value *New,
                              llvm::function_ref<bool(Use &U)> ShouldReplace) {
  assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");

  // Constant users are processed after the scan; TrackingVH keeps each one
  // valid even if an earlier handleOperandChange re-uniques it.
  SmallVector<TrackingVH<Constant>, 8> Consts;
  SmallPtrSet<Constant *, 8> Visited;

  // U.set() unlinks U from this value's use-list, so iterate with the
  // early-increment adaptor to stay safe.
  for (Use &U : llvm::make_early_inc_range(uses())) {
    if (!ShouldReplace(U))
      continue;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        if (Visited.insert(C).second)
          Consts.push_back(TrackingVH<Constant>(C));
        continue;
      }
    }
    U.set(New);
  }

  while (!Consts.empty()) {
    // FIXME: handleOperandChange() updates all the uses in a given Constant,
    // not just the one passed to ShouldReplace
    Consts.pop_back_val()->handleOperandChange(this, New);
  }
}
| 584 | |
/// Replace llvm.dbg.* uses of MetadataAsValue(ValueAsMetadata(V)) outside BB
/// with New.
static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB) {
  SmallVector<DbgVariableIntrinsic *> DbgUsers;
  SmallVector<DbgVariableRecord *> DPUsers;
  findDbgUsers(DbgUsers, V, &DPUsers);
  // Intrinsic-form debug users: skip the ones inside BB.
  for (auto *DVI : DbgUsers) {
    if (DVI->getParent() != BB)
      DVI->replaceVariableLocationOp(V, New);
  }
  // Record-form (DbgVariableRecord) users: their parent block is reached
  // through the marker they are attached to.
  for (auto *DVR : DPUsers) {
    DbgMarker *Marker = DVR->getMarker();
    if (Marker->getParent() != BB)
      DVR->replaceVariableLocationOp(V, New);
  }
}
| 601 | |
// Like replaceAllUsesWith except it does not handle constants or basic blocks.
// This routine leaves uses within BB.
void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
  assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");
  assert(BB && "Basic block that may contain a use of 'New' must be defined\n");

  // Debug-info references are not ordinary uses; update them separately.
  replaceDbgUsesOutsideBlock(this, New, BB);
  replaceUsesWithIf(New, [BB](Use &U) {
    auto *I = dyn_cast<Instruction>(U.getUser());
    // Don't replace if it's an instruction in the BB basic block.
    return !I || I->getParent() != BB;
  });
}
| 619 | |
namespace {
// Various metrics for how much to strip off of pointers.
enum PointerStripKind {
  PSK_ZeroIndices,                   // GEPs with all-zero indices.
  PSK_ZeroIndicesAndAliases,         // ... and also global aliases.
  PSK_ZeroIndicesSameRepresentation, // Zero indices, but keep addrspacecasts.
  PSK_ForAliasAnalysis,              // ... plus 1-arg phis & invariant.group.
  PSK_InBoundsConstantIndices,       // Inbounds GEPs with constant indices.
  PSK_InBounds                       // Any inbounds GEP.
};

// Default no-op visitor for stripPointerCastsAndOffsets.
template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}

/// Core worker behind the stripPointerCasts* family.
///
/// Walks the def chain from \p V, looking through whatever \p StripKind
/// permits, and calls \p Func on each value visited (including \p V itself).
/// A visited set makes the walk cycle-safe.
template <PointerStripKind StripKind>
static const Value *stripPointerCastsAndOffsets(
    const Value *V,
    function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
  if (!V->getType()->isPointerTy())
    return V;

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;

  Visited.insert(V);
  do {
    Func(V);
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      // Which GEPs may be stripped depends on the requested metric.
      switch (StripKind) {
      case PSK_ZeroIndices:
      case PSK_ZeroIndicesAndAliases:
      case PSK_ZeroIndicesSameRepresentation:
      case PSK_ForAliasAnalysis:
        if (!GEP->hasAllZeroIndices())
          return V;
        break;
      case PSK_InBoundsConstantIndices:
        if (!GEP->hasAllConstantIndices())
          return V;
        [[fallthrough]];
      case PSK_InBounds:
        if (!GEP->isInBounds())
          return V;
        break;
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      // Only look through bitcasts that stay within pointer types.
      Value *NewV = cast<Operator>(V)->getOperand(0);
      if (!NewV->getType()->isPointerTy())
        return V;
      V = NewV;
    } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      // TODO: If we know an address space cast will not change the
      // representation we could look through it here as well.
      V = cast<Operator>(V)->getOperand(0);
    } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
      V = cast<GlobalAlias>(V)->getAliasee();
    } else if (StripKind == PSK_ForAliasAnalysis && isa<PHINode>(V) &&
               cast<PHINode>(V)->getNumIncomingValues() == 1) {
      // A single-operand phi is transparent for aliasing purposes.
      V = cast<PHINode>(V)->getIncomingValue(0);
    } else {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // A call that returns one of its arguments ("returned" attribute) is
        // transparent here.
        if (const Value *RV = Call->getReturnedArgOperand()) {
          V = RV;
          continue;
        }
        // The result of launder.invariant.group must alias it's argument,
        // but it can't be marked with returned attribute, that's why it needs
        // special case.
        if (StripKind == PSK_ForAliasAnalysis &&
            (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
             Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
          V = Call->getArgOperand(0);
          continue;
        }
      }
      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  return V;
}
} // end anonymous namespace
| 705 | |
/// Strip no-op casts and all-zero-index GEPs (never aliases).
const Value *Value::stripPointerCasts() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
}
| 709 | |
/// Like stripPointerCasts, but additionally looks through global aliases.
const Value *Value::stripPointerCastsAndAliases() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
}
| 713 | |
/// Like stripPointerCasts, but stops at addrspacecasts, which may change the
/// pointer's bit representation.
const Value *Value::stripPointerCastsSameRepresentation() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
}
| 717 | |
/// Strip casts and inbounds GEPs whose indices are all constant.
const Value *Value::stripInBoundsConstantOffsets() const {
  return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}
| 721 | |
/// Aggressive variant for alias analysis: also looks through single-incoming
/// phis and launder/strip.invariant.group calls.
const Value *Value::stripPointerCastsForAliasAnalysis() const {
  return stripPointerCastsAndOffsets<PSK_ForAliasAnalysis>(this);
}
| 725 | |
/// Strip pointer casts while accumulating the total constant offset into
/// \p Offset (whose width must equal the index size for this pointer type).
///
/// Stops, returning the value reached so far, at any construct that cannot
/// be looked through under the given flags or whose offset would overflow
/// \p Offset.
const Value *Value::stripAndAccumulateConstantOffsets(
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool AllowInvariantGroup,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis,
    bool LookThroughIntToPtr) const {
  if (!getType()->isPtrOrPtrVectorTy())
    return this;

  unsigned BitWidth = Offset.getBitWidth();
  assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
         "The offset bit width does not match the DL specification.");

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;
  Visited.insert(this);
  const Value *V = this;
  do {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      // If in-bounds was requested, we do not strip non-in-bounds GEPs.
      if (!AllowNonInbounds && !GEP->isInBounds())
        return V;

      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
        return V;

      // Stop traversal if the pointer offset wouldn't fit in the bit-width
      // provided by the Offset argument. This can happen due to AddrSpaceCast
      // stripping.
      if (GEPOffset.getSignificantBits() > BitWidth)
        return V;

      // External Analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
      if (!ExternalAnalysis) {
        Offset += GEPOffsetST;
      } else {
        bool Overflow = false;
        APInt OldOffset = Offset;
        Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
        if (Overflow) {
          // Roll back and stop rather than report a wrapped offset.
          Offset = OldOffset;
          return V;
        }
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
      // Interposable aliases may be replaced at link time; do not look
      // through them (V stays put, so the Visited check ends the loop).
      if (!GA->isInterposable())
        V = GA->getAliasee();
    } else if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (const Value *RV = Call->getReturnedArgOperand())
        V = RV;
      if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
        V = Call->getArgOperand(0);
    } else if (auto *Int2Ptr = dyn_cast<Operator>(V)) {
      // Try to accumulate across (inttoptr (add (ptrtoint p), off)).
      if (!AllowNonInbounds || !LookThroughIntToPtr || !Int2Ptr ||
          Int2Ptr->getOpcode() != Instruction::IntToPtr ||
          Int2Ptr->getOperand(0)->getType()->getScalarSizeInBits() != BitWidth)
        return V;

      auto *Add = dyn_cast<AddOperator>(Int2Ptr->getOperand(0));
      if (!Add)
        return V;

      auto *Ptr2Int = dyn_cast<PtrToIntOperator>(Add->getOperand(0));
      auto *CI = dyn_cast<ConstantInt>(Add->getOperand(1));
      if (!Ptr2Int || !CI)
        return V;

      Offset += CI->getValue();
      V = Ptr2Int->getOperand(0);
    }
    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  return V;
}
| 815 | |
/// Strip inbounds GEP offsets and no-op pointer casts, invoking \p Func on
/// each value visited along the way. Thin wrapper that selects the
/// PSK_InBounds stripping policy.
const Value *
Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
  return stripPointerCastsAndOffsets<PSK_InBounds>(V: this, Func);
}
| 820 | |
/// Return true if the memory object this pointer refers to may be deallocated
/// during the execution of the program. Conservatively returns true unless a
/// specific rule below proves the object cannot be freed.
bool Value::canBeFreed() const {
  assert(getType()->isPointerTy());

  // Cases that can simply never be deallocated
  // *) Constants aren't allocated per se, thus not deallocated either.
  if (isa<Constant>(Val: this))
    return false;

  // Handle byval/byref/sret/inalloca/preallocated arguments. The storage
  // lifetime is guaranteed to be longer than the callee's lifetime.
  if (auto *A = dyn_cast<Argument>(Val: this)) {
    if (A->hasPointeeInMemoryValueAttr())
      return false;
    // A pointer to an object in a function which neither frees, nor can arrange
    // for another thread to free on its behalf, can not be freed in the scope
    // of the function. Note that this logic is restricted to memory
    // allocations in existence before the call; a nofree function *is* allowed
    // to free memory it allocated.
    const Function *F = A->getParent();
    if (F->doesNotFreeMemory() && F->hasNoSync())
      return false;
  }

  // Locate the enclosing function (if any) for the GC-specific reasoning
  // below; instructions and arguments both have one.
  const Function *F = nullptr;
  if (auto *I = dyn_cast<Instruction>(Val: this))
    F = I->getFunction();
  if (auto *A = dyn_cast<Argument>(Val: this))
    F = A->getParent();

  // No function context (e.g. a global): assume it may be freed.
  if (!F)
    return true;

  // With garbage collection, deallocation typically occurs solely at or after
  // safepoints. If we're compiling for a collector which uses the
  // gc.statepoint infrastructure, safepoints aren't explicitly present
  // in the IR until after lowering from abstract to physical machine model.
  // The collector could chose to mix explicit deallocation and gc'd objects
  // which is why we need the explicit opt in on a per collector basis.
  if (!F->hasGC())
    return true;

  const auto &GCName = F->getGC();
  if (GCName == "statepoint-example" ) {
    auto *PT = cast<PointerType>(Val: this->getType());
    if (PT->getAddressSpace() != 1)
      // For the sake of this example GC, we arbitrarily pick addrspace(1) as
      // our GC managed heap. This must match the same check in
      // RewriteStatepointsForGC (and probably needs better factored.)
      return true;

    // It is cheaper to scan for a declaration than to scan for a use in this
    // function. Note that gc.statepoint is a type overloaded function so the
    // usual trick of requesting declaration of the intrinsic from the module
    // doesn't work.
    for (auto &Fn : *F->getParent())
      if (Fn.getIntrinsicID() == Intrinsic::experimental_gc_statepoint)
        return true;
    // GC-managed pointer with no statepoints present: cannot be freed before
    // lowering inserts safepoints.
    return false;
  }
  return true;
}
| 882 | |
| 883 | uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL, |
| 884 | bool &CanBeNull, |
| 885 | bool &CanBeFreed) const { |
| 886 | assert(getType()->isPointerTy() && "must be pointer" ); |
| 887 | |
| 888 | uint64_t DerefBytes = 0; |
| 889 | CanBeNull = false; |
| 890 | CanBeFreed = UseDerefAtPointSemantics && canBeFreed(); |
| 891 | if (const Argument *A = dyn_cast<Argument>(Val: this)) { |
| 892 | DerefBytes = A->getDereferenceableBytes(); |
| 893 | if (DerefBytes == 0) { |
| 894 | // Handle byval/byref/inalloca/preallocated arguments |
| 895 | if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) { |
| 896 | if (ArgMemTy->isSized()) { |
| 897 | // FIXME: Why isn't this the type alloc size? |
| 898 | DerefBytes = DL.getTypeStoreSize(Ty: ArgMemTy).getKnownMinValue(); |
| 899 | } |
| 900 | } |
| 901 | } |
| 902 | |
| 903 | if (DerefBytes == 0) { |
| 904 | DerefBytes = A->getDereferenceableOrNullBytes(); |
| 905 | CanBeNull = true; |
| 906 | } |
| 907 | } else if (const auto *Call = dyn_cast<CallBase>(Val: this)) { |
| 908 | DerefBytes = Call->getRetDereferenceableBytes(); |
| 909 | if (DerefBytes == 0) { |
| 910 | DerefBytes = Call->getRetDereferenceableOrNullBytes(); |
| 911 | CanBeNull = true; |
| 912 | } |
| 913 | } else if (const LoadInst *LI = dyn_cast<LoadInst>(Val: this)) { |
| 914 | if (MDNode *MD = LI->getMetadata(KindID: LLVMContext::MD_dereferenceable)) { |
| 915 | ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0)); |
| 916 | DerefBytes = CI->getLimitedValue(); |
| 917 | } |
| 918 | if (DerefBytes == 0) { |
| 919 | if (MDNode *MD = |
| 920 | LI->getMetadata(KindID: LLVMContext::MD_dereferenceable_or_null)) { |
| 921 | ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0)); |
| 922 | DerefBytes = CI->getLimitedValue(); |
| 923 | } |
| 924 | CanBeNull = true; |
| 925 | } |
| 926 | } else if (auto *IP = dyn_cast<IntToPtrInst>(Val: this)) { |
| 927 | if (MDNode *MD = IP->getMetadata(KindID: LLVMContext::MD_dereferenceable)) { |
| 928 | ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0)); |
| 929 | DerefBytes = CI->getLimitedValue(); |
| 930 | } |
| 931 | if (DerefBytes == 0) { |
| 932 | if (MDNode *MD = |
| 933 | IP->getMetadata(KindID: LLVMContext::MD_dereferenceable_or_null)) { |
| 934 | ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0)); |
| 935 | DerefBytes = CI->getLimitedValue(); |
| 936 | } |
| 937 | CanBeNull = true; |
| 938 | } |
| 939 | } else if (auto *AI = dyn_cast<AllocaInst>(Val: this)) { |
| 940 | if (!AI->isArrayAllocation()) { |
| 941 | DerefBytes = |
| 942 | DL.getTypeStoreSize(Ty: AI->getAllocatedType()).getKnownMinValue(); |
| 943 | CanBeNull = false; |
| 944 | CanBeFreed = false; |
| 945 | } |
| 946 | } else if (auto *GV = dyn_cast<GlobalVariable>(Val: this)) { |
| 947 | if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) { |
| 948 | // TODO: Don't outright reject hasExternalWeakLinkage but set the |
| 949 | // CanBeNull flag. |
| 950 | DerefBytes = DL.getTypeStoreSize(Ty: GV->getValueType()).getFixedValue(); |
| 951 | CanBeNull = false; |
| 952 | CanBeFreed = false; |
| 953 | } |
| 954 | } |
| 955 | return DerefBytes; |
| 956 | } |
| 957 | |
/// Compute a lower bound on the known alignment of this pointer value, based
/// on attributes, metadata, or properties of the pointee object. Returns
/// Align(1) when nothing better can be proven.
Align Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer" );
  if (const Function *F = dyn_cast<Function>(Val: this)) {
    // Function pointers: alignment comes from the target's function-pointer
    // alignment rule in the data layout.
    Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
    switch (DL.getFunctionPtrAlignType()) {
    case DataLayout::FunctionPtrAlignType::Independent:
      return FunctionPtrAlign;
    case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
      return std::max(a: FunctionPtrAlign, b: F->getAlign().valueOrOne());
    }
    llvm_unreachable("Unhandled FunctionPtrAlignType" );
  } else if (auto *GVar = dyn_cast<GlobalVariable>(Val: this)) {
    const MaybeAlign Alignment(GVar->getAlign());
    if (!Alignment) {
      Type *ObjectType = GVar->getValueType();
      if (ObjectType->isSized()) {
        // If the object is defined in the current Module, we'll be giving
        // it the preferred alignment. Otherwise, we have to assume that it
        // may only have the minimum ABI alignment.
        if (GVar->isStrongDefinitionForLinker())
          return DL.getPreferredAlign(GV: GVar);
        else
          return DL.getABITypeAlign(Ty: ObjectType);
      }
    }
    return Alignment.valueOrOne();
  } else if (const Argument *A = dyn_cast<Argument>(Val: this)) {
    const MaybeAlign Alignment = A->getParamAlign();
    if (!Alignment && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = A->getParamStructRetType();
      if (EltTy->isSized())
        return DL.getABITypeAlign(Ty: EltTy);
    }
    return Alignment.valueOrOne();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: this)) {
    // Allocas always carry an explicit alignment.
    return AI->getAlign();
  } else if (const auto *Call = dyn_cast<CallBase>(Val: this)) {
    // Prefer the call-site attribute; fall back to the callee's return
    // attribute when the callee is known.
    MaybeAlign Alignment = Call->getRetAlign();
    if (!Alignment && Call->getCalledFunction())
      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
    return Alignment.valueOrOne();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(Val: this)) {
    // !align metadata promises an alignment for the loaded pointer value.
    if (MDNode *MD = LI->getMetadata(KindID: LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD: MD->getOperand(I: 0));
      return Align(CI->getLimitedValue());
    }
  } else if (auto *CstPtr = dyn_cast<Constant>(Val: this)) {
    // Strip pointer casts to avoid creating unnecessary ptrtoint expression
    // if the only "reduction" is combining a bitcast + ptrtoint.
    CstPtr = CstPtr->stripPointerCasts();
    if (auto *CstInt = dyn_cast_or_null<ConstantInt>(Val: ConstantExpr::getPtrToInt(
            C: const_cast<Constant *>(CstPtr), Ty: DL.getIntPtrType(getType()),
            /*OnlyIfReduced=*/true))) {
      // The constant address is aligned to 2^(trailing zero bits).
      size_t TrailingZeros = CstInt->getValue().countr_zero();
      // While the actual alignment may be large, elsewhere we have
      // an arbitrary upper alignment limit, so let's clamp to it.
      return Align(TrailingZeros < Value::MaxAlignmentExponent
                       ? uint64_t(1) << TrailingZeros
                       : Value::MaximumAlignment);
    }
  }
  return Align(1);
}
| 1022 | |
| 1023 | static std::optional<int64_t> |
| 1024 | getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) { |
| 1025 | // Skip over the first indices. |
| 1026 | gep_type_iterator GTI = gep_type_begin(GEP); |
| 1027 | for (unsigned i = 1; i != Idx; ++i, ++GTI) |
| 1028 | /*skip along*/; |
| 1029 | |
| 1030 | // Compute the offset implied by the rest of the indices. |
| 1031 | int64_t Offset = 0; |
| 1032 | for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) { |
| 1033 | ConstantInt *OpC = dyn_cast<ConstantInt>(Val: GEP->getOperand(i_nocapture: i)); |
| 1034 | if (!OpC) |
| 1035 | return std::nullopt; |
| 1036 | if (OpC->isZero()) |
| 1037 | continue; // No offset. |
| 1038 | |
| 1039 | // Handle struct indices, which add their field offset to the pointer. |
| 1040 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
| 1041 | Offset += DL.getStructLayout(Ty: STy)->getElementOffset(Idx: OpC->getZExtValue()); |
| 1042 | continue; |
| 1043 | } |
| 1044 | |
| 1045 | // Otherwise, we have a sequential type like an array or fixed-length |
| 1046 | // vector. Multiply the index by the ElementSize. |
| 1047 | TypeSize Size = GTI.getSequentialElementStride(DL); |
| 1048 | if (Size.isScalable()) |
| 1049 | return std::nullopt; |
| 1050 | Offset += Size.getFixedValue() * OpC->getSExtValue(); |
| 1051 | } |
| 1052 | |
| 1053 | return Offset; |
| 1054 | } |
| 1055 | |
| 1056 | std::optional<int64_t> Value::getPointerOffsetFrom(const Value *Other, |
| 1057 | const DataLayout &DL) const { |
| 1058 | const Value *Ptr1 = Other; |
| 1059 | const Value *Ptr2 = this; |
| 1060 | APInt Offset1(DL.getIndexTypeSizeInBits(Ty: Ptr1->getType()), 0); |
| 1061 | APInt Offset2(DL.getIndexTypeSizeInBits(Ty: Ptr2->getType()), 0); |
| 1062 | Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset&: Offset1, AllowNonInbounds: true); |
| 1063 | Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset&: Offset2, AllowNonInbounds: true); |
| 1064 | |
| 1065 | // Handle the trivial case first. |
| 1066 | if (Ptr1 == Ptr2) |
| 1067 | return Offset2.getSExtValue() - Offset1.getSExtValue(); |
| 1068 | |
| 1069 | const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Val: Ptr1); |
| 1070 | const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Val: Ptr2); |
| 1071 | |
| 1072 | // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical |
| 1073 | // base. After that base, they may have some number of common (and |
| 1074 | // potentially variable) indices. After that they handle some constant |
| 1075 | // offset, which determines their offset from each other. At this point, we |
| 1076 | // handle no other case. |
| 1077 | if (!GEP1 || !GEP2 || GEP1->getOperand(i_nocapture: 0) != GEP2->getOperand(i_nocapture: 0) || |
| 1078 | GEP1->getSourceElementType() != GEP2->getSourceElementType()) |
| 1079 | return std::nullopt; |
| 1080 | |
| 1081 | // Skip any common indices and track the GEP types. |
| 1082 | unsigned Idx = 1; |
| 1083 | for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx) |
| 1084 | if (GEP1->getOperand(i_nocapture: Idx) != GEP2->getOperand(i_nocapture: Idx)) |
| 1085 | break; |
| 1086 | |
| 1087 | auto IOffset1 = getOffsetFromIndex(GEP: GEP1, Idx, DL); |
| 1088 | auto IOffset2 = getOffsetFromIndex(GEP: GEP2, Idx, DL); |
| 1089 | if (!IOffset1 || !IOffset2) |
| 1090 | return std::nullopt; |
| 1091 | return *IOffset2 - *IOffset1 + Offset2.getSExtValue() - |
| 1092 | Offset1.getSExtValue(); |
| 1093 | } |
| 1094 | |
| 1095 | const Value *Value::DoPHITranslation(const BasicBlock *CurBB, |
| 1096 | const BasicBlock *PredBB) const { |
| 1097 | auto *PN = dyn_cast<PHINode>(Val: this); |
| 1098 | if (PN && PN->getParent() == CurBB) |
| 1099 | return PN->getIncomingValueForBlock(BB: PredBB); |
| 1100 | return this; |
| 1101 | } |
| 1102 | |
// A value's context is owned by its type, so simply forward the query.
LLVMContext &Value::getContext() const { return VTy->getContext(); }
| 1104 | |
/// Reverse the order of this value's use list in place. Standard
/// singly-linked-list reversal, with the extra work of keeping each Use's
/// Prev back-pointer (which addresses the predecessor's Next field) valid.
void Value::reverseUseList() {
  if (!UseList || !UseList->Next)
    // No need to reverse 0 or 1 uses.
    return;

  Use *Head = UseList;
  Use *Current = UseList->Next;
  Head->Next = nullptr; // The old head becomes the new tail.
  while (Current) {
    Use *Next = Current->Next;
    // Prepend Current in front of the reversed prefix headed by Head.
    Current->Next = Head;
    Head->Prev = &Current->Next;
    Head = Current;
    Current = Next;
  }
  UseList = Head;
  Head->Prev = &UseList; // The new head's Prev points at the list root.
}
| 1123 | |
| 1124 | bool Value::isSwiftError() const { |
| 1125 | auto *Arg = dyn_cast<Argument>(Val: this); |
| 1126 | if (Arg) |
| 1127 | return Arg->hasSwiftErrorAttr(); |
| 1128 | auto *Alloca = dyn_cast<AllocaInst>(Val: this); |
| 1129 | if (!Alloca) |
| 1130 | return false; |
| 1131 | return Alloca->isSwiftError(); |
| 1132 | } |
| 1133 | |
| 1134 | //===----------------------------------------------------------------------===// |
| 1135 | // ValueHandleBase Class |
| 1136 | //===----------------------------------------------------------------------===// |
| 1137 | |
/// Add this handle to the front of the use list headed by *\p List,
/// maintaining the doubly-linked Prev/Next invariants.
void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
  assert(List && "Handle list is null?" );

  // Splice ourselves into the list.
  Next = *List;
  *List = this;
  setPrevPtr(List);
  if (Next) {
    // Fix the old head's back-pointer to our Next field.
    Next->setPrevPtr(&Next);
    assert(getValPtr() == Next->getValPtr() && "Added to wrong list?" );
  }
}
| 1150 | |
/// Insert this handle into a use list immediately after \p List,
/// preserving the Prev/Next back-pointer invariants.
void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
  assert(List && "Must insert after existing node" );

  // Splice between List and its old successor.
  Next = List->Next;
  setPrevPtr(&List->Next);
  List->Next = this;
  if (Next)
    Next->setPrevPtr(&Next);
}
| 1160 | |
| 1161 | void ValueHandleBase::AddToUseList() { |
| 1162 | assert(getValPtr() && "Null pointer doesn't have a use list!" ); |
| 1163 | |
| 1164 | LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl; |
| 1165 | |
| 1166 | if (getValPtr()->HasValueHandle) { |
| 1167 | // If this value already has a ValueHandle, then it must be in the |
| 1168 | // ValueHandles map already. |
| 1169 | ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()]; |
| 1170 | assert(Entry && "Value doesn't have any handles?" ); |
| 1171 | AddToExistingUseList(List: &Entry); |
| 1172 | return; |
| 1173 | } |
| 1174 | |
| 1175 | // Ok, it doesn't have any handles yet, so we must insert it into the |
| 1176 | // DenseMap. However, doing this insertion could cause the DenseMap to |
| 1177 | // reallocate itself, which would invalidate all of the PrevP pointers that |
| 1178 | // point into the old table. Handle this by checking for reallocation and |
| 1179 | // updating the stale pointers only if needed. |
| 1180 | DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles; |
| 1181 | const void *OldBucketPtr = Handles.getPointerIntoBucketsArray(); |
| 1182 | |
| 1183 | ValueHandleBase *&Entry = Handles[getValPtr()]; |
| 1184 | assert(!Entry && "Value really did already have handles?" ); |
| 1185 | AddToExistingUseList(List: &Entry); |
| 1186 | getValPtr()->HasValueHandle = true; |
| 1187 | |
| 1188 | // If reallocation didn't happen or if this was the first insertion, don't |
| 1189 | // walk the table. |
| 1190 | if (Handles.isPointerIntoBucketsArray(Ptr: OldBucketPtr) || |
| 1191 | Handles.size() == 1) { |
| 1192 | return; |
| 1193 | } |
| 1194 | |
| 1195 | // Okay, reallocation did happen. Fix the Prev Pointers. |
| 1196 | for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(), |
| 1197 | E = Handles.end(); I != E; ++I) { |
| 1198 | assert(I->second && I->first == I->second->getValPtr() && |
| 1199 | "List invariant broken!" ); |
| 1200 | I->second->setPrevPtr(&I->second); |
| 1201 | } |
| 1202 | } |
| 1203 | |
/// Unlink this handle from its value's use list. If this was the last handle
/// watching the value, also erase the value's entry from the per-context
/// ValueHandles map and clear its HasValueHandle flag.
void ValueHandleBase::RemoveFromUseList() {
  assert(getValPtr() && getValPtr()->HasValueHandle &&
         "Pointer doesn't have a use list!" );

  // Unlink this from its use list.
  ValueHandleBase **PrevPtr = getPrevPtr();
  assert(*PrevPtr == this && "List invariant broken" );

  *PrevPtr = Next;
  if (Next) {
    assert(Next->getPrevPtr() == &Next && "List invariant broken" );
    Next->setPrevPtr(PrevPtr);
    return;
  }

  // If the Next pointer was null, then it is possible that this was the last
  // ValueHandle watching VP. If so, delete its entry from the ValueHandles
  // map.
  // PrevPtr pointing into the buckets array means we were the list head
  // stored directly in the map entry, i.e. the last remaining handle.
  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
  DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
  if (Handles.isPointerIntoBucketsArray(Ptr: PrevPtr)) {
    Handles.erase(Val: getValPtr());
    getValPtr()->HasValueHandle = false;
  }
}
| 1229 | |
/// Called from Value's destructor when a value with attached handles is
/// deleted. Notifies every handle watching \p V so that it drops (or asserts
/// about) its reference; afterwards no handle may still point at V.
void ValueHandleBase::ValueIsDeleted(Value *V) {
  assert(V->HasValueHandle && "Should only be called if ValueHandles present" );

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = V->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[V];
  assert(Entry && "Value bit set but no entries exist" );

  // We use a local ValueHandleBase as an iterator so that ValueHandles can add
  // and remove themselves from the list without breaking our iteration. This
  // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
  // Note that we deliberately do not the support the case when dropping a value
  // handle results in a new value handle being permanently added to the list
  // (as might occur in theory for CallbackVH's): the new value handle will not
  // be processed and the checking code will mete out righteous punishment if
  // the handle is still present once we have finished processing all the other
  // value handles (it is fine to momentarily add then remove a value handle).
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(List: Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken." );

    switch (Entry->getKind()) {
    case Assert:
      // Asserting handles are diagnosed after the loop if still present.
      break;
    case Weak:
    case WeakTracking:
      // WeakTracking and Weak just go to null, which unlinks them
      // from the list.
      Entry->operator=(RHS: nullptr);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->deleted();
      break;
    }
  }

  // All callbacks, weak references, and assertingVHs should be dropped by now.
  if (V->HasValueHandle) {
#ifndef NDEBUG      // Only in +Asserts mode...
    dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
           << "\n" ;
    if (pImpl->ValueHandles[V]->getKind() == Assert)
      llvm_unreachable("An asserting value handle still pointed to this"
                       " value!" );

#endif
    llvm_unreachable("All references to V were not removed?" );
  }
}
| 1282 | |
/// Called when \p Old is replaced-all-uses-with \p New. Notifies every handle
/// watching \p Old; WeakTracking handles retarget to \p New, CallbackVH
/// subclasses get allUsesReplacedWith(), Assert/Weak handles stay put.
void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
  assert(Old->HasValueHandle &&"Should only be called if ValueHandles present" );
  assert(Old != New && "Changing value into itself!" );
  assert(Old->getType() == New->getType() &&
         "replaceAllUses of value with new value of different type!" );

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = Old->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[Old];

  assert(Entry && "Value bit set but no entries exist" );

  // We use a local ValueHandleBase as an iterator so that
  // ValueHandles can add and remove themselves from the list without
  // breaking our iteration. This is not really an AssertingVH; we
  // just have to give ValueHandleBase some kind.
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(List: Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken." );

    switch (Entry->getKind()) {
    case Assert:
    case Weak:
      // Asserting and Weak handles do not follow RAUW implicitly.
      break;
    case WeakTracking:
      // Weak goes to the new value, which will unlink it from Old's list.
      Entry->operator=(RHS: New);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
      break;
    }
  }

#ifndef NDEBUG
  // If any new weak value handles were added while processing the
  // list, then complain about it now.
  if (Old->HasValueHandle)
    for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
      switch (Entry->getKind()) {
      case WeakTracking:
        dbgs() << "After RAUW from " << *Old->getType() << " %"
               << Old->getName() << " to " << *New->getType() << " %"
               << New->getName() << "\n" ;
        llvm_unreachable(
            "A weak tracking value handle still pointed to the old value!\n" );
      default:
        break;
      }
#endif
}
| 1338 | |
// Pin the vtable to this file: defining one out-of-line virtual method makes
// this translation unit the home of CallbackVH's vtable and type info.
void CallbackVH::anchor() {}
| 1341 | |