| 1 | //===-- Verifier.cpp - Implement the Module Verifier -----------------------==// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file defines the function verifier interface, that can be used for some |
| 10 | // basic correctness checking of input to the system. |
| 11 | // |
| 12 | // Note that this does not provide full `Java style' security and verifications, |
| 13 | // instead it just tries to ensure that code is well-formed. |
| 14 | // |
| 15 | // * Both of a binary operator's parameters are of the same type |
| 16 | // * Verify that the indices of mem access instructions match other operands |
| 17 | // * Verify that arithmetic and other things are only performed on first-class |
| 18 | // types. Verify that shifts & logicals only happen on integrals f.e. |
| 19 | // * All of the constants in a switch statement are of the correct type |
| 20 | // * The code is in valid SSA form |
| 21 | // * It should be illegal to put a label into any other type (like a structure) |
| 22 | // or to return one. [except constant arrays!] |
| 23 | // * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad |
| 24 | // * PHI nodes must have an entry for each predecessor, with no extras. |
| 25 | // * PHI nodes must be the first thing in a basic block, all grouped together |
| 26 | // * All basic blocks should only end with terminator insts, not contain them |
| 27 | // * The entry node to a function must not have predecessors |
| 28 | // * All Instructions must be embedded into a basic block |
| 29 | // * Functions cannot take a void-typed parameter |
// * Verify that a function's argument list agrees with its declared type.
| 31 | // * It is illegal to specify a name for a void value. |
// * It is illegal to have an internal global value with no initializer
| 33 | // * It is illegal to have a ret instruction that returns a value that does not |
| 34 | // agree with the function return value type. |
| 35 | // * Function call argument types match the function prototype |
| 36 | // * A landing pad is defined by a landingpad instruction, and can be jumped to |
| 37 | // only by the unwind edge of an invoke instruction. |
| 38 | // * A landingpad instruction must be the first non-PHI instruction in the |
| 39 | // block. |
| 40 | // * Landingpad instructions must be in a function with a personality function. |
| 41 | // * Convergence control intrinsics are introduced in ConvergentOperations.rst. |
| 42 | // The applied restrictions are too numerous to list here. |
| 43 | // * The convergence entry intrinsic and the loop heart must be the first |
| 44 | // non-PHI instruction in their respective block. This does not conflict with |
| 45 | // the landing pads, since these two kinds cannot occur in the same block. |
| 46 | // * All other things that are tested by asserts spread about the code... |
| 47 | // |
| 48 | //===----------------------------------------------------------------------===// |
| 49 | |
| 50 | #include "llvm/IR/Verifier.h" |
| 51 | #include "llvm/ADT/APFloat.h" |
| 52 | #include "llvm/ADT/APInt.h" |
| 53 | #include "llvm/ADT/ArrayRef.h" |
| 54 | #include "llvm/ADT/DenseMap.h" |
| 55 | #include "llvm/ADT/MapVector.h" |
| 56 | #include "llvm/ADT/STLExtras.h" |
| 57 | #include "llvm/ADT/SmallPtrSet.h" |
| 58 | #include "llvm/ADT/SmallSet.h" |
| 59 | #include "llvm/ADT/SmallVector.h" |
| 60 | #include "llvm/ADT/StringExtras.h" |
| 61 | #include "llvm/ADT/StringRef.h" |
| 62 | #include "llvm/ADT/Twine.h" |
| 63 | #include "llvm/BinaryFormat/Dwarf.h" |
| 64 | #include "llvm/IR/Argument.h" |
| 65 | #include "llvm/IR/AttributeMask.h" |
| 66 | #include "llvm/IR/Attributes.h" |
| 67 | #include "llvm/IR/BasicBlock.h" |
| 68 | #include "llvm/IR/CFG.h" |
| 69 | #include "llvm/IR/CallingConv.h" |
| 70 | #include "llvm/IR/Comdat.h" |
| 71 | #include "llvm/IR/Constant.h" |
| 72 | #include "llvm/IR/ConstantRange.h" |
| 73 | #include "llvm/IR/ConstantRangeList.h" |
| 74 | #include "llvm/IR/Constants.h" |
| 75 | #include "llvm/IR/ConvergenceVerifier.h" |
| 76 | #include "llvm/IR/DataLayout.h" |
| 77 | #include "llvm/IR/DebugInfo.h" |
| 78 | #include "llvm/IR/DebugInfoMetadata.h" |
| 79 | #include "llvm/IR/DebugLoc.h" |
| 80 | #include "llvm/IR/DerivedTypes.h" |
| 81 | #include "llvm/IR/Dominators.h" |
| 82 | #include "llvm/IR/EHPersonalities.h" |
| 83 | #include "llvm/IR/Function.h" |
| 84 | #include "llvm/IR/GCStrategy.h" |
| 85 | #include "llvm/IR/GlobalAlias.h" |
| 86 | #include "llvm/IR/GlobalValue.h" |
| 87 | #include "llvm/IR/GlobalVariable.h" |
| 88 | #include "llvm/IR/InlineAsm.h" |
| 89 | #include "llvm/IR/InstVisitor.h" |
| 90 | #include "llvm/IR/InstrTypes.h" |
| 91 | #include "llvm/IR/Instruction.h" |
| 92 | #include "llvm/IR/Instructions.h" |
| 93 | #include "llvm/IR/IntrinsicInst.h" |
| 94 | #include "llvm/IR/Intrinsics.h" |
| 95 | #include "llvm/IR/IntrinsicsAArch64.h" |
| 96 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
| 97 | #include "llvm/IR/IntrinsicsARM.h" |
| 98 | #include "llvm/IR/IntrinsicsNVPTX.h" |
| 99 | #include "llvm/IR/IntrinsicsWebAssembly.h" |
| 100 | #include "llvm/IR/LLVMContext.h" |
| 101 | #include "llvm/IR/MemoryModelRelaxationAnnotations.h" |
| 102 | #include "llvm/IR/Metadata.h" |
| 103 | #include "llvm/IR/Module.h" |
| 104 | #include "llvm/IR/ModuleSlotTracker.h" |
| 105 | #include "llvm/IR/PassManager.h" |
| 106 | #include "llvm/IR/ProfDataUtils.h" |
| 107 | #include "llvm/IR/Statepoint.h" |
| 108 | #include "llvm/IR/Type.h" |
| 109 | #include "llvm/IR/Use.h" |
| 110 | #include "llvm/IR/User.h" |
| 111 | #include "llvm/IR/VFABIDemangler.h" |
| 112 | #include "llvm/IR/Value.h" |
| 113 | #include "llvm/InitializePasses.h" |
| 114 | #include "llvm/Pass.h" |
| 115 | #include "llvm/ProfileData/InstrProf.h" |
| 116 | #include "llvm/Support/AMDGPUAddrSpace.h" |
| 117 | #include "llvm/Support/AtomicOrdering.h" |
| 118 | #include "llvm/Support/Casting.h" |
| 119 | #include "llvm/Support/CommandLine.h" |
| 120 | #include "llvm/Support/ErrorHandling.h" |
| 121 | #include "llvm/Support/MathExtras.h" |
| 122 | #include "llvm/Support/ModRef.h" |
| 123 | #include "llvm/Support/raw_ostream.h" |
| 124 | #include <algorithm> |
| 125 | #include <cassert> |
| 126 | #include <cstdint> |
| 127 | #include <memory> |
| 128 | #include <optional> |
| 129 | #include <string> |
| 130 | #include <utility> |
| 131 | |
| 132 | using namespace llvm; |
| 133 | |
| 134 | static cl::opt<bool> VerifyNoAliasScopeDomination( |
| 135 | "verify-noalias-scope-decl-dom" , cl::Hidden, cl::init(Val: false), |
| 136 | cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " |
| 137 | "scopes are not dominating" )); |
| 138 | |
| 139 | namespace llvm { |
| 140 | |
| 141 | struct VerifierSupport { |
| 142 | raw_ostream *OS; |
| 143 | const Module &M; |
| 144 | ModuleSlotTracker MST; |
| 145 | const Triple &TT; |
| 146 | const DataLayout &DL; |
| 147 | LLVMContext &Context; |
| 148 | |
| 149 | /// Track the brokenness of the module while recursively visiting. |
| 150 | bool Broken = false; |
| 151 | /// Broken debug info can be "recovered" from by stripping the debug info. |
| 152 | bool BrokenDebugInfo = false; |
| 153 | /// Whether to treat broken debug info as an error. |
| 154 | bool TreatBrokenDebugInfoAsError = true; |
| 155 | |
| 156 | explicit VerifierSupport(raw_ostream *OS, const Module &M) |
| 157 | : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()), |
| 158 | Context(M.getContext()) {} |
| 159 | |
| 160 | private: |
| 161 | void Write(const Module *M) { |
| 162 | *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n" ; |
| 163 | } |
| 164 | |
| 165 | void Write(const Value *V) { |
| 166 | if (V) |
| 167 | Write(V: *V); |
| 168 | } |
| 169 | |
| 170 | void Write(const Value &V) { |
| 171 | if (isa<Instruction>(Val: V)) { |
| 172 | V.print(O&: *OS, MST); |
| 173 | *OS << '\n'; |
| 174 | } else { |
| 175 | V.printAsOperand(O&: *OS, PrintType: true, MST); |
| 176 | *OS << '\n'; |
| 177 | } |
| 178 | } |
| 179 | |
| 180 | void Write(const DbgRecord *DR) { |
| 181 | if (DR) { |
| 182 | DR->print(O&: *OS, MST, IsForDebug: false); |
| 183 | *OS << '\n'; |
| 184 | } |
| 185 | } |
| 186 | |
| 187 | void Write(DbgVariableRecord::LocationType Type) { |
| 188 | switch (Type) { |
| 189 | case DbgVariableRecord::LocationType::Value: |
| 190 | *OS << "value" ; |
| 191 | break; |
| 192 | case DbgVariableRecord::LocationType::Declare: |
| 193 | *OS << "declare" ; |
| 194 | break; |
| 195 | case DbgVariableRecord::LocationType::Assign: |
| 196 | *OS << "assign" ; |
| 197 | break; |
| 198 | case DbgVariableRecord::LocationType::End: |
| 199 | *OS << "end" ; |
| 200 | break; |
| 201 | case DbgVariableRecord::LocationType::Any: |
| 202 | *OS << "any" ; |
| 203 | break; |
| 204 | }; |
| 205 | } |
| 206 | |
| 207 | void Write(const Metadata *MD) { |
| 208 | if (!MD) |
| 209 | return; |
| 210 | MD->print(OS&: *OS, MST, M: &M); |
| 211 | *OS << '\n'; |
| 212 | } |
| 213 | |
| 214 | template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) { |
| 215 | Write(MD.get()); |
| 216 | } |
| 217 | |
| 218 | void Write(const NamedMDNode *NMD) { |
| 219 | if (!NMD) |
| 220 | return; |
| 221 | NMD->print(ROS&: *OS, MST); |
| 222 | *OS << '\n'; |
| 223 | } |
| 224 | |
| 225 | void Write(Type *T) { |
| 226 | if (!T) |
| 227 | return; |
| 228 | *OS << ' ' << *T; |
| 229 | } |
| 230 | |
| 231 | void Write(const Comdat *C) { |
| 232 | if (!C) |
| 233 | return; |
| 234 | *OS << *C; |
| 235 | } |
| 236 | |
| 237 | void Write(const APInt *AI) { |
| 238 | if (!AI) |
| 239 | return; |
| 240 | *OS << *AI << '\n'; |
| 241 | } |
| 242 | |
| 243 | void Write(const unsigned i) { *OS << i << '\n'; } |
| 244 | |
| 245 | // NOLINTNEXTLINE(readability-identifier-naming) |
| 246 | void Write(const Attribute *A) { |
| 247 | if (!A) |
| 248 | return; |
| 249 | *OS << A->getAsString() << '\n'; |
| 250 | } |
| 251 | |
| 252 | // NOLINTNEXTLINE(readability-identifier-naming) |
| 253 | void Write(const AttributeSet *AS) { |
| 254 | if (!AS) |
| 255 | return; |
| 256 | *OS << AS->getAsString() << '\n'; |
| 257 | } |
| 258 | |
| 259 | // NOLINTNEXTLINE(readability-identifier-naming) |
| 260 | void Write(const AttributeList *AL) { |
| 261 | if (!AL) |
| 262 | return; |
| 263 | AL->print(O&: *OS); |
| 264 | } |
| 265 | |
| 266 | void Write(Printable P) { *OS << P << '\n'; } |
| 267 | |
| 268 | template <typename T> void Write(ArrayRef<T> Vs) { |
| 269 | for (const T &V : Vs) |
| 270 | Write(V); |
| 271 | } |
| 272 | |
| 273 | template <typename T1, typename... Ts> |
| 274 | void WriteTs(const T1 &V1, const Ts &... Vs) { |
| 275 | Write(V1); |
| 276 | WriteTs(Vs...); |
| 277 | } |
| 278 | |
| 279 | template <typename... Ts> void WriteTs() {} |
| 280 | |
| 281 | public: |
| 282 | /// A check failed, so printout out the condition and the message. |
| 283 | /// |
| 284 | /// This provides a nice place to put a breakpoint if you want to see why |
| 285 | /// something is not correct. |
| 286 | void CheckFailed(const Twine &Message) { |
| 287 | if (OS) |
| 288 | *OS << Message << '\n'; |
| 289 | Broken = true; |
| 290 | } |
| 291 | |
| 292 | /// A check failed (with values to print). |
| 293 | /// |
| 294 | /// This calls the Message-only version so that the above is easier to set a |
| 295 | /// breakpoint on. |
| 296 | template <typename T1, typename... Ts> |
| 297 | void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) { |
| 298 | CheckFailed(Message); |
| 299 | if (OS) |
| 300 | WriteTs(V1, Vs...); |
| 301 | } |
| 302 | |
| 303 | /// A debug info check failed. |
| 304 | void DebugInfoCheckFailed(const Twine &Message) { |
| 305 | if (OS) |
| 306 | *OS << Message << '\n'; |
| 307 | Broken |= TreatBrokenDebugInfoAsError; |
| 308 | BrokenDebugInfo = true; |
| 309 | } |
| 310 | |
| 311 | /// A debug info check failed (with values to print). |
| 312 | template <typename T1, typename... Ts> |
| 313 | void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, |
| 314 | const Ts &... Vs) { |
| 315 | DebugInfoCheckFailed(Message); |
| 316 | if (OS) |
| 317 | WriteTs(V1, Vs...); |
| 318 | } |
| 319 | }; |
| 320 | |
| 321 | } // namespace llvm |
| 322 | |
| 323 | namespace { |
| 324 | |
| 325 | class Verifier : public InstVisitor<Verifier>, VerifierSupport { |
| 326 | friend class InstVisitor<Verifier>; |
| 327 | DominatorTree DT; |
| 328 | |
| 329 | /// When verifying a basic block, keep track of all of the |
| 330 | /// instructions we have seen so far. |
| 331 | /// |
| 332 | /// This allows us to do efficient dominance checks for the case when an |
| 333 | /// instruction has an operand that is an instruction in the same block. |
| 334 | SmallPtrSet<Instruction *, 16> InstsInThisBlock; |
| 335 | |
| 336 | /// Keep track of the metadata nodes that have been checked already. |
| 337 | SmallPtrSet<const Metadata *, 32> MDNodes; |
| 338 | |
| 339 | /// Keep track which DISubprogram is attached to which function. |
| 340 | DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments; |
| 341 | |
| 342 | /// Track all DICompileUnits visited. |
| 343 | SmallPtrSet<const Metadata *, 2> CUVisited; |
| 344 | |
| 345 | /// The result type for a landingpad. |
| 346 | Type *LandingPadResultTy; |
| 347 | |
| 348 | /// Whether we've seen a call to @llvm.localescape in this function |
| 349 | /// already. |
| 350 | bool SawFrameEscape; |
| 351 | |
| 352 | /// Whether the current function has a DISubprogram attached to it. |
| 353 | bool HasDebugInfo = false; |
| 354 | |
| 355 | /// Stores the count of how many objects were passed to llvm.localescape for a |
| 356 | /// given function and the largest index passed to llvm.localrecover. |
| 357 | DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo; |
| 358 | |
| 359 | // Maps catchswitches and cleanuppads that unwind to siblings to the |
| 360 | // terminators that indicate the unwind, used to detect cycles therein. |
| 361 | MapVector<Instruction *, Instruction *> SiblingFuncletInfo; |
| 362 | |
| 363 | /// Cache which blocks are in which funclet, if an EH funclet personality is |
| 364 | /// in use. Otherwise empty. |
| 365 | DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors; |
| 366 | |
| 367 | /// Cache of constants visited in search of ConstantExprs. |
| 368 | SmallPtrSet<const Constant *, 32> ConstantExprVisited; |
| 369 | |
| 370 | /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic. |
| 371 | SmallVector<const Function *, 4> DeoptimizeDeclarations; |
| 372 | |
| 373 | /// Cache of attribute lists verified. |
| 374 | SmallPtrSet<const void *, 32> AttributeListsVisited; |
| 375 | |
| 376 | // Verify that this GlobalValue is only used in this module. |
| 377 | // This map is used to avoid visiting uses twice. We can arrive at a user |
| 378 | // twice, if they have multiple operands. In particular for very large |
| 379 | // constant expressions, we can arrive at a particular user many times. |
| 380 | SmallPtrSet<const Value *, 32> GlobalValueVisited; |
| 381 | |
| 382 | // Keeps track of duplicate function argument debug info. |
| 383 | SmallVector<const DILocalVariable *, 16> DebugFnArgs; |
| 384 | |
| 385 | TBAAVerifier TBAAVerifyHelper; |
| 386 | ConvergenceVerifier ConvergenceVerifyHelper; |
| 387 | |
| 388 | SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls; |
| 389 | |
| 390 | void checkAtomicMemAccessSize(Type *Ty, const Instruction *I); |
| 391 | |
| 392 | public: |
| 393 | explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError, |
| 394 | const Module &M) |
| 395 | : VerifierSupport(OS, M), LandingPadResultTy(nullptr), |
| 396 | SawFrameEscape(false), TBAAVerifyHelper(this) { |
| 397 | TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError; |
| 398 | } |
| 399 | |
| 400 | bool hasBrokenDebugInfo() const { return BrokenDebugInfo; } |
| 401 | |
| 402 | bool verify(const Function &F) { |
| 403 | assert(F.getParent() == &M && |
| 404 | "An instance of this class only works with a specific module!" ); |
| 405 | |
| 406 | // First ensure the function is well-enough formed to compute dominance |
| 407 | // information, and directly compute a dominance tree. We don't rely on the |
| 408 | // pass manager to provide this as it isolates us from a potentially |
| 409 | // out-of-date dominator tree and makes it significantly more complex to run |
| 410 | // this code outside of a pass manager. |
| 411 | // FIXME: It's really gross that we have to cast away constness here. |
| 412 | if (!F.empty()) |
| 413 | DT.recalculate(Func&: const_cast<Function &>(F)); |
| 414 | |
| 415 | for (const BasicBlock &BB : F) { |
| 416 | if (!BB.empty() && BB.back().isTerminator()) |
| 417 | continue; |
| 418 | |
| 419 | if (OS) { |
| 420 | *OS << "Basic Block in function '" << F.getName() |
| 421 | << "' does not have terminator!\n" ; |
| 422 | BB.printAsOperand(O&: *OS, PrintType: true, MST); |
| 423 | *OS << "\n" ; |
| 424 | } |
| 425 | return false; |
| 426 | } |
| 427 | |
| 428 | auto FailureCB = [this](const Twine &Message) { |
| 429 | this->CheckFailed(Message); |
| 430 | }; |
| 431 | ConvergenceVerifyHelper.initialize(OS, FailureCB, F); |
| 432 | |
| 433 | Broken = false; |
| 434 | // FIXME: We strip const here because the inst visitor strips const. |
| 435 | visit(F&: const_cast<Function &>(F)); |
| 436 | verifySiblingFuncletUnwinds(); |
| 437 | |
| 438 | if (ConvergenceVerifyHelper.sawTokens()) |
| 439 | ConvergenceVerifyHelper.verify(DT); |
| 440 | |
| 441 | InstsInThisBlock.clear(); |
| 442 | DebugFnArgs.clear(); |
| 443 | LandingPadResultTy = nullptr; |
| 444 | SawFrameEscape = false; |
| 445 | SiblingFuncletInfo.clear(); |
| 446 | verifyNoAliasScopeDecl(); |
| 447 | NoAliasScopeDecls.clear(); |
| 448 | |
| 449 | return !Broken; |
| 450 | } |
| 451 | |
| 452 | /// Verify the module that this instance of \c Verifier was initialized with. |
| 453 | bool verify() { |
| 454 | Broken = false; |
| 455 | |
| 456 | // Collect all declarations of the llvm.experimental.deoptimize intrinsic. |
| 457 | for (const Function &F : M) |
| 458 | if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize) |
| 459 | DeoptimizeDeclarations.push_back(Elt: &F); |
| 460 | |
| 461 | // Now that we've visited every function, verify that we never asked to |
| 462 | // recover a frame index that wasn't escaped. |
| 463 | verifyFrameRecoverIndices(); |
| 464 | for (const GlobalVariable &GV : M.globals()) |
| 465 | visitGlobalVariable(GV); |
| 466 | |
| 467 | for (const GlobalAlias &GA : M.aliases()) |
| 468 | visitGlobalAlias(GA); |
| 469 | |
| 470 | for (const GlobalIFunc &GI : M.ifuncs()) |
| 471 | visitGlobalIFunc(GI); |
| 472 | |
| 473 | for (const NamedMDNode &NMD : M.named_metadata()) |
| 474 | visitNamedMDNode(NMD); |
| 475 | |
| 476 | for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable()) |
| 477 | visitComdat(C: SMEC.getValue()); |
| 478 | |
| 479 | visitModuleFlags(); |
| 480 | visitModuleIdents(); |
| 481 | visitModuleCommandLines(); |
| 482 | |
| 483 | verifyCompileUnits(); |
| 484 | |
| 485 | verifyDeoptimizeCallingConvs(); |
| 486 | DISubprogramAttachments.clear(); |
| 487 | return !Broken; |
| 488 | } |
| 489 | |
| 490 | private: |
| 491 | /// Whether a metadata node is allowed to be, or contain, a DILocation. |
| 492 | enum class AreDebugLocsAllowed { No, Yes }; |
| 493 | |
| 494 | /// Metadata that should be treated as a range, with slightly different |
| 495 | /// requirements. |
| 496 | enum class RangeLikeMetadataKind { |
| 497 | Range, // MD_range |
| 498 | AbsoluteSymbol, // MD_absolute_symbol |
| 499 | NoaliasAddrspace // MD_noalias_addrspace |
| 500 | }; |
| 501 | |
| 502 | // Verification methods... |
| 503 | void visitGlobalValue(const GlobalValue &GV); |
| 504 | void visitGlobalVariable(const GlobalVariable &GV); |
| 505 | void visitGlobalAlias(const GlobalAlias &GA); |
| 506 | void visitGlobalIFunc(const GlobalIFunc &GI); |
| 507 | void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C); |
| 508 | void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited, |
| 509 | const GlobalAlias &A, const Constant &C); |
| 510 | void visitNamedMDNode(const NamedMDNode &NMD); |
| 511 | void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs); |
| 512 | void visitMetadataAsValue(const MetadataAsValue &MD, Function *F); |
| 513 | void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F); |
| 514 | void visitDIArgList(const DIArgList &AL, Function *F); |
| 515 | void visitComdat(const Comdat &C); |
| 516 | void visitModuleIdents(); |
| 517 | void visitModuleCommandLines(); |
| 518 | void visitModuleFlags(); |
| 519 | void visitModuleFlag(const MDNode *Op, |
| 520 | DenseMap<const MDString *, const MDNode *> &SeenIDs, |
| 521 | SmallVectorImpl<const MDNode *> &Requirements); |
| 522 | void visitModuleFlagCGProfileEntry(const MDOperand &MDO); |
| 523 | void visitFunction(const Function &F); |
| 524 | void visitBasicBlock(BasicBlock &BB); |
| 525 | void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty, |
| 526 | RangeLikeMetadataKind Kind); |
| 527 | void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty); |
| 528 | void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty); |
| 529 | void visitDereferenceableMetadata(Instruction &I, MDNode *MD); |
| 530 | void visitProfMetadata(Instruction &I, MDNode *MD); |
| 531 | void visitCallStackMetadata(MDNode *MD); |
| 532 | void visitMemProfMetadata(Instruction &I, MDNode *MD); |
| 533 | void visitCallsiteMetadata(Instruction &I, MDNode *MD); |
| 534 | void visitDIAssignIDMetadata(Instruction &I, MDNode *MD); |
| 535 | void visitMMRAMetadata(Instruction &I, MDNode *MD); |
| 536 | void visitAnnotationMetadata(MDNode *Annotation); |
| 537 | void visitAliasScopeMetadata(const MDNode *MD); |
| 538 | void visitAliasScopeListMetadata(const MDNode *MD); |
| 539 | void visitAccessGroupMetadata(const MDNode *MD); |
| 540 | |
| 541 | template <class Ty> bool isValidMetadataArray(const MDTuple &N); |
| 542 | #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N); |
| 543 | #include "llvm/IR/Metadata.def" |
| 544 | void visitDIScope(const DIScope &N); |
| 545 | void visitDIVariable(const DIVariable &N); |
| 546 | void visitDILexicalBlockBase(const DILexicalBlockBase &N); |
| 547 | void visitDITemplateParameter(const DITemplateParameter &N); |
| 548 | |
| 549 | void visitTemplateParams(const MDNode &N, const Metadata &RawParams); |
| 550 | |
| 551 | void visit(DbgLabelRecord &DLR); |
| 552 | void visit(DbgVariableRecord &DVR); |
| 553 | // InstVisitor overrides... |
| 554 | using InstVisitor<Verifier>::visit; |
| 555 | void visitDbgRecords(Instruction &I); |
| 556 | void visit(Instruction &I); |
| 557 | |
| 558 | void visitTruncInst(TruncInst &I); |
| 559 | void visitZExtInst(ZExtInst &I); |
| 560 | void visitSExtInst(SExtInst &I); |
| 561 | void visitFPTruncInst(FPTruncInst &I); |
| 562 | void visitFPExtInst(FPExtInst &I); |
| 563 | void visitFPToUIInst(FPToUIInst &I); |
| 564 | void visitFPToSIInst(FPToSIInst &I); |
| 565 | void visitUIToFPInst(UIToFPInst &I); |
| 566 | void visitSIToFPInst(SIToFPInst &I); |
| 567 | void visitIntToPtrInst(IntToPtrInst &I); |
| 568 | void visitPtrToIntInst(PtrToIntInst &I); |
| 569 | void visitBitCastInst(BitCastInst &I); |
| 570 | void visitAddrSpaceCastInst(AddrSpaceCastInst &I); |
| 571 | void visitPHINode(PHINode &PN); |
| 572 | void visitCallBase(CallBase &Call); |
| 573 | void visitUnaryOperator(UnaryOperator &U); |
| 574 | void visitBinaryOperator(BinaryOperator &B); |
| 575 | void visitICmpInst(ICmpInst &IC); |
| 576 | void visitFCmpInst(FCmpInst &FC); |
| 577 | void visitExtractElementInst(ExtractElementInst &EI); |
| 578 | void visitInsertElementInst(InsertElementInst &EI); |
| 579 | void visitShuffleVectorInst(ShuffleVectorInst &EI); |
| 580 | void visitVAArgInst(VAArgInst &VAA) { visitInstruction(I&: VAA); } |
| 581 | void visitCallInst(CallInst &CI); |
| 582 | void visitInvokeInst(InvokeInst &II); |
| 583 | void visitGetElementPtrInst(GetElementPtrInst &GEP); |
| 584 | void visitLoadInst(LoadInst &LI); |
| 585 | void visitStoreInst(StoreInst &SI); |
| 586 | void verifyDominatesUse(Instruction &I, unsigned i); |
| 587 | void visitInstruction(Instruction &I); |
| 588 | void visitTerminator(Instruction &I); |
| 589 | void visitBranchInst(BranchInst &BI); |
| 590 | void visitReturnInst(ReturnInst &RI); |
| 591 | void visitSwitchInst(SwitchInst &SI); |
| 592 | void visitIndirectBrInst(IndirectBrInst &BI); |
| 593 | void visitCallBrInst(CallBrInst &CBI); |
| 594 | void visitSelectInst(SelectInst &SI); |
| 595 | void visitUserOp1(Instruction &I); |
| 596 | void visitUserOp2(Instruction &I) { visitUserOp1(I); } |
| 597 | void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call); |
| 598 | void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI); |
| 599 | void visitVPIntrinsic(VPIntrinsic &VPI); |
| 600 | void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII); |
| 601 | void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI); |
| 602 | void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI); |
| 603 | void visitAtomicRMWInst(AtomicRMWInst &RMWI); |
| 604 | void visitFenceInst(FenceInst &FI); |
| 605 | void visitAllocaInst(AllocaInst &AI); |
| 606 | void visitExtractValueInst(ExtractValueInst &EVI); |
| 607 | void visitInsertValueInst(InsertValueInst &IVI); |
| 608 | void visitEHPadPredecessors(Instruction &I); |
| 609 | void visitLandingPadInst(LandingPadInst &LPI); |
| 610 | void visitResumeInst(ResumeInst &RI); |
| 611 | void visitCatchPadInst(CatchPadInst &CPI); |
| 612 | void visitCatchReturnInst(CatchReturnInst &CatchReturn); |
| 613 | void visitCleanupPadInst(CleanupPadInst &CPI); |
| 614 | void visitFuncletPadInst(FuncletPadInst &FPI); |
| 615 | void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch); |
| 616 | void visitCleanupReturnInst(CleanupReturnInst &CRI); |
| 617 | |
| 618 | void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal); |
| 619 | void verifySwiftErrorValue(const Value *SwiftErrorVal); |
| 620 | void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context); |
| 621 | void verifyMustTailCall(CallInst &CI); |
| 622 | bool verifyAttributeCount(AttributeList Attrs, unsigned Params); |
| 623 | void verifyAttributeTypes(AttributeSet Attrs, const Value *V); |
| 624 | void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V); |
| 625 | void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr, |
| 626 | const Value *V); |
| 627 | void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs, |
| 628 | const Value *V, bool IsIntrinsic, bool IsInlineAsm); |
| 629 | void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs); |
| 630 | |
| 631 | void visitConstantExprsRecursively(const Constant *EntryC); |
| 632 | void visitConstantExpr(const ConstantExpr *CE); |
| 633 | void visitConstantPtrAuth(const ConstantPtrAuth *CPA); |
| 634 | void verifyInlineAsmCall(const CallBase &Call); |
| 635 | void verifyStatepoint(const CallBase &Call); |
| 636 | void verifyFrameRecoverIndices(); |
| 637 | void verifySiblingFuncletUnwinds(); |
| 638 | |
| 639 | void verifyFragmentExpression(const DbgVariableIntrinsic &I); |
| 640 | void verifyFragmentExpression(const DbgVariableRecord &I); |
| 641 | template <typename ValueOrMetadata> |
| 642 | void verifyFragmentExpression(const DIVariable &V, |
| 643 | DIExpression::FragmentInfo Fragment, |
| 644 | ValueOrMetadata *Desc); |
| 645 | void verifyFnArgs(const DbgVariableIntrinsic &I); |
| 646 | void verifyFnArgs(const DbgVariableRecord &DVR); |
| 647 | void verifyNotEntryValue(const DbgVariableIntrinsic &I); |
| 648 | void verifyNotEntryValue(const DbgVariableRecord &I); |
| 649 | |
| 650 | /// Module-level debug info verification... |
| 651 | void verifyCompileUnits(); |
| 652 | |
| 653 | /// Module-level verification that all @llvm.experimental.deoptimize |
| 654 | /// declarations share the same calling convention. |
| 655 | void verifyDeoptimizeCallingConvs(); |
| 656 | |
| 657 | void verifyAttachedCallBundle(const CallBase &Call, |
| 658 | const OperandBundleUse &BU); |
| 659 | |
| 660 | /// Verify the llvm.experimental.noalias.scope.decl declarations |
| 661 | void verifyNoAliasScopeDecl(); |
| 662 | }; |
| 663 | |
| 664 | } // end anonymous namespace |
| 665 | |
| 666 | /// We know that cond should be true, if not print an error message. |
| 667 | #define Check(C, ...) \ |
| 668 | do { \ |
| 669 | if (!(C)) { \ |
| 670 | CheckFailed(__VA_ARGS__); \ |
| 671 | return; \ |
| 672 | } \ |
| 673 | } while (false) |
| 674 | |
| 675 | /// We know that a debug info condition should be true, if not print |
| 676 | /// an error message. |
| 677 | #define CheckDI(C, ...) \ |
| 678 | do { \ |
| 679 | if (!(C)) { \ |
| 680 | DebugInfoCheckFailed(__VA_ARGS__); \ |
| 681 | return; \ |
| 682 | } \ |
| 683 | } while (false) |
| 684 | |
| 685 | void Verifier::visitDbgRecords(Instruction &I) { |
| 686 | if (!I.DebugMarker) |
| 687 | return; |
| 688 | CheckDI(I.DebugMarker->MarkedInstr == &I, |
| 689 | "Instruction has invalid DebugMarker" , &I); |
| 690 | CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(), |
| 691 | "PHI Node must not have any attached DbgRecords" , &I); |
| 692 | for (DbgRecord &DR : I.getDbgRecordRange()) { |
| 693 | CheckDI(DR.getMarker() == I.DebugMarker, |
| 694 | "DbgRecord had invalid DebugMarker" , &I, &DR); |
| 695 | if (auto *Loc = |
| 696 | dyn_cast_or_null<DILocation>(Val: DR.getDebugLoc().getAsMDNode())) |
| 697 | visitMDNode(MD: *Loc, AllowLocs: AreDebugLocsAllowed::Yes); |
| 698 | if (auto *DVR = dyn_cast<DbgVariableRecord>(Val: &DR)) { |
| 699 | visit(DVR&: *DVR); |
| 700 | // These have to appear after `visit` for consistency with existing |
| 701 | // intrinsic behaviour. |
| 702 | verifyFragmentExpression(I: *DVR); |
| 703 | verifyNotEntryValue(I: *DVR); |
| 704 | } else if (auto *DLR = dyn_cast<DbgLabelRecord>(Val: &DR)) { |
| 705 | visit(DLR&: *DLR); |
| 706 | } |
| 707 | } |
| 708 | } |
| 709 | |
| 710 | void Verifier::visit(Instruction &I) { |
| 711 | visitDbgRecords(I); |
| 712 | for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) |
| 713 | Check(I.getOperand(i) != nullptr, "Operand is null" , &I); |
| 714 | InstVisitor<Verifier>::visit(I); |
| 715 | } |
| 716 | |
| 717 | // Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further. |
| 718 | static void forEachUser(const Value *User, |
| 719 | SmallPtrSet<const Value *, 32> &Visited, |
| 720 | llvm::function_ref<bool(const Value *)> Callback) { |
| 721 | if (!Visited.insert(Ptr: User).second) |
| 722 | return; |
| 723 | |
| 724 | SmallVector<const Value *> WorkList(User->materialized_users()); |
| 725 | while (!WorkList.empty()) { |
| 726 | const Value *Cur = WorkList.pop_back_val(); |
| 727 | if (!Visited.insert(Ptr: Cur).second) |
| 728 | continue; |
| 729 | if (Callback(Cur)) |
| 730 | append_range(C&: WorkList, R: Cur->materialized_users()); |
| 731 | } |
| 732 | } |
| 733 | |
// Checks common to every GlobalValue (variables, functions, aliases, ifuncs):
// linkage/visibility/storage-class consistency, !associated/!absolute_symbol
// attachments, and that all users live in this module.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
  // Declarations may only carry linkages that are valid for declarations.
  Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
        "Global is external, but doesn't have external or weak linkage!" , &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(Val: &GV)) {
    // !associated must wrap exactly one pointer-typed value which, after
    // stripping casts and aliases, is a global other than GO itself.
    if (const MDNode *Associated =
            GO->getMetadata(KindID: LLVMContext::MD_associated)) {
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand" , &GV, Associated);
      const Metadata *Op = Associated->getOperand(I: 0).get();
      Check(Op, "associated metadata must have a global value" , GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Val: Op);
      Check(VM, "associated metadata must be ValueAsMetadata" , GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed" , GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        // NOTE(review): isa<Constant> subsumes isa<GlobalObject>, so this
        // check accepts any constant despite the message naming GlobalObject.
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject" , GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves" , GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    // !absolute_symbol ranges are expressed in the pointer-sized integer type
    // of GO's address space.
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(KindID: LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(V: *GO, Range: AbsoluteSymbol,
                              Ty: DL.getIntPtrType(GO->getType()),
                              Kind: RangeLikeMetadataKind::AbsoluteSymbol);
    }
  }

  Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
        "Only global variables can have appending linkage!" , &GV);

  if (GV.hasAppendingLinkage()) {
    // Appending linkage concatenates arrays at link time, so the value type
    // must be an array.
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(Val: &GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!" , GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!" , &GV);

  if (GV.hasDLLExportStorageClass()) {
    Check(!GV.hasHiddenVisibility(),
          "dllexport GlobalValue must have default or protected visibility" ,
          &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
    Check(GV.hasDefaultVisibility(),
          "dllimport GlobalValue must have default visibility" , &GV);
    // dllimport symbols are resolved through an import table, so they can
    // never be dso_local and must not be defined in this module.
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!" ,
          &GV);

    Check((GV.isDeclaration() &&
           (GV.hasExternalLinkage() || GV.hasExternalWeakLinkage())) ||
              GV.hasAvailableExternallyLinkage(),
          "Global is marked as dllimport, but not external" , &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!" ,
          &GV);

  // Walk all transitive users of GV. Instructions must be parented (block +
  // function) and everything must live in this module; for instructions and
  // functions we stop descending (return false) since they cannot be nested
  // inside further constants.
  forEachUser(User: &GV, Visited&: GlobalValueVisited, Callback: [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(Val: V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed(Message: "Global is referenced by parentless instruction!" , V1: &GV, Vs: &M,
                    Vs: I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed(Message: "Global is referenced in a different module!" , V1: &GV, Vs: &M, Vs: I,
                    Vs: I->getParent()->getParent(),
                    Vs: I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(Val: V)) {
      if (F->getParent() != &M)
        CheckFailed(Message: "Global is used by function in a different module" , V1: &GV, Vs: &M,
                    Vs: F, Vs: F->getParent());
      return false;
    }
    return true;
  });
}
| 824 | |
| 825 | void Verifier::visitGlobalVariable(const GlobalVariable &GV) { |
| 826 | Type *GVType = GV.getValueType(); |
| 827 | |
| 828 | if (MaybeAlign A = GV.getAlign()) { |
| 829 | Check(A->value() <= Value::MaximumAlignment, |
| 830 | "huge alignment values are unsupported" , &GV); |
| 831 | } |
| 832 | |
| 833 | if (GV.hasInitializer()) { |
| 834 | Check(GV.getInitializer()->getType() == GVType, |
| 835 | "Global variable initializer type does not match global " |
| 836 | "variable type!" , |
| 837 | &GV); |
| 838 | Check(GV.getInitializer()->getType()->isSized(), |
| 839 | "Global variable initializer must be sized" , &GV); |
| 840 | // If the global has common linkage, it must have a zero initializer and |
| 841 | // cannot be constant. |
| 842 | if (GV.hasCommonLinkage()) { |
| 843 | Check(GV.getInitializer()->isNullValue(), |
| 844 | "'common' global must have a zero initializer!" , &GV); |
| 845 | Check(!GV.isConstant(), "'common' global may not be marked constant!" , |
| 846 | &GV); |
| 847 | Check(!GV.hasComdat(), "'common' global may not be in a Comdat!" , &GV); |
| 848 | } |
| 849 | } |
| 850 | |
| 851 | if (GV.hasName() && (GV.getName() == "llvm.global_ctors" || |
| 852 | GV.getName() == "llvm.global_dtors" )) { |
| 853 | Check(!GV.hasInitializer() || GV.hasAppendingLinkage(), |
| 854 | "invalid linkage for intrinsic global variable" , &GV); |
| 855 | Check(GV.materialized_use_empty(), |
| 856 | "invalid uses of intrinsic global variable" , &GV); |
| 857 | |
| 858 | // Don't worry about emitting an error for it not being an array, |
| 859 | // visitGlobalValue will complain on appending non-array. |
| 860 | if (ArrayType *ATy = dyn_cast<ArrayType>(Val: GVType)) { |
| 861 | StructType *STy = dyn_cast<StructType>(Val: ATy->getElementType()); |
| 862 | PointerType *FuncPtrTy = |
| 863 | PointerType::get(C&: Context, AddressSpace: DL.getProgramAddressSpace()); |
| 864 | Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) && |
| 865 | STy->getTypeAtIndex(0u)->isIntegerTy(32) && |
| 866 | STy->getTypeAtIndex(1) == FuncPtrTy, |
| 867 | "wrong type for intrinsic global variable" , &GV); |
| 868 | Check(STy->getNumElements() == 3, |
| 869 | "the third field of the element type is mandatory, " |
| 870 | "specify ptr null to migrate from the obsoleted 2-field form" ); |
| 871 | Type *ETy = STy->getTypeAtIndex(N: 2); |
| 872 | Check(ETy->isPointerTy(), "wrong type for intrinsic global variable" , |
| 873 | &GV); |
| 874 | } |
| 875 | } |
| 876 | |
| 877 | if (GV.hasName() && (GV.getName() == "llvm.used" || |
| 878 | GV.getName() == "llvm.compiler.used" )) { |
| 879 | Check(!GV.hasInitializer() || GV.hasAppendingLinkage(), |
| 880 | "invalid linkage for intrinsic global variable" , &GV); |
| 881 | Check(GV.materialized_use_empty(), |
| 882 | "invalid uses of intrinsic global variable" , &GV); |
| 883 | |
| 884 | if (ArrayType *ATy = dyn_cast<ArrayType>(Val: GVType)) { |
| 885 | PointerType *PTy = dyn_cast<PointerType>(Val: ATy->getElementType()); |
| 886 | Check(PTy, "wrong type for intrinsic global variable" , &GV); |
| 887 | if (GV.hasInitializer()) { |
| 888 | const Constant *Init = GV.getInitializer(); |
| 889 | const ConstantArray *InitArray = dyn_cast<ConstantArray>(Val: Init); |
| 890 | Check(InitArray, "wrong initalizer for intrinsic global variable" , |
| 891 | Init); |
| 892 | for (Value *Op : InitArray->operands()) { |
| 893 | Value *V = Op->stripPointerCasts(); |
| 894 | Check(isa<GlobalVariable>(V) || isa<Function>(V) || |
| 895 | isa<GlobalAlias>(V), |
| 896 | Twine("invalid " ) + GV.getName() + " member" , V); |
| 897 | Check(V->hasName(), |
| 898 | Twine("members of " ) + GV.getName() + " must be named" , V); |
| 899 | } |
| 900 | } |
| 901 | } |
| 902 | } |
| 903 | |
| 904 | // Visit any debug info attachments. |
| 905 | SmallVector<MDNode *, 1> MDs; |
| 906 | GV.getMetadata(KindID: LLVMContext::MD_dbg, MDs); |
| 907 | for (auto *MD : MDs) { |
| 908 | if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(Val: MD)) |
| 909 | visitDIGlobalVariableExpression(N: *GVE); |
| 910 | else |
| 911 | CheckDI(false, "!dbg attachment of global variable must be a " |
| 912 | "DIGlobalVariableExpression" ); |
| 913 | } |
| 914 | |
| 915 | // Scalable vectors cannot be global variables, since we don't know |
| 916 | // the runtime size. |
| 917 | Check(!GVType->isScalableTy(), "Globals cannot contain scalable types" , &GV); |
| 918 | |
| 919 | // Check if it is or contains a target extension type that disallows being |
| 920 | // used as a global. |
| 921 | Check(!GVType->containsNonGlobalTargetExtType(), |
| 922 | "Global @" + GV.getName() + " has illegal target extension type" , |
| 923 | GVType); |
| 924 | |
| 925 | if (!GV.hasInitializer()) { |
| 926 | visitGlobalValue(GV); |
| 927 | return; |
| 928 | } |
| 929 | |
| 930 | // Walk any aggregate initializers looking for bitcasts between address spaces |
| 931 | visitConstantExprsRecursively(EntryC: GV.getInitializer()); |
| 932 | |
| 933 | visitGlobalValue(GV); |
| 934 | } |
| 935 | |
| 936 | void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) { |
| 937 | SmallPtrSet<const GlobalAlias*, 4> Visited; |
| 938 | Visited.insert(Ptr: &GA); |
| 939 | visitAliaseeSubExpr(Visited, A: GA, C); |
| 940 | } |
| 941 | |
// Recursively verify the aliasee expression of GA. Visited carries every
// alias seen so far and doubles as cycle detection.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  // available_externally aliases may only alias available_externally values.
  if (GA.hasAvailableExternallyLinkage()) {
    Check(isa<GlobalValue>(C) &&
              cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value" ,
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(Val: &C)) {
    if (!GA.hasAvailableExternallyLinkage()) {
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition" ,
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(Val: GV)) {
      // Re-encountering an alias means the aliasee chain loops.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle" , &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias" , &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(Val: &C))
    visitConstantExprsRecursively(EntryC: CE);

  // Descend through the constant's operands; alias operands are followed
  // through to their aliasees.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(Val: V))
      visitAliaseeSubExpr(Visited, GA, C: *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(Val: V))
      visitAliaseeSubExpr(Visited, GA, C: *C2);
  }
}
| 980 | |
// Verify a GlobalAlias: legal linkage, a non-null aliasee of matching pointer
// type, and a well-formed aliasee expression (no cycles, points to a
// definition).
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!" ,
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!" , &GA);
  // An alias has the aliasee's pointer type (not its value type).
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!" , &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr" , &GA);

  // Walk the aliasee expression: cycle detection, definition checks, etc.
  visitAliaseeSubExpr(GA, C: *Aliasee);

  // Common GlobalValue checks apply to aliases as well.
  visitGlobalValue(GV: GA);
}
| 998 | |
// Verify a GlobalIFunc: legal linkage and a resolver that is a function
// definition returning a pointer, with the ifunc's own address space.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!" ,
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver" , &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition" , &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

  Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
        "IFunc resolver must return a pointer" , &GI);

  // The resolver operand must be a pointer in the ifunc's address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type" , &GI);
}
| 1021 | |
| 1022 | void Verifier::visitNamedMDNode(const NamedMDNode &NMD) { |
| 1023 | // There used to be various other llvm.dbg.* nodes, but we don't support |
| 1024 | // upgrading them and we want to reserve the namespace for future uses. |
| 1025 | if (NMD.getName().starts_with(Prefix: "llvm.dbg." )) |
| 1026 | CheckDI(NMD.getName() == "llvm.dbg.cu" , |
| 1027 | "unrecognized named metadata node in the llvm.dbg namespace" , &NMD); |
| 1028 | for (const MDNode *MD : NMD.operands()) { |
| 1029 | if (NMD.getName() == "llvm.dbg.cu" ) |
| 1030 | CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit" , &NMD, MD); |
| 1031 | |
| 1032 | if (!MD) |
| 1033 | continue; |
| 1034 | |
| 1035 | visitMDNode(MD: *MD, AllowLocs: AreDebugLocsAllowed::Yes); |
| 1036 | } |
| 1037 | } |
| 1038 | |
// Verify one MDNode: dispatch to the subclass-specific visitor, recurse into
// operands, and finally check the node itself is resolved and non-temporary.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(Ptr: &MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!" , &MD);

  // Dispatch on the concrete subclass; plain MDTuples have no extra
  // subclass-specific checks. The macro expands one case per specialized
  // MDNode leaf class.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass" );
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata must never be nested inside global metadata.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!" ,
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node" , &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Val: Op)) {
      visitMDNode(MD: *N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Val: Op)) {
      visitValueAsMetadata(MD: *V, F: nullptr);
      continue;
    }
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!" , &MD);
  Check(MD.isResolved(), "All nodes should be resolved!" , &MD);
}
| 1081 | |
// Verify value-wrapping metadata. For function-local metadata, F is the
// function the use appears in (nullptr when reached from global metadata).
void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
  Check(MD.getValue(), "Expected valid value" , &MD);
  // A metadata-typed Value wrapped back into metadata would make nesting
  // ill-defined.
  Check(!MD.getValue()->getType()->isMetadataTy(),
        "Unexpected metadata round-trip through values" , &MD, MD.getValue());

  // The remaining checks only apply to function-local metadata.
  auto *L = dyn_cast<LocalAsMetadata>(Val: &MD);
  if (!L)
    return;

  Check(F, "function-local metadata used outside a function" , L);

  // If this was an instruction, bb, or argument, verify that it is in the
  // function that we expect.
  Function *ActualF = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(Val: L->getValue())) {
    Check(I->getParent(), "function-local metadata not in basic block" , L, I);
    ActualF = I->getParent()->getParent();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(Val: L->getValue()))
    ActualF = BB->getParent();
  else if (Argument *A = dyn_cast<Argument>(Val: L->getValue()))
    ActualF = A->getParent();
  assert(ActualF && "Unimplemented function local metadata case!" );

  Check(ActualF == F, "function-local metadata used in wrong function" , L);
}
| 1107 | |
| 1108 | void Verifier::visitDIArgList(const DIArgList &AL, Function *F) { |
| 1109 | for (const ValueAsMetadata *VAM : AL.getArgs()) |
| 1110 | visitValueAsMetadata(MD: *VAM, F); |
| 1111 | } |
| 1112 | |
// Verify metadata reached through a MetadataAsValue use (e.g. intrinsic
// arguments); F is the enclosing function, used for function-local checks.
void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
  Metadata *MD = MDV.getMetadata();
  // MDNodes reached this way may not contain DILocations.
  if (auto *N = dyn_cast<MDNode>(Val: MD)) {
    visitMDNode(MD: *N, AllowLocs: AreDebugLocsAllowed::No);
    return;
  }

  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(Ptr: MD).second)
    return;

  if (auto *V = dyn_cast<ValueAsMetadata>(Val: MD))
    visitValueAsMetadata(MD: *V, F);

  // DIArgLists carry per-function values, so they get F threaded through.
  if (auto *AL = dyn_cast<DIArgList>(Val: MD))
    visitDIArgList(AL: *AL, F);
}
| 1131 | |
| 1132 | static bool isType(const Metadata *MD) { return !MD || isa<DIType>(Val: MD); } |
| 1133 | static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(Val: MD); } |
| 1134 | static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(Val: MD); } |
| 1135 | |
// A DILocation needs a local scope; inlined-at, if present, must itself be a
// location; and a subprogram scope must be the definition, not a declaration.
void Verifier::visitDILocation(const DILocation &N) {
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "location requires a valid scope" , &N, N.getRawScope());
  if (auto *IA = N.getRawInlinedAt())
    CheckDI(isa<DILocation>(IA), "inlined-at should be a location" , &N, IA);
  if (auto *SP = dyn_cast<DISubprogram>(Val: N.getRawScope()))
    CheckDI(SP->isDefinition(), "scope points into the type hierarchy" , &N);
}
| 1144 | |
// Generic (unrecognized) debug nodes only need a nonzero DWARF tag.
void Verifier::visitGenericDINode(const GenericDINode &N) {
  CheckDI(N.getTag(), "invalid tag" , &N);
}
| 1148 | |
// Check shared by all DIScope subclasses: the file operand, when present,
// must be a DIFile.
void Verifier::visitDIScope(const DIScope &N) {
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file" , &N, F);
}
| 1153 | |
// Verify a DISubrangeType. All bound fields are optional; each may be a
// signed constant, a DIVariable, or a DIExpression.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag" , &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type" );
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression" ,
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression" ,
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression" , &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression" , &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  CheckDI(!Size || isa<ConstantAsMetadata>(Size),
          "SizeInBits must be a constant" );
}
| 1181 | |
// Verify a DISubrange: count and upperBound are mutually exclusive; all bound
// fields may be constants, DIVariables, or DIExpressions; a constant count
// must be >= -1 (-1 meaning "empty/unknown").
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag" , &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound" , &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression" , &N);
  auto Count = N.getCount();
  CheckDI(!Count || !isa<ConstantInt *>(Count) ||
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count" , &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression" ,
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression" ,
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression" , &N);
}
| 1209 | |
| 1210 | void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) { |
| 1211 | CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag" , &N); |
| 1212 | CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(), |
| 1213 | "GenericSubrange can have any one of count or upperBound" , &N); |
| 1214 | auto *CBound = N.getRawCountNode(); |
| 1215 | CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound), |
| 1216 | "Count must be signed constant or DIVariable or DIExpression" , &N); |
| 1217 | auto *LBound = N.getRawLowerBound(); |
| 1218 | CheckDI(LBound, "GenericSubrange must contain lowerBound" , &N); |
| 1219 | CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound), |
| 1220 | "LowerBound must be signed constant or DIVariable or DIExpression" , |
| 1221 | &N); |
| 1222 | auto *UBound = N.getRawUpperBound(); |
| 1223 | CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound), |
| 1224 | "UpperBound must be signed constant or DIVariable or DIExpression" , |
| 1225 | &N); |
| 1226 | auto *Stride = N.getRawStride(); |
| 1227 | CheckDI(Stride, "GenericSubrange must contain stride" , &N); |
| 1228 | CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride), |
| 1229 | "Stride must be signed constant or DIVariable or DIExpression" , &N); |
| 1230 | } |
| 1231 | |
// An enumerator only needs the right DWARF tag; its name and value carry no
// further structural constraints checked here.
void Verifier::visitDIEnumerator(const DIEnumerator &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag" , &N);
}
| 1235 | |
// Verify a DIBasicType: one of the three allowed tags, and (currently) only a
// constant size.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag" , &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  CheckDI(!Size || isa<ConstantAsMetadata>(Size),
          "SizeInBits must be a constant" );
}
| 1246 | |
// Verify a DIFixedPointType on top of the basic-type checks: fixed-point
// encoding, a recognized kind, and that the scale fields used match the kind
// (binary/decimal use 'factor', rational uses numerator/denominator).
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag" , &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding" , &N);
  CheckDI(N.getKind() == DIFixedPointType::FixedPointBinary ||
              N.getKind() == DIFixedPointType::FixedPointDecimal ||
              N.getKind() == DIFixedPointType::FixedPointRational,
          "invalid kind" , &N);
  // Rational kinds must leave 'factor' zeroed...
  CheckDI(N.getKind() != DIFixedPointType::FixedPointRational ||
              N.getFactorRaw() == 0,
          "factor should be 0 for rationals" , &N);
  // ...and non-rational kinds must leave numerator/denominator zeroed.
  CheckDI(N.getKind() == DIFixedPointType::FixedPointRational ||
              (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals" , &N);
}
| 1265 | |
// Verify a DIStringType: string tag, and the endianness flags must not both
// be set.
void Verifier::visitDIStringType(const DIStringType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag" , &N);
  CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags" ,
          &N);
}
| 1271 | |
// Verify a DIDerivedType: a whitelisted tag, valid scope/base-type operands,
// and tag-specific constraints (ptr-to-member extra data, set base types,
// DWARF address spaces only on pointer/reference tags).
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag" , &N);
  // Pointer-to-member carries the containing class in extraData.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type" , &N,
            N.getRawExtraData());
  }

  // Set types (Pascal-style) may only be built over enumerations or
  // integral/boolean basic types.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      auto *Enum = dyn_cast_or_null<DICompositeType>(Val: T);
      auto *Basic = dyn_cast_or_null<DIBasicType>(Val: T);
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type" , &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope" , &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type" , &N,
          N.getRawBaseType());

  // DW_AT_address_class is only meaningful on pointers and references.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types" ,
            &N);
  }

  auto *Size = N.getRawSizeInBits();
  CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
              isa<DIExpression>(Size),
          "SizeInBits must be a constant or DIVariable or DIExpression" );
}
| 1331 | |
| 1332 | /// Detect mutually exclusive flags. |
| 1333 | static bool hasConflictingReferenceFlags(unsigned Flags) { |
| 1334 | return ((Flags & DINode::FlagLValueReference) && |
| 1335 | (Flags & DINode::FlagRValueReference)) || |
| 1336 | ((Flags & DINode::FlagTypePassByValue) && |
| 1337 | (Flags & DINode::FlagTypePassByReference)); |
| 1338 | } |
| 1339 | |
| 1340 | void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) { |
| 1341 | auto *Params = dyn_cast<MDTuple>(Val: &RawParams); |
| 1342 | CheckDI(Params, "invalid template params" , &N, &RawParams); |
| 1343 | for (Metadata *Op : Params->operands()) { |
| 1344 | CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter" , |
| 1345 | &N, Params, Op); |
| 1346 | } |
| 1347 | } |
| 1348 | |
// Verify a DICompositeType: a whitelisted aggregate tag, valid operands, and
// tag-specific constraints (vectors, variant parts, Fortran array fields).
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag" , &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope" , &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type" , &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements" , &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder" , &N,
          N.getRawVTableHolder());
  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
          "invalid reference flags" , &N);
  // Legacy Objective-C "block byref" flag; rejected explicitly since the bit
  // is no longer defined in DINode::DIFlags.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported" , &N);
  // NOTE(review): the message below says "DISubprogram" but this check runs
  // on DICompositeType — looks like a copy-paste from visitDISubprogram.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field" , &N);

  // Vector types are arrays with exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange" , &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, RawParams: *Params);

  // Discriminators are exclusive to variant parts.
  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part" );
  }

  // dataLocation/associated/allocated/rank (Fortran array support) are only
  // valid on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type" );
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type" );
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type" );
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type" );
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type" , &N);
  }

  auto *Size = N.getRawSizeInBits();
  CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
              isa<DIExpression>(Size),
          "SizeInBits must be a constant or DIVariable or DIExpression" );
}
| 1423 | |
| 1424 | void Verifier::visitDISubroutineType(const DISubroutineType &N) { |
| 1425 | CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag" , &N); |
| 1426 | if (auto *Types = N.getRawTypeArray()) { |
| 1427 | CheckDI(isa<MDTuple>(Types), "invalid composite elements" , &N, Types); |
| 1428 | for (Metadata *Ty : N.getTypeArray()->operands()) { |
| 1429 | CheckDI(isType(Ty), "invalid subroutine type ref" , &N, Types, Ty); |
| 1430 | } |
| 1431 | } |
| 1432 | CheckDI(!hasConflictingReferenceFlags(N.getFlags()), |
| 1433 | "invalid reference flags" , &N); |
| 1434 | } |
| 1435 | |
| 1436 | void Verifier::visitDIFile(const DIFile &N) { |
| 1437 | CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag" , &N); |
| 1438 | std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum(); |
| 1439 | if (Checksum) { |
| 1440 | CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last, |
| 1441 | "invalid checksum kind" , &N); |
| 1442 | size_t Size; |
| 1443 | switch (Checksum->Kind) { |
| 1444 | case DIFile::CSK_MD5: |
| 1445 | Size = 32; |
| 1446 | break; |
| 1447 | case DIFile::CSK_SHA1: |
| 1448 | Size = 40; |
| 1449 | break; |
| 1450 | case DIFile::CSK_SHA256: |
| 1451 | Size = 64; |
| 1452 | break; |
| 1453 | } |
| 1454 | CheckDI(Checksum->Value.size() == Size, "invalid checksum length" , &N); |
| 1455 | CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos, |
| 1456 | "invalid checksum" , &N); |
| 1457 | } |
| 1458 | } |
| 1459 | |
| 1460 | void Verifier::visitDICompileUnit(const DICompileUnit &N) { |
| 1461 | CheckDI(N.isDistinct(), "compile units must be distinct" , &N); |
| 1462 | CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag" , &N); |
| 1463 | |
| 1464 | // Don't bother verifying the compilation directory or producer string |
| 1465 | // as those could be empty. |
| 1466 | CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file" , &N, |
| 1467 | N.getRawFile()); |
| 1468 | CheckDI(!N.getFile()->getFilename().empty(), "invalid filename" , &N, |
| 1469 | N.getFile()); |
| 1470 | |
| 1471 | CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind), |
| 1472 | "invalid emission kind" , &N); |
| 1473 | |
| 1474 | if (auto *Array = N.getRawEnumTypes()) { |
| 1475 | CheckDI(isa<MDTuple>(Array), "invalid enum list" , &N, Array); |
| 1476 | for (Metadata *Op : N.getEnumTypes()->operands()) { |
| 1477 | auto *Enum = dyn_cast_or_null<DICompositeType>(Val: Op); |
| 1478 | CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type, |
| 1479 | "invalid enum type" , &N, N.getEnumTypes(), Op); |
| 1480 | } |
| 1481 | } |
| 1482 | if (auto *Array = N.getRawRetainedTypes()) { |
| 1483 | CheckDI(isa<MDTuple>(Array), "invalid retained type list" , &N, Array); |
| 1484 | for (Metadata *Op : N.getRetainedTypes()->operands()) { |
| 1485 | CheckDI( |
| 1486 | Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) && |
| 1487 | !cast<DISubprogram>(Op)->isDefinition())), |
| 1488 | "invalid retained type" , &N, Op); |
| 1489 | } |
| 1490 | } |
| 1491 | if (auto *Array = N.getRawGlobalVariables()) { |
| 1492 | CheckDI(isa<MDTuple>(Array), "invalid global variable list" , &N, Array); |
| 1493 | for (Metadata *Op : N.getGlobalVariables()->operands()) { |
| 1494 | CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)), |
| 1495 | "invalid global variable ref" , &N, Op); |
| 1496 | } |
| 1497 | } |
| 1498 | if (auto *Array = N.getRawImportedEntities()) { |
| 1499 | CheckDI(isa<MDTuple>(Array), "invalid imported entity list" , &N, Array); |
| 1500 | for (Metadata *Op : N.getImportedEntities()->operands()) { |
| 1501 | CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref" , |
| 1502 | &N, Op); |
| 1503 | } |
| 1504 | } |
| 1505 | if (auto *Array = N.getRawMacros()) { |
| 1506 | CheckDI(isa<MDTuple>(Array), "invalid macro list" , &N, Array); |
| 1507 | for (Metadata *Op : N.getMacros()->operands()) { |
| 1508 | CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref" , &N, Op); |
| 1509 | } |
| 1510 | } |
| 1511 | CUVisited.insert(Ptr: &N); |
| 1512 | } |
| 1513 | |
// Verify a DISubprogram node: tag, scope/file/type references, template
// parameters, retained nodes, and the definition-vs-declaration rules.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    // A line number is meaningless without a file to anchor it.
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, RawParams: *Params);
  if (auto *S = N.getRawDeclaration())
    // The declaration link must point at a declaration, never a definition.
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    // Retained nodes must form an MDTuple of local variables, labels, or
    // imported entities owned by this subprogram.
    auto *Node = dyn_cast<MDTuple>(Val: RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
                     isa<DIImportedEntity>(Op)),
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);
    }
  }
  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(Val: N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    // Thrown types, when present, must be an MDTuple of DITypes.
    auto *ThrownTypes = dyn_cast<MDTuple>(Val: RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
| 1578 | |
| 1579 | void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) { |
| 1580 | CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag" , &N); |
| 1581 | CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()), |
| 1582 | "invalid local scope" , &N, N.getRawScope()); |
| 1583 | if (auto *SP = dyn_cast<DISubprogram>(Val: N.getRawScope())) |
| 1584 | CheckDI(SP->isDefinition(), "scope points into the type hierarchy" , &N); |
| 1585 | } |
| 1586 | |
| 1587 | void Verifier::visitDILexicalBlock(const DILexicalBlock &N) { |
| 1588 | visitDILexicalBlockBase(N); |
| 1589 | |
| 1590 | CheckDI(N.getLine() || !N.getColumn(), |
| 1591 | "cannot have column info without line info" , &N); |
| 1592 | } |
| 1593 | |
// A DILexicalBlockFile adds no fields that need checking beyond what the
// shared lexical-block verification covers.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
| 1597 | |
| 1598 | void Verifier::visitDICommonBlock(const DICommonBlock &N) { |
| 1599 | CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag" , &N); |
| 1600 | if (auto *S = N.getRawScope()) |
| 1601 | CheckDI(isa<DIScope>(S), "invalid scope ref" , &N, S); |
| 1602 | if (auto *S = N.getRawDecl()) |
| 1603 | CheckDI(isa<DIGlobalVariable>(S), "invalid declaration" , &N, S); |
| 1604 | } |
| 1605 | |
| 1606 | void Verifier::visitDINamespace(const DINamespace &N) { |
| 1607 | CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag" , &N); |
| 1608 | if (auto *S = N.getRawScope()) |
| 1609 | CheckDI(isa<DIScope>(S), "invalid scope ref" , &N, S); |
| 1610 | } |
| 1611 | |
| 1612 | void Verifier::visitDIMacro(const DIMacro &N) { |
| 1613 | CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define || |
| 1614 | N.getMacinfoType() == dwarf::DW_MACINFO_undef, |
| 1615 | "invalid macinfo type" , &N); |
| 1616 | CheckDI(!N.getName().empty(), "anonymous macro" , &N); |
| 1617 | if (!N.getValue().empty()) { |
| 1618 | assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix" ); |
| 1619 | } |
| 1620 | } |
| 1621 | |
| 1622 | void Verifier::visitDIMacroFile(const DIMacroFile &N) { |
| 1623 | CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file, |
| 1624 | "invalid macinfo type" , &N); |
| 1625 | if (auto *F = N.getRawFile()) |
| 1626 | CheckDI(isa<DIFile>(F), "invalid file" , &N, F); |
| 1627 | |
| 1628 | if (auto *Array = N.getRawElements()) { |
| 1629 | CheckDI(isa<MDTuple>(Array), "invalid macro list" , &N, Array); |
| 1630 | for (Metadata *Op : N.getElements()->operands()) { |
| 1631 | CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref" , &N, Op); |
| 1632 | } |
| 1633 | } |
| 1634 | } |
| 1635 | |
// Module nodes must carry the DWARF module tag and a non-empty name.
void Verifier::visitDIModule(const DIModule &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  CheckDI(!N.getName().empty(), "anonymous module", &N);
}
| 1640 | |
// Common check for all template parameter kinds: the type reference must be
// valid (isType also accepts a null type).
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
| 1644 | |
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  // Run the checks shared by all template parameters first.
  visitDITemplateParameter(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
          &N);
}
| 1651 | |
| 1652 | void Verifier::visitDITemplateValueParameter( |
| 1653 | const DITemplateValueParameter &N) { |
| 1654 | visitDITemplateParameter(N); |
| 1655 | |
| 1656 | CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter || |
| 1657 | N.getTag() == dwarf::DW_TAG_GNU_template_template_param || |
| 1658 | N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack, |
| 1659 | "invalid tag" , &N); |
| 1660 | } |
| 1661 | |
| 1662 | void Verifier::visitDIVariable(const DIVariable &N) { |
| 1663 | if (auto *S = N.getRawScope()) |
| 1664 | CheckDI(isa<DIScope>(S), "invalid scope" , &N, S); |
| 1665 | if (auto *F = N.getRawFile()) |
| 1666 | CheckDI(isa<DIFile>(F), "invalid file" , &N, F); |
| 1667 | } |
| 1668 | |
| 1669 | void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) { |
| 1670 | // Checks common to all variables. |
| 1671 | visitDIVariable(N); |
| 1672 | |
| 1673 | CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag" , &N); |
| 1674 | CheckDI(isType(N.getRawType()), "invalid type ref" , &N, N.getRawType()); |
| 1675 | // Check only if the global variable is not an extern |
| 1676 | if (N.isDefinition()) |
| 1677 | CheckDI(N.getType(), "missing global variable type" , &N); |
| 1678 | if (auto *Member = N.getRawStaticDataMemberDeclaration()) { |
| 1679 | CheckDI(isa<DIDerivedType>(Member), |
| 1680 | "invalid static data member declaration" , &N, Member); |
| 1681 | } |
| 1682 | } |
| 1683 | |
| 1684 | void Verifier::visitDILocalVariable(const DILocalVariable &N) { |
| 1685 | // Checks common to all variables. |
| 1686 | visitDIVariable(N); |
| 1687 | |
| 1688 | CheckDI(isType(N.getRawType()), "invalid type ref" , &N, N.getRawType()); |
| 1689 | CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag" , &N); |
| 1690 | CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()), |
| 1691 | "local variable requires a valid scope" , &N, N.getRawScope()); |
| 1692 | if (auto Ty = N.getType()) |
| 1693 | CheckDI(!isa<DISubroutineType>(Ty), "invalid type" , &N, N.getType()); |
| 1694 | } |
| 1695 | |
// DIAssignID nodes are bare identity tokens: they carry no operands and must
// be distinct so each one names a unique assignment.
void Verifier::visitDIAssignID(const DIAssignID &N) {
  CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
  CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
}
| 1700 | |
| 1701 | void Verifier::visitDILabel(const DILabel &N) { |
| 1702 | if (auto *S = N.getRawScope()) |
| 1703 | CheckDI(isa<DIScope>(S), "invalid scope" , &N, S); |
| 1704 | if (auto *F = N.getRawFile()) |
| 1705 | CheckDI(isa<DIFile>(F), "invalid file" , &N, F); |
| 1706 | |
| 1707 | CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag" , &N); |
| 1708 | CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()), |
| 1709 | "label requires a valid scope" , &N, N.getRawScope()); |
| 1710 | } |
| 1711 | |
// DIExpression knows how to validate its own opcode stream; defer to it.
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
| 1715 | |
| 1716 | void Verifier::visitDIGlobalVariableExpression( |
| 1717 | const DIGlobalVariableExpression &GVE) { |
| 1718 | CheckDI(GVE.getVariable(), "missing variable" ); |
| 1719 | if (auto *Var = GVE.getVariable()) |
| 1720 | visitDIGlobalVariable(N: *Var); |
| 1721 | if (auto *Expr = GVE.getExpression()) { |
| 1722 | visitDIExpression(N: *Expr); |
| 1723 | if (auto Fragment = Expr->getFragmentInfo()) |
| 1724 | verifyFragmentExpression(V: *GVE.getVariable(), Fragment: *Fragment, Desc: &GVE); |
| 1725 | } |
| 1726 | } |
| 1727 | |
| 1728 | void Verifier::visitDIObjCProperty(const DIObjCProperty &N) { |
| 1729 | CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag" , &N); |
| 1730 | if (auto *T = N.getRawType()) |
| 1731 | CheckDI(isType(T), "invalid type ref" , &N, T); |
| 1732 | if (auto *F = N.getRawFile()) |
| 1733 | CheckDI(isa<DIFile>(F), "invalid file" , &N, F); |
| 1734 | } |
| 1735 | |
| 1736 | void Verifier::visitDIImportedEntity(const DIImportedEntity &N) { |
| 1737 | CheckDI(N.getTag() == dwarf::DW_TAG_imported_module || |
| 1738 | N.getTag() == dwarf::DW_TAG_imported_declaration, |
| 1739 | "invalid tag" , &N); |
| 1740 | if (auto *S = N.getRawScope()) |
| 1741 | CheckDI(isa<DIScope>(S), "invalid scope for imported entity" , &N, S); |
| 1742 | CheckDI(isDINode(N.getRawEntity()), "invalid imported entity" , &N, |
| 1743 | N.getRawEntity()); |
| 1744 | } |
| 1745 | |
| 1746 | void Verifier::visitComdat(const Comdat &C) { |
| 1747 | // In COFF the Module is invalid if the GlobalValue has private linkage. |
| 1748 | // Entities with private linkage don't have entries in the symbol table. |
| 1749 | if (TT.isOSBinFormatCOFF()) |
| 1750 | if (const GlobalValue *GV = M.getNamedValue(Name: C.getName())) |
| 1751 | Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage" , |
| 1752 | GV); |
| 1753 | } |
| 1754 | |
| 1755 | void Verifier::visitModuleIdents() { |
| 1756 | const NamedMDNode *Idents = M.getNamedMetadata(Name: "llvm.ident" ); |
| 1757 | if (!Idents) |
| 1758 | return; |
| 1759 | |
| 1760 | // llvm.ident takes a list of metadata entry. Each entry has only one string. |
| 1761 | // Scan each llvm.ident entry and make sure that this requirement is met. |
| 1762 | for (const MDNode *N : Idents->operands()) { |
| 1763 | Check(N->getNumOperands() == 1, |
| 1764 | "incorrect number of operands in llvm.ident metadata" , N); |
| 1765 | Check(dyn_cast_or_null<MDString>(N->getOperand(0)), |
| 1766 | ("invalid value for llvm.ident metadata entry operand" |
| 1767 | "(the operand should be a string)" ), |
| 1768 | N->getOperand(0)); |
| 1769 | } |
| 1770 | } |
| 1771 | |
| 1772 | void Verifier::visitModuleCommandLines() { |
| 1773 | const NamedMDNode *CommandLines = M.getNamedMetadata(Name: "llvm.commandline" ); |
| 1774 | if (!CommandLines) |
| 1775 | return; |
| 1776 | |
| 1777 | // llvm.commandline takes a list of metadata entry. Each entry has only one |
| 1778 | // string. Scan each llvm.commandline entry and make sure that this |
| 1779 | // requirement is met. |
| 1780 | for (const MDNode *N : CommandLines->operands()) { |
| 1781 | Check(N->getNumOperands() == 1, |
| 1782 | "incorrect number of operands in llvm.commandline metadata" , N); |
| 1783 | Check(dyn_cast_or_null<MDString>(N->getOperand(0)), |
| 1784 | ("invalid value for llvm.commandline metadata entry operand" |
| 1785 | "(the operand should be a string)" ), |
| 1786 | N->getOperand(0)); |
| 1787 | } |
| 1788 | } |
| 1789 | |
// Verify the module-level flags metadata: check each flag individually,
// collect 'require' entries, enforce the PAuth ABI pairing rule, and finally
// validate every collected requirement against the flags actually present.
void Verifier::visitModuleFlags() {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString*, const MDNode*> SeenIDs;
  SmallVector<const MDNode*, 16> Requirements;
  // uint64_t(-1) is used as the "not seen" sentinel for both PAuth flags.
  uint64_t PAuthABIPlatform = -1;
  uint64_t PAuthABIVersion = -1;
  for (const MDNode *MDN : Flags->operands()) {
    visitModuleFlag(Op: MDN, SeenIDs, Requirements);
    // Malformed flags were already diagnosed by visitModuleFlag; skip them
    // for the PAuth scan below.
    if (MDN->getNumOperands() != 3)
      continue;
    if (const auto *FlagName = dyn_cast_or_null<MDString>(Val: MDN->getOperand(I: 1))) {
      if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
        if (const auto *PAP =
                mdconst::dyn_extract_or_null<ConstantInt>(MD: MDN->getOperand(I: 2)))
          PAuthABIPlatform = PAP->getZExtValue();
      } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
        if (const auto *PAV =
                mdconst::dyn_extract_or_null<ConstantInt>(MD: MDN->getOperand(I: 2)))
          PAuthABIVersion = PAV->getZExtValue();
      }
    }
  }

  // The two PAuth ABI flags must appear together or not at all.
  if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
    CheckFailed(Message: "either both or no 'aarch64-elf-pauthabi-platform' and "
                "'aarch64-elf-pauthabi-version' module flags must be present");

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Val: Requirement->getOperand(I: 0));
    const Metadata *ReqValue = Requirement->getOperand(I: 1);

    const MDNode *Op = SeenIDs.lookup(Val: Flag);
    if (!Op) {
      CheckFailed(Message: "invalid requirement on flag, flag is not present in module",
                  V1: Flag);
      continue;
    }

    if (Op->getOperand(I: 2) != ReqValue) {
      CheckFailed(Message: ("invalid requirement on flag, "
                   "flag does not have the required value"),
                  V1: Flag);
      continue;
    }
  }
}
| 1840 | |
// Verify a single module-flag entry. Records the flag's ID in \p SeenIDs (for
// uniqueness checking) and appends any 'require' payload to \p Requirements
// so the caller can validate it after all flags have been scanned.
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Check(Op->getNumOperands() == 3,
        "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(MD: Op->getOperand(I: 0), MFB)) {
    // Distinguish "not an integer at all" from "integer with an unknown
    // behavior value" in the diagnostics; the second Check always fails.
    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
          "invalid behavior operand in module flag (expected constant integer)",
          Op->getOperand(0));
    Check(false,
          "invalid behavior operand in module flag (unexpected constant)",
          Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Val: Op->getOperand(I: 1));
  Check(ID, "invalid ID operand in module flag (expected metadata string)",
        Op->getOperand(1));

  // Check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Min: {
    // 'min' merging only makes sense over non-negative integers.
    auto *V = mdconst::dyn_extract_or_null<ConstantInt>(MD: Op->getOperand(I: 2));
    Check(V && V->getValue().isNonNegative(),
          "invalid value for 'min' module flag (expected constant non-negative "
          "integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Max: {
    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
          "invalid value for 'max' module flag (expected constant integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Val: Op->getOperand(I: 2));
    Check(Value && Value->getNumOperands() == 2,
          "invalid value for 'require' module flag (expected metadata pair)",
          Op->getOperand(2));
    Check(isa<MDString>(Value->getOperand(0)),
          ("invalid value for 'require' module flag "
           "(first value operand should be a string)"),
          Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Elt: Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Check(isa<MDNode>(Op->getOperand(2)),
          "invalid value for 'append'-type module flag "
          "(expected a metadata node)",
          Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(KV: std::make_pair(x&: ID, y&: Op)).second;
    Check(Inserted,
          "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  // Well-known flags with extra structural requirements.
  if (ID->getString() == "wchar_size") {
    ConstantInt *Value
      = mdconst::dyn_extract_or_null<ConstantInt>(MD: Op->getOperand(I: 2));
    Check(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Check(M.getNamedMetadata("llvm.linker.options"),
          "'Linker Options' named metadata no longer supported");
  }

  if (ID->getString() == "SemanticInterposition") {
    ConstantInt *Value =
        mdconst::dyn_extract_or_null<ConstantInt>(MD: Op->getOperand(I: 2));
    Check(Value,
          "SemanticInterposition metadata requires constant integer argument");
  }

  if (ID->getString() == "CG Profile") {
    // Each operand of the flag's value is a (caller, callee, count) triple.
    for (const MDOperand &MDO : cast<MDNode>(Val: Op->getOperand(I: 2))->operands())
      visitModuleFlagCGProfileEntry(MDO);
  }
}
| 1948 | |
| 1949 | void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) { |
| 1950 | auto CheckFunction = [&](const MDOperand &FuncMDO) { |
| 1951 | if (!FuncMDO) |
| 1952 | return; |
| 1953 | auto F = dyn_cast<ValueAsMetadata>(Val: FuncMDO); |
| 1954 | Check(F && isa<Function>(F->getValue()->stripPointerCasts()), |
| 1955 | "expected a Function or null" , FuncMDO); |
| 1956 | }; |
| 1957 | auto Node = dyn_cast_or_null<MDNode>(Val: MDO); |
| 1958 | Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple" , MDO); |
| 1959 | CheckFunction(Node->getOperand(I: 0)); |
| 1960 | CheckFunction(Node->getOperand(I: 1)); |
| 1961 | auto Count = dyn_cast_or_null<ConstantAsMetadata>(Val: Node->getOperand(I: 2)); |
| 1962 | Check(Count && Count->getType()->isIntegerTy(), |
| 1963 | "expected an integer constant" , Node->getOperand(2)); |
| 1964 | } |
| 1965 | |
// Verify that each attribute in \p Attrs is structurally well-formed: known
// string-valued boolean attributes hold "", "true" or "false", and enum
// attributes that require an integer argument actually carry one. The value
// \p V is only used for diagnostics.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// Expand Attributes.inc so that every known string-boolean attribute gets a
// value check; the expansion below runs once per ATTRIBUTE_STRBOOL entry.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // An enum attribute that is declared as taking an integer argument must
    // have been created with one (and vice versa).
    if (A.isIntAttribute() != Attribute::isIntAttrKind(Kind: A.getKindAsEnum())) {
      CheckFailed(Message: "Attribute '" + A.getAsString() + "' should have an Argument",
                  V1: V);
      return;
    }
  }
}
| 1991 | |
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Covers: applicability to parameters, mutual-exclusion rules (ABI-classifying
// attributes, memory-effect attributes), type compatibility, and per-attribute
// structural constraints (alignment bounds, sized pointee types, ranges).
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  // Every enum attribute present must be usable on a parameter at all.
  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' must stand alone, except that 'range' may accompany it.
  if (Attrs.hasAttribute(Kind: Attribute::ImmArg)) {
    unsigned AttrCount =
        Attrs.getNumAttributes() - Attrs.hasAttribute(Kind: Attribute::Range);
    Check(AttrCount == 1,
          "Attribute 'immarg' is incompatible with other attributes except the "
          "'range' attribute",
          V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Kind: Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Kind: Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Kind: Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Kind: Attribute::StructRet) ||
               Attrs.hasAttribute(Kind: Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Kind: Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Kind: Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  // Pairwise incompatibilities between semantically contradictory attributes.
  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadNone)),
        "Attributes writable and readnone are incompatible!", V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes writable and readonly are incompatible!", V);

  // Reject any attribute that cannot apply to a value of this type.
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, AS: Attrs);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(A: Attr.getKindAsEnum())) {
      CheckFailed(Message: "Attribute '" + Attr.getAsString() +
                  "' applied to incompatible type!", V1: V);
      return;
    }
  }

  // Pointer-only attributes: alignment bounds and sized pointee types for the
  // ABI-classifying attributes that carry a type.
  if (isa<PointerType>(Val: Ty)) {
    if (Attrs.hasAttribute(Kind: Attribute::Alignment)) {
      Align AttrAlign = Attrs.getAlignment().valueOrOne();
      Check(AttrAlign.value() <= Value::MaximumAlignment,
            "huge alignment values are unsupported", V);
    }
    if (Attrs.hasAttribute(Kind: Attribute::ByVal)) {
      Type *ByValTy = Attrs.getByValType();
      SmallPtrSet<Type *, 4> Visited;
      Check(ByValTy->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
      // Check if it is or contains a target extension type that disallows being
      // used on the stack.
      Check(!ByValTy->containsNonLocalTargetExtType(),
            "'byval' argument has illegal target extension type", V);
      Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
            "huge 'byval' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Kind: Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'byref' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Kind: Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'inalloca' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Kind: Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
      Check(
          DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
              (1ULL << 32),
          "huge 'preallocated' arguments are unsupported", V);
    }
  }

  // 'initializes' must carry a non-empty, ordered list of ranges.
  if (Attrs.hasAttribute(Kind: Attribute::Initializes)) {
    auto Inits = Attrs.getAttribute(Kind: Attribute::Initializes).getInitializes();
    Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
          V);
    Check(ConstantRangeList::isOrderedRanges(Inits),
          "Attribute 'initializes' does not support unordered ranges", V);
  }

  // 'nofpclass' must test at least one class bit and only valid bits.
  if (Attrs.hasAttribute(Kind: Attribute::NoFPClass)) {
    uint64_t Val = Attrs.getAttribute(Kind: Attribute::NoFPClass).getValueAsInt();
    Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
          V);
    Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "Invalid value for 'nofpclass' test mask", V);
  }
  // 'range' must match the (scalar or vector element) integer bit width.
  if (Attrs.hasAttribute(Kind: Attribute::Range)) {
    const ConstantRange &CR =
        Attrs.getAttribute(Kind: Attribute::Range).getValueAsConstantRange();
    Check(Ty->isIntOrIntVectorTy(CR.getBitWidth()),
          "Range bit width must match type bit width!", V);
  }
}
| 2158 | |
| 2159 | void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr, |
| 2160 | const Value *V) { |
| 2161 | if (Attrs.hasFnAttr(Kind: Attr)) { |
| 2162 | StringRef S = Attrs.getFnAttr(Kind: Attr).getValueAsString(); |
| 2163 | unsigned N; |
| 2164 | if (S.getAsInteger(Radix: 10, Result&: N)) |
| 2165 | CheckFailed(Message: "\"" + Attr + "\" takes an unsigned integer: " + S, V1: V); |
| 2166 | } |
| 2167 | } |
| 2168 | |
// Check parameter attributes against a function type.
// The value V is printed in error messages.
// FT          - the function type the attribute list applies to.
// Attrs       - the full attribute list (return, parameter and function
//               attribute sets).
// IsIntrinsic - when false, intrinsic-only attributes (immarg) are rejected.
// IsInlineAsm - when true, 'elementtype' is additionally permitted.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                                   const Value *V, bool IsIntrinsic,
                                   bool IsInlineAsm) {
  if (Attrs.isEmpty())
    return;

  // Validate each distinct attribute list at most once: the list, every
  // attribute set in it, and every attribute must belong to this module's
  // LLVMContext.
  if (AttributeListsVisited.insert(Ptr: Attrs.getRawPointer()).second) {
    Check(Attrs.hasParentContext(Context),
          "Attribute list does not match Module context!" , &Attrs, V);
    for (const auto &AttrSet : Attrs) {
      Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
            "Attribute set does not match Module context!" , &AttrSet, V);
      for (const auto &A : AttrSet) {
        Check(A.hasParentContext(Context),
              "Attribute does not match Module context!" , &A, V);
      }
    }
  }

  // Attributes below may appear on at most one parameter each; these flags
  // track whether we have already seen them while walking the parameters.
  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftAsync = false;
  bool SawSwiftError = false;

  // Verify return value attributes.
  AttributeSet RetAttrs = Attrs.getRetAttrs();
  for (Attribute RetAttr : RetAttrs)
    Check(RetAttr.isStringAttribute() ||
              Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
          "Attribute '" + RetAttr.getAsString() +
              "' does not apply to function return values" ,
          V);

  // Track the widest fixed-vector type among the return type and parameters;
  // consumed by the x86 EVEX512 target-feature check near the end.
  unsigned MaxParameterWidth = 0;
  auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
    if (Ty->isVectorTy()) {
      if (auto *VT = dyn_cast<FixedVectorType>(Val: Ty)) {
        unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
        if (Size > MaxParameterWidth)
          MaxParameterWidth = Size;
      }
    }
  };
  GetMaxParameterWidth(FT->getReturnType());
  verifyParameterAttrs(Attrs: RetAttrs, Ty: FT->getReturnType(), V);

  // Verify parameter attributes.
  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
    Type *Ty = FT->getParamType(i);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo: i);

    if (!IsIntrinsic) {
      Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
            "immarg attribute only applies to intrinsics" , V);
      if (!IsInlineAsm)
        Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
              "Attribute 'elementtype' can only be applied to intrinsics"
              " and inline asm." ,
              V);
    }

    verifyParameterAttrs(Attrs: ArgAttrs, Ty, V);
    GetMaxParameterWidth(Ty);

    if (ArgAttrs.hasAttribute(Kind: Attribute::Nest)) {
      Check(!SawNest, "More than one parameter has attribute nest!" , V);
      SawNest = true;
    }

    if (ArgAttrs.hasAttribute(Kind: Attribute::Returned)) {
      Check(!SawReturned, "More than one parameter has attribute returned!" , V);
      // 'returned' promises the argument is returned, so its type must be
      // losslessly convertible to the return type.
      Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
            "Incompatible argument and return types for 'returned' attribute" ,
            V);
      SawReturned = true;
    }

    if (ArgAttrs.hasAttribute(Kind: Attribute::StructRet)) {
      Check(!SawSRet, "Cannot have multiple 'sret' parameters!" , V);
      Check(i == 0 || i == 1,
            "Attribute 'sret' is not on first or second parameter!" , V);
      SawSRet = true;
    }

    if (ArgAttrs.hasAttribute(Kind: Attribute::SwiftSelf)) {
      Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!" , V);
      SawSwiftSelf = true;
    }

    if (ArgAttrs.hasAttribute(Kind: Attribute::SwiftAsync)) {
      Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!" , V);
      SawSwiftAsync = true;
    }

    if (ArgAttrs.hasAttribute(Kind: Attribute::SwiftError)) {
      Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!" , V);
      SawSwiftError = true;
    }

    if (ArgAttrs.hasAttribute(Kind: Attribute::InAlloca)) {
      Check(i == FT->getNumParams() - 1,
            "inalloca isn't on the last parameter!" , V);
    }
  }

  // Everything below validates function-level attributes only.
  if (!Attrs.hasFnAttrs())
    return;

  verifyAttributeTypes(Attrs: Attrs.getFnAttrs(), V);
  for (Attribute FnAttr : Attrs.getFnAttrs())
    Check(FnAttr.isStringAttribute() ||
              Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
          "Attribute '" + FnAttr.getAsString() +
              "' does not apply to functions!" ,
          V);

  // Mutually exclusive inlining directives.
  Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
          Attrs.hasFnAttr(Attribute::AlwaysInline)),
        "Attributes 'noinline and alwaysinline' are incompatible!" , V);

  // 'optnone' implies 'noinline' and excludes the size/debug optimization
  // attributes.
  if (Attrs.hasFnAttr(Kind: Attribute::OptimizeNone)) {
    Check(Attrs.hasFnAttr(Attribute::NoInline),
          "Attribute 'optnone' requires 'noinline'!" , V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optnone' are incompatible!" , V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optnone' are incompatible!" , V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
          "Attributes 'optdebug and optnone' are incompatible!" , V);
  }

  Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
          Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
        "Attributes "
        "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!" ,
        V);

  if (Attrs.hasFnAttr(Kind: Attribute::OptimizeForDebugging)) {
    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optdebug' are incompatible!" , V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optdebug' are incompatible!" , V);
  }

  // 'writable' anywhere in the list requires the function's memory effects to
  // allow modification of argument memory.
  Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
            isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
        "Attribute writable and memory without argmem: write are incompatible!" ,
        V);

  // AArch64 SME streaming-mode attributes.
  if (Attrs.hasFnAttr(Kind: "aarch64_pstate_sm_enabled" )) {
    Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible" ),
          "Attributes 'aarch64_pstate_sm_enabled and "
          "aarch64_pstate_sm_compatible' are incompatible!" ,
          V);
  }

  // At most one ZA-state attribute (bools sum to <= 1).
  Check((Attrs.hasFnAttr("aarch64_new_za" ) + Attrs.hasFnAttr("aarch64_in_za" ) +
         Attrs.hasFnAttr("aarch64_inout_za" ) +
         Attrs.hasFnAttr("aarch64_out_za" ) +
         Attrs.hasFnAttr("aarch64_preserves_za" ) +
         Attrs.hasFnAttr("aarch64_za_state_agnostic" )) <= 1,
        "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
        "'aarch64_inout_za', 'aarch64_preserves_za' and "
        "'aarch64_za_state_agnostic' are mutually exclusive" ,
        V);

  // Likewise for ZT0 state.
  Check((Attrs.hasFnAttr("aarch64_new_zt0" ) +
         Attrs.hasFnAttr("aarch64_in_zt0" ) +
         Attrs.hasFnAttr("aarch64_inout_zt0" ) +
         Attrs.hasFnAttr("aarch64_out_zt0" ) +
         Attrs.hasFnAttr("aarch64_preserves_zt0" ) +
         Attrs.hasFnAttr("aarch64_za_state_agnostic" )) <= 1,
        "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
        "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
        "'aarch64_za_state_agnostic' are mutually exclusive" ,
        V);

  if (Attrs.hasFnAttr(Kind: Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(Val: V);
    Check(GV->hasGlobalUnnamedAddr(),
          "Attribute 'jumptable' requires 'unnamed_addr'" , V);
  }

  // allocsize(N[, M]): each index must name an integer-typed parameter.
  if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed(Message: "'allocsize' " + Name + " argument is out of bounds" , V1: V);
        return false;
      }

      if (!FT->getParamType(i: ParamNo)->isIntegerTy()) {
        CheckFailed(Message: "'allocsize' " + Name +
                    " argument must refer to an integer parameter" ,
                    V1: V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size" , Args->first))
      return;

    if (Args->second && !CheckParam("number of elements" , *Args->second))
      return;
  }

  if (Attrs.hasFnAttr(Kind: Attribute::AllocKind)) {
    AllocFnKind K = Attrs.getAllocKind();
    // Exactly one of the primary kinds (alloc/realloc/free) must be set;
    // masking isolates those bits so combinations are rejected too.
    AllocFnKind Type =
        K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
    if (!is_contained(
            Set: {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
            Element: Type))
      CheckFailed(
          Message: "'allockind()' requires exactly one of alloc, realloc, and free" );
    if ((Type == AllocFnKind::Free) &&
        ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
               AllocFnKind::Aligned)) != AllocFnKind::Unknown))
      CheckFailed(Message: "'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
                  "or aligned modifiers." );
    AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
    if ((K & ZeroedUninit) == ZeroedUninit)
      CheckFailed(Message: "'allockind()' can't be both zeroed and uninitialized" );
  }

  // "alloc-variant-zeroed" names a sibling allocator that returns zeroed
  // memory; when that function exists in the module, it must match family,
  // allockind("zeroed"), and signature.
  if (Attribute A = Attrs.getFnAttr(Kind: "alloc-variant-zeroed" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    Check(!S.empty(), "'alloc-variant-zeroed' must not be empty" );
    Function *Variant = M.getFunction(Name: S);
    if (Variant) {
      Attribute Family = Attrs.getFnAttr(Kind: "alloc-family" );
      Attribute VariantFamily = Variant->getFnAttribute(Kind: "alloc-family" );
      if (Family.isValid())
        Check(VariantFamily.isValid() &&
                  VariantFamily.getValueAsString() == Family.getValueAsString(),
              "'alloc-variant-zeroed' must name a function belonging to the "
              "same 'alloc-family'" );

      Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
                (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
                 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
            "'alloc-variant-zeroed' must name a function with "
            "'allockind(\"zeroed\")'" );

      Check(FT == Variant->getFunctionType(),
            "'alloc-variant-zeroed' must name a function with the same "
            "signature" );
    }
  }

  // vscale_range bounds must be non-zero powers of two with min <= max.
  if (Attrs.hasFnAttr(Kind: Attribute::VScaleRange)) {
    unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
    if (VScaleMin == 0)
      CheckFailed(Message: "'vscale_range' minimum must be greater than 0" , V1: V);
    else if (!isPowerOf2_32(Value: VScaleMin))
      CheckFailed(Message: "'vscale_range' minimum must be power-of-two value" , V1: V);
    std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
    if (VScaleMax && VScaleMin > VScaleMax)
      CheckFailed(Message: "'vscale_range' minimum cannot be greater than maximum" , V1: V);
    else if (VScaleMax && !isPowerOf2_32(Value: *VScaleMax))
      CheckFailed(Message: "'vscale_range' maximum must be power-of-two value" , V1: V);
  }

  if (Attribute FPAttr = Attrs.getFnAttr(Kind: "frame-pointer" ); FPAttr.isValid()) {
    StringRef FP = FPAttr.getValueAsString();
    if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" )
      CheckFailed(Message: "invalid value for 'frame-pointer' attribute: " + FP, V1: V);
  }

  // Check EVEX512 feature.
  if (TT.isX86() && MaxParameterWidth >= 512) {
    Attribute TargetFeaturesAttr = Attrs.getFnAttr(Kind: "target-features" );
    if (TargetFeaturesAttr.isValid()) {
      StringRef TF = TargetFeaturesAttr.getValueAsString();
      Check(!TF.contains("+avx512f" ) || !TF.contains("-evex512" ),
            "512-bit vector arguments require 'evex512' for AVX512" , V);
    }
  }

  // Integer-valued string attributes.
  checkUnsignedBaseTenFuncAttr(Attrs, Attr: "patchable-function-prefix" , V);
  checkUnsignedBaseTenFuncAttr(Attrs, Attr: "patchable-function-entry" , V);
  if (Attrs.hasFnAttr(Kind: "patchable-function-entry-section" ))
    Check(!Attrs.getFnAttr("patchable-function-entry-section" )
               .getValueAsString()
               .empty(),
          "\"patchable-function-entry-section\" must not be empty" );
  checkUnsignedBaseTenFuncAttr(Attrs, Attr: "warn-stack-size" , V);

  // Enumerated-value string attributes: validate the spelled value.
  if (auto A = Attrs.getFnAttr(Kind: "sign-return-address" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "none" && S != "all" && S != "non-leaf" )
      CheckFailed(Message: "invalid value for 'sign-return-address' attribute: " + S, V1: V);
  }

  if (auto A = Attrs.getFnAttr(Kind: "sign-return-address-key" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "a_key" && S != "b_key" )
      CheckFailed(Message: "invalid value for 'sign-return-address-key' attribute: " + S,
                  V1: V);
    // The key selector is meaningless without the base attribute.
    if (auto AA = Attrs.getFnAttr(Kind: "sign-return-address" ); !AA.isValid()) {
      CheckFailed(
          Message: "'sign-return-address-key' present without `sign-return-address`" );
    }
  }

  if (auto A = Attrs.getFnAttr(Kind: "branch-target-enforcement" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false" )
      CheckFailed(
          Message: "invalid value for 'branch-target-enforcement' attribute: " + S, V1: V);
  }

  if (auto A = Attrs.getFnAttr(Kind: "branch-protection-pauth-lr" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false" )
      CheckFailed(
          Message: "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V1: V);
  }

  if (auto A = Attrs.getFnAttr(Kind: "guarded-control-stack" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false" )
      CheckFailed(Message: "invalid value for 'guarded-control-stack' attribute: " + S,
                  V1: V);
  }

  // The VFABI mangled name must demangle against this function type.
  if (auto A = Attrs.getFnAttr(Kind: "vector-function-abi-variant" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(MangledName: S, FTy: FT);
    if (!Info)
      CheckFailed(Message: "invalid name for a VFABI variant: " + S, V1: V);
  }

  // Denormal-mode strings must parse to a valid DenormalMode.
  if (auto A = Attrs.getFnAttr(Kind: "denormal-fp-math" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (!parseDenormalFPAttribute(Str: S).isValid())
      CheckFailed(Message: "invalid value for 'denormal-fp-math' attribute: " + S, V1: V);
  }

  if (auto A = Attrs.getFnAttr(Kind: "denormal-fp-math-f32" ); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (!parseDenormalFPAttribute(Str: S).isValid())
      CheckFailed(Message: "invalid value for 'denormal-fp-math-f32' attribute: " + S,
                  V1: V);
  }
}
| 2524 | |
// Verify function-level metadata attachments. Only !prof and !kcfi_type carry
// structural requirements checked here; other metadata kinds pass through.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      // The "unknown" branch-weights marker is instruction-only metadata and
      // never valid on a function.
      if (isExplicitlyUnknownBranchWeightsMetadata(MD: *MD)) {
        CheckFailed(Message: "'unknown' !prof metadata should appear only on "
                    "instructions supporting the 'branch_weights' metadata" ,
                    V1: MD);
        continue;
      }
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands" , MD);

      // Check first operand: must be one of the two entry-count labels.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null" ,
            MD);
      Check(isa<MDString>(MD->getOperand(0)),
            "expected string with name of the !prof annotation" , MD);
      MDString *MDS = cast<MDString>(Val: MD->getOperand(I: 0));
      StringRef ProfName = MDS->getString();
      Check(ProfName == MDProfLabels::FunctionEntryCount ||
                ProfName == MDProfLabels::SyntheticFunctionEntryCount,
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'" ,
            MD);

      // Check second operand: the count itself, a constant integer.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null" ,
            MD);
      Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
            "expected integer argument to function_entry_count" , MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      // !kcfi_type wraps exactly one i32 constant (the KCFI type hash).
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand" , MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null" ,
            MD);
      Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
            "expected a constant operand for !kcfi_type" , MD);
      Constant *C = cast<ConstantAsMetadata>(Val: MD->getOperand(I: 0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type" , MD);
      Check(cast<ConstantInt>(C)->getBitWidth() == 32,
            "expected a 32-bit integer constant operand for !kcfi_type" , MD);
    }
  }
}
| 2573 | |
| 2574 | void Verifier::visitConstantExprsRecursively(const Constant *EntryC) { |
| 2575 | if (!ConstantExprVisited.insert(Ptr: EntryC).second) |
| 2576 | return; |
| 2577 | |
| 2578 | SmallVector<const Constant *, 16> Stack; |
| 2579 | Stack.push_back(Elt: EntryC); |
| 2580 | |
| 2581 | while (!Stack.empty()) { |
| 2582 | const Constant *C = Stack.pop_back_val(); |
| 2583 | |
| 2584 | // Check this constant expression. |
| 2585 | if (const auto *CE = dyn_cast<ConstantExpr>(Val: C)) |
| 2586 | visitConstantExpr(CE); |
| 2587 | |
| 2588 | if (const auto *CPA = dyn_cast<ConstantPtrAuth>(Val: C)) |
| 2589 | visitConstantPtrAuth(CPA); |
| 2590 | |
| 2591 | if (const auto *GV = dyn_cast<GlobalValue>(Val: C)) { |
| 2592 | // Global Values get visited separately, but we do need to make sure |
| 2593 | // that the global value is in the correct module |
| 2594 | Check(GV->getParent() == &M, "Referencing global in another module!" , |
| 2595 | EntryC, &M, GV, GV->getParent()); |
| 2596 | continue; |
| 2597 | } |
| 2598 | |
| 2599 | // Visit all sub-expressions. |
| 2600 | for (const Use &U : C->operands()) { |
| 2601 | const auto *OpC = dyn_cast<Constant>(Val: U); |
| 2602 | if (!OpC) |
| 2603 | continue; |
| 2604 | if (!ConstantExprVisited.insert(Ptr: OpC).second) |
| 2605 | continue; |
| 2606 | Stack.push_back(Elt: OpC); |
| 2607 | } |
| 2608 | } |
| 2609 | } |
| 2610 | |
| 2611 | void Verifier::visitConstantExpr(const ConstantExpr *CE) { |
| 2612 | if (CE->getOpcode() == Instruction::BitCast) |
| 2613 | Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0), |
| 2614 | CE->getType()), |
| 2615 | "Invalid bitcast" , CE); |
| 2616 | } |
| 2617 | |
| 2618 | void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) { |
| 2619 | Check(CPA->getPointer()->getType()->isPointerTy(), |
| 2620 | "signed ptrauth constant base pointer must have pointer type" ); |
| 2621 | |
| 2622 | Check(CPA->getType() == CPA->getPointer()->getType(), |
| 2623 | "signed ptrauth constant must have same type as its base pointer" ); |
| 2624 | |
| 2625 | Check(CPA->getKey()->getBitWidth() == 32, |
| 2626 | "signed ptrauth constant key must be i32 constant integer" ); |
| 2627 | |
| 2628 | Check(CPA->getAddrDiscriminator()->getType()->isPointerTy(), |
| 2629 | "signed ptrauth constant address discriminator must be a pointer" ); |
| 2630 | |
| 2631 | Check(CPA->getDiscriminator()->getBitWidth() == 64, |
| 2632 | "signed ptrauth constant discriminator must be i64 constant integer" ); |
| 2633 | } |
| 2634 | |
| 2635 | bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) { |
| 2636 | // There shouldn't be more attribute sets than there are parameters plus the |
| 2637 | // function and return value. |
| 2638 | return Attrs.getNumAttrSets() <= Params + 2; |
| 2639 | } |
| 2640 | |
| 2641 | void Verifier::verifyInlineAsmCall(const CallBase &Call) { |
| 2642 | const InlineAsm *IA = cast<InlineAsm>(Val: Call.getCalledOperand()); |
| 2643 | unsigned ArgNo = 0; |
| 2644 | unsigned LabelNo = 0; |
| 2645 | for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { |
| 2646 | if (CI.Type == InlineAsm::isLabel) { |
| 2647 | ++LabelNo; |
| 2648 | continue; |
| 2649 | } |
| 2650 | |
| 2651 | // Only deal with constraints that correspond to call arguments. |
| 2652 | if (!CI.hasArg()) |
| 2653 | continue; |
| 2654 | |
| 2655 | if (CI.isIndirect) { |
| 2656 | const Value *Arg = Call.getArgOperand(i: ArgNo); |
| 2657 | Check(Arg->getType()->isPointerTy(), |
| 2658 | "Operand for indirect constraint must have pointer type" , &Call); |
| 2659 | |
| 2660 | Check(Call.getParamElementType(ArgNo), |
| 2661 | "Operand for indirect constraint must have elementtype attribute" , |
| 2662 | &Call); |
| 2663 | } else { |
| 2664 | Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType), |
| 2665 | "Elementtype attribute can only be applied for indirect " |
| 2666 | "constraints" , |
| 2667 | &Call); |
| 2668 | } |
| 2669 | |
| 2670 | ArgNo++; |
| 2671 | } |
| 2672 | |
| 2673 | if (auto *CallBr = dyn_cast<CallBrInst>(Val: &Call)) { |
| 2674 | Check(LabelNo == CallBr->getNumIndirectDests(), |
| 2675 | "Number of label constraints does not match number of callbr dests" , |
| 2676 | &Call); |
| 2677 | } else { |
| 2678 | Check(LabelNo == 0, "Label constraints can only be used with callbr" , |
| 2679 | &Call); |
| 2680 | } |
| 2681 | } |
| 2682 | |
| 2683 | /// Verify that statepoint intrinsic is well formed. |
| 2684 | void Verifier::verifyStatepoint(const CallBase &Call) { |
| 2685 | assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint); |
| 2686 | |
| 2687 | Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() && |
| 2688 | !Call.onlyAccessesArgMemory(), |
| 2689 | "gc.statepoint must read and write all memory to preserve " |
| 2690 | "reordering restrictions required by safepoint semantics" , |
| 2691 | Call); |
| 2692 | |
| 2693 | const int64_t NumPatchBytes = |
| 2694 | cast<ConstantInt>(Val: Call.getArgOperand(i: 1))->getSExtValue(); |
| 2695 | assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!" ); |
| 2696 | Check(NumPatchBytes >= 0, |
| 2697 | "gc.statepoint number of patchable bytes must be " |
| 2698 | "positive" , |
| 2699 | Call); |
| 2700 | |
| 2701 | Type *TargetElemType = Call.getParamElementType(ArgNo: 2); |
| 2702 | Check(TargetElemType, |
| 2703 | "gc.statepoint callee argument must have elementtype attribute" , Call); |
| 2704 | FunctionType *TargetFuncType = dyn_cast<FunctionType>(Val: TargetElemType); |
| 2705 | Check(TargetFuncType, |
| 2706 | "gc.statepoint callee elementtype must be function type" , Call); |
| 2707 | |
| 2708 | const int NumCallArgs = cast<ConstantInt>(Val: Call.getArgOperand(i: 3))->getZExtValue(); |
| 2709 | Check(NumCallArgs >= 0, |
| 2710 | "gc.statepoint number of arguments to underlying call " |
| 2711 | "must be positive" , |
| 2712 | Call); |
| 2713 | const int NumParams = (int)TargetFuncType->getNumParams(); |
| 2714 | if (TargetFuncType->isVarArg()) { |
| 2715 | Check(NumCallArgs >= NumParams, |
| 2716 | "gc.statepoint mismatch in number of vararg call args" , Call); |
| 2717 | |
| 2718 | // TODO: Remove this limitation |
| 2719 | Check(TargetFuncType->getReturnType()->isVoidTy(), |
| 2720 | "gc.statepoint doesn't support wrapping non-void " |
| 2721 | "vararg functions yet" , |
| 2722 | Call); |
| 2723 | } else |
| 2724 | Check(NumCallArgs == NumParams, |
| 2725 | "gc.statepoint mismatch in number of call args" , Call); |
| 2726 | |
| 2727 | const uint64_t Flags |
| 2728 | = cast<ConstantInt>(Val: Call.getArgOperand(i: 4))->getZExtValue(); |
| 2729 | Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0, |
| 2730 | "unknown flag used in gc.statepoint flags argument" , Call); |
| 2731 | |
| 2732 | // Verify that the types of the call parameter arguments match |
| 2733 | // the type of the wrapped callee. |
| 2734 | AttributeList Attrs = Call.getAttributes(); |
| 2735 | for (int i = 0; i < NumParams; i++) { |
| 2736 | Type *ParamType = TargetFuncType->getParamType(i); |
| 2737 | Type *ArgType = Call.getArgOperand(i: 5 + i)->getType(); |
| 2738 | Check(ArgType == ParamType, |
| 2739 | "gc.statepoint call argument does not match wrapped " |
| 2740 | "function type" , |
| 2741 | Call); |
| 2742 | |
| 2743 | if (TargetFuncType->isVarArg()) { |
| 2744 | AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo: 5 + i); |
| 2745 | Check(!ArgAttrs.hasAttribute(Attribute::StructRet), |
| 2746 | "Attribute 'sret' cannot be used for vararg call arguments!" , Call); |
| 2747 | } |
| 2748 | } |
| 2749 | |
| 2750 | const int EndCallArgsInx = 4 + NumCallArgs; |
| 2751 | |
| 2752 | const Value *NumTransitionArgsV = Call.getArgOperand(i: EndCallArgsInx + 1); |
| 2753 | Check(isa<ConstantInt>(NumTransitionArgsV), |
| 2754 | "gc.statepoint number of transition arguments " |
| 2755 | "must be constant integer" , |
| 2756 | Call); |
| 2757 | const int NumTransitionArgs = |
| 2758 | cast<ConstantInt>(Val: NumTransitionArgsV)->getZExtValue(); |
| 2759 | Check(NumTransitionArgs == 0, |
| 2760 | "gc.statepoint w/inline transition bundle is deprecated" , Call); |
| 2761 | const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs; |
| 2762 | |
| 2763 | const Value *NumDeoptArgsV = Call.getArgOperand(i: EndTransitionArgsInx + 1); |
| 2764 | Check(isa<ConstantInt>(NumDeoptArgsV), |
| 2765 | "gc.statepoint number of deoptimization arguments " |
| 2766 | "must be constant integer" , |
| 2767 | Call); |
| 2768 | const int NumDeoptArgs = cast<ConstantInt>(Val: NumDeoptArgsV)->getZExtValue(); |
| 2769 | Check(NumDeoptArgs == 0, |
| 2770 | "gc.statepoint w/inline deopt operands is deprecated" , Call); |
| 2771 | |
| 2772 | const int ExpectedNumArgs = 7 + NumCallArgs; |
| 2773 | Check(ExpectedNumArgs == (int)Call.arg_size(), |
| 2774 | "gc.statepoint too many arguments" , Call); |
| 2775 | |
| 2776 | // Check that the only uses of this gc.statepoint are gc.result or |
| 2777 | // gc.relocate calls which are tied to this statepoint and thus part |
| 2778 | // of the same statepoint sequence |
| 2779 | for (const User *U : Call.users()) { |
| 2780 | const CallInst *UserCall = dyn_cast<const CallInst>(Val: U); |
| 2781 | Check(UserCall, "illegal use of statepoint token" , Call, U); |
| 2782 | if (!UserCall) |
| 2783 | continue; |
| 2784 | Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall), |
| 2785 | "gc.result or gc.relocate are the only value uses " |
| 2786 | "of a gc.statepoint" , |
| 2787 | Call, U); |
| 2788 | if (isa<GCResultInst>(Val: UserCall)) { |
| 2789 | Check(UserCall->getArgOperand(0) == &Call, |
| 2790 | "gc.result connected to wrong gc.statepoint" , Call, UserCall); |
| 2791 | } else if (isa<GCRelocateInst>(Val: Call)) { |
| 2792 | Check(UserCall->getArgOperand(0) == &Call, |
| 2793 | "gc.relocate connected to wrong gc.statepoint" , Call, UserCall); |
| 2794 | } |
| 2795 | } |
| 2796 | |
| 2797 | // Note: It is legal for a single derived pointer to be listed multiple |
| 2798 | // times. It's non-optimal, but it is legal. It can also happen after |
| 2799 | // insertion if we strip a bitcast away. |
| 2800 | // Note: It is really tempting to check that each base is relocated and |
| 2801 | // that a derived pointer is never reused as a base pointer. This turns |
| 2802 | // out to be problematic since optimizations run after safepoint insertion |
| 2803 | // can recognize equality properties that the insertion logic doesn't know |
| 2804 | // about. See example statepoint.ll in the verifier subdirectory |
| 2805 | } |
| 2806 | |
| 2807 | void Verifier::verifyFrameRecoverIndices() { |
| 2808 | for (auto &Counts : FrameEscapeInfo) { |
| 2809 | Function *F = Counts.first; |
| 2810 | unsigned EscapedObjectCount = Counts.second.first; |
| 2811 | unsigned MaxRecoveredIndex = Counts.second.second; |
| 2812 | Check(MaxRecoveredIndex <= EscapedObjectCount, |
| 2813 | "all indices passed to llvm.localrecover must be less than the " |
| 2814 | "number of arguments passed to llvm.localescape in the parent " |
| 2815 | "function" , |
| 2816 | F); |
| 2817 | } |
| 2818 | } |
| 2819 | |
| 2820 | static Instruction *getSuccPad(Instruction *Terminator) { |
| 2821 | BasicBlock *UnwindDest; |
| 2822 | if (auto *II = dyn_cast<InvokeInst>(Val: Terminator)) |
| 2823 | UnwindDest = II->getUnwindDest(); |
| 2824 | else if (auto *CSI = dyn_cast<CatchSwitchInst>(Val: Terminator)) |
| 2825 | UnwindDest = CSI->getUnwindDest(); |
| 2826 | else |
| 2827 | UnwindDest = cast<CleanupReturnInst>(Val: Terminator)->getUnwindDest(); |
| 2828 | return &*UnwindDest->getFirstNonPHIIt(); |
| 2829 | } |
| 2830 | |
// Walk the unwind-successor chains recorded in SiblingFuncletInfo and
// diagnose any cycle: sibling EH pads may not handle each other's
// exceptions. Each map entry has exactly one successor (via its recorded
// terminator), so every chain is a simple path that either terminates or
// closes into a cycle. Visited memoizes pads whose chains were already
// fully checked; Active holds the pads on the chain currently being walked.
void Verifier::verifySiblingFuncletUnwinds() {
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(Ptr: PredPad))
      continue;
    Active.insert(Ptr: PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(Ptr: SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        // Re-walk the cycle once to collect its pads (and their distinct
        // terminators) so the diagnostic can list every participant.
        do {
          CycleNodes.push_back(Elt: CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(Elt: CycleTerminator);
          CyclePad = getSuccPad(Terminator: CycleTerminator);
        } while (CyclePad != SuccPad);
        // Check(false, ...) reports the failure and returns from this
        // function (the Check macro early-returns on failure).
        Check(false, "EH pads can't handle each other's exceptions" ,
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(Ptr: SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(Key: PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(Ptr: PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
| 2872 | |
// visitFunction - Verify that a function is ok.
//
// Checks structural properties of the function itself (signature, linkage,
// attributes, calling-convention restrictions, argument types, metadata
// attachments, intrinsic signatures) and the consistency of its !dbg
// debug-location chains. Note that Check/CheckDI report and return early
// on failure, so the order of checks determines which problem is reported.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(GV: F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!" , &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage" , &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!" , &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!" , &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!" , &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported" , &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!" , &F);

  // Intrinsics are exempt from several checks below (metadata/token/x86_amx
  // parameter and return types).
  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, V: &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite." , &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite." , &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef" ),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite." );

  // Naked functions have no usable frame, so their arguments may not be
  // referenced at all.
  if (Attrs.hasFnAttr(Kind: Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function" , &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval" , &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type" , &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret" , &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval" , &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated" , &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca" , &F);

        if (Attrs.hasParamAttr(ArgNo: i, Kind: Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref" , &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!" ,
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!" , &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!" , &Arg);
    if (!IsIntrinsic) {
      // Only intrinsics may traffic in metadata, token, or x86_amx values.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic" , &Arg, &F);
      Check(!Arg.getType()->isTokenTy(),
            "Function takes token but isn't an intrinsic" , &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic" , &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(ArgNo: i, Kind: Attribute::SwiftError)) {
      verifySwiftErrorValue(SwiftErrorVal: &Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenTy(),
          "Function returns a token but isn't an intrinsic" , &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic" , &F);
  }

  // Get the function metadata attachments.
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync" );
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(Val: F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!" , &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  // Metadata rules differ for materializable functions, declarations, and
  // definitions, so branch on which of the three this is.
  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata" , &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment" ,
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment" , &F);

      // Verify the metadata itself.
      visitMDNode(MD: *I.second, AllowLocs: AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine" , &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!" , &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!" , Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!" , Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment" , &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram" , &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment" ,
                &F);

        // A DISubprogram may describe at most one function; record the
        // pairing so a second attachment of the same SP is diagnosed.
        auto *SP = cast<DISubprogram>(Val: I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function" , SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment" , &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment" , &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(MD: *I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, IgnoreCallbackUses: false, IgnoreAssumeLikeCalls: true, IngoreLLVMUsed: false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!" , U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters" , F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer" , F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type" , F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters" , F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer" , F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer" , F);
    break;
  }
  }

  // Everything past this point verifies debug info; skip it for functions
  // without a subprogram attachment.
  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Val: Node);
    if (!DL)
      return;
    if (!Seen.insert(Ptr: DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope" , N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope" , DL);

    if (!Seen.insert(Ptr: Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if (SP && ((Scope != SP) && !Seen.insert(Ptr: SP).second))
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function" , N, &F,
            &I, DL, Scope, SP);

    if (DL->getAtomGroup())
      CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
              "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
              "Instructions enabled" ,
              DL, DL->getScope()->getSubprogram());
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(KindID: LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(Val: MD->getOperand(I: i)));
      if (BrokenDebugInfo)
        return;
    }
}
| 3206 | |
| 3207 | // verifyBasicBlock - Verify that a basic block is well formed... |
| 3208 | // |
| 3209 | void Verifier::visitBasicBlock(BasicBlock &BB) { |
| 3210 | InstsInThisBlock.clear(); |
| 3211 | ConvergenceVerifyHelper.visit(BB); |
| 3212 | |
| 3213 | // Ensure that basic blocks have terminators! |
| 3214 | Check(BB.getTerminator(), "Basic Block does not have terminator!" , &BB); |
| 3215 | |
| 3216 | // Check constraints that this basic block imposes on all of the PHI nodes in |
| 3217 | // it. |
| 3218 | if (isa<PHINode>(Val: BB.front())) { |
| 3219 | SmallVector<BasicBlock *, 8> Preds(predecessors(BB: &BB)); |
| 3220 | SmallVector<std::pair<BasicBlock*, Value*>, 8> Values; |
| 3221 | llvm::sort(C&: Preds); |
| 3222 | for (const PHINode &PN : BB.phis()) { |
| 3223 | Check(PN.getNumIncomingValues() == Preds.size(), |
| 3224 | "PHINode should have one entry for each predecessor of its " |
| 3225 | "parent basic block!" , |
| 3226 | &PN); |
| 3227 | |
| 3228 | // Get and sort all incoming values in the PHI node... |
| 3229 | Values.clear(); |
| 3230 | Values.reserve(N: PN.getNumIncomingValues()); |
| 3231 | for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) |
| 3232 | Values.push_back( |
| 3233 | Elt: std::make_pair(x: PN.getIncomingBlock(i), y: PN.getIncomingValue(i))); |
| 3234 | llvm::sort(C&: Values); |
| 3235 | |
| 3236 | for (unsigned i = 0, e = Values.size(); i != e; ++i) { |
| 3237 | // Check to make sure that if there is more than one entry for a |
| 3238 | // particular basic block in this PHI node, that the incoming values are |
| 3239 | // all identical. |
| 3240 | // |
| 3241 | Check(i == 0 || Values[i].first != Values[i - 1].first || |
| 3242 | Values[i].second == Values[i - 1].second, |
| 3243 | "PHI node has multiple entries for the same basic block with " |
| 3244 | "different incoming values!" , |
| 3245 | &PN, Values[i].first, Values[i].second, Values[i - 1].second); |
| 3246 | |
| 3247 | // Check to make sure that the predecessors and PHI node entries are |
| 3248 | // matched up. |
| 3249 | Check(Values[i].first == Preds[i], |
| 3250 | "PHI node entries do not match predecessors!" , &PN, |
| 3251 | Values[i].first, Preds[i]); |
| 3252 | } |
| 3253 | } |
| 3254 | } |
| 3255 | |
| 3256 | // Check that all instructions have their parent pointers set up correctly. |
| 3257 | for (auto &I : BB) |
| 3258 | { |
| 3259 | Check(I.getParent() == &BB, "Instruction has bogus parent pointer!" ); |
| 3260 | } |
| 3261 | |
| 3262 | // Confirm that no issues arise from the debug program. |
| 3263 | CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!" , |
| 3264 | &BB); |
| 3265 | } |
| 3266 | |
| 3267 | void Verifier::visitTerminator(Instruction &I) { |
| 3268 | // Ensure that terminators only exist at the end of the basic block. |
| 3269 | Check(&I == I.getParent()->getTerminator(), |
| 3270 | "Terminator found in the middle of a basic block!" , I.getParent()); |
| 3271 | visitInstruction(I); |
| 3272 | } |
| 3273 | |
| 3274 | void Verifier::visitBranchInst(BranchInst &BI) { |
| 3275 | if (BI.isConditional()) { |
| 3276 | Check(BI.getCondition()->getType()->isIntegerTy(1), |
| 3277 | "Branch condition is not 'i1' type!" , &BI, BI.getCondition()); |
| 3278 | } |
| 3279 | visitTerminator(I&: BI); |
| 3280 | } |
| 3281 | |
| 3282 | void Verifier::visitReturnInst(ReturnInst &RI) { |
| 3283 | Function *F = RI.getParent()->getParent(); |
| 3284 | unsigned N = RI.getNumOperands(); |
| 3285 | if (F->getReturnType()->isVoidTy()) |
| 3286 | Check(N == 0, |
| 3287 | "Found return instr that returns non-void in Function of void " |
| 3288 | "return type!" , |
| 3289 | &RI, F->getReturnType()); |
| 3290 | else |
| 3291 | Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(), |
| 3292 | "Function return type does not match operand " |
| 3293 | "type of return inst!" , |
| 3294 | &RI, F->getReturnType()); |
| 3295 | |
| 3296 | // Check to make sure that the return value has necessary properties for |
| 3297 | // terminators... |
| 3298 | visitTerminator(I&: RI); |
| 3299 | } |
| 3300 | |
| 3301 | void Verifier::visitSwitchInst(SwitchInst &SI) { |
| 3302 | Check(SI.getType()->isVoidTy(), "Switch must have void result type!" , &SI); |
| 3303 | // Check to make sure that all of the constants in the switch instruction |
| 3304 | // have the same type as the switched-on value. |
| 3305 | Type *SwitchTy = SI.getCondition()->getType(); |
| 3306 | SmallPtrSet<ConstantInt*, 32> Constants; |
| 3307 | for (auto &Case : SI.cases()) { |
| 3308 | Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)), |
| 3309 | "Case value is not a constant integer." , &SI); |
| 3310 | Check(Case.getCaseValue()->getType() == SwitchTy, |
| 3311 | "Switch constants must all be same type as switch value!" , &SI); |
| 3312 | Check(Constants.insert(Case.getCaseValue()).second, |
| 3313 | "Duplicate integer as switch case" , &SI, Case.getCaseValue()); |
| 3314 | } |
| 3315 | |
| 3316 | visitTerminator(I&: SI); |
| 3317 | } |
| 3318 | |
| 3319 | void Verifier::visitIndirectBrInst(IndirectBrInst &BI) { |
| 3320 | Check(BI.getAddress()->getType()->isPointerTy(), |
| 3321 | "Indirectbr operand must have pointer type!" , &BI); |
| 3322 | for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i) |
| 3323 | Check(BI.getDestination(i)->getType()->isLabelTy(), |
| 3324 | "Indirectbr destinations must all have pointer type!" , &BI); |
| 3325 | |
| 3326 | visitTerminator(I&: BI); |
| 3327 | } |
| 3328 | |
| 3329 | void Verifier::visitCallBrInst(CallBrInst &CBI) { |
| 3330 | Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!" , &CBI); |
| 3331 | const InlineAsm *IA = cast<InlineAsm>(Val: CBI.getCalledOperand()); |
| 3332 | Check(!IA->canThrow(), "Unwinding from Callbr is not allowed" ); |
| 3333 | |
| 3334 | verifyInlineAsmCall(Call: CBI); |
| 3335 | visitTerminator(I&: CBI); |
| 3336 | } |
| 3337 | |
| 3338 | void Verifier::visitSelectInst(SelectInst &SI) { |
| 3339 | Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1), |
| 3340 | SI.getOperand(2)), |
| 3341 | "Invalid operands for select instruction!" , &SI); |
| 3342 | |
| 3343 | Check(SI.getTrueValue()->getType() == SI.getType(), |
| 3344 | "Select values must have same type as select instruction!" , &SI); |
| 3345 | visitInstruction(I&: SI); |
| 3346 | } |
| 3347 | |
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
/// Any occurrence is unconditionally diagnosed; there is no valid form.
void Verifier::visitUserOp1(Instruction &I) {
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
| 3354 | |
| 3355 | void Verifier::visitTruncInst(TruncInst &I) { |
| 3356 | // Get the source and destination types |
| 3357 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3358 | Type *DestTy = I.getType(); |
| 3359 | |
| 3360 | // Get the size of the types in bits, we'll need this later |
| 3361 | unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); |
| 3362 | unsigned DestBitSize = DestTy->getScalarSizeInBits(); |
| 3363 | |
| 3364 | Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer" , &I); |
| 3365 | Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer" , &I); |
| 3366 | Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), |
| 3367 | "trunc source and destination must both be a vector or neither" , &I); |
| 3368 | Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc" , &I); |
| 3369 | |
| 3370 | visitInstruction(I); |
| 3371 | } |
| 3372 | |
| 3373 | void Verifier::visitZExtInst(ZExtInst &I) { |
| 3374 | // Get the source and destination types |
| 3375 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3376 | Type *DestTy = I.getType(); |
| 3377 | |
| 3378 | // Get the size of the types in bits, we'll need this later |
| 3379 | Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer" , &I); |
| 3380 | Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer" , &I); |
| 3381 | Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), |
| 3382 | "zext source and destination must both be a vector or neither" , &I); |
| 3383 | unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); |
| 3384 | unsigned DestBitSize = DestTy->getScalarSizeInBits(); |
| 3385 | |
| 3386 | Check(SrcBitSize < DestBitSize, "Type too small for ZExt" , &I); |
| 3387 | |
| 3388 | visitInstruction(I); |
| 3389 | } |
| 3390 | |
| 3391 | void Verifier::visitSExtInst(SExtInst &I) { |
| 3392 | // Get the source and destination types |
| 3393 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3394 | Type *DestTy = I.getType(); |
| 3395 | |
| 3396 | // Get the size of the types in bits, we'll need this later |
| 3397 | unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); |
| 3398 | unsigned DestBitSize = DestTy->getScalarSizeInBits(); |
| 3399 | |
| 3400 | Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer" , &I); |
| 3401 | Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer" , &I); |
| 3402 | Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), |
| 3403 | "sext source and destination must both be a vector or neither" , &I); |
| 3404 | Check(SrcBitSize < DestBitSize, "Type too small for SExt" , &I); |
| 3405 | |
| 3406 | visitInstruction(I); |
| 3407 | } |
| 3408 | |
| 3409 | void Verifier::visitFPTruncInst(FPTruncInst &I) { |
| 3410 | // Get the source and destination types |
| 3411 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3412 | Type *DestTy = I.getType(); |
| 3413 | // Get the size of the types in bits, we'll need this later |
| 3414 | unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); |
| 3415 | unsigned DestBitSize = DestTy->getScalarSizeInBits(); |
| 3416 | |
| 3417 | Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP" , &I); |
| 3418 | Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP" , &I); |
| 3419 | Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), |
| 3420 | "fptrunc source and destination must both be a vector or neither" , &I); |
| 3421 | Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc" , &I); |
| 3422 | |
| 3423 | visitInstruction(I); |
| 3424 | } |
| 3425 | |
| 3426 | void Verifier::visitFPExtInst(FPExtInst &I) { |
| 3427 | // Get the source and destination types |
| 3428 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3429 | Type *DestTy = I.getType(); |
| 3430 | |
| 3431 | // Get the size of the types in bits, we'll need this later |
| 3432 | unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); |
| 3433 | unsigned DestBitSize = DestTy->getScalarSizeInBits(); |
| 3434 | |
| 3435 | Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP" , &I); |
| 3436 | Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP" , &I); |
| 3437 | Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), |
| 3438 | "fpext source and destination must both be a vector or neither" , &I); |
| 3439 | Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt" , &I); |
| 3440 | |
| 3441 | visitInstruction(I); |
| 3442 | } |
| 3443 | |
| 3444 | void Verifier::visitUIToFPInst(UIToFPInst &I) { |
| 3445 | // Get the source and destination types |
| 3446 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3447 | Type *DestTy = I.getType(); |
| 3448 | |
| 3449 | bool SrcVec = SrcTy->isVectorTy(); |
| 3450 | bool DstVec = DestTy->isVectorTy(); |
| 3451 | |
| 3452 | Check(SrcVec == DstVec, |
| 3453 | "UIToFP source and dest must both be vector or scalar" , &I); |
| 3454 | Check(SrcTy->isIntOrIntVectorTy(), |
| 3455 | "UIToFP source must be integer or integer vector" , &I); |
| 3456 | Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector" , |
| 3457 | &I); |
| 3458 | |
| 3459 | if (SrcVec && DstVec) |
| 3460 | Check(cast<VectorType>(SrcTy)->getElementCount() == |
| 3461 | cast<VectorType>(DestTy)->getElementCount(), |
| 3462 | "UIToFP source and dest vector length mismatch" , &I); |
| 3463 | |
| 3464 | visitInstruction(I); |
| 3465 | } |
| 3466 | |
| 3467 | void Verifier::visitSIToFPInst(SIToFPInst &I) { |
| 3468 | // Get the source and destination types |
| 3469 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3470 | Type *DestTy = I.getType(); |
| 3471 | |
| 3472 | bool SrcVec = SrcTy->isVectorTy(); |
| 3473 | bool DstVec = DestTy->isVectorTy(); |
| 3474 | |
| 3475 | Check(SrcVec == DstVec, |
| 3476 | "SIToFP source and dest must both be vector or scalar" , &I); |
| 3477 | Check(SrcTy->isIntOrIntVectorTy(), |
| 3478 | "SIToFP source must be integer or integer vector" , &I); |
| 3479 | Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector" , |
| 3480 | &I); |
| 3481 | |
| 3482 | if (SrcVec && DstVec) |
| 3483 | Check(cast<VectorType>(SrcTy)->getElementCount() == |
| 3484 | cast<VectorType>(DestTy)->getElementCount(), |
| 3485 | "SIToFP source and dest vector length mismatch" , &I); |
| 3486 | |
| 3487 | visitInstruction(I); |
| 3488 | } |
| 3489 | |
| 3490 | void Verifier::visitFPToUIInst(FPToUIInst &I) { |
| 3491 | // Get the source and destination types |
| 3492 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3493 | Type *DestTy = I.getType(); |
| 3494 | |
| 3495 | bool SrcVec = SrcTy->isVectorTy(); |
| 3496 | bool DstVec = DestTy->isVectorTy(); |
| 3497 | |
| 3498 | Check(SrcVec == DstVec, |
| 3499 | "FPToUI source and dest must both be vector or scalar" , &I); |
| 3500 | Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector" , &I); |
| 3501 | Check(DestTy->isIntOrIntVectorTy(), |
| 3502 | "FPToUI result must be integer or integer vector" , &I); |
| 3503 | |
| 3504 | if (SrcVec && DstVec) |
| 3505 | Check(cast<VectorType>(SrcTy)->getElementCount() == |
| 3506 | cast<VectorType>(DestTy)->getElementCount(), |
| 3507 | "FPToUI source and dest vector length mismatch" , &I); |
| 3508 | |
| 3509 | visitInstruction(I); |
| 3510 | } |
| 3511 | |
| 3512 | void Verifier::visitFPToSIInst(FPToSIInst &I) { |
| 3513 | // Get the source and destination types |
| 3514 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3515 | Type *DestTy = I.getType(); |
| 3516 | |
| 3517 | bool SrcVec = SrcTy->isVectorTy(); |
| 3518 | bool DstVec = DestTy->isVectorTy(); |
| 3519 | |
| 3520 | Check(SrcVec == DstVec, |
| 3521 | "FPToSI source and dest must both be vector or scalar" , &I); |
| 3522 | Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector" , &I); |
| 3523 | Check(DestTy->isIntOrIntVectorTy(), |
| 3524 | "FPToSI result must be integer or integer vector" , &I); |
| 3525 | |
| 3526 | if (SrcVec && DstVec) |
| 3527 | Check(cast<VectorType>(SrcTy)->getElementCount() == |
| 3528 | cast<VectorType>(DestTy)->getElementCount(), |
| 3529 | "FPToSI source and dest vector length mismatch" , &I); |
| 3530 | |
| 3531 | visitInstruction(I); |
| 3532 | } |
| 3533 | |
| 3534 | void Verifier::visitPtrToIntInst(PtrToIntInst &I) { |
| 3535 | // Get the source and destination types |
| 3536 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3537 | Type *DestTy = I.getType(); |
| 3538 | |
| 3539 | Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer" , &I); |
| 3540 | |
| 3541 | Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral" , &I); |
| 3542 | Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch" , |
| 3543 | &I); |
| 3544 | |
| 3545 | if (SrcTy->isVectorTy()) { |
| 3546 | auto *VSrc = cast<VectorType>(Val: SrcTy); |
| 3547 | auto *VDest = cast<VectorType>(Val: DestTy); |
| 3548 | Check(VSrc->getElementCount() == VDest->getElementCount(), |
| 3549 | "PtrToInt Vector width mismatch" , &I); |
| 3550 | } |
| 3551 | |
| 3552 | visitInstruction(I); |
| 3553 | } |
| 3554 | |
| 3555 | void Verifier::visitIntToPtrInst(IntToPtrInst &I) { |
| 3556 | // Get the source and destination types |
| 3557 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3558 | Type *DestTy = I.getType(); |
| 3559 | |
| 3560 | Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral" , &I); |
| 3561 | Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer" , &I); |
| 3562 | |
| 3563 | Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch" , |
| 3564 | &I); |
| 3565 | if (SrcTy->isVectorTy()) { |
| 3566 | auto *VSrc = cast<VectorType>(Val: SrcTy); |
| 3567 | auto *VDest = cast<VectorType>(Val: DestTy); |
| 3568 | Check(VSrc->getElementCount() == VDest->getElementCount(), |
| 3569 | "IntToPtr Vector width mismatch" , &I); |
| 3570 | } |
| 3571 | visitInstruction(I); |
| 3572 | } |
| 3573 | |
| 3574 | void Verifier::visitBitCastInst(BitCastInst &I) { |
| 3575 | Check( |
| 3576 | CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()), |
| 3577 | "Invalid bitcast" , &I); |
| 3578 | visitInstruction(I); |
| 3579 | } |
| 3580 | |
| 3581 | void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) { |
| 3582 | Type *SrcTy = I.getOperand(i_nocapture: 0)->getType(); |
| 3583 | Type *DestTy = I.getType(); |
| 3584 | |
| 3585 | Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer" , |
| 3586 | &I); |
| 3587 | Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer" , |
| 3588 | &I); |
| 3589 | Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(), |
| 3590 | "AddrSpaceCast must be between different address spaces" , &I); |
| 3591 | if (auto *SrcVTy = dyn_cast<VectorType>(Val: SrcTy)) |
| 3592 | Check(SrcVTy->getElementCount() == |
| 3593 | cast<VectorType>(DestTy)->getElementCount(), |
| 3594 | "AddrSpaceCast vector pointer number of elements mismatch" , &I); |
| 3595 | visitInstruction(I); |
| 3596 | } |
| 3597 | |
| 3598 | /// visitPHINode - Ensure that a PHI node is well formed. |
| 3599 | /// |
| 3600 | void Verifier::visitPHINode(PHINode &PN) { |
| 3601 | // Ensure that the PHI nodes are all grouped together at the top of the block. |
| 3602 | // This can be tested by checking whether the instruction before this is |
| 3603 | // either nonexistent (because this is begin()) or is a PHI node. If not, |
| 3604 | // then there is some other instruction before a PHI. |
| 3605 | Check(&PN == &PN.getParent()->front() || |
| 3606 | isa<PHINode>(--BasicBlock::iterator(&PN)), |
| 3607 | "PHI nodes not grouped at top of basic block!" , &PN, PN.getParent()); |
| 3608 | |
| 3609 | // Check that a PHI doesn't yield a Token. |
| 3610 | Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!" ); |
| 3611 | |
| 3612 | // Check that all of the values of the PHI node have the same type as the |
| 3613 | // result. |
| 3614 | for (Value *IncValue : PN.incoming_values()) { |
| 3615 | Check(PN.getType() == IncValue->getType(), |
| 3616 | "PHI node operands are not the same type as the result!" , &PN); |
| 3617 | } |
| 3618 | |
| 3619 | // All other PHI node constraints are checked in the visitBasicBlock method. |
| 3620 | |
| 3621 | visitInstruction(I&: PN); |
| 3622 | } |
| 3623 | |
| 3624 | void Verifier::visitCallBase(CallBase &Call) { |
| 3625 | Check(Call.getCalledOperand()->getType()->isPointerTy(), |
| 3626 | "Called function must be a pointer!" , Call); |
| 3627 | FunctionType *FTy = Call.getFunctionType(); |
| 3628 | |
| 3629 | // Verify that the correct number of arguments are being passed |
| 3630 | if (FTy->isVarArg()) |
| 3631 | Check(Call.arg_size() >= FTy->getNumParams(), |
| 3632 | "Called function requires more parameters than were provided!" , Call); |
| 3633 | else |
| 3634 | Check(Call.arg_size() == FTy->getNumParams(), |
| 3635 | "Incorrect number of arguments passed to called function!" , Call); |
| 3636 | |
| 3637 | // Verify that all arguments to the call match the function type. |
| 3638 | for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) |
| 3639 | Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i), |
| 3640 | "Call parameter type does not match function signature!" , |
| 3641 | Call.getArgOperand(i), FTy->getParamType(i), Call); |
| 3642 | |
| 3643 | AttributeList Attrs = Call.getAttributes(); |
| 3644 | |
| 3645 | Check(verifyAttributeCount(Attrs, Call.arg_size()), |
| 3646 | "Attribute after last parameter!" , Call); |
| 3647 | |
| 3648 | Function *Callee = |
| 3649 | dyn_cast<Function>(Val: Call.getCalledOperand()->stripPointerCasts()); |
| 3650 | bool IsIntrinsic = Callee && Callee->isIntrinsic(); |
| 3651 | if (IsIntrinsic) |
| 3652 | Check(Callee->getValueType() == FTy, |
| 3653 | "Intrinsic called with incompatible signature" , Call); |
| 3654 | |
| 3655 | // Verify if the calling convention of the callee is callable. |
| 3656 | Check(isCallableCC(Call.getCallingConv()), |
| 3657 | "calling convention does not permit calls" , Call); |
| 3658 | |
| 3659 | // Disallow passing/returning values with alignment higher than we can |
| 3660 | // represent. |
| 3661 | // FIXME: Consider making DataLayout cap the alignment, so this isn't |
| 3662 | // necessary. |
| 3663 | auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) { |
| 3664 | if (!Ty->isSized()) |
| 3665 | return; |
| 3666 | Align ABIAlign = DL.getABITypeAlign(Ty); |
| 3667 | Check(ABIAlign.value() <= Value::MaximumAlignment, |
| 3668 | "Incorrect alignment of " + Message + " to called function!" , Call); |
| 3669 | }; |
| 3670 | |
| 3671 | if (!IsIntrinsic) { |
| 3672 | VerifyTypeAlign(FTy->getReturnType(), "return type" ); |
| 3673 | for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { |
| 3674 | Type *Ty = FTy->getParamType(i); |
| 3675 | VerifyTypeAlign(Ty, "argument passed" ); |
| 3676 | } |
| 3677 | } |
| 3678 | |
| 3679 | if (Attrs.hasFnAttr(Kind: Attribute::Speculatable)) { |
| 3680 | // Don't allow speculatable on call sites, unless the underlying function |
| 3681 | // declaration is also speculatable. |
| 3682 | Check(Callee && Callee->isSpeculatable(), |
| 3683 | "speculatable attribute may not apply to call sites" , Call); |
| 3684 | } |
| 3685 | |
| 3686 | if (Attrs.hasFnAttr(Kind: Attribute::Preallocated)) { |
| 3687 | Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg, |
| 3688 | "preallocated as a call site attribute can only be on " |
| 3689 | "llvm.call.preallocated.arg" ); |
| 3690 | } |
| 3691 | |
| 3692 | // Verify call attributes. |
| 3693 | verifyFunctionAttrs(FT: FTy, Attrs, V: &Call, IsIntrinsic, IsInlineAsm: Call.isInlineAsm()); |
| 3694 | |
| 3695 | // Conservatively check the inalloca argument. |
| 3696 | // We have a bug if we can find that there is an underlying alloca without |
| 3697 | // inalloca. |
| 3698 | if (Call.hasInAllocaArgument()) { |
| 3699 | Value *InAllocaArg = Call.getArgOperand(i: FTy->getNumParams() - 1); |
| 3700 | if (auto AI = dyn_cast<AllocaInst>(Val: InAllocaArg->stripInBoundsOffsets())) |
| 3701 | Check(AI->isUsedWithInAlloca(), |
| 3702 | "inalloca argument for call has mismatched alloca" , AI, Call); |
| 3703 | } |
| 3704 | |
| 3705 | // For each argument of the callsite, if it has the swifterror argument, |
| 3706 | // make sure the underlying alloca/parameter it comes from has a swifterror as |
| 3707 | // well. |
| 3708 | for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { |
| 3709 | if (Call.paramHasAttr(ArgNo: i, Kind: Attribute::SwiftError)) { |
| 3710 | Value *SwiftErrorArg = Call.getArgOperand(i); |
| 3711 | if (auto AI = dyn_cast<AllocaInst>(Val: SwiftErrorArg->stripInBoundsOffsets())) { |
| 3712 | Check(AI->isSwiftError(), |
| 3713 | "swifterror argument for call has mismatched alloca" , AI, Call); |
| 3714 | continue; |
| 3715 | } |
| 3716 | auto ArgI = dyn_cast<Argument>(Val: SwiftErrorArg); |
| 3717 | Check(ArgI, "swifterror argument should come from an alloca or parameter" , |
| 3718 | SwiftErrorArg, Call); |
| 3719 | Check(ArgI->hasSwiftErrorAttr(), |
| 3720 | "swifterror argument for call has mismatched parameter" , ArgI, |
| 3721 | Call); |
| 3722 | } |
| 3723 | |
| 3724 | if (Attrs.hasParamAttr(ArgNo: i, Kind: Attribute::ImmArg)) { |
| 3725 | // Don't allow immarg on call sites, unless the underlying declaration |
| 3726 | // also has the matching immarg. |
| 3727 | Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg), |
| 3728 | "immarg may not apply only to call sites" , Call.getArgOperand(i), |
| 3729 | Call); |
| 3730 | } |
| 3731 | |
| 3732 | if (Call.paramHasAttr(ArgNo: i, Kind: Attribute::ImmArg)) { |
| 3733 | Value *ArgVal = Call.getArgOperand(i); |
| 3734 | Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal), |
| 3735 | "immarg operand has non-immediate parameter" , ArgVal, Call); |
| 3736 | |
| 3737 | // If the imm-arg is an integer and also has a range attached, |
| 3738 | // check if the given value is within the range. |
| 3739 | if (Call.paramHasAttr(ArgNo: i, Kind: Attribute::Range)) { |
| 3740 | if (auto *CI = dyn_cast<ConstantInt>(Val: ArgVal)) { |
| 3741 | const ConstantRange &CR = |
| 3742 | Call.getParamAttr(ArgNo: i, Kind: Attribute::Range).getValueAsConstantRange(); |
| 3743 | Check(CR.contains(CI->getValue()), |
| 3744 | "immarg value " + Twine(CI->getValue().getSExtValue()) + |
| 3745 | " out of range [" + Twine(CR.getLower().getSExtValue()) + |
| 3746 | ", " + Twine(CR.getUpper().getSExtValue()) + ")" , |
| 3747 | Call); |
| 3748 | } |
| 3749 | } |
| 3750 | } |
| 3751 | |
| 3752 | if (Call.paramHasAttr(ArgNo: i, Kind: Attribute::Preallocated)) { |
| 3753 | Value *ArgVal = Call.getArgOperand(i); |
| 3754 | bool hasOB = |
| 3755 | Call.countOperandBundlesOfType(ID: LLVMContext::OB_preallocated) != 0; |
| 3756 | bool isMustTail = Call.isMustTailCall(); |
| 3757 | Check(hasOB != isMustTail, |
| 3758 | "preallocated operand either requires a preallocated bundle or " |
| 3759 | "the call to be musttail (but not both)" , |
| 3760 | ArgVal, Call); |
| 3761 | } |
| 3762 | } |
| 3763 | |
| 3764 | if (FTy->isVarArg()) { |
| 3765 | // FIXME? is 'nest' even legal here? |
| 3766 | bool SawNest = false; |
| 3767 | bool SawReturned = false; |
| 3768 | |
| 3769 | for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) { |
| 3770 | if (Attrs.hasParamAttr(ArgNo: Idx, Kind: Attribute::Nest)) |
| 3771 | SawNest = true; |
| 3772 | if (Attrs.hasParamAttr(ArgNo: Idx, Kind: Attribute::Returned)) |
| 3773 | SawReturned = true; |
| 3774 | } |
| 3775 | |
| 3776 | // Check attributes on the varargs part. |
| 3777 | for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) { |
| 3778 | Type *Ty = Call.getArgOperand(i: Idx)->getType(); |
| 3779 | AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo: Idx); |
| 3780 | verifyParameterAttrs(Attrs: ArgAttrs, Ty, V: &Call); |
| 3781 | |
| 3782 | if (ArgAttrs.hasAttribute(Kind: Attribute::Nest)) { |
| 3783 | Check(!SawNest, "More than one parameter has attribute nest!" , Call); |
| 3784 | SawNest = true; |
| 3785 | } |
| 3786 | |
| 3787 | if (ArgAttrs.hasAttribute(Kind: Attribute::Returned)) { |
| 3788 | Check(!SawReturned, "More than one parameter has attribute returned!" , |
| 3789 | Call); |
| 3790 | Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()), |
| 3791 | "Incompatible argument and return types for 'returned' " |
| 3792 | "attribute" , |
| 3793 | Call); |
| 3794 | SawReturned = true; |
| 3795 | } |
| 3796 | |
| 3797 | // Statepoint intrinsic is vararg but the wrapped function may be not. |
| 3798 | // Allow sret here and check the wrapped function in verifyStatepoint. |
| 3799 | if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint) |
| 3800 | Check(!ArgAttrs.hasAttribute(Attribute::StructRet), |
| 3801 | "Attribute 'sret' cannot be used for vararg call arguments!" , |
| 3802 | Call); |
| 3803 | |
| 3804 | if (ArgAttrs.hasAttribute(Kind: Attribute::InAlloca)) |
| 3805 | Check(Idx == Call.arg_size() - 1, |
| 3806 | "inalloca isn't on the last argument!" , Call); |
| 3807 | } |
| 3808 | } |
| 3809 | |
| 3810 | // Verify that there's no metadata unless it's a direct call to an intrinsic. |
| 3811 | if (!IsIntrinsic) { |
| 3812 | for (Type *ParamTy : FTy->params()) { |
| 3813 | Check(!ParamTy->isMetadataTy(), |
| 3814 | "Function has metadata parameter but isn't an intrinsic" , Call); |
| 3815 | Check(!ParamTy->isTokenTy(), |
| 3816 | "Function has token parameter but isn't an intrinsic" , Call); |
| 3817 | } |
| 3818 | } |
| 3819 | |
| 3820 | // Verify that indirect calls don't return tokens. |
| 3821 | if (!Call.getCalledFunction()) { |
| 3822 | Check(!FTy->getReturnType()->isTokenTy(), |
| 3823 | "Return type cannot be token for indirect call!" ); |
| 3824 | Check(!FTy->getReturnType()->isX86_AMXTy(), |
| 3825 | "Return type cannot be x86_amx for indirect call!" ); |
| 3826 | } |
| 3827 | |
| 3828 | if (Intrinsic::ID ID = Call.getIntrinsicID()) |
| 3829 | visitIntrinsicCall(ID, Call); |
| 3830 | |
| 3831 | // Verify that a callsite has at most one "deopt", at most one "funclet", at |
| 3832 | // most one "gc-transition", at most one "cfguardtarget", at most one |
| 3833 | // "preallocated" operand bundle, and at most one "ptrauth" operand bundle. |
| 3834 | bool FoundDeoptBundle = false, FoundFuncletBundle = false, |
| 3835 | FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false, |
| 3836 | FoundPreallocatedBundle = false, FoundGCLiveBundle = false, |
| 3837 | FoundPtrauthBundle = false, FoundKCFIBundle = false, |
| 3838 | FoundAttachedCallBundle = false; |
| 3839 | for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) { |
| 3840 | OperandBundleUse BU = Call.getOperandBundleAt(Index: i); |
| 3841 | uint32_t Tag = BU.getTagID(); |
| 3842 | if (Tag == LLVMContext::OB_deopt) { |
| 3843 | Check(!FoundDeoptBundle, "Multiple deopt operand bundles" , Call); |
| 3844 | FoundDeoptBundle = true; |
| 3845 | } else if (Tag == LLVMContext::OB_gc_transition) { |
| 3846 | Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles" , |
| 3847 | Call); |
| 3848 | FoundGCTransitionBundle = true; |
| 3849 | } else if (Tag == LLVMContext::OB_funclet) { |
| 3850 | Check(!FoundFuncletBundle, "Multiple funclet operand bundles" , Call); |
| 3851 | FoundFuncletBundle = true; |
| 3852 | Check(BU.Inputs.size() == 1, |
| 3853 | "Expected exactly one funclet bundle operand" , Call); |
| 3854 | Check(isa<FuncletPadInst>(BU.Inputs.front()), |
| 3855 | "Funclet bundle operands should correspond to a FuncletPadInst" , |
| 3856 | Call); |
| 3857 | } else if (Tag == LLVMContext::OB_cfguardtarget) { |
| 3858 | Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles" , |
| 3859 | Call); |
| 3860 | FoundCFGuardTargetBundle = true; |
| 3861 | Check(BU.Inputs.size() == 1, |
| 3862 | "Expected exactly one cfguardtarget bundle operand" , Call); |
| 3863 | } else if (Tag == LLVMContext::OB_ptrauth) { |
| 3864 | Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles" , Call); |
| 3865 | FoundPtrauthBundle = true; |
| 3866 | Check(BU.Inputs.size() == 2, |
| 3867 | "Expected exactly two ptrauth bundle operands" , Call); |
| 3868 | Check(isa<ConstantInt>(BU.Inputs[0]) && |
| 3869 | BU.Inputs[0]->getType()->isIntegerTy(32), |
| 3870 | "Ptrauth bundle key operand must be an i32 constant" , Call); |
| 3871 | Check(BU.Inputs[1]->getType()->isIntegerTy(64), |
| 3872 | "Ptrauth bundle discriminator operand must be an i64" , Call); |
| 3873 | } else if (Tag == LLVMContext::OB_kcfi) { |
| 3874 | Check(!FoundKCFIBundle, "Multiple kcfi operand bundles" , Call); |
| 3875 | FoundKCFIBundle = true; |
| 3876 | Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand" , |
| 3877 | Call); |
| 3878 | Check(isa<ConstantInt>(BU.Inputs[0]) && |
| 3879 | BU.Inputs[0]->getType()->isIntegerTy(32), |
| 3880 | "Kcfi bundle operand must be an i32 constant" , Call); |
| 3881 | } else if (Tag == LLVMContext::OB_preallocated) { |
| 3882 | Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles" , |
| 3883 | Call); |
| 3884 | FoundPreallocatedBundle = true; |
| 3885 | Check(BU.Inputs.size() == 1, |
| 3886 | "Expected exactly one preallocated bundle operand" , Call); |
| 3887 | auto Input = dyn_cast<IntrinsicInst>(Val: BU.Inputs.front()); |
| 3888 | Check(Input && |
| 3889 | Input->getIntrinsicID() == Intrinsic::call_preallocated_setup, |
| 3890 | "\"preallocated\" argument must be a token from " |
| 3891 | "llvm.call.preallocated.setup" , |
| 3892 | Call); |
| 3893 | } else if (Tag == LLVMContext::OB_gc_live) { |
| 3894 | Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles" , Call); |
| 3895 | FoundGCLiveBundle = true; |
| 3896 | } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) { |
| 3897 | Check(!FoundAttachedCallBundle, |
| 3898 | "Multiple \"clang.arc.attachedcall\" operand bundles" , Call); |
| 3899 | FoundAttachedCallBundle = true; |
| 3900 | verifyAttachedCallBundle(Call, BU); |
| 3901 | } |
| 3902 | } |
| 3903 | |
| 3904 | // Verify that callee and callsite agree on whether to use pointer auth. |
| 3905 | Check(!(Call.getCalledFunction() && FoundPtrauthBundle), |
| 3906 | "Direct call cannot have a ptrauth bundle" , Call); |
| 3907 | |
| 3908 | // Verify that each inlinable callsite of a debug-info-bearing function in a |
| 3909 | // debug-info-bearing function has a debug location attached to it. Failure to |
| 3910 | // do so causes assertion failures when the inliner sets up inline scope info |
| 3911 | // (Interposable functions are not inlinable, neither are functions without |
| 3912 | // definitions.) |
| 3913 | if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() && |
| 3914 | !Call.getCalledFunction()->isInterposable() && |
| 3915 | !Call.getCalledFunction()->isDeclaration() && |
| 3916 | Call.getCalledFunction()->getSubprogram()) |
| 3917 | CheckDI(Call.getDebugLoc(), |
| 3918 | "inlinable function call in a function with " |
| 3919 | "debug info must have a !dbg location" , |
| 3920 | Call); |
| 3921 | |
| 3922 | if (Call.isInlineAsm()) |
| 3923 | verifyInlineAsmCall(Call); |
| 3924 | |
| 3925 | ConvergenceVerifyHelper.visit(I: Call); |
| 3926 | |
| 3927 | visitInstruction(I&: Call); |
| 3928 | } |
| 3929 | |
| 3930 | void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, |
| 3931 | StringRef Context) { |
| 3932 | Check(!Attrs.contains(Attribute::InAlloca), |
| 3933 | Twine("inalloca attribute not allowed in " ) + Context); |
| 3934 | Check(!Attrs.contains(Attribute::InReg), |
| 3935 | Twine("inreg attribute not allowed in " ) + Context); |
| 3936 | Check(!Attrs.contains(Attribute::SwiftError), |
| 3937 | Twine("swifterror attribute not allowed in " ) + Context); |
| 3938 | Check(!Attrs.contains(Attribute::Preallocated), |
| 3939 | Twine("preallocated attribute not allowed in " ) + Context); |
| 3940 | Check(!Attrs.contains(Attribute::ByRef), |
| 3941 | Twine("byref attribute not allowed in " ) + Context); |
| 3942 | } |
| 3943 | |
| 3944 | /// Two types are "congruent" if they are identical, or if they are both pointer |
| 3945 | /// types with different pointee types and the same address space. |
| 3946 | static bool isTypeCongruent(Type *L, Type *R) { |
| 3947 | if (L == R) |
| 3948 | return true; |
| 3949 | PointerType *PL = dyn_cast<PointerType>(Val: L); |
| 3950 | PointerType *PR = dyn_cast<PointerType>(Val: R); |
| 3951 | if (!PL || !PR) |
| 3952 | return false; |
| 3953 | return PL->getAddressSpace() == PR->getAddressSpace(); |
| 3954 | } |
| 3955 | |
| 3956 | static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) { |
| 3957 | static const Attribute::AttrKind ABIAttrs[] = { |
| 3958 | Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca, |
| 3959 | Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf, |
| 3960 | Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated, |
| 3961 | Attribute::ByRef}; |
| 3962 | AttrBuilder Copy(C); |
| 3963 | for (auto AK : ABIAttrs) { |
| 3964 | Attribute Attr = Attrs.getParamAttrs(ArgNo: I).getAttribute(Kind: AK); |
| 3965 | if (Attr.isValid()) |
| 3966 | Copy.addAttribute(A: Attr); |
| 3967 | } |
| 3968 | |
| 3969 | // `align` is ABI-affecting only in combination with `byval` or `byref`. |
| 3970 | if (Attrs.hasParamAttr(ArgNo: I, Kind: Attribute::Alignment) && |
| 3971 | (Attrs.hasParamAttr(ArgNo: I, Kind: Attribute::ByVal) || |
| 3972 | Attrs.hasParamAttr(ArgNo: I, Kind: Attribute::ByRef))) |
| 3973 | Copy.addAlignmentAttr(Align: Attrs.getParamAlignment(ArgNo: I)); |
| 3974 | return Copy; |
| 3975 | } |
| 3976 | |
| 3977 | void Verifier::verifyMustTailCall(CallInst &CI) { |
| 3978 | Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm" , &CI); |
| 3979 | |
| 3980 | Function *F = CI.getParent()->getParent(); |
| 3981 | FunctionType *CallerTy = F->getFunctionType(); |
| 3982 | FunctionType *CalleeTy = CI.getFunctionType(); |
| 3983 | Check(CallerTy->isVarArg() == CalleeTy->isVarArg(), |
| 3984 | "cannot guarantee tail call due to mismatched varargs" , &CI); |
| 3985 | Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()), |
| 3986 | "cannot guarantee tail call due to mismatched return types" , &CI); |
| 3987 | |
| 3988 | // - The calling conventions of the caller and callee must match. |
| 3989 | Check(F->getCallingConv() == CI.getCallingConv(), |
| 3990 | "cannot guarantee tail call due to mismatched calling conv" , &CI); |
| 3991 | |
| 3992 | // - The call must immediately precede a :ref:`ret <i_ret>` instruction, |
| 3993 | // or a pointer bitcast followed by a ret instruction. |
| 3994 | // - The ret instruction must return the (possibly bitcasted) value |
| 3995 | // produced by the call or void. |
| 3996 | Value *RetVal = &CI; |
| 3997 | Instruction *Next = CI.getNextNode(); |
| 3998 | |
| 3999 | // Handle the optional bitcast. |
| 4000 | if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Val: Next)) { |
| 4001 | Check(BI->getOperand(0) == RetVal, |
| 4002 | "bitcast following musttail call must use the call" , BI); |
| 4003 | RetVal = BI; |
| 4004 | Next = BI->getNextNode(); |
| 4005 | } |
| 4006 | |
| 4007 | // Check the return. |
| 4008 | ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Val: Next); |
| 4009 | Check(Ret, "musttail call must precede a ret with an optional bitcast" , &CI); |
| 4010 | Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal || |
| 4011 | isa<UndefValue>(Ret->getReturnValue()), |
| 4012 | "musttail call result must be returned" , Ret); |
| 4013 | |
| 4014 | AttributeList CallerAttrs = F->getAttributes(); |
| 4015 | AttributeList CalleeAttrs = CI.getAttributes(); |
| 4016 | if (CI.getCallingConv() == CallingConv::SwiftTail || |
| 4017 | CI.getCallingConv() == CallingConv::Tail) { |
| 4018 | StringRef CCName = |
| 4019 | CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc" ; |
| 4020 | |
| 4021 | // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes |
| 4022 | // are allowed in swifttailcc call |
| 4023 | for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) { |
| 4024 | AttrBuilder ABIAttrs = getParameterABIAttributes(C&: F->getContext(), I, Attrs: CallerAttrs); |
| 4025 | SmallString<32> Context{CCName, StringRef(" musttail caller" )}; |
| 4026 | verifyTailCCMustTailAttrs(Attrs: ABIAttrs, Context); |
| 4027 | } |
| 4028 | for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) { |
| 4029 | AttrBuilder ABIAttrs = getParameterABIAttributes(C&: F->getContext(), I, Attrs: CalleeAttrs); |
| 4030 | SmallString<32> Context{CCName, StringRef(" musttail callee" )}; |
| 4031 | verifyTailCCMustTailAttrs(Attrs: ABIAttrs, Context); |
| 4032 | } |
| 4033 | // - Varargs functions are not allowed |
| 4034 | Check(!CallerTy->isVarArg(), Twine("cannot guarantee " ) + CCName + |
| 4035 | " tail call for varargs function" ); |
| 4036 | return; |
| 4037 | } |
| 4038 | |
| 4039 | // - The caller and callee prototypes must match. Pointer types of |
| 4040 | // parameters or return types may differ in pointee type, but not |
| 4041 | // address space. |
| 4042 | if (!CI.getIntrinsicID()) { |
| 4043 | Check(CallerTy->getNumParams() == CalleeTy->getNumParams(), |
| 4044 | "cannot guarantee tail call due to mismatched parameter counts" , &CI); |
| 4045 | for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) { |
| 4046 | Check( |
| 4047 | isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)), |
| 4048 | "cannot guarantee tail call due to mismatched parameter types" , &CI); |
| 4049 | } |
| 4050 | } |
| 4051 | |
| 4052 | // - All ABI-impacting function attributes, such as sret, byval, inreg, |
| 4053 | // returned, preallocated, and inalloca, must match. |
| 4054 | for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) { |
| 4055 | AttrBuilder CallerABIAttrs = getParameterABIAttributes(C&: F->getContext(), I, Attrs: CallerAttrs); |
| 4056 | AttrBuilder CalleeABIAttrs = getParameterABIAttributes(C&: F->getContext(), I, Attrs: CalleeAttrs); |
| 4057 | Check(CallerABIAttrs == CalleeABIAttrs, |
| 4058 | "cannot guarantee tail call due to mismatched ABI impacting " |
| 4059 | "function attributes" , |
| 4060 | &CI, CI.getOperand(I)); |
| 4061 | } |
| 4062 | } |
| 4063 | |
| 4064 | void Verifier::visitCallInst(CallInst &CI) { |
| 4065 | visitCallBase(Call&: CI); |
| 4066 | |
| 4067 | if (CI.isMustTailCall()) |
| 4068 | verifyMustTailCall(CI); |
| 4069 | } |
| 4070 | |
| 4071 | void Verifier::visitInvokeInst(InvokeInst &II) { |
| 4072 | visitCallBase(Call&: II); |
| 4073 | |
| 4074 | // Verify that the first non-PHI instruction of the unwind destination is an |
| 4075 | // exception handling instruction. |
| 4076 | Check( |
| 4077 | II.getUnwindDest()->isEHPad(), |
| 4078 | "The unwind destination does not have an exception handling instruction!" , |
| 4079 | &II); |
| 4080 | |
| 4081 | visitTerminator(I&: II); |
| 4082 | } |
| 4083 | |
| 4084 | /// visitUnaryOperator - Check the argument to the unary operator. |
| 4085 | /// |
| 4086 | void Verifier::visitUnaryOperator(UnaryOperator &U) { |
| 4087 | Check(U.getType() == U.getOperand(0)->getType(), |
| 4088 | "Unary operators must have same type for" |
| 4089 | "operands and result!" , |
| 4090 | &U); |
| 4091 | |
| 4092 | switch (U.getOpcode()) { |
| 4093 | // Check that floating-point arithmetic operators are only used with |
| 4094 | // floating-point operands. |
| 4095 | case Instruction::FNeg: |
| 4096 | Check(U.getType()->isFPOrFPVectorTy(), |
| 4097 | "FNeg operator only works with float types!" , &U); |
| 4098 | break; |
| 4099 | default: |
| 4100 | llvm_unreachable("Unknown UnaryOperator opcode!" ); |
| 4101 | } |
| 4102 | |
| 4103 | visitInstruction(I&: U); |
| 4104 | } |
| 4105 | |
| 4106 | /// visitBinaryOperator - Check that both arguments to the binary operator are |
| 4107 | /// of the same type! |
| 4108 | /// |
| 4109 | void Verifier::visitBinaryOperator(BinaryOperator &B) { |
| 4110 | Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(), |
| 4111 | "Both operands to a binary operator are not of the same type!" , &B); |
| 4112 | |
| 4113 | switch (B.getOpcode()) { |
| 4114 | // Check that integer arithmetic operators are only used with |
| 4115 | // integral operands. |
| 4116 | case Instruction::Add: |
| 4117 | case Instruction::Sub: |
| 4118 | case Instruction::Mul: |
| 4119 | case Instruction::SDiv: |
| 4120 | case Instruction::UDiv: |
| 4121 | case Instruction::SRem: |
| 4122 | case Instruction::URem: |
| 4123 | Check(B.getType()->isIntOrIntVectorTy(), |
| 4124 | "Integer arithmetic operators only work with integral types!" , &B); |
| 4125 | Check(B.getType() == B.getOperand(0)->getType(), |
| 4126 | "Integer arithmetic operators must have same type " |
| 4127 | "for operands and result!" , |
| 4128 | &B); |
| 4129 | break; |
| 4130 | // Check that floating-point arithmetic operators are only used with |
| 4131 | // floating-point operands. |
| 4132 | case Instruction::FAdd: |
| 4133 | case Instruction::FSub: |
| 4134 | case Instruction::FMul: |
| 4135 | case Instruction::FDiv: |
| 4136 | case Instruction::FRem: |
| 4137 | Check(B.getType()->isFPOrFPVectorTy(), |
| 4138 | "Floating-point arithmetic operators only work with " |
| 4139 | "floating-point types!" , |
| 4140 | &B); |
| 4141 | Check(B.getType() == B.getOperand(0)->getType(), |
| 4142 | "Floating-point arithmetic operators must have same type " |
| 4143 | "for operands and result!" , |
| 4144 | &B); |
| 4145 | break; |
| 4146 | // Check that logical operators are only used with integral operands. |
| 4147 | case Instruction::And: |
| 4148 | case Instruction::Or: |
| 4149 | case Instruction::Xor: |
| 4150 | Check(B.getType()->isIntOrIntVectorTy(), |
| 4151 | "Logical operators only work with integral types!" , &B); |
| 4152 | Check(B.getType() == B.getOperand(0)->getType(), |
| 4153 | "Logical operators must have same type for operands and result!" , &B); |
| 4154 | break; |
| 4155 | case Instruction::Shl: |
| 4156 | case Instruction::LShr: |
| 4157 | case Instruction::AShr: |
| 4158 | Check(B.getType()->isIntOrIntVectorTy(), |
| 4159 | "Shifts only work with integral types!" , &B); |
| 4160 | Check(B.getType() == B.getOperand(0)->getType(), |
| 4161 | "Shift return type must be same as operands!" , &B); |
| 4162 | break; |
| 4163 | default: |
| 4164 | llvm_unreachable("Unknown BinaryOperator opcode!" ); |
| 4165 | } |
| 4166 | |
| 4167 | visitInstruction(I&: B); |
| 4168 | } |
| 4169 | |
| 4170 | void Verifier::visitICmpInst(ICmpInst &IC) { |
| 4171 | // Check that the operands are the same type |
| 4172 | Type *Op0Ty = IC.getOperand(i_nocapture: 0)->getType(); |
| 4173 | Type *Op1Ty = IC.getOperand(i_nocapture: 1)->getType(); |
| 4174 | Check(Op0Ty == Op1Ty, |
| 4175 | "Both operands to ICmp instruction are not of the same type!" , &IC); |
| 4176 | // Check that the operands are the right type |
| 4177 | Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(), |
| 4178 | "Invalid operand types for ICmp instruction" , &IC); |
| 4179 | // Check that the predicate is valid. |
| 4180 | Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!" , &IC); |
| 4181 | |
| 4182 | visitInstruction(I&: IC); |
| 4183 | } |
| 4184 | |
| 4185 | void Verifier::visitFCmpInst(FCmpInst &FC) { |
| 4186 | // Check that the operands are the same type |
| 4187 | Type *Op0Ty = FC.getOperand(i_nocapture: 0)->getType(); |
| 4188 | Type *Op1Ty = FC.getOperand(i_nocapture: 1)->getType(); |
| 4189 | Check(Op0Ty == Op1Ty, |
| 4190 | "Both operands to FCmp instruction are not of the same type!" , &FC); |
| 4191 | // Check that the operands are the right type |
| 4192 | Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction" , |
| 4193 | &FC); |
| 4194 | // Check that the predicate is valid. |
| 4195 | Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!" , &FC); |
| 4196 | |
| 4197 | visitInstruction(I&: FC); |
| 4198 | } |
| 4199 | |
| 4200 | void Verifier::(ExtractElementInst &EI) { |
| 4201 | Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)), |
| 4202 | "Invalid extractelement operands!" , &EI); |
| 4203 | visitInstruction(I&: EI); |
| 4204 | } |
| 4205 | |
| 4206 | void Verifier::visitInsertElementInst(InsertElementInst &IE) { |
| 4207 | Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1), |
| 4208 | IE.getOperand(2)), |
| 4209 | "Invalid insertelement operands!" , &IE); |
| 4210 | visitInstruction(I&: IE); |
| 4211 | } |
| 4212 | |
| 4213 | void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) { |
| 4214 | Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1), |
| 4215 | SV.getShuffleMask()), |
| 4216 | "Invalid shufflevector operands!" , &SV); |
| 4217 | visitInstruction(I&: SV); |
| 4218 | } |
| 4219 | |
| 4220 | void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) { |
| 4221 | Type *TargetTy = GEP.getPointerOperandType()->getScalarType(); |
| 4222 | |
| 4223 | Check(isa<PointerType>(TargetTy), |
| 4224 | "GEP base pointer is not a vector or a vector of pointers" , &GEP); |
| 4225 | Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!" , &GEP); |
| 4226 | |
| 4227 | if (auto *STy = dyn_cast<StructType>(Val: GEP.getSourceElementType())) { |
| 4228 | Check(!STy->isScalableTy(), |
| 4229 | "getelementptr cannot target structure that contains scalable vector" |
| 4230 | "type" , |
| 4231 | &GEP); |
| 4232 | } |
| 4233 | |
| 4234 | SmallVector<Value *, 16> Idxs(GEP.indices()); |
| 4235 | Check( |
| 4236 | all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }), |
| 4237 | "GEP indexes must be integers" , &GEP); |
| 4238 | Type *ElTy = |
| 4239 | GetElementPtrInst::getIndexedType(Ty: GEP.getSourceElementType(), IdxList: Idxs); |
| 4240 | Check(ElTy, "Invalid indices for GEP pointer type!" , &GEP); |
| 4241 | |
| 4242 | PointerType *PtrTy = dyn_cast<PointerType>(Val: GEP.getType()->getScalarType()); |
| 4243 | |
| 4244 | Check(PtrTy && GEP.getResultElementType() == ElTy, |
| 4245 | "GEP is not of right type for indices!" , &GEP, ElTy); |
| 4246 | |
| 4247 | if (auto *GEPVTy = dyn_cast<VectorType>(Val: GEP.getType())) { |
| 4248 | // Additional checks for vector GEPs. |
| 4249 | ElementCount GEPWidth = GEPVTy->getElementCount(); |
| 4250 | if (GEP.getPointerOperandType()->isVectorTy()) |
| 4251 | Check( |
| 4252 | GEPWidth == |
| 4253 | cast<VectorType>(GEP.getPointerOperandType())->getElementCount(), |
| 4254 | "Vector GEP result width doesn't match operand's" , &GEP); |
| 4255 | for (Value *Idx : Idxs) { |
| 4256 | Type *IndexTy = Idx->getType(); |
| 4257 | if (auto *IndexVTy = dyn_cast<VectorType>(Val: IndexTy)) { |
| 4258 | ElementCount IndexWidth = IndexVTy->getElementCount(); |
| 4259 | Check(IndexWidth == GEPWidth, "Invalid GEP index vector width" , &GEP); |
| 4260 | } |
| 4261 | Check(IndexTy->isIntOrIntVectorTy(), |
| 4262 | "All GEP indices should be of integer type" ); |
| 4263 | } |
| 4264 | } |
| 4265 | |
| 4266 | Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(), |
| 4267 | "GEP address space doesn't match type" , &GEP); |
| 4268 | |
| 4269 | visitInstruction(I&: GEP); |
| 4270 | } |
| 4271 | |
| 4272 | static bool isContiguous(const ConstantRange &A, const ConstantRange &B) { |
| 4273 | return A.getUpper() == B.getLower() || A.getLower() == B.getUpper(); |
| 4274 | } |
| 4275 | |
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The metadata operands are (low, high) pairs; pairs must be disjoint,
/// non-contiguous, and listed in signed ascending order. \p Ty is the type
/// the range values must match (ignored for noalias.addrspace, which is
/// always i32); \p I is used only for diagnostics.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  unsigned NumOperands = Range->getNumOperands();
  // Operands come in (low, high) pairs, so the count must be even...
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  // ...and there must be at least one pair.
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(MD: Range->getOperand(I: 2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(MD: Range->getOperand(I: 2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // noalias.addrspace always uses i32 bounds regardless of Ty.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // A full set (low == high at min/max) is only legal for !absolute_symbol.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Each subsequent range must be disjoint from, ordered after, and not
      // adjacent to the previous one (adjacent pairs should be merged).
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two ranges the list may wrap around; the first and last
  // ranges must additionally be disjoint and non-adjacent to each other.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(MD: Range->getOperand(I: 0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(MD: Range->getOperand(I: 1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
| 4340 | |
| 4341 | void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) { |
| 4342 | assert(Range && Range == I.getMetadata(LLVMContext::MD_range) && |
| 4343 | "precondition violation" ); |
| 4344 | verifyRangeLikeMetadata(I, Range, Ty, Kind: RangeLikeMetadataKind::Range); |
| 4345 | } |
| 4346 | |
| 4347 | void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, |
| 4348 | Type *Ty) { |
| 4349 | assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) && |
| 4350 | "precondition violation" ); |
| 4351 | verifyRangeLikeMetadata(I, Range, Ty, |
| 4352 | Kind: RangeLikeMetadataKind::NoaliasAddrspace); |
| 4353 | } |
| 4354 | |
| 4355 | void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) { |
| 4356 | unsigned Size = DL.getTypeSizeInBits(Ty); |
| 4357 | Check(Size >= 8, "atomic memory access' size must be byte-sized" , Ty, I); |
| 4358 | Check(!(Size & (Size - 1)), |
| 4359 | "atomic memory access' operand must have a power-of-two size" , Ty, I); |
| 4360 | } |
| 4361 | |
| 4362 | void Verifier::visitLoadInst(LoadInst &LI) { |
| 4363 | PointerType *PTy = dyn_cast<PointerType>(Val: LI.getOperand(i_nocapture: 0)->getType()); |
| 4364 | Check(PTy, "Load operand must be a pointer." , &LI); |
| 4365 | Type *ElTy = LI.getType(); |
| 4366 | if (MaybeAlign A = LI.getAlign()) { |
| 4367 | Check(A->value() <= Value::MaximumAlignment, |
| 4368 | "huge alignment values are unsupported" , &LI); |
| 4369 | } |
| 4370 | Check(ElTy->isSized(), "loading unsized types is not allowed" , &LI); |
| 4371 | if (LI.isAtomic()) { |
| 4372 | Check(LI.getOrdering() != AtomicOrdering::Release && |
| 4373 | LI.getOrdering() != AtomicOrdering::AcquireRelease, |
| 4374 | "Load cannot have Release ordering" , &LI); |
| 4375 | Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(), |
| 4376 | "atomic load operand must have integer, pointer, or floating point " |
| 4377 | "type!" , |
| 4378 | ElTy, &LI); |
| 4379 | checkAtomicMemAccessSize(Ty: ElTy, I: &LI); |
| 4380 | } else { |
| 4381 | Check(LI.getSyncScopeID() == SyncScope::System, |
| 4382 | "Non-atomic load cannot have SynchronizationScope specified" , &LI); |
| 4383 | } |
| 4384 | |
| 4385 | visitInstruction(I&: LI); |
| 4386 | } |
| 4387 | |
| 4388 | void Verifier::visitStoreInst(StoreInst &SI) { |
| 4389 | PointerType *PTy = dyn_cast<PointerType>(Val: SI.getOperand(i_nocapture: 1)->getType()); |
| 4390 | Check(PTy, "Store operand must be a pointer." , &SI); |
| 4391 | Type *ElTy = SI.getOperand(i_nocapture: 0)->getType(); |
| 4392 | if (MaybeAlign A = SI.getAlign()) { |
| 4393 | Check(A->value() <= Value::MaximumAlignment, |
| 4394 | "huge alignment values are unsupported" , &SI); |
| 4395 | } |
| 4396 | Check(ElTy->isSized(), "storing unsized types is not allowed" , &SI); |
| 4397 | if (SI.isAtomic()) { |
| 4398 | Check(SI.getOrdering() != AtomicOrdering::Acquire && |
| 4399 | SI.getOrdering() != AtomicOrdering::AcquireRelease, |
| 4400 | "Store cannot have Acquire ordering" , &SI); |
| 4401 | Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(), |
| 4402 | "atomic store operand must have integer, pointer, or floating point " |
| 4403 | "type!" , |
| 4404 | ElTy, &SI); |
| 4405 | checkAtomicMemAccessSize(Ty: ElTy, I: &SI); |
| 4406 | } else { |
| 4407 | Check(SI.getSyncScopeID() == SyncScope::System, |
| 4408 | "Non-atomic store cannot have SynchronizationScope specified" , &SI); |
| 4409 | } |
| 4410 | visitInstruction(I&: SI); |
| 4411 | } |
| 4412 | |
| 4413 | /// Check that SwiftErrorVal is used as a swifterror argument in CS. |
| 4414 | void Verifier::verifySwiftErrorCall(CallBase &Call, |
| 4415 | const Value *SwiftErrorVal) { |
| 4416 | for (const auto &I : llvm::enumerate(First: Call.args())) { |
| 4417 | if (I.value() == SwiftErrorVal) { |
| 4418 | Check(Call.paramHasAttr(I.index(), Attribute::SwiftError), |
| 4419 | "swifterror value when used in a callsite should be marked " |
| 4420 | "with swifterror attribute" , |
| 4421 | SwiftErrorVal, Call); |
| 4422 | } |
| 4423 | } |
| 4424 | } |
| 4425 | |
| 4426 | void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) { |
| 4427 | // Check that swifterror value is only used by loads, stores, or as |
| 4428 | // a swifterror argument. |
| 4429 | for (const User *U : SwiftErrorVal->users()) { |
| 4430 | Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) || |
| 4431 | isa<InvokeInst>(U), |
| 4432 | "swifterror value can only be loaded and stored from, or " |
| 4433 | "as a swifterror argument!" , |
| 4434 | SwiftErrorVal, U); |
| 4435 | // If it is used by a store, check it is the second operand. |
| 4436 | if (auto StoreI = dyn_cast<StoreInst>(Val: U)) |
| 4437 | Check(StoreI->getOperand(1) == SwiftErrorVal, |
| 4438 | "swifterror value should be the second operand when used " |
| 4439 | "by stores" , |
| 4440 | SwiftErrorVal, U); |
| 4441 | if (auto *Call = dyn_cast<CallBase>(Val: U)) |
| 4442 | verifySwiftErrorCall(Call&: *const_cast<CallBase *>(Call), SwiftErrorVal); |
| 4443 | } |
| 4444 | } |
| 4445 | |
| 4446 | void Verifier::visitAllocaInst(AllocaInst &AI) { |
| 4447 | Type *Ty = AI.getAllocatedType(); |
| 4448 | SmallPtrSet<Type*, 4> Visited; |
| 4449 | Check(Ty->isSized(&Visited), "Cannot allocate unsized type" , &AI); |
| 4450 | // Check if it's a target extension type that disallows being used on the |
| 4451 | // stack. |
| 4452 | Check(!Ty->containsNonLocalTargetExtType(), |
| 4453 | "Alloca has illegal target extension type" , &AI); |
| 4454 | Check(AI.getArraySize()->getType()->isIntegerTy(), |
| 4455 | "Alloca array size must have integer type" , &AI); |
| 4456 | if (MaybeAlign A = AI.getAlign()) { |
| 4457 | Check(A->value() <= Value::MaximumAlignment, |
| 4458 | "huge alignment values are unsupported" , &AI); |
| 4459 | } |
| 4460 | |
| 4461 | if (AI.isSwiftError()) { |
| 4462 | Check(Ty->isPointerTy(), "swifterror alloca must have pointer type" , &AI); |
| 4463 | Check(!AI.isArrayAllocation(), |
| 4464 | "swifterror alloca must not be array allocation" , &AI); |
| 4465 | verifySwiftErrorValue(SwiftErrorVal: &AI); |
| 4466 | } |
| 4467 | |
| 4468 | if (TT.isAMDGPU()) { |
| 4469 | Check(AI.getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS, |
| 4470 | "alloca on amdgpu must be in addrspace(5)" , &AI); |
| 4471 | } |
| 4472 | |
| 4473 | visitInstruction(I&: AI); |
| 4474 | } |
| 4475 | |
| 4476 | void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { |
| 4477 | Type *ElTy = CXI.getOperand(i_nocapture: 1)->getType(); |
| 4478 | Check(ElTy->isIntOrPtrTy(), |
| 4479 | "cmpxchg operand must have integer or pointer type" , ElTy, &CXI); |
| 4480 | checkAtomicMemAccessSize(Ty: ElTy, I: &CXI); |
| 4481 | visitInstruction(I&: CXI); |
| 4482 | } |
| 4483 | |
| 4484 | void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { |
| 4485 | Check(RMWI.getOrdering() != AtomicOrdering::Unordered, |
| 4486 | "atomicrmw instructions cannot be unordered." , &RMWI); |
| 4487 | auto Op = RMWI.getOperation(); |
| 4488 | Type *ElTy = RMWI.getOperand(i_nocapture: 1)->getType(); |
| 4489 | if (Op == AtomicRMWInst::Xchg) { |
| 4490 | Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() || |
| 4491 | ElTy->isPointerTy(), |
| 4492 | "atomicrmw " + AtomicRMWInst::getOperationName(Op) + |
| 4493 | " operand must have integer or floating point type!" , |
| 4494 | &RMWI, ElTy); |
| 4495 | } else if (AtomicRMWInst::isFPOperation(Op)) { |
| 4496 | Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy), |
| 4497 | "atomicrmw " + AtomicRMWInst::getOperationName(Op) + |
| 4498 | " operand must have floating-point or fixed vector of floating-point " |
| 4499 | "type!" , |
| 4500 | &RMWI, ElTy); |
| 4501 | } else { |
| 4502 | Check(ElTy->isIntegerTy(), |
| 4503 | "atomicrmw " + AtomicRMWInst::getOperationName(Op) + |
| 4504 | " operand must have integer type!" , |
| 4505 | &RMWI, ElTy); |
| 4506 | } |
| 4507 | checkAtomicMemAccessSize(Ty: ElTy, I: &RMWI); |
| 4508 | Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP, |
| 4509 | "Invalid binary operation!" , &RMWI); |
| 4510 | visitInstruction(I&: RMWI); |
| 4511 | } |
| 4512 | |
| 4513 | void Verifier::visitFenceInst(FenceInst &FI) { |
| 4514 | const AtomicOrdering Ordering = FI.getOrdering(); |
| 4515 | Check(Ordering == AtomicOrdering::Acquire || |
| 4516 | Ordering == AtomicOrdering::Release || |
| 4517 | Ordering == AtomicOrdering::AcquireRelease || |
| 4518 | Ordering == AtomicOrdering::SequentiallyConsistent, |
| 4519 | "fence instructions may only have acquire, release, acq_rel, or " |
| 4520 | "seq_cst ordering." , |
| 4521 | &FI); |
| 4522 | visitInstruction(I&: FI); |
| 4523 | } |
| 4524 | |
| 4525 | void Verifier::(ExtractValueInst &EVI) { |
| 4526 | Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(), |
| 4527 | EVI.getIndices()) == EVI.getType(), |
| 4528 | "Invalid ExtractValueInst operands!" , &EVI); |
| 4529 | |
| 4530 | visitInstruction(I&: EVI); |
| 4531 | } |
| 4532 | |
| 4533 | void Verifier::visitInsertValueInst(InsertValueInst &IVI) { |
| 4534 | Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(), |
| 4535 | IVI.getIndices()) == |
| 4536 | IVI.getOperand(1)->getType(), |
| 4537 | "Invalid InsertValueInst operands!" , &IVI); |
| 4538 | |
| 4539 | visitInstruction(I&: IVI); |
| 4540 | } |
| 4541 | |
| 4542 | static Value *getParentPad(Value *EHPad) { |
| 4543 | if (auto *FPI = dyn_cast<FuncletPadInst>(Val: EHPad)) |
| 4544 | return FPI->getParentPad(); |
| 4545 | |
| 4546 | return cast<CatchSwitchInst>(Val: EHPad)->getParentPad(); |
| 4547 | } |
| 4548 | |
| 4549 | void Verifier::visitEHPadPredecessors(Instruction &I) { |
| 4550 | assert(I.isEHPad()); |
| 4551 | |
| 4552 | BasicBlock *BB = I.getParent(); |
| 4553 | Function *F = BB->getParent(); |
| 4554 | |
| 4555 | Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block." , &I); |
| 4556 | |
| 4557 | if (auto *LPI = dyn_cast<LandingPadInst>(Val: &I)) { |
| 4558 | // The landingpad instruction defines its parent as a landing pad block. The |
| 4559 | // landing pad block may be branched to only by the unwind edge of an |
| 4560 | // invoke. |
| 4561 | for (BasicBlock *PredBB : predecessors(BB)) { |
| 4562 | const auto *II = dyn_cast<InvokeInst>(Val: PredBB->getTerminator()); |
| 4563 | Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB, |
| 4564 | "Block containing LandingPadInst must be jumped to " |
| 4565 | "only by the unwind edge of an invoke." , |
| 4566 | LPI); |
| 4567 | } |
| 4568 | return; |
| 4569 | } |
| 4570 | if (auto *CPI = dyn_cast<CatchPadInst>(Val: &I)) { |
| 4571 | if (!pred_empty(BB)) |
| 4572 | Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(), |
| 4573 | "Block containg CatchPadInst must be jumped to " |
| 4574 | "only by its catchswitch." , |
| 4575 | CPI); |
| 4576 | Check(BB != CPI->getCatchSwitch()->getUnwindDest(), |
| 4577 | "Catchswitch cannot unwind to one of its catchpads" , |
| 4578 | CPI->getCatchSwitch(), CPI); |
| 4579 | return; |
| 4580 | } |
| 4581 | |
| 4582 | // Verify that each pred has a legal terminator with a legal to/from EH |
| 4583 | // pad relationship. |
| 4584 | Instruction *ToPad = &I; |
| 4585 | Value *ToPadParent = getParentPad(EHPad: ToPad); |
| 4586 | for (BasicBlock *PredBB : predecessors(BB)) { |
| 4587 | Instruction *TI = PredBB->getTerminator(); |
| 4588 | Value *FromPad; |
| 4589 | if (auto *II = dyn_cast<InvokeInst>(Val: TI)) { |
| 4590 | Check(II->getUnwindDest() == BB && II->getNormalDest() != BB, |
| 4591 | "EH pad must be jumped to via an unwind edge" , ToPad, II); |
| 4592 | auto *CalledFn = |
| 4593 | dyn_cast<Function>(Val: II->getCalledOperand()->stripPointerCasts()); |
| 4594 | if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() && |
| 4595 | !IntrinsicInst::mayLowerToFunctionCall(IID: CalledFn->getIntrinsicID())) |
| 4596 | continue; |
| 4597 | if (auto Bundle = II->getOperandBundle(ID: LLVMContext::OB_funclet)) |
| 4598 | FromPad = Bundle->Inputs[0]; |
| 4599 | else |
| 4600 | FromPad = ConstantTokenNone::get(Context&: II->getContext()); |
| 4601 | } else if (auto *CRI = dyn_cast<CleanupReturnInst>(Val: TI)) { |
| 4602 | FromPad = CRI->getOperand(i_nocapture: 0); |
| 4603 | Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup" , CRI); |
| 4604 | } else if (auto *CSI = dyn_cast<CatchSwitchInst>(Val: TI)) { |
| 4605 | FromPad = CSI; |
| 4606 | } else { |
| 4607 | Check(false, "EH pad must be jumped to via an unwind edge" , ToPad, TI); |
| 4608 | } |
| 4609 | |
| 4610 | // The edge may exit from zero or more nested pads. |
| 4611 | SmallSet<Value *, 8> Seen; |
| 4612 | for (;; FromPad = getParentPad(EHPad: FromPad)) { |
| 4613 | Check(FromPad != ToPad, |
| 4614 | "EH pad cannot handle exceptions raised within it" , FromPad, TI); |
| 4615 | if (FromPad == ToPadParent) { |
| 4616 | // This is a legal unwind edge. |
| 4617 | break; |
| 4618 | } |
| 4619 | Check(!isa<ConstantTokenNone>(FromPad), |
| 4620 | "A single unwind edge may only enter one EH pad" , TI); |
| 4621 | Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads" , |
| 4622 | FromPad); |
| 4623 | |
| 4624 | // This will be diagnosed on the corresponding instruction already. We |
| 4625 | // need the extra check here to make sure getParentPad() works. |
| 4626 | Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad), |
| 4627 | "Parent pad must be catchpad/cleanuppad/catchswitch" , TI); |
| 4628 | } |
| 4629 | } |
| 4630 | } |
| 4631 | |
| 4632 | void Verifier::visitLandingPadInst(LandingPadInst &LPI) { |
| 4633 | // The landingpad instruction is ill-formed if it doesn't have any clauses and |
| 4634 | // isn't a cleanup. |
| 4635 | Check(LPI.getNumClauses() > 0 || LPI.isCleanup(), |
| 4636 | "LandingPadInst needs at least one clause or to be a cleanup." , &LPI); |
| 4637 | |
| 4638 | visitEHPadPredecessors(I&: LPI); |
| 4639 | |
| 4640 | if (!LandingPadResultTy) |
| 4641 | LandingPadResultTy = LPI.getType(); |
| 4642 | else |
| 4643 | Check(LandingPadResultTy == LPI.getType(), |
| 4644 | "The landingpad instruction should have a consistent result type " |
| 4645 | "inside a function." , |
| 4646 | &LPI); |
| 4647 | |
| 4648 | Function *F = LPI.getParent()->getParent(); |
| 4649 | Check(F->hasPersonalityFn(), |
| 4650 | "LandingPadInst needs to be in a function with a personality." , &LPI); |
| 4651 | |
| 4652 | // The landingpad instruction must be the first non-PHI instruction in the |
| 4653 | // block. |
| 4654 | Check(LPI.getParent()->getLandingPadInst() == &LPI, |
| 4655 | "LandingPadInst not the first non-PHI instruction in the block." , &LPI); |
| 4656 | |
| 4657 | for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) { |
| 4658 | Constant *Clause = LPI.getClause(Idx: i); |
| 4659 | if (LPI.isCatch(Idx: i)) { |
| 4660 | Check(isa<PointerType>(Clause->getType()), |
| 4661 | "Catch operand does not have pointer type!" , &LPI); |
| 4662 | } else { |
| 4663 | Check(LPI.isFilter(i), "Clause is neither catch nor filter!" , &LPI); |
| 4664 | Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause), |
| 4665 | "Filter operand is not an array of constants!" , &LPI); |
| 4666 | } |
| 4667 | } |
| 4668 | |
| 4669 | visitInstruction(I&: LPI); |
| 4670 | } |
| 4671 | |
| 4672 | void Verifier::visitResumeInst(ResumeInst &RI) { |
| 4673 | Check(RI.getFunction()->hasPersonalityFn(), |
| 4674 | "ResumeInst needs to be in a function with a personality." , &RI); |
| 4675 | |
| 4676 | if (!LandingPadResultTy) |
| 4677 | LandingPadResultTy = RI.getValue()->getType(); |
| 4678 | else |
| 4679 | Check(LandingPadResultTy == RI.getValue()->getType(), |
| 4680 | "The resume instruction should have a consistent result type " |
| 4681 | "inside a function." , |
| 4682 | &RI); |
| 4683 | |
| 4684 | visitTerminator(I&: RI); |
| 4685 | } |
| 4686 | |
| 4687 | void Verifier::visitCatchPadInst(CatchPadInst &CPI) { |
| 4688 | BasicBlock *BB = CPI.getParent(); |
| 4689 | |
| 4690 | Function *F = BB->getParent(); |
| 4691 | Check(F->hasPersonalityFn(), |
| 4692 | "CatchPadInst needs to be in a function with a personality." , &CPI); |
| 4693 | |
| 4694 | Check(isa<CatchSwitchInst>(CPI.getParentPad()), |
| 4695 | "CatchPadInst needs to be directly nested in a CatchSwitchInst." , |
| 4696 | CPI.getParentPad()); |
| 4697 | |
| 4698 | // The catchpad instruction must be the first non-PHI instruction in the |
| 4699 | // block. |
| 4700 | Check(&*BB->getFirstNonPHIIt() == &CPI, |
| 4701 | "CatchPadInst not the first non-PHI instruction in the block." , &CPI); |
| 4702 | |
| 4703 | visitEHPadPredecessors(I&: CPI); |
| 4704 | visitFuncletPadInst(FPI&: CPI); |
| 4705 | } |
| 4706 | |
| 4707 | void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) { |
| 4708 | Check(isa<CatchPadInst>(CatchReturn.getOperand(0)), |
| 4709 | "CatchReturnInst needs to be provided a CatchPad" , &CatchReturn, |
| 4710 | CatchReturn.getOperand(0)); |
| 4711 | |
| 4712 | visitTerminator(I&: CatchReturn); |
| 4713 | } |
| 4714 | |
| 4715 | void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) { |
| 4716 | BasicBlock *BB = CPI.getParent(); |
| 4717 | |
| 4718 | Function *F = BB->getParent(); |
| 4719 | Check(F->hasPersonalityFn(), |
| 4720 | "CleanupPadInst needs to be in a function with a personality." , &CPI); |
| 4721 | |
| 4722 | // The cleanuppad instruction must be the first non-PHI instruction in the |
| 4723 | // block. |
| 4724 | Check(&*BB->getFirstNonPHIIt() == &CPI, |
| 4725 | "CleanupPadInst not the first non-PHI instruction in the block." , &CPI); |
| 4726 | |
| 4727 | auto *ParentPad = CPI.getParentPad(); |
| 4728 | Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad), |
| 4729 | "CleanupPadInst has an invalid parent." , &CPI); |
| 4730 | |
| 4731 | visitEHPadPredecessors(I&: CPI); |
| 4732 | visitFuncletPadInst(FPI&: CPI); |
| 4733 | } |
| 4734 | |
/// Verify the unwind-destination consistency of a funclet pad
/// (catchpad/cleanuppad). Walks all users of \p FPI — and, via the worklist,
/// of nested cleanuppads discovered along the way — to find every unwind
/// edge that exits FPI, checking that all such edges agree on a single
/// unwind destination. Also records sibling-unwind information consumed
/// later by verifySiblingFuncletUnwinds, and checks a catchpad's unwind
/// dest against its parent catchswitch's.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First user/unwind-dest pair observed to exit FPI; later edges must match.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(Val: U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(Val: U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(Val: U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(Val: U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(Val: U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(Elt: CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      // UnwindPad is where this use actually unwinds to; ExitsFPI says
      // whether that edge leaves FPI itself (not just a nested pad).
      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(Val: UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(EHPad: UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(EHPad: ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(Val: ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(Context&: FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(Val: &FPI) && !isa<ConstantTokenNone>(Val: UnwindPad) &&
              getParentPad(EHPad: UnwindPad) == getParentPad(EHPad: &FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(Val: U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(EHPad: UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(EHPad: ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's single unwind edge out must match its parent catchswitch's
  // unwind destination (or both must unwind to the caller).
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(Context&: FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(I&: FPI);
}
| 4894 | |
/// Verify structural invariants of a catchswitch instruction:
///  - the enclosing function has a personality function,
///  - it is the first non-PHI instruction in its block,
///  - its parent pad is either "none" or a funclet pad,
///  - any unwind destination begins with a non-landingpad EH pad,
///  - it has at least one handler, and every handler block begins with a
///    catchpad.
/// Also records sibling-funclet unwind information for the later
/// verifySiblingFuncletUnwinds pass.
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality." ,
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block." ,
        &CatchSwitch);

  // The parent pad is a token: either "none" (top level) or a funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent." , ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    // The unwind destination must start with an EH pad, and landingpads mix
    // EH personalities so they are explicitly excluded.
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad." ,
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(EHPad: &*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list" , &CatchSwitch);

  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads" , &CatchSwitch, Handler);
  }

  // Generic EH-pad predecessor and terminator checks.
  visitEHPadPredecessors(I&: CatchSwitch);
  visitTerminator(I&: CatchSwitch);
}
| 4936 | |
| 4937 | void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) { |
| 4938 | Check(isa<CleanupPadInst>(CRI.getOperand(0)), |
| 4939 | "CleanupReturnInst needs to be provided a CleanupPad" , &CRI, |
| 4940 | CRI.getOperand(0)); |
| 4941 | |
| 4942 | if (BasicBlock *UnwindDest = CRI.getUnwindDest()) { |
| 4943 | BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt(); |
| 4944 | Check(I->isEHPad() && !isa<LandingPadInst>(I), |
| 4945 | "CleanupReturnInst must unwind to an EH block which is not a " |
| 4946 | "landingpad." , |
| 4947 | &CRI); |
| 4948 | } |
| 4949 | |
| 4950 | visitTerminator(I&: CRI); |
| 4951 | } |
| 4952 | |
| 4953 | void Verifier::verifyDominatesUse(Instruction &I, unsigned i) { |
| 4954 | Instruction *Op = cast<Instruction>(Val: I.getOperand(i)); |
| 4955 | // If the we have an invalid invoke, don't try to compute the dominance. |
| 4956 | // We already reject it in the invoke specific checks and the dominance |
| 4957 | // computation doesn't handle multiple edges. |
| 4958 | if (InvokeInst *II = dyn_cast<InvokeInst>(Val: Op)) { |
| 4959 | if (II->getNormalDest() == II->getUnwindDest()) |
| 4960 | return; |
| 4961 | } |
| 4962 | |
| 4963 | // Quick check whether the def has already been encountered in the same block. |
| 4964 | // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI |
| 4965 | // uses are defined to happen on the incoming edge, not at the instruction. |
| 4966 | // |
| 4967 | // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata) |
| 4968 | // wrapping an SSA value, assert that we've already encountered it. See |
| 4969 | // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp. |
| 4970 | if (!isa<PHINode>(Val: I) && InstsInThisBlock.count(Ptr: Op)) |
| 4971 | return; |
| 4972 | |
| 4973 | const Use &U = I.getOperandUse(i); |
| 4974 | Check(DT.dominates(Op, U), "Instruction does not dominate all uses!" , Op, &I); |
| 4975 | } |
| 4976 | |
| 4977 | void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) { |
| 4978 | Check(I.getType()->isPointerTy(), |
| 4979 | "dereferenceable, dereferenceable_or_null " |
| 4980 | "apply only to pointer types" , |
| 4981 | &I); |
| 4982 | Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)), |
| 4983 | "dereferenceable, dereferenceable_or_null apply only to load" |
| 4984 | " and inttoptr instructions, use attributes for calls or invokes" , |
| 4985 | &I); |
| 4986 | Check(MD->getNumOperands() == 1, |
| 4987 | "dereferenceable, dereferenceable_or_null " |
| 4988 | "take one operand!" , |
| 4989 | &I); |
| 4990 | ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 0)); |
| 4991 | Check(CI && CI->getType()->isIntegerTy(64), |
| 4992 | "dereferenceable, " |
| 4993 | "dereferenceable_or_null metadata value must be an i64!" , |
| 4994 | &I); |
| 4995 | } |
| 4996 | |
| 4997 | void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) { |
| 4998 | auto GetBranchingTerminatorNumOperands = [&]() { |
| 4999 | unsigned ExpectedNumOperands = 0; |
| 5000 | if (BranchInst *BI = dyn_cast<BranchInst>(Val: &I)) |
| 5001 | ExpectedNumOperands = BI->getNumSuccessors(); |
| 5002 | else if (SwitchInst *SI = dyn_cast<SwitchInst>(Val: &I)) |
| 5003 | ExpectedNumOperands = SI->getNumSuccessors(); |
| 5004 | else if (isa<CallInst>(Val: &I)) |
| 5005 | ExpectedNumOperands = 1; |
| 5006 | else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(Val: &I)) |
| 5007 | ExpectedNumOperands = IBI->getNumDestinations(); |
| 5008 | else if (isa<SelectInst>(Val: &I)) |
| 5009 | ExpectedNumOperands = 2; |
| 5010 | else if (CallBrInst *CI = dyn_cast<CallBrInst>(Val: &I)) |
| 5011 | ExpectedNumOperands = CI->getNumSuccessors(); |
| 5012 | return ExpectedNumOperands; |
| 5013 | }; |
| 5014 | Check(MD->getNumOperands() >= 1, |
| 5015 | "!prof annotations should have at least 1 operand" , MD); |
| 5016 | // Check first operand. |
| 5017 | Check(MD->getOperand(0) != nullptr, "first operand should not be null" , MD); |
| 5018 | Check(isa<MDString>(MD->getOperand(0)), |
| 5019 | "expected string with name of the !prof annotation" , MD); |
| 5020 | MDString *MDS = cast<MDString>(Val: MD->getOperand(I: 0)); |
| 5021 | StringRef ProfName = MDS->getString(); |
| 5022 | |
| 5023 | if (ProfName == MDProfLabels::UnknownBranchWeightsMarker) { |
| 5024 | Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I), |
| 5025 | "'unknown' !prof should only appear on instructions on which " |
| 5026 | "'branch_weights' would" , |
| 5027 | MD); |
| 5028 | Check(MD->getNumOperands() == 1, |
| 5029 | "'unknown' !prof should have no additional operands" , MD); |
| 5030 | return; |
| 5031 | } |
| 5032 | |
| 5033 | Check(MD->getNumOperands() >= 2, |
| 5034 | "!prof annotations should have no less than 2 operands" , MD); |
| 5035 | |
| 5036 | // Check consistency of !prof branch_weights metadata. |
| 5037 | if (ProfName == MDProfLabels::BranchWeights) { |
| 5038 | unsigned NumBranchWeights = getNumBranchWeights(ProfileData: *MD); |
| 5039 | if (isa<InvokeInst>(Val: &I)) { |
| 5040 | Check(NumBranchWeights == 1 || NumBranchWeights == 2, |
| 5041 | "Wrong number of InvokeInst branch_weights operands" , MD); |
| 5042 | } else { |
| 5043 | const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands(); |
| 5044 | if (ExpectedNumOperands == 0) |
| 5045 | CheckFailed(Message: "!prof branch_weights are not allowed for this instruction" , |
| 5046 | V1: MD); |
| 5047 | |
| 5048 | Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands" , |
| 5049 | MD); |
| 5050 | } |
| 5051 | for (unsigned i = getBranchWeightOffset(ProfileData: MD); i < MD->getNumOperands(); |
| 5052 | ++i) { |
| 5053 | auto &MDO = MD->getOperand(I: i); |
| 5054 | Check(MDO, "second operand should not be null" , MD); |
| 5055 | Check(mdconst::dyn_extract<ConstantInt>(MDO), |
| 5056 | "!prof brunch_weights operand is not a const int" ); |
| 5057 | } |
| 5058 | } else if (ProfName == MDProfLabels::ValueProfile) { |
| 5059 | Check(isValueProfileMD(MD), "invalid value profiling metadata" , MD); |
| 5060 | ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 1)); |
| 5061 | Check(KindInt, "VP !prof missing kind argument" , MD); |
| 5062 | |
| 5063 | auto Kind = KindInt->getZExtValue(); |
| 5064 | Check(Kind >= InstrProfValueKind::IPVK_First && |
| 5065 | Kind <= InstrProfValueKind::IPVK_Last, |
| 5066 | "Invalid VP !prof kind" , MD); |
| 5067 | Check(MD->getNumOperands() % 2 == 1, |
| 5068 | "VP !prof should have an even number " |
| 5069 | "of arguments after 'VP'" , |
| 5070 | MD); |
| 5071 | if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget || |
| 5072 | Kind == InstrProfValueKind::IPVK_MemOPSize) |
| 5073 | Check(isa<CallBase>(I), |
| 5074 | "VP !prof indirect call or memop size expected to be applied to " |
| 5075 | "CallBase instructions only" , |
| 5076 | MD); |
| 5077 | } else { |
| 5078 | CheckFailed(Message: "expected either branch_weights or VP profile name" , V1: MD); |
| 5079 | } |
| 5080 | } |
| 5081 | |
/// Verify a !DIAssignID attachment: it may only appear on allocas, stores,
/// or intrinsic instructions, and its only metadata users must be
/// llvm.dbg.assign intrinsics / dbg_assign records in the same function as
/// the annotated instruction.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
      isa<AllocaInst>(Val: I) || isa<StoreInst>(Val: I) || isa<IntrinsicInst>(Val: I);
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind" ,
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  // getIfExists avoids creating a MetadataAsValue just to inspect users.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
      CheckDI(isa<DbgAssignIntrinsic>(User),
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics" ,
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(Val: User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst" , DAI, &I);
    }
  }
  // The same constraints apply to the non-instruction debug records that
  // reference this DIAssignID.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(Val: MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs." , MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst" , DVR, &I);
  }
}
| 5113 | |
| 5114 | void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) { |
| 5115 | Check(canInstructionHaveMMRAs(I), |
| 5116 | "!mmra metadata attached to unexpected instruction kind" , I, MD); |
| 5117 | |
| 5118 | // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a |
| 5119 | // list of tags such as !2 in the following example: |
| 5120 | // !0 = !{!"a", !"b"} |
| 5121 | // !1 = !{!"c", !"d"} |
| 5122 | // !2 = !{!0, !1} |
| 5123 | if (MMRAMetadata::isTagMD(MD)) |
| 5124 | return; |
| 5125 | |
| 5126 | Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple" , I, MD); |
| 5127 | for (const MDOperand &MDOp : MD->operands()) |
| 5128 | Check(MMRAMetadata::isTagMD(MDOp.get()), |
| 5129 | "!mmra metadata tuple operand is not an MMRA tag" , I, MDOp.get()); |
| 5130 | } |
| 5131 | |
| 5132 | void Verifier::visitCallStackMetadata(MDNode *MD) { |
| 5133 | // Call stack metadata should consist of a list of at least 1 constant int |
| 5134 | // (representing a hash of the location). |
| 5135 | Check(MD->getNumOperands() >= 1, |
| 5136 | "call stack metadata should have at least 1 operand" , MD); |
| 5137 | |
| 5138 | for (const auto &Op : MD->operands()) |
| 5139 | Check(mdconst::dyn_extract_or_null<ConstantInt>(Op), |
| 5140 | "call stack metadata operand should be constant integer" , Op); |
| 5141 | } |
| 5142 | |
| 5143 | void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) { |
| 5144 | Check(isa<CallBase>(I), "!memprof metadata should only exist on calls" , &I); |
| 5145 | Check(MD->getNumOperands() >= 1, |
| 5146 | "!memprof annotations should have at least 1 metadata operand " |
| 5147 | "(MemInfoBlock)" , |
| 5148 | MD); |
| 5149 | |
| 5150 | // Check each MIB |
| 5151 | for (auto &MIBOp : MD->operands()) { |
| 5152 | MDNode *MIB = dyn_cast<MDNode>(Val: MIBOp); |
| 5153 | // The first operand of an MIB should be the call stack metadata. |
| 5154 | // There rest of the operands should be MDString tags, and there should be |
| 5155 | // at least one. |
| 5156 | Check(MIB->getNumOperands() >= 2, |
| 5157 | "Each !memprof MemInfoBlock should have at least 2 operands" , MIB); |
| 5158 | |
| 5159 | // Check call stack metadata (first operand). |
| 5160 | Check(MIB->getOperand(0) != nullptr, |
| 5161 | "!memprof MemInfoBlock first operand should not be null" , MIB); |
| 5162 | Check(isa<MDNode>(MIB->getOperand(0)), |
| 5163 | "!memprof MemInfoBlock first operand should be an MDNode" , MIB); |
| 5164 | MDNode *StackMD = dyn_cast<MDNode>(Val: MIB->getOperand(I: 0)); |
| 5165 | visitCallStackMetadata(MD: StackMD); |
| 5166 | |
| 5167 | // The next set of 1 or more operands should be MDString. |
| 5168 | unsigned I = 1; |
| 5169 | for (; I < MIB->getNumOperands(); ++I) { |
| 5170 | if (!isa<MDString>(Val: MIB->getOperand(I))) { |
| 5171 | Check(I > 1, |
| 5172 | "!memprof MemInfoBlock second operand should be an MDString" , |
| 5173 | MIB); |
| 5174 | break; |
| 5175 | } |
| 5176 | } |
| 5177 | |
| 5178 | // Any remaining should be MDNode that are pairs of integers |
| 5179 | for (; I < MIB->getNumOperands(); ++I) { |
| 5180 | MDNode *OpNode = dyn_cast<MDNode>(Val: MIB->getOperand(I)); |
| 5181 | Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode" , |
| 5182 | MIB); |
| 5183 | Check(OpNode->getNumOperands() == 2, |
| 5184 | "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 " |
| 5185 | "operands" , |
| 5186 | MIB); |
| 5187 | // Check that all of Op's operands are ConstantInt. |
| 5188 | Check(llvm::all_of(OpNode->operands(), |
| 5189 | [](const MDOperand &Op) { |
| 5190 | return mdconst::hasa<ConstantInt>(Op); |
| 5191 | }), |
| 5192 | "Not all !memprof MemInfoBlock operands 2 to N are MDNode with " |
| 5193 | "ConstantInt operands" , |
| 5194 | MIB); |
| 5195 | } |
| 5196 | } |
| 5197 | } |
| 5198 | |
/// Verify a !callsite attachment: it may only appear on calls, and its
/// payload is structurally a call stack node (validated by
/// visitCallStackMetadata).
void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callsite metadata should only exist on calls" , &I);
  // Verify the partial callstack annotated from memprof profiles. This callsite
  // is a part of a profiled allocation callstack.
  visitCallStackMetadata(MD);
}
| 5205 | |
| 5206 | void Verifier::visitAnnotationMetadata(MDNode *Annotation) { |
| 5207 | Check(isa<MDTuple>(Annotation), "annotation must be a tuple" ); |
| 5208 | Check(Annotation->getNumOperands() >= 1, |
| 5209 | "annotation must have at least one operand" ); |
| 5210 | for (const MDOperand &Op : Annotation->operands()) { |
| 5211 | bool TupleOfStrings = |
| 5212 | isa<MDTuple>(Val: Op.get()) && |
| 5213 | all_of(Range: cast<MDTuple>(Val: Op)->operands(), P: [](auto &Annotation) { |
| 5214 | return isa<MDString>(Annotation.get()); |
| 5215 | }); |
| 5216 | Check(isa<MDString>(Op.get()) || TupleOfStrings, |
| 5217 | "operands must be a string or a tuple of strings" ); |
| 5218 | } |
| 5219 | } |
| 5220 | |
| 5221 | void Verifier::visitAliasScopeMetadata(const MDNode *MD) { |
| 5222 | unsigned NumOps = MD->getNumOperands(); |
| 5223 | Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands" , |
| 5224 | MD); |
| 5225 | Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)), |
| 5226 | "first scope operand must be self-referential or string" , MD); |
| 5227 | if (NumOps == 3) |
| 5228 | Check(isa<MDString>(MD->getOperand(2)), |
| 5229 | "third scope operand must be string (if used)" , MD); |
| 5230 | |
| 5231 | MDNode *Domain = dyn_cast<MDNode>(Val: MD->getOperand(I: 1)); |
| 5232 | Check(Domain != nullptr, "second scope operand must be MDNode" , MD); |
| 5233 | |
| 5234 | unsigned NumDomainOps = Domain->getNumOperands(); |
| 5235 | Check(NumDomainOps >= 1 && NumDomainOps <= 2, |
| 5236 | "domain must have one or two operands" , Domain); |
| 5237 | Check(Domain->getOperand(0).get() == Domain || |
| 5238 | isa<MDString>(Domain->getOperand(0)), |
| 5239 | "first domain operand must be self-referential or string" , Domain); |
| 5240 | if (NumDomainOps == 2) |
| 5241 | Check(isa<MDString>(Domain->getOperand(1)), |
| 5242 | "second domain operand must be string (if used)" , Domain); |
| 5243 | } |
| 5244 | |
| 5245 | void Verifier::visitAliasScopeListMetadata(const MDNode *MD) { |
| 5246 | for (const MDOperand &Op : MD->operands()) { |
| 5247 | const MDNode *OpMD = dyn_cast<MDNode>(Val: Op); |
| 5248 | Check(OpMD != nullptr, "scope list must consist of MDNodes" , MD); |
| 5249 | visitAliasScopeMetadata(MD: OpMD); |
| 5250 | } |
| 5251 | } |
| 5252 | |
| 5253 | void Verifier::visitAccessGroupMetadata(const MDNode *MD) { |
| 5254 | auto IsValidAccessScope = [](const MDNode *MD) { |
| 5255 | return MD->getNumOperands() == 0 && MD->isDistinct(); |
| 5256 | }; |
| 5257 | |
| 5258 | // It must be either an access scope itself... |
| 5259 | if (IsValidAccessScope(MD)) |
| 5260 | return; |
| 5261 | |
| 5262 | // ...or a list of access scopes. |
| 5263 | for (const MDOperand &Op : MD->operands()) { |
| 5264 | const MDNode *OpMD = dyn_cast<MDNode>(Val: Op); |
| 5265 | Check(OpMD != nullptr, "Access scope list must consist of MDNodes" , MD); |
| 5266 | Check(IsValidAccessScope(OpMD), |
| 5267 | "Access scope list contains invalid access scope" , MD); |
| 5268 | } |
| 5269 | } |
| 5270 | |
/// visitInstruction - Verify that an instruction is well formed. This is the
/// generic per-instruction check: SSA/self-reference rules, operand
/// validity, and validation of every recognized metadata attachment. It runs
/// after any opcode-specific visitor and finally records the instruction as
/// seen for intra-block dominance checks.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!" , &I);

  if (!isa<PHINode>(Val: I)) { // Check that non-phi nodes are not self referential
    // Self-reference is tolerated in unreachable code.
    for (User *U : I.users()) {
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!" , &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!" , &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!" , &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!" , &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(Val: U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!" ,
            &I, Used);
    else {
      CheckFailed(Message: "Use of instruction is not an instruction!" , V1: U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(Val: &I);

  // Validate each operand: first-class type, plus the referencing rules for
  // functions, blocks, arguments, globals, instructions, inline asm, and
  // constant expressions.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!" , &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!" , &I);
    }

    if (Function *F = dyn_cast<Function>(Val: I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
                          ID: LLVMContext::OB_clang_arc_attachedcall, Idx);
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!" , &I);
      // Only a small allowlist of intrinsics may be invoked (rather than
      // called); everything else must appear as a direct call.
      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw" ,
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!" , &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(Val: I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!" , &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(Val: I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!" , &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Val: I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!" , &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(Val: I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!" , &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(Val: I.getOperand(i))) {
      // Inline asm may only appear as the callee of a call/invoke.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!" , &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(Val: I.getOperand(i))) {
      visitConstantExprsRecursively(EntryC: CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(EntryC: CE);
      }
    }
  }

  // From here on: validate each recognized metadata attachment in turn.
  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!" , &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!" , &I);
    // The single operand is the required accuracy, a positive finite float.
    if (ConstantFP *CFP0 =
            mdconst::dyn_extract_or_null<ConstantFP>(MD: MD->getOperand(I: 0))) {
      const APFloat &Accuracy = CFP0->getValueAPF();
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type" , &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!" , &I);
    } else {
      Check(false, "invalid fpmath accuracy!" , &I);
    }
  }

  if (MDNode *Range = I.getMetadata(KindID: LLVMContext::MD_range)) {
    Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
          "Ranges are only for loads, calls and invokes!" , &I);
    visitRangeMetadata(I, Range, Ty: I.getType());
  }

  if (MDNode *Range = I.getMetadata(KindID: LLVMContext::MD_noalias_addrspace)) {
    Check(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicRMWInst>(I) ||
              isa<AtomicCmpXchgInst>(I) || isa<CallInst>(I),
          "noalias.addrspace are only for memory operations!" , &I);
    visitNoaliasAddrspaceMetadata(I, Range, Ty: I.getType());
  }

  if (I.hasMetadata(KindID: LLVMContext::MD_invariant_group)) {
    Check(isa<LoadInst>(I) || isa<StoreInst>(I),
          "invariant.group metadata is only for loads and stores" , &I);
  }

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types" ,
          &I);
    Check(isa<LoadInst>(I),
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes" ,
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty" , &I);
  }

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(KindID: LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(I, MD: TBAA);

  // !noalias and !alias.scope share the same list-of-scopes structure.
  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(KindID: LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types" ,
          &I);
    Check(isa<LoadInst>(I),
          "align applies only to load instructions, "
          "use attributes for calls or invokes" ,
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!" , &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD: AlignMD->getOperand(I: 0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!" , &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!" ,
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit" , &I);
  }

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(KindID: LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(KindID: LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MD: MMRA);

  if (MDNode *Annotation = I.getMetadata(KindID: LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  // The !dbg attachment must be a DILocation.
  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment" , &I, N);
    visitMDNode(MD: *N, AllowLocs: AreDebugLocsAllowed::Yes);
  }

  if (auto *DII = dyn_cast<DbgVariableIntrinsic>(Val: &I)) {
    verifyFragmentExpression(I: *DII);
    verifyNotEntryValue(I: *DII);
  }

  // Generic validation of every attachment; debug locations are only legal
  // inside !dbg and !llvm.loop.
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(MD: *Attachment.second, AllowLocs);
  }

  // Record this instruction for verifyDominatesUse's same-block fast path.
  InstsInThisBlock.insert(Ptr: &I);
}
| 5515 | |
| 5516 | /// Allow intrinsics to be verified in different ways. |
| 5517 | void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { |
| 5518 | Function *IF = Call.getCalledFunction(); |
| 5519 | Check(IF->isDeclaration(), "Intrinsic functions should never be defined!" , |
| 5520 | IF); |
| 5521 | |
| 5522 | // Verify that the intrinsic prototype lines up with what the .td files |
| 5523 | // describe. |
| 5524 | FunctionType *IFTy = IF->getFunctionType(); |
| 5525 | bool IsVarArg = IFTy->isVarArg(); |
| 5526 | |
| 5527 | SmallVector<Intrinsic::IITDescriptor, 8> Table; |
| 5528 | getIntrinsicInfoTableEntries(id: ID, T&: Table); |
| 5529 | ArrayRef<Intrinsic::IITDescriptor> TableRef = Table; |
| 5530 | |
| 5531 | // Walk the descriptors to extract overloaded types. |
| 5532 | SmallVector<Type *, 4> ArgTys; |
| 5533 | Intrinsic::MatchIntrinsicTypesResult Res = |
| 5534 | Intrinsic::matchIntrinsicSignature(FTy: IFTy, Infos&: TableRef, ArgTys); |
| 5535 | Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet, |
| 5536 | "Intrinsic has incorrect return type!" , IF); |
| 5537 | Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg, |
| 5538 | "Intrinsic has incorrect argument type!" , IF); |
| 5539 | |
| 5540 | // Verify if the intrinsic call matches the vararg property. |
| 5541 | if (IsVarArg) |
| 5542 | Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef), |
| 5543 | "Intrinsic was not defined with variable arguments!" , IF); |
| 5544 | else |
| 5545 | Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef), |
| 5546 | "Callsite was not defined with variable arguments!" , IF); |
| 5547 | |
| 5548 | // All descriptors should be absorbed by now. |
| 5549 | Check(TableRef.empty(), "Intrinsic has too few arguments!" , IF); |
| 5550 | |
| 5551 | // Now that we have the intrinsic ID and the actual argument types (and we |
| 5552 | // know they are legal for the intrinsic!) get the intrinsic name through the |
| 5553 | // usual means. This allows us to verify the mangling of argument types into |
| 5554 | // the name. |
| 5555 | const std::string ExpectedName = |
| 5556 | Intrinsic::getName(Id: ID, Tys: ArgTys, M: IF->getParent(), FT: IFTy); |
| 5557 | Check(ExpectedName == IF->getName(), |
| 5558 | "Intrinsic name not mangled correctly for type arguments! " |
| 5559 | "Should be: " + |
| 5560 | ExpectedName, |
| 5561 | IF); |
| 5562 | |
| 5563 | // If the intrinsic takes MDNode arguments, verify that they are either global |
| 5564 | // or are local to *this* function. |
| 5565 | for (Value *V : Call.args()) { |
| 5566 | if (auto *MD = dyn_cast<MetadataAsValue>(Val: V)) |
| 5567 | visitMetadataAsValue(MDV: *MD, F: Call.getCaller()); |
| 5568 | if (auto *Const = dyn_cast<Constant>(Val: V)) |
| 5569 | Check(!Const->getType()->isX86_AMXTy(), |
| 5570 | "const x86_amx is not allowed in argument!" ); |
| 5571 | } |
| 5572 | |
| 5573 | switch (ID) { |
| 5574 | default: |
| 5575 | break; |
| 5576 | case Intrinsic::assume: { |
| 5577 | for (auto &Elem : Call.bundle_op_infos()) { |
| 5578 | unsigned ArgCount = Elem.End - Elem.Begin; |
| 5579 | // Separate storage assumptions are special insofar as they're the only |
| 5580 | // operand bundles allowed on assumes that aren't parameter attributes. |
| 5581 | if (Elem.Tag->getKey() == "separate_storage" ) { |
| 5582 | Check(ArgCount == 2, |
| 5583 | "separate_storage assumptions should have 2 arguments" , Call); |
| 5584 | Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() && |
| 5585 | Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(), |
| 5586 | "arguments to separate_storage assumptions should be pointers" , |
| 5587 | Call); |
| 5588 | continue; |
| 5589 | } |
| 5590 | Check(Elem.Tag->getKey() == "ignore" || |
| 5591 | Attribute::isExistingAttribute(Elem.Tag->getKey()), |
| 5592 | "tags must be valid attribute names" , Call); |
| 5593 | Attribute::AttrKind Kind = |
| 5594 | Attribute::getAttrKindFromName(AttrName: Elem.Tag->getKey()); |
| 5595 | if (Kind == Attribute::Alignment) { |
| 5596 | Check(ArgCount <= 3 && ArgCount >= 2, |
| 5597 | "alignment assumptions should have 2 or 3 arguments" , Call); |
| 5598 | Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(), |
| 5599 | "first argument should be a pointer" , Call); |
| 5600 | Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(), |
| 5601 | "second argument should be an integer" , Call); |
| 5602 | if (ArgCount == 3) |
| 5603 | Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(), |
| 5604 | "third argument should be an integer if present" , Call); |
| 5605 | continue; |
| 5606 | } |
| 5607 | Check(ArgCount <= 2, "too many arguments" , Call); |
| 5608 | if (Kind == Attribute::None) |
| 5609 | break; |
| 5610 | if (Attribute::isIntAttrKind(Kind)) { |
| 5611 | Check(ArgCount == 2, "this attribute should have 2 arguments" , Call); |
| 5612 | Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)), |
| 5613 | "the second argument should be a constant integral value" , Call); |
| 5614 | } else if (Attribute::canUseAsParamAttr(Kind)) { |
| 5615 | Check((ArgCount) == 1, "this attribute should have one argument" , Call); |
| 5616 | } else if (Attribute::canUseAsFnAttr(Kind)) { |
| 5617 | Check((ArgCount) == 0, "this attribute has no argument" , Call); |
| 5618 | } |
| 5619 | } |
| 5620 | break; |
| 5621 | } |
| 5622 | case Intrinsic::ucmp: |
| 5623 | case Intrinsic::scmp: { |
| 5624 | Type *SrcTy = Call.getOperand(i_nocapture: 0)->getType(); |
| 5625 | Type *DestTy = Call.getType(); |
| 5626 | |
| 5627 | Check(DestTy->getScalarSizeInBits() >= 2, |
| 5628 | "result type must be at least 2 bits wide" , Call); |
| 5629 | |
| 5630 | bool IsDestTypeVector = DestTy->isVectorTy(); |
| 5631 | Check(SrcTy->isVectorTy() == IsDestTypeVector, |
| 5632 | "ucmp/scmp argument and result types must both be either vector or " |
| 5633 | "scalar types" , |
| 5634 | Call); |
| 5635 | if (IsDestTypeVector) { |
| 5636 | auto SrcVecLen = cast<VectorType>(Val: SrcTy)->getElementCount(); |
| 5637 | auto DestVecLen = cast<VectorType>(Val: DestTy)->getElementCount(); |
| 5638 | Check(SrcVecLen == DestVecLen, |
| 5639 | "return type and arguments must have the same number of " |
| 5640 | "elements" , |
| 5641 | Call); |
| 5642 | } |
| 5643 | break; |
| 5644 | } |
| 5645 | case Intrinsic::coro_id: { |
| 5646 | auto *InfoArg = Call.getArgOperand(i: 3)->stripPointerCasts(); |
| 5647 | if (isa<ConstantPointerNull>(Val: InfoArg)) |
| 5648 | break; |
| 5649 | auto *GV = dyn_cast<GlobalVariable>(Val: InfoArg); |
| 5650 | Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(), |
| 5651 | "info argument of llvm.coro.id must refer to an initialized " |
| 5652 | "constant" ); |
| 5653 | Constant *Init = GV->getInitializer(); |
| 5654 | Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init), |
| 5655 | "info argument of llvm.coro.id must refer to either a struct or " |
| 5656 | "an array" ); |
| 5657 | break; |
| 5658 | } |
| 5659 | case Intrinsic::is_fpclass: { |
| 5660 | const ConstantInt *TestMask = cast<ConstantInt>(Val: Call.getOperand(i_nocapture: 1)); |
| 5661 | Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0, |
| 5662 | "unsupported bits for llvm.is.fpclass test mask" ); |
| 5663 | break; |
| 5664 | } |
| 5665 | case Intrinsic::fptrunc_round: { |
| 5666 | // Check the rounding mode |
| 5667 | Metadata *MD = nullptr; |
| 5668 | auto *MAV = dyn_cast<MetadataAsValue>(Val: Call.getOperand(i_nocapture: 1)); |
| 5669 | if (MAV) |
| 5670 | MD = MAV->getMetadata(); |
| 5671 | |
| 5672 | Check(MD != nullptr, "missing rounding mode argument" , Call); |
| 5673 | |
| 5674 | Check(isa<MDString>(MD), |
| 5675 | ("invalid value for llvm.fptrunc.round metadata operand" |
| 5676 | " (the operand should be a string)" ), |
| 5677 | MD); |
| 5678 | |
| 5679 | std::optional<RoundingMode> RoundMode = |
| 5680 | convertStrToRoundingMode(cast<MDString>(Val: MD)->getString()); |
| 5681 | Check(RoundMode && *RoundMode != RoundingMode::Dynamic, |
| 5682 | "unsupported rounding mode argument" , Call); |
| 5683 | break; |
| 5684 | } |
| 5685 | #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID: |
| 5686 | #include "llvm/IR/VPIntrinsics.def" |
| 5687 | #undef BEGIN_REGISTER_VP_INTRINSIC |
| 5688 | visitVPIntrinsic(VPI&: cast<VPIntrinsic>(Val&: Call)); |
| 5689 | break; |
| 5690 | #define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \ |
| 5691 | case Intrinsic::INTRINSIC: |
| 5692 | #include "llvm/IR/ConstrainedOps.def" |
| 5693 | #undef INSTRUCTION |
| 5694 | visitConstrainedFPIntrinsic(FPI&: cast<ConstrainedFPIntrinsic>(Val&: Call)); |
| 5695 | break; |
| 5696 | case Intrinsic::dbg_declare: // llvm.dbg.declare |
| 5697 | Check(isa<MetadataAsValue>(Call.getArgOperand(0)), |
| 5698 | "invalid llvm.dbg.declare intrinsic call 1" , Call); |
| 5699 | visitDbgIntrinsic(Kind: "declare" , DII&: cast<DbgVariableIntrinsic>(Val&: Call)); |
| 5700 | break; |
| 5701 | case Intrinsic::dbg_value: // llvm.dbg.value |
| 5702 | visitDbgIntrinsic(Kind: "value" , DII&: cast<DbgVariableIntrinsic>(Val&: Call)); |
| 5703 | break; |
| 5704 | case Intrinsic::dbg_assign: // llvm.dbg.assign |
| 5705 | visitDbgIntrinsic(Kind: "assign" , DII&: cast<DbgVariableIntrinsic>(Val&: Call)); |
| 5706 | break; |
| 5707 | case Intrinsic::dbg_label: // llvm.dbg.label |
| 5708 | visitDbgLabelIntrinsic(Kind: "label" , DLI&: cast<DbgLabelInst>(Val&: Call)); |
| 5709 | break; |
| 5710 | case Intrinsic::memcpy: |
| 5711 | case Intrinsic::memcpy_inline: |
| 5712 | case Intrinsic::memmove: |
| 5713 | case Intrinsic::memset: |
| 5714 | case Intrinsic::memset_inline: |
| 5715 | break; |
| 5716 | case Intrinsic::experimental_memset_pattern: { |
| 5717 | const auto Memset = cast<MemSetPatternInst>(Val: &Call); |
| 5718 | Check(Memset->getValue()->getType()->isSized(), |
| 5719 | "unsized types cannot be used as memset patterns" , Call); |
| 5720 | break; |
| 5721 | } |
| 5722 | case Intrinsic::memcpy_element_unordered_atomic: |
| 5723 | case Intrinsic::memmove_element_unordered_atomic: |
| 5724 | case Intrinsic::memset_element_unordered_atomic: { |
| 5725 | const auto *AMI = cast<AnyMemIntrinsic>(Val: &Call); |
| 5726 | |
| 5727 | ConstantInt *ElementSizeCI = |
| 5728 | cast<ConstantInt>(Val: AMI->getRawElementSizeInBytes()); |
| 5729 | const APInt &ElementSizeVal = ElementSizeCI->getValue(); |
| 5730 | Check(ElementSizeVal.isPowerOf2(), |
| 5731 | "element size of the element-wise atomic memory intrinsic " |
| 5732 | "must be a power of 2" , |
| 5733 | Call); |
| 5734 | |
| 5735 | auto IsValidAlignment = [&](MaybeAlign Alignment) { |
| 5736 | return Alignment && ElementSizeVal.ule(RHS: Alignment->value()); |
| 5737 | }; |
| 5738 | Check(IsValidAlignment(AMI->getDestAlign()), |
| 5739 | "incorrect alignment of the destination argument" , Call); |
| 5740 | if (const auto *AMT = dyn_cast<AnyMemTransferInst>(Val: AMI)) { |
| 5741 | Check(IsValidAlignment(AMT->getSourceAlign()), |
| 5742 | "incorrect alignment of the source argument" , Call); |
| 5743 | } |
| 5744 | break; |
| 5745 | } |
| 5746 | case Intrinsic::call_preallocated_setup: { |
| 5747 | auto *NumArgs = dyn_cast<ConstantInt>(Val: Call.getArgOperand(i: 0)); |
| 5748 | Check(NumArgs != nullptr, |
| 5749 | "llvm.call.preallocated.setup argument must be a constant" ); |
| 5750 | bool FoundCall = false; |
| 5751 | for (User *U : Call.users()) { |
| 5752 | auto *UseCall = dyn_cast<CallBase>(Val: U); |
| 5753 | Check(UseCall != nullptr, |
| 5754 | "Uses of llvm.call.preallocated.setup must be calls" ); |
| 5755 | Intrinsic::ID IID = UseCall->getIntrinsicID(); |
| 5756 | if (IID == Intrinsic::call_preallocated_arg) { |
| 5757 | auto *AllocArgIndex = dyn_cast<ConstantInt>(Val: UseCall->getArgOperand(i: 1)); |
| 5758 | Check(AllocArgIndex != nullptr, |
| 5759 | "llvm.call.preallocated.alloc arg index must be a constant" ); |
| 5760 | auto AllocArgIndexInt = AllocArgIndex->getValue(); |
| 5761 | Check(AllocArgIndexInt.sge(0) && |
| 5762 | AllocArgIndexInt.slt(NumArgs->getValue()), |
| 5763 | "llvm.call.preallocated.alloc arg index must be between 0 and " |
| 5764 | "corresponding " |
| 5765 | "llvm.call.preallocated.setup's argument count" ); |
| 5766 | } else if (IID == Intrinsic::call_preallocated_teardown) { |
| 5767 | // nothing to do |
| 5768 | } else { |
| 5769 | Check(!FoundCall, "Can have at most one call corresponding to a " |
| 5770 | "llvm.call.preallocated.setup" ); |
| 5771 | FoundCall = true; |
| 5772 | size_t NumPreallocatedArgs = 0; |
| 5773 | for (unsigned i = 0; i < UseCall->arg_size(); i++) { |
| 5774 | if (UseCall->paramHasAttr(ArgNo: i, Kind: Attribute::Preallocated)) { |
| 5775 | ++NumPreallocatedArgs; |
| 5776 | } |
| 5777 | } |
| 5778 | Check(NumPreallocatedArgs != 0, |
| 5779 | "cannot use preallocated intrinsics on a call without " |
| 5780 | "preallocated arguments" ); |
| 5781 | Check(NumArgs->equalsInt(NumPreallocatedArgs), |
| 5782 | "llvm.call.preallocated.setup arg size must be equal to number " |
| 5783 | "of preallocated arguments " |
| 5784 | "at call site" , |
| 5785 | Call, *UseCall); |
| 5786 | // getOperandBundle() cannot be called if more than one of the operand |
| 5787 | // bundle exists. There is already a check elsewhere for this, so skip |
| 5788 | // here if we see more than one. |
| 5789 | if (UseCall->countOperandBundlesOfType(ID: LLVMContext::OB_preallocated) > |
| 5790 | 1) { |
| 5791 | return; |
| 5792 | } |
| 5793 | auto PreallocatedBundle = |
| 5794 | UseCall->getOperandBundle(ID: LLVMContext::OB_preallocated); |
| 5795 | Check(PreallocatedBundle, |
| 5796 | "Use of llvm.call.preallocated.setup outside intrinsics " |
| 5797 | "must be in \"preallocated\" operand bundle" ); |
| 5798 | Check(PreallocatedBundle->Inputs.front().get() == &Call, |
| 5799 | "preallocated bundle must have token from corresponding " |
| 5800 | "llvm.call.preallocated.setup" ); |
| 5801 | } |
| 5802 | } |
| 5803 | break; |
| 5804 | } |
| 5805 | case Intrinsic::call_preallocated_arg: { |
| 5806 | auto *Token = dyn_cast<CallBase>(Val: Call.getArgOperand(i: 0)); |
| 5807 | Check(Token && |
| 5808 | Token->getIntrinsicID() == Intrinsic::call_preallocated_setup, |
| 5809 | "llvm.call.preallocated.arg token argument must be a " |
| 5810 | "llvm.call.preallocated.setup" ); |
| 5811 | Check(Call.hasFnAttr(Attribute::Preallocated), |
| 5812 | "llvm.call.preallocated.arg must be called with a \"preallocated\" " |
| 5813 | "call site attribute" ); |
| 5814 | break; |
| 5815 | } |
| 5816 | case Intrinsic::call_preallocated_teardown: { |
| 5817 | auto *Token = dyn_cast<CallBase>(Val: Call.getArgOperand(i: 0)); |
| 5818 | Check(Token && |
| 5819 | Token->getIntrinsicID() == Intrinsic::call_preallocated_setup, |
| 5820 | "llvm.call.preallocated.teardown token argument must be a " |
| 5821 | "llvm.call.preallocated.setup" ); |
| 5822 | break; |
| 5823 | } |
| 5824 | case Intrinsic::gcroot: |
| 5825 | case Intrinsic::gcwrite: |
| 5826 | case Intrinsic::gcread: |
| 5827 | if (ID == Intrinsic::gcroot) { |
| 5828 | AllocaInst *AI = |
| 5829 | dyn_cast<AllocaInst>(Val: Call.getArgOperand(i: 0)->stripPointerCasts()); |
| 5830 | Check(AI, "llvm.gcroot parameter #1 must be an alloca." , Call); |
| 5831 | Check(isa<Constant>(Call.getArgOperand(1)), |
| 5832 | "llvm.gcroot parameter #2 must be a constant." , Call); |
| 5833 | if (!AI->getAllocatedType()->isPointerTy()) { |
| 5834 | Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)), |
| 5835 | "llvm.gcroot parameter #1 must either be a pointer alloca, " |
| 5836 | "or argument #2 must be a non-null constant." , |
| 5837 | Call); |
| 5838 | } |
| 5839 | } |
| 5840 | |
| 5841 | Check(Call.getParent()->getParent()->hasGC(), |
| 5842 | "Enclosing function does not use GC." , Call); |
| 5843 | break; |
| 5844 | case Intrinsic::init_trampoline: |
| 5845 | Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()), |
| 5846 | "llvm.init_trampoline parameter #2 must resolve to a function." , |
| 5847 | Call); |
| 5848 | break; |
| 5849 | case Intrinsic::prefetch: |
| 5850 | Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2, |
| 5851 | "rw argument to llvm.prefetch must be 0-1" , Call); |
| 5852 | Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4, |
| 5853 | "locality argument to llvm.prefetch must be 0-3" , Call); |
| 5854 | Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2, |
| 5855 | "cache type argument to llvm.prefetch must be 0-1" , Call); |
| 5856 | break; |
| 5857 | case Intrinsic::stackprotector: |
| 5858 | Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()), |
| 5859 | "llvm.stackprotector parameter #2 must resolve to an alloca." , Call); |
| 5860 | break; |
| 5861 | case Intrinsic::localescape: { |
| 5862 | BasicBlock *BB = Call.getParent(); |
| 5863 | Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block" , |
| 5864 | Call); |
| 5865 | Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function" , |
| 5866 | Call); |
| 5867 | for (Value *Arg : Call.args()) { |
| 5868 | if (isa<ConstantPointerNull>(Val: Arg)) |
| 5869 | continue; // Null values are allowed as placeholders. |
| 5870 | auto *AI = dyn_cast<AllocaInst>(Val: Arg->stripPointerCasts()); |
| 5871 | Check(AI && AI->isStaticAlloca(), |
| 5872 | "llvm.localescape only accepts static allocas" , Call); |
| 5873 | } |
| 5874 | FrameEscapeInfo[BB->getParent()].first = Call.arg_size(); |
| 5875 | SawFrameEscape = true; |
| 5876 | break; |
| 5877 | } |
| 5878 | case Intrinsic::localrecover: { |
| 5879 | Value *FnArg = Call.getArgOperand(i: 0)->stripPointerCasts(); |
| 5880 | Function *Fn = dyn_cast<Function>(Val: FnArg); |
| 5881 | Check(Fn && !Fn->isDeclaration(), |
| 5882 | "llvm.localrecover first " |
| 5883 | "argument must be function defined in this module" , |
| 5884 | Call); |
| 5885 | auto *IdxArg = cast<ConstantInt>(Val: Call.getArgOperand(i: 2)); |
| 5886 | auto &Entry = FrameEscapeInfo[Fn]; |
| 5887 | Entry.second = unsigned( |
| 5888 | std::max(a: uint64_t(Entry.second), b: IdxArg->getLimitedValue(Limit: ~0U) + 1)); |
| 5889 | break; |
| 5890 | } |
| 5891 | |
| 5892 | case Intrinsic::experimental_gc_statepoint: |
| 5893 | if (auto *CI = dyn_cast<CallInst>(Val: &Call)) |
| 5894 | Check(!CI->isInlineAsm(), |
| 5895 | "gc.statepoint support for inline assembly unimplemented" , CI); |
| 5896 | Check(Call.getParent()->getParent()->hasGC(), |
| 5897 | "Enclosing function does not use GC." , Call); |
| 5898 | |
| 5899 | verifyStatepoint(Call); |
| 5900 | break; |
| 5901 | case Intrinsic::experimental_gc_result: { |
| 5902 | Check(Call.getParent()->getParent()->hasGC(), |
| 5903 | "Enclosing function does not use GC." , Call); |
| 5904 | |
| 5905 | auto *Statepoint = Call.getArgOperand(i: 0); |
| 5906 | if (isa<UndefValue>(Val: Statepoint)) |
| 5907 | break; |
| 5908 | |
| 5909 | // Are we tied to a statepoint properly? |
| 5910 | const auto *StatepointCall = dyn_cast<CallBase>(Val: Statepoint); |
| 5911 | Check(StatepointCall && StatepointCall->getIntrinsicID() == |
| 5912 | Intrinsic::experimental_gc_statepoint, |
| 5913 | "gc.result operand #1 must be from a statepoint" , Call, |
| 5914 | Call.getArgOperand(0)); |
| 5915 | |
| 5916 | // Check that result type matches wrapped callee. |
| 5917 | auto *TargetFuncType = |
| 5918 | cast<FunctionType>(Val: StatepointCall->getParamElementType(ArgNo: 2)); |
| 5919 | Check(Call.getType() == TargetFuncType->getReturnType(), |
| 5920 | "gc.result result type does not match wrapped callee" , Call); |
| 5921 | break; |
| 5922 | } |
| 5923 | case Intrinsic::experimental_gc_relocate: { |
| 5924 | Check(Call.arg_size() == 3, "wrong number of arguments" , Call); |
| 5925 | |
| 5926 | Check(isa<PointerType>(Call.getType()->getScalarType()), |
| 5927 | "gc.relocate must return a pointer or a vector of pointers" , Call); |
| 5928 | |
| 5929 | // Check that this relocate is correctly tied to the statepoint |
| 5930 | |
| 5931 | // This is case for relocate on the unwinding path of an invoke statepoint |
| 5932 | if (LandingPadInst *LandingPad = |
| 5933 | dyn_cast<LandingPadInst>(Val: Call.getArgOperand(i: 0))) { |
| 5934 | |
| 5935 | const BasicBlock *InvokeBB = |
| 5936 | LandingPad->getParent()->getUniquePredecessor(); |
| 5937 | |
| 5938 | // Landingpad relocates should have only one predecessor with invoke |
| 5939 | // statepoint terminator |
| 5940 | Check(InvokeBB, "safepoints should have unique landingpads" , |
| 5941 | LandingPad->getParent()); |
| 5942 | Check(InvokeBB->getTerminator(), "safepoint block should be well formed" , |
| 5943 | InvokeBB); |
| 5944 | Check(isa<GCStatepointInst>(InvokeBB->getTerminator()), |
| 5945 | "gc relocate should be linked to a statepoint" , InvokeBB); |
| 5946 | } else { |
| 5947 | // In all other cases relocate should be tied to the statepoint directly. |
| 5948 | // This covers relocates on a normal return path of invoke statepoint and |
| 5949 | // relocates of a call statepoint. |
| 5950 | auto *Token = Call.getArgOperand(i: 0); |
| 5951 | Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token), |
| 5952 | "gc relocate is incorrectly tied to the statepoint" , Call, Token); |
| 5953 | } |
| 5954 | |
| 5955 | // Verify rest of the relocate arguments. |
| 5956 | const Value &StatepointCall = *cast<GCRelocateInst>(Val&: Call).getStatepoint(); |
| 5957 | |
| 5958 | // Both the base and derived must be piped through the safepoint. |
| 5959 | Value *Base = Call.getArgOperand(i: 1); |
| 5960 | Check(isa<ConstantInt>(Base), |
| 5961 | "gc.relocate operand #2 must be integer offset" , Call); |
| 5962 | |
| 5963 | Value *Derived = Call.getArgOperand(i: 2); |
| 5964 | Check(isa<ConstantInt>(Derived), |
| 5965 | "gc.relocate operand #3 must be integer offset" , Call); |
| 5966 | |
| 5967 | const uint64_t BaseIndex = cast<ConstantInt>(Val: Base)->getZExtValue(); |
| 5968 | const uint64_t DerivedIndex = cast<ConstantInt>(Val: Derived)->getZExtValue(); |
| 5969 | |
| 5970 | // Check the bounds |
| 5971 | if (isa<UndefValue>(Val: StatepointCall)) |
| 5972 | break; |
| 5973 | if (auto Opt = cast<GCStatepointInst>(Val: StatepointCall) |
| 5974 | .getOperandBundle(ID: LLVMContext::OB_gc_live)) { |
| 5975 | Check(BaseIndex < Opt->Inputs.size(), |
| 5976 | "gc.relocate: statepoint base index out of bounds" , Call); |
| 5977 | Check(DerivedIndex < Opt->Inputs.size(), |
| 5978 | "gc.relocate: statepoint derived index out of bounds" , Call); |
| 5979 | } |
| 5980 | |
| 5981 | // Relocated value must be either a pointer type or vector-of-pointer type, |
| 5982 | // but gc_relocate does not need to return the same pointer type as the |
| 5983 | // relocated pointer. It can be casted to the correct type later if it's |
| 5984 | // desired. However, they must have the same address space and 'vectorness' |
| 5985 | GCRelocateInst &Relocate = cast<GCRelocateInst>(Val&: Call); |
| 5986 | auto *ResultType = Call.getType(); |
| 5987 | auto *DerivedType = Relocate.getDerivedPtr()->getType(); |
| 5988 | auto *BaseType = Relocate.getBasePtr()->getType(); |
| 5989 | |
| 5990 | Check(BaseType->isPtrOrPtrVectorTy(), |
| 5991 | "gc.relocate: relocated value must be a pointer" , Call); |
| 5992 | Check(DerivedType->isPtrOrPtrVectorTy(), |
| 5993 | "gc.relocate: relocated value must be a pointer" , Call); |
| 5994 | |
| 5995 | Check(ResultType->isVectorTy() == DerivedType->isVectorTy(), |
| 5996 | "gc.relocate: vector relocates to vector and pointer to pointer" , |
| 5997 | Call); |
| 5998 | Check( |
| 5999 | ResultType->getPointerAddressSpace() == |
| 6000 | DerivedType->getPointerAddressSpace(), |
| 6001 | "gc.relocate: relocating a pointer shouldn't change its address space" , |
| 6002 | Call); |
| 6003 | |
| 6004 | auto GC = llvm::getGCStrategy(Name: Relocate.getFunction()->getGC()); |
| 6005 | Check(GC, "gc.relocate: calling function must have GCStrategy" , |
| 6006 | Call.getFunction()); |
| 6007 | if (GC) { |
| 6008 | auto isGCPtr = [&GC](Type *PTy) { |
| 6009 | return GC->isGCManagedPointer(Ty: PTy->getScalarType()).value_or(u: true); |
| 6010 | }; |
| 6011 | Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer" , Call); |
| 6012 | Check(isGCPtr(BaseType), |
| 6013 | "gc.relocate: relocated value must be a gc pointer" , Call); |
| 6014 | Check(isGCPtr(DerivedType), |
| 6015 | "gc.relocate: relocated value must be a gc pointer" , Call); |
| 6016 | } |
| 6017 | break; |
| 6018 | } |
| 6019 | case Intrinsic::experimental_patchpoint: { |
| 6020 | if (Call.getCallingConv() == CallingConv::AnyReg) { |
| 6021 | Check(Call.getType()->isSingleValueType(), |
| 6022 | "patchpoint: invalid return type used with anyregcc" , Call); |
| 6023 | } |
| 6024 | break; |
| 6025 | } |
| 6026 | case Intrinsic::eh_exceptioncode: |
| 6027 | case Intrinsic::eh_exceptionpointer: { |
| 6028 | Check(isa<CatchPadInst>(Call.getArgOperand(0)), |
| 6029 | "eh.exceptionpointer argument must be a catchpad" , Call); |
| 6030 | break; |
| 6031 | } |
| 6032 | case Intrinsic::get_active_lane_mask: { |
| 6033 | Check(Call.getType()->isVectorTy(), |
| 6034 | "get_active_lane_mask: must return a " |
| 6035 | "vector" , |
| 6036 | Call); |
| 6037 | auto *ElemTy = Call.getType()->getScalarType(); |
| 6038 | Check(ElemTy->isIntegerTy(1), |
| 6039 | "get_active_lane_mask: element type is not " |
| 6040 | "i1" , |
| 6041 | Call); |
| 6042 | break; |
| 6043 | } |
| 6044 | case Intrinsic::experimental_get_vector_length: { |
| 6045 | ConstantInt *VF = cast<ConstantInt>(Val: Call.getArgOperand(i: 1)); |
| 6046 | Check(!VF->isNegative() && !VF->isZero(), |
| 6047 | "get_vector_length: VF must be positive" , Call); |
| 6048 | break; |
| 6049 | } |
| 6050 | case Intrinsic::masked_load: { |
| 6051 | Check(Call.getType()->isVectorTy(), "masked_load: must return a vector" , |
| 6052 | Call); |
| 6053 | |
| 6054 | ConstantInt *Alignment = cast<ConstantInt>(Val: Call.getArgOperand(i: 1)); |
| 6055 | Value *Mask = Call.getArgOperand(i: 2); |
| 6056 | Value *PassThru = Call.getArgOperand(i: 3); |
| 6057 | Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector" , |
| 6058 | Call); |
| 6059 | Check(Alignment->getValue().isPowerOf2(), |
| 6060 | "masked_load: alignment must be a power of 2" , Call); |
| 6061 | Check(PassThru->getType() == Call.getType(), |
| 6062 | "masked_load: pass through and return type must match" , Call); |
| 6063 | Check(cast<VectorType>(Mask->getType())->getElementCount() == |
| 6064 | cast<VectorType>(Call.getType())->getElementCount(), |
| 6065 | "masked_load: vector mask must be same length as return" , Call); |
| 6066 | break; |
| 6067 | } |
| 6068 | case Intrinsic::masked_store: { |
| 6069 | Value *Val = Call.getArgOperand(i: 0); |
| 6070 | ConstantInt *Alignment = cast<ConstantInt>(Val: Call.getArgOperand(i: 2)); |
| 6071 | Value *Mask = Call.getArgOperand(i: 3); |
| 6072 | Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector" , |
| 6073 | Call); |
| 6074 | Check(Alignment->getValue().isPowerOf2(), |
| 6075 | "masked_store: alignment must be a power of 2" , Call); |
| 6076 | Check(cast<VectorType>(Mask->getType())->getElementCount() == |
| 6077 | cast<VectorType>(Val->getType())->getElementCount(), |
| 6078 | "masked_store: vector mask must be same length as value" , Call); |
| 6079 | break; |
| 6080 | } |
| 6081 | |
| 6082 | case Intrinsic::masked_gather: { |
| 6083 | const APInt &Alignment = |
| 6084 | cast<ConstantInt>(Val: Call.getArgOperand(i: 1))->getValue(); |
| 6085 | Check(Alignment.isZero() || Alignment.isPowerOf2(), |
| 6086 | "masked_gather: alignment must be 0 or a power of 2" , Call); |
| 6087 | break; |
| 6088 | } |
| 6089 | case Intrinsic::masked_scatter: { |
| 6090 | const APInt &Alignment = |
| 6091 | cast<ConstantInt>(Val: Call.getArgOperand(i: 2))->getValue(); |
| 6092 | Check(Alignment.isZero() || Alignment.isPowerOf2(), |
| 6093 | "masked_scatter: alignment must be 0 or a power of 2" , Call); |
| 6094 | break; |
| 6095 | } |
| 6096 | |
| 6097 | case Intrinsic::experimental_guard: { |
| 6098 | Check(isa<CallInst>(Call), "experimental_guard cannot be invoked" , Call); |
| 6099 | Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, |
| 6100 | "experimental_guard must have exactly one " |
| 6101 | "\"deopt\" operand bundle" ); |
| 6102 | break; |
| 6103 | } |
| 6104 | |
| 6105 | case Intrinsic::experimental_deoptimize: { |
| 6106 | Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked" , |
| 6107 | Call); |
| 6108 | Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, |
| 6109 | "experimental_deoptimize must have exactly one " |
| 6110 | "\"deopt\" operand bundle" ); |
| 6111 | Check(Call.getType() == Call.getFunction()->getReturnType(), |
| 6112 | "experimental_deoptimize return type must match caller return type" ); |
| 6113 | |
| 6114 | if (isa<CallInst>(Val: Call)) { |
| 6115 | auto *RI = dyn_cast<ReturnInst>(Val: Call.getNextNode()); |
| 6116 | Check(RI, |
| 6117 | "calls to experimental_deoptimize must be followed by a return" ); |
| 6118 | |
| 6119 | if (!Call.getType()->isVoidTy() && RI) |
| 6120 | Check(RI->getReturnValue() == &Call, |
| 6121 | "calls to experimental_deoptimize must be followed by a return " |
| 6122 | "of the value computed by experimental_deoptimize" ); |
| 6123 | } |
| 6124 | |
| 6125 | break; |
| 6126 | } |
| 6127 | case Intrinsic::vastart: { |
| 6128 | Check(Call.getFunction()->isVarArg(), |
| 6129 | "va_start called in a non-varargs function" ); |
| 6130 | break; |
| 6131 | } |
| 6132 | case Intrinsic::get_dynamic_area_offset: { |
| 6133 | auto *IntTy = dyn_cast<IntegerType>(Val: Call.getType()); |
| 6134 | Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) == |
| 6135 | IntTy->getBitWidth(), |
| 6136 | "get_dynamic_area_offset result type must be scalar integer matching " |
| 6137 | "alloca address space width" , |
| 6138 | Call); |
| 6139 | break; |
| 6140 | } |
| 6141 | case Intrinsic::vector_reduce_and: |
| 6142 | case Intrinsic::vector_reduce_or: |
| 6143 | case Intrinsic::vector_reduce_xor: |
| 6144 | case Intrinsic::vector_reduce_add: |
| 6145 | case Intrinsic::vector_reduce_mul: |
| 6146 | case Intrinsic::vector_reduce_smax: |
| 6147 | case Intrinsic::vector_reduce_smin: |
| 6148 | case Intrinsic::vector_reduce_umax: |
| 6149 | case Intrinsic::vector_reduce_umin: { |
| 6150 | Type *ArgTy = Call.getArgOperand(i: 0)->getType(); |
| 6151 | Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(), |
| 6152 | "Intrinsic has incorrect argument type!" ); |
| 6153 | break; |
| 6154 | } |
| 6155 | case Intrinsic::vector_reduce_fmax: |
| 6156 | case Intrinsic::vector_reduce_fmin: { |
| 6157 | Type *ArgTy = Call.getArgOperand(i: 0)->getType(); |
| 6158 | Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(), |
| 6159 | "Intrinsic has incorrect argument type!" ); |
| 6160 | break; |
| 6161 | } |
| 6162 | case Intrinsic::vector_reduce_fadd: |
| 6163 | case Intrinsic::vector_reduce_fmul: { |
| 6164 | // Unlike the other reductions, the first argument is a start value. The |
| 6165 | // second argument is the vector to be reduced. |
| 6166 | Type *ArgTy = Call.getArgOperand(i: 1)->getType(); |
| 6167 | Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(), |
| 6168 | "Intrinsic has incorrect argument type!" ); |
| 6169 | break; |
| 6170 | } |
| 6171 | case Intrinsic::smul_fix: |
| 6172 | case Intrinsic::smul_fix_sat: |
| 6173 | case Intrinsic::umul_fix: |
| 6174 | case Intrinsic::umul_fix_sat: |
| 6175 | case Intrinsic::sdiv_fix: |
| 6176 | case Intrinsic::sdiv_fix_sat: |
| 6177 | case Intrinsic::udiv_fix: |
| 6178 | case Intrinsic::udiv_fix_sat: { |
| 6179 | Value *Op1 = Call.getArgOperand(i: 0); |
| 6180 | Value *Op2 = Call.getArgOperand(i: 1); |
| 6181 | Check(Op1->getType()->isIntOrIntVectorTy(), |
| 6182 | "first operand of [us][mul|div]_fix[_sat] must be an int type or " |
| 6183 | "vector of ints" ); |
| 6184 | Check(Op2->getType()->isIntOrIntVectorTy(), |
| 6185 | "second operand of [us][mul|div]_fix[_sat] must be an int type or " |
| 6186 | "vector of ints" ); |
| 6187 | |
| 6188 | auto *Op3 = cast<ConstantInt>(Val: Call.getArgOperand(i: 2)); |
| 6189 | Check(Op3->getType()->isIntegerTy(), |
| 6190 | "third operand of [us][mul|div]_fix[_sat] must be an int type" ); |
| 6191 | Check(Op3->getBitWidth() <= 32, |
| 6192 | "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits" ); |
| 6193 | |
| 6194 | if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat || |
| 6195 | ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) { |
| 6196 | Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(), |
| 6197 | "the scale of s[mul|div]_fix[_sat] must be less than the width of " |
| 6198 | "the operands" ); |
| 6199 | } else { |
| 6200 | Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(), |
| 6201 | "the scale of u[mul|div]_fix[_sat] must be less than or equal " |
| 6202 | "to the width of the operands" ); |
| 6203 | } |
| 6204 | break; |
| 6205 | } |
| 6206 | case Intrinsic::lrint: |
| 6207 | case Intrinsic::llrint: |
| 6208 | case Intrinsic::lround: |
| 6209 | case Intrinsic::llround: { |
| 6210 | Type *ValTy = Call.getArgOperand(i: 0)->getType(); |
| 6211 | Type *ResultTy = Call.getType(); |
| 6212 | auto *VTy = dyn_cast<VectorType>(Val: ValTy); |
| 6213 | auto *RTy = dyn_cast<VectorType>(Val: ResultTy); |
| 6214 | Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(), |
| 6215 | ExpectedName + ": argument must be floating-point or vector " |
| 6216 | "of floating-points, and result must be integer or " |
| 6217 | "vector of integers" , |
| 6218 | &Call); |
| 6219 | Check(ValTy->isVectorTy() == ResultTy->isVectorTy(), |
| 6220 | ExpectedName + ": argument and result disagree on vector use" , &Call); |
| 6221 | if (VTy) { |
| 6222 | Check(VTy->getElementCount() == RTy->getElementCount(), |
| 6223 | ExpectedName + ": argument must be same length as result" , &Call); |
| 6224 | } |
| 6225 | break; |
| 6226 | } |
| 6227 | case Intrinsic::bswap: { |
| 6228 | Type *Ty = Call.getType(); |
| 6229 | unsigned Size = Ty->getScalarSizeInBits(); |
| 6230 | Check(Size % 16 == 0, "bswap must be an even number of bytes" , &Call); |
| 6231 | break; |
| 6232 | } |
| 6233 | case Intrinsic::invariant_start: { |
| 6234 | ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Val: Call.getArgOperand(i: 0)); |
| 6235 | Check(InvariantSize && |
| 6236 | (!InvariantSize->isNegative() || InvariantSize->isMinusOne()), |
| 6237 | "invariant_start parameter must be -1, 0 or a positive number" , |
| 6238 | &Call); |
| 6239 | break; |
| 6240 | } |
| 6241 | case Intrinsic::matrix_multiply: |
| 6242 | case Intrinsic::matrix_transpose: |
| 6243 | case Intrinsic::matrix_column_major_load: |
| 6244 | case Intrinsic::matrix_column_major_store: { |
| 6245 | Function *IF = Call.getCalledFunction(); |
| 6246 | ConstantInt *Stride = nullptr; |
| 6247 | ConstantInt *NumRows; |
| 6248 | ConstantInt *NumColumns; |
| 6249 | VectorType *ResultTy; |
| 6250 | Type *Op0ElemTy = nullptr; |
| 6251 | Type *Op1ElemTy = nullptr; |
| 6252 | switch (ID) { |
| 6253 | case Intrinsic::matrix_multiply: { |
| 6254 | NumRows = cast<ConstantInt>(Val: Call.getArgOperand(i: 2)); |
| 6255 | ConstantInt *N = cast<ConstantInt>(Val: Call.getArgOperand(i: 3)); |
| 6256 | NumColumns = cast<ConstantInt>(Val: Call.getArgOperand(i: 4)); |
| 6257 | Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType()) |
| 6258 | ->getNumElements() == |
| 6259 | NumRows->getZExtValue() * N->getZExtValue(), |
| 6260 | "First argument of a matrix operation does not match specified " |
| 6261 | "shape!" ); |
| 6262 | Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType()) |
| 6263 | ->getNumElements() == |
| 6264 | N->getZExtValue() * NumColumns->getZExtValue(), |
| 6265 | "Second argument of a matrix operation does not match specified " |
| 6266 | "shape!" ); |
| 6267 | |
| 6268 | ResultTy = cast<VectorType>(Val: Call.getType()); |
| 6269 | Op0ElemTy = |
| 6270 | cast<VectorType>(Val: Call.getArgOperand(i: 0)->getType())->getElementType(); |
| 6271 | Op1ElemTy = |
| 6272 | cast<VectorType>(Val: Call.getArgOperand(i: 1)->getType())->getElementType(); |
| 6273 | break; |
| 6274 | } |
| 6275 | case Intrinsic::matrix_transpose: |
| 6276 | NumRows = cast<ConstantInt>(Val: Call.getArgOperand(i: 1)); |
| 6277 | NumColumns = cast<ConstantInt>(Val: Call.getArgOperand(i: 2)); |
| 6278 | ResultTy = cast<VectorType>(Val: Call.getType()); |
| 6279 | Op0ElemTy = |
| 6280 | cast<VectorType>(Val: Call.getArgOperand(i: 0)->getType())->getElementType(); |
| 6281 | break; |
| 6282 | case Intrinsic::matrix_column_major_load: { |
| 6283 | Stride = dyn_cast<ConstantInt>(Val: Call.getArgOperand(i: 1)); |
| 6284 | NumRows = cast<ConstantInt>(Val: Call.getArgOperand(i: 3)); |
| 6285 | NumColumns = cast<ConstantInt>(Val: Call.getArgOperand(i: 4)); |
| 6286 | ResultTy = cast<VectorType>(Val: Call.getType()); |
| 6287 | break; |
| 6288 | } |
| 6289 | case Intrinsic::matrix_column_major_store: { |
| 6290 | Stride = dyn_cast<ConstantInt>(Val: Call.getArgOperand(i: 2)); |
| 6291 | NumRows = cast<ConstantInt>(Val: Call.getArgOperand(i: 4)); |
| 6292 | NumColumns = cast<ConstantInt>(Val: Call.getArgOperand(i: 5)); |
| 6293 | ResultTy = cast<VectorType>(Val: Call.getArgOperand(i: 0)->getType()); |
| 6294 | Op0ElemTy = |
| 6295 | cast<VectorType>(Val: Call.getArgOperand(i: 0)->getType())->getElementType(); |
| 6296 | break; |
| 6297 | } |
| 6298 | default: |
| 6299 | llvm_unreachable("unexpected intrinsic" ); |
| 6300 | } |
| 6301 | |
| 6302 | Check(ResultTy->getElementType()->isIntegerTy() || |
| 6303 | ResultTy->getElementType()->isFloatingPointTy(), |
| 6304 | "Result type must be an integer or floating-point type!" , IF); |
| 6305 | |
| 6306 | if (Op0ElemTy) |
| 6307 | Check(ResultTy->getElementType() == Op0ElemTy, |
| 6308 | "Vector element type mismatch of the result and first operand " |
| 6309 | "vector!" , |
| 6310 | IF); |
| 6311 | |
| 6312 | if (Op1ElemTy) |
| 6313 | Check(ResultTy->getElementType() == Op1ElemTy, |
| 6314 | "Vector element type mismatch of the result and second operand " |
| 6315 | "vector!" , |
| 6316 | IF); |
| 6317 | |
| 6318 | Check(cast<FixedVectorType>(ResultTy)->getNumElements() == |
| 6319 | NumRows->getZExtValue() * NumColumns->getZExtValue(), |
| 6320 | "Result of a matrix operation does not fit in the returned vector!" ); |
| 6321 | |
| 6322 | if (Stride) |
| 6323 | Check(Stride->getZExtValue() >= NumRows->getZExtValue(), |
| 6324 | "Stride must be greater or equal than the number of rows!" , IF); |
| 6325 | |
| 6326 | break; |
| 6327 | } |
| 6328 | case Intrinsic::vector_splice: { |
| 6329 | VectorType *VecTy = cast<VectorType>(Val: Call.getType()); |
| 6330 | int64_t Idx = cast<ConstantInt>(Val: Call.getArgOperand(i: 2))->getSExtValue(); |
| 6331 | int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue(); |
| 6332 | if (Call.getParent() && Call.getParent()->getParent()) { |
| 6333 | AttributeList Attrs = Call.getParent()->getParent()->getAttributes(); |
| 6334 | if (Attrs.hasFnAttr(Kind: Attribute::VScaleRange)) |
| 6335 | KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin(); |
| 6336 | } |
| 6337 | Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) || |
| 6338 | (Idx >= 0 && Idx < KnownMinNumElements), |
| 6339 | "The splice index exceeds the range [-VL, VL-1] where VL is the " |
| 6340 | "known minimum number of elements in the vector. For scalable " |
| 6341 | "vectors the minimum number of elements is determined from " |
| 6342 | "vscale_range." , |
| 6343 | &Call); |
| 6344 | break; |
| 6345 | } |
| 6346 | case Intrinsic::stepvector: { |
| 6347 | VectorType *VecTy = dyn_cast<VectorType>(Val: Call.getType()); |
| 6348 | Check(VecTy && VecTy->getScalarType()->isIntegerTy() && |
| 6349 | VecTy->getScalarSizeInBits() >= 8, |
| 6350 | "stepvector only supported for vectors of integers " |
| 6351 | "with a bitwidth of at least 8." , |
| 6352 | &Call); |
| 6353 | break; |
| 6354 | } |
| 6355 | case Intrinsic::experimental_vector_match: { |
| 6356 | Value *Op1 = Call.getArgOperand(i: 0); |
| 6357 | Value *Op2 = Call.getArgOperand(i: 1); |
| 6358 | Value *Mask = Call.getArgOperand(i: 2); |
| 6359 | |
| 6360 | VectorType *Op1Ty = dyn_cast<VectorType>(Val: Op1->getType()); |
| 6361 | VectorType *Op2Ty = dyn_cast<VectorType>(Val: Op2->getType()); |
| 6362 | VectorType *MaskTy = dyn_cast<VectorType>(Val: Mask->getType()); |
| 6363 | |
| 6364 | Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors." , &Call); |
| 6365 | Check(isa<FixedVectorType>(Op2Ty), |
| 6366 | "Second operand must be a fixed length vector." , &Call); |
| 6367 | Check(Op1Ty->getElementType()->isIntegerTy(), |
| 6368 | "First operand must be a vector of integers." , &Call); |
| 6369 | Check(Op1Ty->getElementType() == Op2Ty->getElementType(), |
| 6370 | "First two operands must have the same element type." , &Call); |
| 6371 | Check(Op1Ty->getElementCount() == MaskTy->getElementCount(), |
| 6372 | "First operand and mask must have the same number of elements." , |
| 6373 | &Call); |
| 6374 | Check(MaskTy->getElementType()->isIntegerTy(1), |
| 6375 | "Mask must be a vector of i1's." , &Call); |
| 6376 | Check(Call.getType() == MaskTy, "Return type must match the mask type." , |
| 6377 | &Call); |
| 6378 | break; |
| 6379 | } |
| 6380 | case Intrinsic::vector_insert: { |
| 6381 | Value *Vec = Call.getArgOperand(i: 0); |
| 6382 | Value *SubVec = Call.getArgOperand(i: 1); |
| 6383 | Value *Idx = Call.getArgOperand(i: 2); |
| 6384 | unsigned IdxN = cast<ConstantInt>(Val: Idx)->getZExtValue(); |
| 6385 | |
| 6386 | VectorType *VecTy = cast<VectorType>(Val: Vec->getType()); |
| 6387 | VectorType *SubVecTy = cast<VectorType>(Val: SubVec->getType()); |
| 6388 | |
| 6389 | ElementCount VecEC = VecTy->getElementCount(); |
| 6390 | ElementCount SubVecEC = SubVecTy->getElementCount(); |
| 6391 | Check(VecTy->getElementType() == SubVecTy->getElementType(), |
| 6392 | "vector_insert parameters must have the same element " |
| 6393 | "type." , |
| 6394 | &Call); |
| 6395 | Check(IdxN % SubVecEC.getKnownMinValue() == 0, |
| 6396 | "vector_insert index must be a constant multiple of " |
| 6397 | "the subvector's known minimum vector length." ); |
| 6398 | |
| 6399 | // If this insertion is not the 'mixed' case where a fixed vector is |
| 6400 | // inserted into a scalable vector, ensure that the insertion of the |
| 6401 | // subvector does not overrun the parent vector. |
| 6402 | if (VecEC.isScalable() == SubVecEC.isScalable()) { |
| 6403 | Check(IdxN < VecEC.getKnownMinValue() && |
| 6404 | IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(), |
| 6405 | "subvector operand of vector_insert would overrun the " |
| 6406 | "vector being inserted into." ); |
| 6407 | } |
| 6408 | break; |
| 6409 | } |
| 6410 | case Intrinsic::vector_extract: { |
| 6411 | Value *Vec = Call.getArgOperand(i: 0); |
| 6412 | Value *Idx = Call.getArgOperand(i: 1); |
| 6413 | unsigned IdxN = cast<ConstantInt>(Val: Idx)->getZExtValue(); |
| 6414 | |
| 6415 | VectorType *ResultTy = cast<VectorType>(Val: Call.getType()); |
| 6416 | VectorType *VecTy = cast<VectorType>(Val: Vec->getType()); |
| 6417 | |
| 6418 | ElementCount VecEC = VecTy->getElementCount(); |
| 6419 | ElementCount ResultEC = ResultTy->getElementCount(); |
| 6420 | |
| 6421 | Check(ResultTy->getElementType() == VecTy->getElementType(), |
| 6422 | "vector_extract result must have the same element " |
| 6423 | "type as the input vector." , |
| 6424 | &Call); |
| 6425 | Check(IdxN % ResultEC.getKnownMinValue() == 0, |
| 6426 | "vector_extract index must be a constant multiple of " |
| 6427 | "the result type's known minimum vector length." ); |
| 6428 | |
| 6429 | // If this extraction is not the 'mixed' case where a fixed vector is |
| 6430 | // extracted from a scalable vector, ensure that the extraction does not |
| 6431 | // overrun the parent vector. |
| 6432 | if (VecEC.isScalable() == ResultEC.isScalable()) { |
| 6433 | Check(IdxN < VecEC.getKnownMinValue() && |
| 6434 | IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(), |
| 6435 | "vector_extract would overrun." ); |
| 6436 | } |
| 6437 | break; |
| 6438 | } |
| 6439 | case Intrinsic::experimental_vector_partial_reduce_add: { |
| 6440 | VectorType *AccTy = cast<VectorType>(Val: Call.getArgOperand(i: 0)->getType()); |
| 6441 | VectorType *VecTy = cast<VectorType>(Val: Call.getArgOperand(i: 1)->getType()); |
| 6442 | |
| 6443 | unsigned VecWidth = VecTy->getElementCount().getKnownMinValue(); |
| 6444 | unsigned AccWidth = AccTy->getElementCount().getKnownMinValue(); |
| 6445 | |
| 6446 | Check((VecWidth % AccWidth) == 0, |
| 6447 | "Invalid vector widths for partial " |
| 6448 | "reduction. The width of the input vector " |
| 6449 | "must be a positive integer multiple of " |
| 6450 | "the width of the accumulator vector." ); |
| 6451 | break; |
| 6452 | } |
| 6453 | case Intrinsic::experimental_noalias_scope_decl: { |
| 6454 | NoAliasScopeDecls.push_back(Elt: cast<IntrinsicInst>(Val: &Call)); |
| 6455 | break; |
| 6456 | } |
| 6457 | case Intrinsic::preserve_array_access_index: |
| 6458 | case Intrinsic::preserve_struct_access_index: |
| 6459 | case Intrinsic::aarch64_ldaxr: |
| 6460 | case Intrinsic::aarch64_ldxr: |
| 6461 | case Intrinsic::arm_ldaex: |
| 6462 | case Intrinsic::arm_ldrex: { |
| 6463 | Type *ElemTy = Call.getParamElementType(ArgNo: 0); |
| 6464 | Check(ElemTy, "Intrinsic requires elementtype attribute on first argument." , |
| 6465 | &Call); |
| 6466 | break; |
| 6467 | } |
| 6468 | case Intrinsic::aarch64_stlxr: |
| 6469 | case Intrinsic::aarch64_stxr: |
| 6470 | case Intrinsic::arm_stlex: |
| 6471 | case Intrinsic::arm_strex: { |
| 6472 | Type *ElemTy = Call.getAttributes().getParamElementType(ArgNo: 1); |
| 6473 | Check(ElemTy, |
| 6474 | "Intrinsic requires elementtype attribute on second argument." , |
| 6475 | &Call); |
| 6476 | break; |
| 6477 | } |
| 6478 | case Intrinsic::aarch64_prefetch: { |
| 6479 | Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2, |
| 6480 | "write argument to llvm.aarch64.prefetch must be 0 or 1" , Call); |
| 6481 | Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4, |
| 6482 | "target argument to llvm.aarch64.prefetch must be 0-3" , Call); |
| 6483 | Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2, |
| 6484 | "stream argument to llvm.aarch64.prefetch must be 0 or 1" , Call); |
| 6485 | Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2, |
| 6486 | "isdata argument to llvm.aarch64.prefetch must be 0 or 1" , Call); |
| 6487 | break; |
| 6488 | } |
| 6489 | case Intrinsic::callbr_landingpad: { |
| 6490 | const auto *CBR = dyn_cast<CallBrInst>(Val: Call.getOperand(i_nocapture: 0)); |
| 6491 | Check(CBR, "intrinstic requires callbr operand" , &Call); |
| 6492 | if (!CBR) |
| 6493 | break; |
| 6494 | |
| 6495 | const BasicBlock *LandingPadBB = Call.getParent(); |
| 6496 | const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor(); |
| 6497 | if (!PredBB) { |
| 6498 | CheckFailed(Message: "Intrinsic in block must have 1 unique predecessor" , V1: &Call); |
| 6499 | break; |
| 6500 | } |
| 6501 | if (!isa<CallBrInst>(Val: PredBB->getTerminator())) { |
| 6502 | CheckFailed(Message: "Intrinsic must have corresponding callbr in predecessor" , |
| 6503 | V1: &Call); |
| 6504 | break; |
| 6505 | } |
| 6506 | Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB), |
| 6507 | "Intrinsic's corresponding callbr must have intrinsic's parent basic " |
| 6508 | "block in indirect destination list" , |
| 6509 | &Call); |
| 6510 | const Instruction &First = *LandingPadBB->begin(); |
| 6511 | Check(&First == &Call, "No other instructions may proceed intrinsic" , |
| 6512 | &Call); |
| 6513 | break; |
| 6514 | } |
| 6515 | case Intrinsic::amdgcn_cs_chain: { |
| 6516 | auto CallerCC = Call.getCaller()->getCallingConv(); |
| 6517 | switch (CallerCC) { |
| 6518 | case CallingConv::AMDGPU_CS: |
| 6519 | case CallingConv::AMDGPU_CS_Chain: |
| 6520 | case CallingConv::AMDGPU_CS_ChainPreserve: |
| 6521 | break; |
| 6522 | default: |
| 6523 | CheckFailed(Message: "Intrinsic can only be used from functions with the " |
| 6524 | "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve " |
| 6525 | "calling conventions" , |
| 6526 | V1: &Call); |
| 6527 | break; |
| 6528 | } |
| 6529 | |
| 6530 | Check(Call.paramHasAttr(2, Attribute::InReg), |
| 6531 | "SGPR arguments must have the `inreg` attribute" , &Call); |
| 6532 | Check(!Call.paramHasAttr(3, Attribute::InReg), |
| 6533 | "VGPR arguments must not have the `inreg` attribute" , &Call); |
| 6534 | |
| 6535 | auto *Next = Call.getNextNonDebugInstruction(); |
| 6536 | bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Val: Next) && |
| 6537 | cast<IntrinsicInst>(Val: Next)->getIntrinsicID() == |
| 6538 | Intrinsic::amdgcn_unreachable; |
| 6539 | Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable), |
| 6540 | "llvm.amdgcn.cs.chain must be followed by unreachable" , &Call); |
| 6541 | break; |
| 6542 | } |
| 6543 | case Intrinsic::amdgcn_init_exec_from_input: { |
| 6544 | const Argument *Arg = dyn_cast<Argument>(Val: Call.getOperand(i_nocapture: 0)); |
| 6545 | Check(Arg && Arg->hasInRegAttr(), |
| 6546 | "only inreg arguments to the parent function are valid as inputs to " |
| 6547 | "this intrinsic" , |
| 6548 | &Call); |
| 6549 | break; |
| 6550 | } |
| 6551 | case Intrinsic::amdgcn_set_inactive_chain_arg: { |
| 6552 | auto CallerCC = Call.getCaller()->getCallingConv(); |
| 6553 | switch (CallerCC) { |
| 6554 | case CallingConv::AMDGPU_CS_Chain: |
| 6555 | case CallingConv::AMDGPU_CS_ChainPreserve: |
| 6556 | break; |
| 6557 | default: |
| 6558 | CheckFailed(Message: "Intrinsic can only be used from functions with the " |
| 6559 | "amdgpu_cs_chain or amdgpu_cs_chain_preserve " |
| 6560 | "calling conventions" , |
| 6561 | V1: &Call); |
| 6562 | break; |
| 6563 | } |
| 6564 | |
| 6565 | unsigned InactiveIdx = 1; |
| 6566 | Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg), |
| 6567 | "Value for inactive lanes must not have the `inreg` attribute" , |
| 6568 | &Call); |
| 6569 | Check(isa<Argument>(Call.getArgOperand(InactiveIdx)), |
| 6570 | "Value for inactive lanes must be a function argument" , &Call); |
| 6571 | Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(), |
| 6572 | "Value for inactive lanes must be a VGPR function argument" , &Call); |
| 6573 | break; |
| 6574 | } |
| 6575 | case Intrinsic::amdgcn_s_prefetch_data: { |
| 6576 | Check( |
| 6577 | AMDGPU::isFlatGlobalAddrSpace( |
| 6578 | Call.getArgOperand(0)->getType()->getPointerAddressSpace()), |
| 6579 | "llvm.amdgcn.s.prefetch.data only supports global or constant memory" ); |
| 6580 | break; |
| 6581 | } |
| 6582 | case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4: |
| 6583 | case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: { |
| 6584 | Value *Src0 = Call.getArgOperand(i: 0); |
| 6585 | Value *Src1 = Call.getArgOperand(i: 1); |
| 6586 | |
| 6587 | uint64_t CBSZ = cast<ConstantInt>(Val: Call.getArgOperand(i: 3))->getZExtValue(); |
| 6588 | uint64_t BLGP = cast<ConstantInt>(Val: Call.getArgOperand(i: 4))->getZExtValue(); |
| 6589 | Check(CBSZ <= 4, "invalid value for cbsz format" , Call, |
| 6590 | Call.getArgOperand(3)); |
| 6591 | Check(BLGP <= 4, "invalid value for blgp format" , Call, |
| 6592 | Call.getArgOperand(4)); |
| 6593 | |
| 6594 | // AMDGPU::MFMAScaleFormats values |
| 6595 | auto getFormatNumRegs = [](unsigned FormatVal) { |
| 6596 | switch (FormatVal) { |
| 6597 | case 0: |
| 6598 | case 1: |
| 6599 | return 8u; |
| 6600 | case 2: |
| 6601 | case 3: |
| 6602 | return 6u; |
| 6603 | case 4: |
| 6604 | return 4u; |
| 6605 | default: |
| 6606 | llvm_unreachable("invalid format value" ); |
| 6607 | } |
| 6608 | }; |
| 6609 | |
| 6610 | auto isValidSrcASrcBVector = [](FixedVectorType *Ty) { |
| 6611 | if (!Ty || !Ty->getElementType()->isIntegerTy(Bitwidth: 32)) |
| 6612 | return false; |
| 6613 | unsigned NumElts = Ty->getNumElements(); |
| 6614 | return NumElts == 4 || NumElts == 6 || NumElts == 8; |
| 6615 | }; |
| 6616 | |
| 6617 | auto *Src0Ty = dyn_cast<FixedVectorType>(Val: Src0->getType()); |
| 6618 | auto *Src1Ty = dyn_cast<FixedVectorType>(Val: Src1->getType()); |
| 6619 | Check(isValidSrcASrcBVector(Src0Ty), |
| 6620 | "operand 0 must be 4, 6 or 8 element i32 vector" , &Call, Src0); |
| 6621 | Check(isValidSrcASrcBVector(Src1Ty), |
| 6622 | "operand 1 must be 4, 6 or 8 element i32 vector" , &Call, Src1); |
| 6623 | |
| 6624 | // Permit excess registers for the format. |
| 6625 | Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ), |
| 6626 | "invalid vector type for format" , &Call, Src0, Call.getArgOperand(3)); |
| 6627 | Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP), |
| 6628 | "invalid vector type for format" , &Call, Src1, Call.getArgOperand(5)); |
| 6629 | break; |
| 6630 | } |
| 6631 | case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32: |
| 6632 | case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: { |
| 6633 | Value *V = Call.getArgOperand(i: 0); |
| 6634 | unsigned RegCount = cast<ConstantInt>(Val: V)->getZExtValue(); |
| 6635 | Check(RegCount % 8 == 0, |
| 6636 | "reg_count argument to nvvm.setmaxnreg must be in multiples of 8" ); |
| 6637 | break; |
| 6638 | } |
| 6639 | case Intrinsic::experimental_convergence_entry: |
| 6640 | case Intrinsic::experimental_convergence_anchor: |
| 6641 | break; |
| 6642 | case Intrinsic::experimental_convergence_loop: |
| 6643 | break; |
| 6644 | case Intrinsic::ptrmask: { |
| 6645 | Type *Ty0 = Call.getArgOperand(i: 0)->getType(); |
| 6646 | Type *Ty1 = Call.getArgOperand(i: 1)->getType(); |
| 6647 | Check(Ty0->isPtrOrPtrVectorTy(), |
| 6648 | "llvm.ptrmask intrinsic first argument must be pointer or vector " |
| 6649 | "of pointers" , |
| 6650 | &Call); |
| 6651 | Check( |
| 6652 | Ty0->isVectorTy() == Ty1->isVectorTy(), |
| 6653 | "llvm.ptrmask intrinsic arguments must be both scalars or both vectors" , |
| 6654 | &Call); |
| 6655 | if (Ty0->isVectorTy()) |
| 6656 | Check(cast<VectorType>(Ty0)->getElementCount() == |
| 6657 | cast<VectorType>(Ty1)->getElementCount(), |
| 6658 | "llvm.ptrmask intrinsic arguments must have the same number of " |
| 6659 | "elements" , |
| 6660 | &Call); |
| 6661 | Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(), |
| 6662 | "llvm.ptrmask intrinsic second argument bitwidth must match " |
| 6663 | "pointer index type size of first argument" , |
| 6664 | &Call); |
| 6665 | break; |
| 6666 | } |
| 6667 | case Intrinsic::thread_pointer: { |
| 6668 | Check(Call.getType()->getPointerAddressSpace() == |
| 6669 | DL.getDefaultGlobalsAddressSpace(), |
| 6670 | "llvm.thread.pointer intrinsic return type must be for the globals " |
| 6671 | "address space" , |
| 6672 | &Call); |
| 6673 | break; |
| 6674 | } |
| 6675 | case Intrinsic::threadlocal_address: { |
| 6676 | const Value &Arg0 = *Call.getArgOperand(i: 0); |
| 6677 | Check(isa<GlobalValue>(Arg0), |
| 6678 | "llvm.threadlocal.address first argument must be a GlobalValue" ); |
| 6679 | Check(cast<GlobalValue>(Arg0).isThreadLocal(), |
| 6680 | "llvm.threadlocal.address operand isThreadLocal() must be true" ); |
| 6681 | break; |
| 6682 | } |
| 6683 | }; |
| 6684 | |
| 6685 | // Verify that there aren't any unmediated control transfers between funclets. |
| 6686 | if (IntrinsicInst::mayLowerToFunctionCall(IID: ID)) { |
| 6687 | Function *F = Call.getParent()->getParent(); |
| 6688 | if (F->hasPersonalityFn() && |
| 6689 | isScopedEHPersonality(Pers: classifyEHPersonality(Pers: F->getPersonalityFn()))) { |
| 6690 | // Run EH funclet coloring on-demand and cache results for other intrinsic |
| 6691 | // calls in this function |
| 6692 | if (BlockEHFuncletColors.empty()) |
| 6693 | BlockEHFuncletColors = colorEHFunclets(F&: *F); |
| 6694 | |
| 6695 | // Check for catch-/cleanup-pad in first funclet block |
| 6696 | bool InEHFunclet = false; |
| 6697 | BasicBlock *CallBB = Call.getParent(); |
| 6698 | const ColorVector &CV = BlockEHFuncletColors.find(Val: CallBB)->second; |
| 6699 | assert(CV.size() > 0 && "Uncolored block" ); |
| 6700 | for (BasicBlock *ColorFirstBB : CV) |
| 6701 | if (auto It = ColorFirstBB->getFirstNonPHIIt(); |
| 6702 | It != ColorFirstBB->end()) |
| 6703 | if (isa_and_nonnull<FuncletPadInst>(Val: &*It)) |
| 6704 | InEHFunclet = true; |
| 6705 | |
| 6706 | // Check for funclet operand bundle |
| 6707 | bool HasToken = false; |
| 6708 | for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I) |
| 6709 | if (Call.getOperandBundleAt(Index: I).getTagID() == LLVMContext::OB_funclet) |
| 6710 | HasToken = true; |
| 6711 | |
| 6712 | // This would cause silent code truncation in WinEHPrepare |
| 6713 | if (InEHFunclet) |
| 6714 | Check(HasToken, "Missing funclet token on intrinsic call" , &Call); |
| 6715 | } |
| 6716 | } |
| 6717 | } |
| 6718 | |
| 6719 | /// Carefully grab the subprogram from a local scope. |
| 6720 | /// |
| 6721 | /// This carefully grabs the subprogram from a local scope, avoiding the |
| 6722 | /// built-in assertions that would typically fire. |
| 6723 | static DISubprogram *getSubprogram(Metadata *LocalScope) { |
| 6724 | if (!LocalScope) |
| 6725 | return nullptr; |
| 6726 | |
| 6727 | if (auto *SP = dyn_cast<DISubprogram>(Val: LocalScope)) |
| 6728 | return SP; |
| 6729 | |
| 6730 | if (auto *LB = dyn_cast<DILexicalBlockBase>(Val: LocalScope)) |
| 6731 | return getSubprogram(LocalScope: LB->getRawScope()); |
| 6732 | |
| 6733 | // Just return null; broken scope chains are checked elsewhere. |
| 6734 | assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope" ); |
| 6735 | return nullptr; |
| 6736 | } |
| 6737 | |
// Verify a single #dbg_label record: its label operand must be a DILabel,
// it must carry a !dbg attachment, and the label's scope must resolve to the
// same subprogram as that attachment.
void Verifier::visit(DbgLabelRecord &DLR) {
  CheckDI(isa<DILabel>(DLR.getRawLabel()),
          "invalid #dbg_label intrinsic variable" , &DLR, DLR.getRawLabel());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(Val: N))
      return;

  // BB/F may be null for a detached record; they are passed along purely for
  // diagnostic context.
  BasicBlock *BB = DLR.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILabel *Label = DLR.getLabel();
  DILocation *Loc = DLR.getDebugLoc();
  CheckDI(Loc, "#dbg_label record requires a !dbg attachment" , &DLR, BB, F);

  DISubprogram *LabelSP = getSubprogram(LocalScope: Label->getRawScope());
  DISubprogram *LocSP = getSubprogram(LocalScope: Loc->getRawScope());
  // Broken scope chains are diagnosed elsewhere; bail out quietly here.
  if (!LabelSP || !LocSP)
    return;

  CheckDI(LabelSP == LocSP,
          "mismatched subprogram between #dbg_label label and !dbg attachment" ,
          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());
}
| 6765 | |
// Verify a single #dbg_value / #dbg_declare / #dbg_assign record: check the
// record kind, its location metadata, variable, expression(s), the assign-
// specific operands (DIAssignID, address, address expression), and that the
// variable's scope agrees with the !dbg attachment's subprogram.
// Note: CheckDI reports a failure and returns early, so the order of the
// checks below determines which defect is diagnosed first.
void Verifier::visit(DbgVariableRecord &DVR) {
  BasicBlock *BB = DVR.getParent();
  Function *F = BB->getParent();

  // Only the three known record kinds are valid.
  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
              DVR.getType() == DbgVariableRecord::LocationType::Assign,
          "invalid #dbg record type" , &DVR, DVR.getType(), BB, F);

  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
  // DIArgList, or an empty MDNode (which is a legacy representation for an
  // "undef" location).
  auto *MD = DVR.getRawLocation();
  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
          "invalid #dbg record address/value" , &DVR, MD, BB, F);
  if (auto *VAM = dyn_cast<ValueAsMetadata>(Val: MD)) {
    // Recurse into the wrapped value so its own metadata is verified.
    visitValueAsMetadata(MD: *VAM, F);
    if (DVR.isDbgDeclare()) {
      // Allow integers here to support inttoptr salvage.
      Type *Ty = VAM->getValue()->getType();
      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
              "location of #dbg_declare must be a pointer or int" , &DVR, MD, BB,
              F);
    }
  } else if (auto *AL = dyn_cast<DIArgList>(Val: MD)) {
    visitDIArgList(AL: *AL, F);
  }

  // The variable operand must be a DILocalVariable; verify it (debug
  // locations are not allowed inside variable metadata).
  CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
          "invalid #dbg record variable" , &DVR, DVR.getRawVariable(), BB, F);
  visitMDNode(MD: *DVR.getRawVariable(), AllowLocs: AreDebugLocsAllowed::No);

  CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
          "invalid #dbg record expression" , &DVR, DVR.getRawExpression(), BB,
          F);
  visitMDNode(MD: *DVR.getExpression(), AllowLocs: AreDebugLocsAllowed::No);

  // #dbg_assign records carry three extra operands: a DIAssignID link, an
  // address, and an address expression.
  if (DVR.isDbgAssign()) {
    CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
            "invalid #dbg_assign DIAssignID" , &DVR, DVR.getRawAssignID(), BB,
            F);
    visitMDNode(MD: *cast<DIAssignID>(Val: DVR.getRawAssignID()),
                AllowLocs: AreDebugLocsAllowed::No);

    const auto *RawAddr = DVR.getRawAddress();
    // Similarly to the location above, the address for an assign
    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
    // represents an undef address.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid #dbg_assign address" , &DVR, DVR.getRawAddress(), BB, F);
    if (auto *VAM = dyn_cast<ValueAsMetadata>(Val: RawAddr))
      visitValueAsMetadata(MD: *VAM, F);

    CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
            "invalid #dbg_assign address expression" , &DVR,
            DVR.getRawAddressExpression(), BB, F);
    visitMDNode(MD: *DVR.getAddressExpression(), AllowLocs: AreDebugLocsAllowed::No);

    // All of the linked instructions should be in the same function as DVR.
    for (Instruction *I : at::getAssignmentInsts(DVR: &DVR))
      CheckDI(DVR.getFunction() == I->getFunction(),
              "inst not in same function as #dbg_assign" , I, &DVR, BB, F);
  }

  // This check is redundant with one in visitLocalVariable().
  DILocalVariable *Var = DVR.getVariable();
  CheckDI(isType(Var->getRawType()), "invalid type ref" , Var, Var->getRawType(),
          BB, F);

  // Every record needs a valid DILocation as its !dbg attachment.
  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation" ,
          &DVR, DLNode, BB, F);
  DILocation *Loc = DVR.getDebugLoc();

  // The scopes for variables and !dbg attachments must agree.
  DISubprogram *VarSP = getSubprogram(LocalScope: Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(LocalScope: Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between #dbg record variable and DILocation" ,
          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram(), BB, F);

  verifyFnArgs(DVR);
}
| 6856 | |
| 6857 | void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) { |
| 6858 | if (auto *VPCast = dyn_cast<VPCastIntrinsic>(Val: &VPI)) { |
| 6859 | auto *RetTy = cast<VectorType>(Val: VPCast->getType()); |
| 6860 | auto *ValTy = cast<VectorType>(Val: VPCast->getOperand(i_nocapture: 0)->getType()); |
| 6861 | Check(RetTy->getElementCount() == ValTy->getElementCount(), |
| 6862 | "VP cast intrinsic first argument and result vector lengths must be " |
| 6863 | "equal" , |
| 6864 | *VPCast); |
| 6865 | |
| 6866 | switch (VPCast->getIntrinsicID()) { |
| 6867 | default: |
| 6868 | llvm_unreachable("Unknown VP cast intrinsic" ); |
| 6869 | case Intrinsic::vp_trunc: |
| 6870 | Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(), |
| 6871 | "llvm.vp.trunc intrinsic first argument and result element type " |
| 6872 | "must be integer" , |
| 6873 | *VPCast); |
| 6874 | Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(), |
| 6875 | "llvm.vp.trunc intrinsic the bit size of first argument must be " |
| 6876 | "larger than the bit size of the return type" , |
| 6877 | *VPCast); |
| 6878 | break; |
| 6879 | case Intrinsic::vp_zext: |
| 6880 | case Intrinsic::vp_sext: |
| 6881 | Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(), |
| 6882 | "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result " |
| 6883 | "element type must be integer" , |
| 6884 | *VPCast); |
| 6885 | Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(), |
| 6886 | "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first " |
| 6887 | "argument must be smaller than the bit size of the return type" , |
| 6888 | *VPCast); |
| 6889 | break; |
| 6890 | case Intrinsic::vp_fptoui: |
| 6891 | case Intrinsic::vp_fptosi: |
| 6892 | case Intrinsic::vp_lrint: |
| 6893 | case Intrinsic::vp_llrint: |
| 6894 | Check( |
| 6895 | RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(), |
| 6896 | "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element " |
| 6897 | "type must be floating-point and result element type must be integer" , |
| 6898 | *VPCast); |
| 6899 | break; |
| 6900 | case Intrinsic::vp_uitofp: |
| 6901 | case Intrinsic::vp_sitofp: |
| 6902 | Check( |
| 6903 | RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(), |
| 6904 | "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element " |
| 6905 | "type must be integer and result element type must be floating-point" , |
| 6906 | *VPCast); |
| 6907 | break; |
| 6908 | case Intrinsic::vp_fptrunc: |
| 6909 | Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(), |
| 6910 | "llvm.vp.fptrunc intrinsic first argument and result element type " |
| 6911 | "must be floating-point" , |
| 6912 | *VPCast); |
| 6913 | Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(), |
| 6914 | "llvm.vp.fptrunc intrinsic the bit size of first argument must be " |
| 6915 | "larger than the bit size of the return type" , |
| 6916 | *VPCast); |
| 6917 | break; |
| 6918 | case Intrinsic::vp_fpext: |
| 6919 | Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(), |
| 6920 | "llvm.vp.fpext intrinsic first argument and result element type " |
| 6921 | "must be floating-point" , |
| 6922 | *VPCast); |
| 6923 | Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(), |
| 6924 | "llvm.vp.fpext intrinsic the bit size of first argument must be " |
| 6925 | "smaller than the bit size of the return type" , |
| 6926 | *VPCast); |
| 6927 | break; |
| 6928 | case Intrinsic::vp_ptrtoint: |
| 6929 | Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(), |
| 6930 | "llvm.vp.ptrtoint intrinsic first argument element type must be " |
| 6931 | "pointer and result element type must be integer" , |
| 6932 | *VPCast); |
| 6933 | break; |
| 6934 | case Intrinsic::vp_inttoptr: |
| 6935 | Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(), |
| 6936 | "llvm.vp.inttoptr intrinsic first argument element type must be " |
| 6937 | "integer and result element type must be pointer" , |
| 6938 | *VPCast); |
| 6939 | break; |
| 6940 | } |
| 6941 | } |
| 6942 | if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) { |
| 6943 | auto Pred = cast<VPCmpIntrinsic>(Val: &VPI)->getPredicate(); |
| 6944 | Check(CmpInst::isFPPredicate(Pred), |
| 6945 | "invalid predicate for VP FP comparison intrinsic" , &VPI); |
| 6946 | } |
| 6947 | if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) { |
| 6948 | auto Pred = cast<VPCmpIntrinsic>(Val: &VPI)->getPredicate(); |
| 6949 | Check(CmpInst::isIntPredicate(Pred), |
| 6950 | "invalid predicate for VP integer comparison intrinsic" , &VPI); |
| 6951 | } |
| 6952 | if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) { |
| 6953 | auto TestMask = cast<ConstantInt>(Val: VPI.getOperand(i_nocapture: 1)); |
| 6954 | Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0, |
| 6955 | "unsupported bits for llvm.vp.is.fpclass test mask" ); |
| 6956 | } |
| 6957 | } |
| 6958 | |
/// Verify a call to a constrained floating-point intrinsic
/// (llvm.experimental.constrained.*).
///
/// First checks the operand count: the non-metadata operands plus one
/// exception-behavior metadata operand, plus a rounding-mode operand and/or
/// a compare-predicate operand where the intrinsic takes them. Then applies
/// per-intrinsic type rules, and finally validates that the metadata
/// operands decode to valid values.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  unsigned NumOperands = FPI.getNonMetadataArgCount();
  bool HasRoundingMD =
      Intrinsic::hasConstrainedFPRoundingModeOperand(QID: FPI.getIntrinsicID());

  // Add the expected number of metadata operands: every constrained
  // intrinsic carries an exception-behavior operand; some also carry a
  // rounding-mode operand.
  NumOperands += (1 + HasRoundingMD);

  // Compare intrinsics carry an extra predicate metadata operand.
  if (isa<ConstrainedFPCmpIntrinsic>(Val: FPI))
    NumOperands += 1;
  Check((FPI.arg_size() == NumOperands),
        "invalid arguments for constrained FP intrinsic", &FPI);

  switch (FPI.getIntrinsicID()) {
  case Intrinsic::experimental_constrained_lrint:
  case Intrinsic::experimental_constrained_llrint: {
    // lrint/llrint are scalar-only: neither operand nor result may be a
    // vector.
    Type *ValTy = FPI.getArgOperand(i: 0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_lround:
  case Intrinsic::experimental_constrained_llround: {
    // lround/llround are likewise scalar-only.
    Type *ValTy = FPI.getArgOperand(i: 0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps: {
    // The predicate metadata operand must encode an FP comparison predicate.
    auto Pred = cast<ConstrainedFPCmpIntrinsic>(Val: &FPI)->getPredicate();
    Check(CmpInst::isFPPredicate(Pred),
          "invalid predicate for constrained FP comparison intrinsic", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fptosi:
  case Intrinsic::experimental_constrained_fptoui: {
    // FP -> integer conversion: the source must be FP (scalar or vector),
    // the result integer, and scalar/vector-ness plus element counts must
    // agree between source and result.
    Value *Operand = FPI.getArgOperand(i: 0);
    // SrcEC stays zero for scalar sources; a non-zero count marks a vector.
    ElementCount SrcEC;
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic first argument must be floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Val: Operand->getType())) {
      SrcEC = cast<VectorType>(Val: OperandT)->getElementCount();
    }

    // Re-point Operand at the call itself to validate the result type.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic result must be an integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Val: Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_sitofp:
  case Intrinsic::experimental_constrained_uitofp: {
    // Integer -> FP conversion: mirror image of the fptosi/fptoui checks.
    Value *Operand = FPI.getArgOperand(i: 0);
    ElementCount SrcEC;
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic first argument must be integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Val: Operand->getType())) {
      SrcEC = cast<VectorType>(Val: OperandT)->getElementCount();
    }

    // Re-point Operand at the call itself to validate the result type.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic result must be a floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Val: Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_fpext: {
    // FP -> FP conversion: both sides FP, matching scalar/vector shape, and
    // the scalar widths must strictly shrink (fptrunc) or grow (fpext).
    Value *Operand = FPI.getArgOperand(i: 0);
    Type *OperandTy = Operand->getType();
    Value *Result = &FPI;
    Type *ResultTy = Result->getType();
    Check(OperandTy->isFPOrFPVectorTy(),
          "Intrinsic first argument must be FP or FP vector", &FPI);
    Check(ResultTy->isFPOrFPVectorTy(),
          "Intrinsic result must be FP or FP vector", &FPI);
    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    if (OperandTy->isVectorTy()) {
      Check(cast<VectorType>(OperandTy)->getElementCount() ==
                cast<VectorType>(ResultTy)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be larger than result type",
            &FPI);
    } else {
      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
    }
    break;
  }

  default:
    break;
  }

  // If a non-metadata argument is passed in a metadata slot then the
  // error will be caught earlier when the incorrect argument doesn't
  // match the specification in the intrinsic call table. Thus, no
  // argument type check is needed here.

  // The metadata operands themselves must decode to recognized values.
  Check(FPI.getExceptionBehavior().has_value(),
        "invalid exception behavior argument", &FPI);
  if (HasRoundingMD) {
    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
          &FPI);
  }
}
| 7092 | |
/// Shared checks for llvm.dbg.declare / llvm.dbg.value / llvm.dbg.assign
/// intrinsics; \p Kind names the intrinsic flavor for diagnostic messages.
void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
  auto *MD = DII.getRawLocation();
  // The location operand must be a wrapped Value, a DIArgList, or an MDNode
  // with no operands.
  CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
              (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
          "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
  // The variable and expression operands must have the expected metadata
  // node kinds.
  CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
          "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
          DII.getRawVariable());
  CheckDI(isa<DIExpression>(DII.getRawExpression()),
          "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
          DII.getRawExpression());

  // dbg.assign additionally carries an assignment ID, an address, and an
  // address expression; validate those operands too.
  if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(Val: &DII)) {
    CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
            "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
            DAI->getRawAssignID());
    const auto *RawAddr = DAI->getRawAddress();
    // Like the location operand: a wrapped Value or an empty MDNode.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid llvm.dbg.assign intrinsic address", &DII,
        DAI->getRawAddress());
    CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
            "invalid llvm.dbg.assign intrinsic address expression", &DII,
            DAI->getRawAddressExpression());
    // All of the linked instructions should be in the same function as DII.
    for (Instruction *I : at::getAssignmentInsts(DAI))
      CheckDI(DAI->getFunction() == I->getFunction(),
              "inst not in same function as dbg.assign", I, DAI);
  }

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DII.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(Val: N))
      return;

  BasicBlock *BB = DII.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILocalVariable *Var = DII.getVariable();
  DILocation *Loc = DII.getDebugLoc();
  CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
          &DII, BB, F);

  DISubprogram *VarSP = getSubprogram(LocalScope: Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(LocalScope: Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  // The variable's scope and the !dbg location must resolve to the same
  // subprogram.
  CheckDI(VarSP == LocSP,
          "mismatched subprogram between llvm.dbg." + Kind +
              " variable and !dbg attachment",
          &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());

  // This check is redundant with one in visitLocalVariable().
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
          Var->getRawType());
  verifyFnArgs(I: DII);
}
| 7154 | |
| 7155 | void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) { |
| 7156 | CheckDI(isa<DILabel>(DLI.getRawLabel()), |
| 7157 | "invalid llvm.dbg." + Kind + " intrinsic variable" , &DLI, |
| 7158 | DLI.getRawLabel()); |
| 7159 | |
| 7160 | // Ignore broken !dbg attachments; they're checked elsewhere. |
| 7161 | if (MDNode *N = DLI.getDebugLoc().getAsMDNode()) |
| 7162 | if (!isa<DILocation>(Val: N)) |
| 7163 | return; |
| 7164 | |
| 7165 | BasicBlock *BB = DLI.getParent(); |
| 7166 | Function *F = BB ? BB->getParent() : nullptr; |
| 7167 | |
| 7168 | // The scopes for variables and !dbg attachments must agree. |
| 7169 | DILabel *Label = DLI.getLabel(); |
| 7170 | DILocation *Loc = DLI.getDebugLoc(); |
| 7171 | Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment" , &DLI, |
| 7172 | BB, F); |
| 7173 | |
| 7174 | DISubprogram *LabelSP = getSubprogram(LocalScope: Label->getRawScope()); |
| 7175 | DISubprogram *LocSP = getSubprogram(LocalScope: Loc->getRawScope()); |
| 7176 | if (!LabelSP || !LocSP) |
| 7177 | return; |
| 7178 | |
| 7179 | CheckDI(LabelSP == LocSP, |
| 7180 | "mismatched subprogram between llvm.dbg." + Kind + |
| 7181 | " label and !dbg attachment" , |
| 7182 | &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc, |
| 7183 | Loc->getScope()->getSubprogram()); |
| 7184 | } |
| 7185 | |
| 7186 | void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) { |
| 7187 | DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(Val: I.getRawVariable()); |
| 7188 | DIExpression *E = dyn_cast_or_null<DIExpression>(Val: I.getRawExpression()); |
| 7189 | |
| 7190 | // We don't know whether this intrinsic verified correctly. |
| 7191 | if (!V || !E || !E->isValid()) |
| 7192 | return; |
| 7193 | |
| 7194 | // Nothing to do if this isn't a DW_OP_LLVM_fragment expression. |
| 7195 | auto Fragment = E->getFragmentInfo(); |
| 7196 | if (!Fragment) |
| 7197 | return; |
| 7198 | |
| 7199 | // The frontend helps out GDB by emitting the members of local anonymous |
| 7200 | // unions as artificial local variables with shared storage. When SROA splits |
| 7201 | // the storage for artificial local variables that are smaller than the entire |
| 7202 | // union, the overhang piece will be outside of the allotted space for the |
| 7203 | // variable and this check fails. |
| 7204 | // FIXME: Remove this check as soon as clang stops doing this; it hides bugs. |
| 7205 | if (V->isArtificial()) |
| 7206 | return; |
| 7207 | |
| 7208 | verifyFragmentExpression(V: *V, Fragment: *Fragment, Desc: &I); |
| 7209 | } |
| 7210 | void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) { |
| 7211 | DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(Val: DVR.getRawVariable()); |
| 7212 | DIExpression *E = dyn_cast_or_null<DIExpression>(Val: DVR.getRawExpression()); |
| 7213 | |
| 7214 | // We don't know whether this intrinsic verified correctly. |
| 7215 | if (!V || !E || !E->isValid()) |
| 7216 | return; |
| 7217 | |
| 7218 | // Nothing to do if this isn't a DW_OP_LLVM_fragment expression. |
| 7219 | auto Fragment = E->getFragmentInfo(); |
| 7220 | if (!Fragment) |
| 7221 | return; |
| 7222 | |
| 7223 | // The frontend helps out GDB by emitting the members of local anonymous |
| 7224 | // unions as artificial local variables with shared storage. When SROA splits |
| 7225 | // the storage for artificial local variables that are smaller than the entire |
| 7226 | // union, the overhang piece will be outside of the allotted space for the |
| 7227 | // variable and this check fails. |
| 7228 | // FIXME: Remove this check as soon as clang stops doing this; it hides bugs. |
| 7229 | if (V->isArtificial()) |
| 7230 | return; |
| 7231 | |
| 7232 | verifyFragmentExpression(V: *V, Fragment: *Fragment, Desc: &DVR); |
| 7233 | } |
| 7234 | |
| 7235 | template <typename ValueOrMetadata> |
| 7236 | void Verifier::verifyFragmentExpression(const DIVariable &V, |
| 7237 | DIExpression::FragmentInfo Fragment, |
| 7238 | ValueOrMetadata *Desc) { |
| 7239 | // If there's no size, the type is broken, but that should be checked |
| 7240 | // elsewhere. |
| 7241 | auto VarSize = V.getSizeInBits(); |
| 7242 | if (!VarSize) |
| 7243 | return; |
| 7244 | |
| 7245 | unsigned FragSize = Fragment.SizeInBits; |
| 7246 | unsigned FragOffset = Fragment.OffsetInBits; |
| 7247 | CheckDI(FragSize + FragOffset <= *VarSize, |
| 7248 | "fragment is larger than or outside of variable" , Desc, &V); |
| 7249 | CheckDI(FragSize != *VarSize, "fragment covers entire variable" , Desc, &V); |
| 7250 | } |
| 7251 | |
/// Verify that debug intrinsics describing function arguments are
/// consistent: at most one local variable may claim each argument slot.
void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
  // This function does not take the scope of noninlined function arguments into
  // account. Don't run it if current function is nodebug, because it may
  // contain inlined debug intrinsics.
  if (!HasDebugInfo)
    return;

  // For performance reasons only check non-inlined ones.
  if (I.getDebugLoc()->getInlinedAt())
    return;

  DILocalVariable *Var = I.getVariable();
  CheckDI(Var, "dbg intrinsic without variable");

  // Argument numbers are 1-based; 0 means the variable is not a function
  // argument, so there is nothing to cross-check.
  unsigned ArgNo = Var->getArg();
  if (!ArgNo)
    return;

  // Verify there are no duplicate function argument debug info entries.
  // These will cause hard-to-debug assertions in the DWARF backend.
  if (DebugFnArgs.size() < ArgNo)
    DebugFnArgs.resize(N: ArgNo, NV: nullptr);

  // Record this variable in slot ArgNo-1 and diagnose if a *different*
  // variable already claimed the same argument.
  auto *Prev = DebugFnArgs[ArgNo - 1];
  DebugFnArgs[ArgNo - 1] = Var;
  CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
          Prev, Var);
}
| 7280 | void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) { |
| 7281 | // This function does not take the scope of noninlined function arguments into |
| 7282 | // account. Don't run it if current function is nodebug, because it may |
| 7283 | // contain inlined debug intrinsics. |
| 7284 | if (!HasDebugInfo) |
| 7285 | return; |
| 7286 | |
| 7287 | // For performance reasons only check non-inlined ones. |
| 7288 | if (DVR.getDebugLoc()->getInlinedAt()) |
| 7289 | return; |
| 7290 | |
| 7291 | DILocalVariable *Var = DVR.getVariable(); |
| 7292 | CheckDI(Var, "#dbg record without variable" ); |
| 7293 | |
| 7294 | unsigned ArgNo = Var->getArg(); |
| 7295 | if (!ArgNo) |
| 7296 | return; |
| 7297 | |
| 7298 | // Verify there are no duplicate function argument debug info entries. |
| 7299 | // These will cause hard-to-debug assertions in the DWARF backend. |
| 7300 | if (DebugFnArgs.size() < ArgNo) |
| 7301 | DebugFnArgs.resize(N: ArgNo, NV: nullptr); |
| 7302 | |
| 7303 | auto *Prev = DebugFnArgs[ArgNo - 1]; |
| 7304 | DebugFnArgs[ArgNo - 1] = Var; |
| 7305 | CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument" , &DVR, |
| 7306 | Prev, Var); |
| 7307 | } |
| 7308 | |
| 7309 | void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) { |
| 7310 | DIExpression *E = dyn_cast_or_null<DIExpression>(Val: I.getRawExpression()); |
| 7311 | |
| 7312 | // We don't know whether this intrinsic verified correctly. |
| 7313 | if (!E || !E->isValid()) |
| 7314 | return; |
| 7315 | |
| 7316 | if (isa<ValueAsMetadata>(Val: I.getRawLocation())) { |
| 7317 | Value *VarValue = I.getVariableLocationOp(OpIdx: 0); |
| 7318 | if (isa<UndefValue>(Val: VarValue) || isa<PoisonValue>(Val: VarValue)) |
| 7319 | return; |
| 7320 | // We allow EntryValues for swift async arguments, as they have an |
| 7321 | // ABI-guarantee to be turned into a specific register. |
| 7322 | if (auto *ArgLoc = dyn_cast_or_null<Argument>(Val: VarValue); |
| 7323 | ArgLoc && ArgLoc->hasAttribute(Kind: Attribute::SwiftAsync)) |
| 7324 | return; |
| 7325 | } |
| 7326 | |
| 7327 | CheckDI(!E->isEntryValue(), |
| 7328 | "Entry values are only allowed in MIR unless they target a " |
| 7329 | "swiftasync Argument" , |
| 7330 | &I); |
| 7331 | } |
/// Reject DW_OP_LLVM_entry_value expressions on #dbg records: entry values
/// are only allowed in MIR, except when the location is a swiftasync
/// function argument.
void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
  DIExpression *E = dyn_cast_or_null<DIExpression>(Val: DVR.getRawExpression());

  // We don't know whether this intrinsic verified correctly.
  if (!E || !E->isValid())
    return;

  if (isa<ValueAsMetadata>(Val: DVR.getRawLocation())) {
    Value *VarValue = DVR.getVariableLocationOp(OpIdx: 0);
    // Undef/poison locations are exempt from this check.
    if (isa<UndefValue>(Val: VarValue) || isa<PoisonValue>(Val: VarValue))
      return;
    // We allow EntryValues for swift async arguments, as they have an
    // ABI-guarantee to be turned into a specific register.
    if (auto *ArgLoc = dyn_cast_or_null<Argument>(Val: VarValue);
        ArgLoc && ArgLoc->hasAttribute(Kind: Attribute::SwiftAsync))
      return;
  }

  CheckDI(!E->isEntryValue(),
          "Entry values are only allowed in MIR unless they target a "
          "swiftasync Argument",
          &DVR);
}
| 7355 | |
| 7356 | void Verifier::verifyCompileUnits() { |
| 7357 | // When more than one Module is imported into the same context, such as during |
| 7358 | // an LTO build before linking the modules, ODR type uniquing may cause types |
| 7359 | // to point to a different CU. This check does not make sense in this case. |
| 7360 | if (M.getContext().isODRUniquingDebugTypes()) |
| 7361 | return; |
| 7362 | auto *CUs = M.getNamedMetadata(Name: "llvm.dbg.cu" ); |
| 7363 | SmallPtrSet<const Metadata *, 2> Listed; |
| 7364 | if (CUs) |
| 7365 | Listed.insert_range(R: CUs->operands()); |
| 7366 | for (const auto *CU : CUVisited) |
| 7367 | CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu" , CU); |
| 7368 | CUVisited.clear(); |
| 7369 | } |
| 7370 | |
| 7371 | void Verifier::verifyDeoptimizeCallingConvs() { |
| 7372 | if (DeoptimizeDeclarations.empty()) |
| 7373 | return; |
| 7374 | |
| 7375 | const Function *First = DeoptimizeDeclarations[0]; |
| 7376 | for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(N: 1)) { |
| 7377 | Check(First->getCallingConv() == F->getCallingConv(), |
| 7378 | "All llvm.experimental.deoptimize declarations must have the same " |
| 7379 | "calling convention" , |
| 7380 | First, F); |
| 7381 | } |
| 7382 | } |
| 7383 | |
/// Verify a "clang.arc.attachedcall" operand bundle: the call must produce a
/// pointer (or be a no-return void call), and the bundle's single argument
/// must be one of the ObjC autoreleased-return-value runtime functions.
void Verifier::verifyAttachedCallBundle(const CallBase &Call,
                                        const OperandBundleUse &BU) {
  FunctionType *FTy = Call.getFunctionType();

  Check((FTy->getReturnType()->isPointerTy() ||
         (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
        "a call with operand bundle \"clang.arc.attachedcall\" must call a "
        "function returning a pointer or a non-returning function that has a "
        "void return type",
        Call);

  // The bundle must have exactly one input, and it must be a Function.
  Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
        "operand bundle \"clang.arc.attachedcall\" requires one function as "
        "an argument",
        Call);

  auto *Fn = cast<Function>(Val: BU.Inputs.front());
  Intrinsic::ID IID = Fn->getIntrinsicID();

  if (IID) {
    // Recognized as an intrinsic: must be one of the ObjC
    // *AutoreleasedReturnValue intrinsics.
    Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
           IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
           IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
          "invalid function argument", Call);
  } else {
    // Not an intrinsic; fall back to matching the runtime function by name.
    StringRef FnName = Fn->getName();
    Check((FnName == "objc_retainAutoreleasedReturnValue" ||
           FnName == "objc_claimAutoreleasedReturnValue" ||
           FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
          "invalid function argument", Call);
  }
}
| 7416 | |
/// Verify all llvm.experimental.noalias.scope.decl calls collected during
/// the function walk: each must declare exactly one scope and, when
/// VerifyNoAliasScopeDomination is set, no two declarations of the same
/// scope may dominate each other.
void Verifier::verifyNoAliasScopeDecl() {
  if (NoAliasScopeDecls.empty())
    return;

  // only a single scope must be declared at a time.
  for (auto *II : NoAliasScopeDecls) {
    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
           "Not a llvm.experimental.noalias.scope.decl ?");
    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
        Val: II->getOperand(i_nocapture: Intrinsic::NoAliasScopeDeclScopeArg));
    Check(ScopeListMV != nullptr,
          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
          "argument",
          II);

    // The scope-list operand must be an MDNode containing exactly one scope.
    const auto *ScopeListMD = dyn_cast<MDNode>(Val: ScopeListMV->getMetadata());
    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
    Check(ScopeListMD->getNumOperands() == 1,
          "!id.scope.list must point to a list with a single scope", II);
    visitAliasScopeListMetadata(MD: ScopeListMD);
  }

  // Only check the domination rule when requested. Once all passes have been
  // adapted this option can go away.
  if (!VerifyNoAliasScopeDomination)
    return;

  // Now sort the intrinsics based on the scope MDNode so that declarations of
  // the same scopes are next to each other.
  auto GetScope = [](IntrinsicInst *II) {
    const auto *ScopeListMV = cast<MetadataAsValue>(
        Val: II->getOperand(i_nocapture: Intrinsic::NoAliasScopeDeclScopeArg));
    return &cast<MDNode>(Val: ScopeListMV->getMetadata())->getOperand(I: 0);
  };

  // We are sorting on MDNode pointers here. For valid input IR this is ok.
  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
    return GetScope(Lhs) < GetScope(Rhs);
  };

  llvm::sort(C&: NoAliasScopeDecls, Comp: Compare);

  // Go over the intrinsics and check that for the same scope, they are not
  // dominating each other.
  auto ItCurrent = NoAliasScopeDecls.begin();
  while (ItCurrent != NoAliasScopeDecls.end()) {
    // Advance ItNext past the run of declarations sharing CurScope, giving
    // the half-open range [ItCurrent, ItNext).
    auto CurScope = GetScope(*ItCurrent);
    auto ItNext = ItCurrent;
    do {
      ++ItNext;
    } while (ItNext != NoAliasScopeDecls.end() &&
             GetScope(*ItNext) == CurScope);

    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they are not dominating each other.. but only if it is not too
    // expensive.
    if (ItNext - ItCurrent < 32)
      for (auto *I : llvm::make_range(x: ItCurrent, y: ItNext))
        for (auto *J : llvm::make_range(x: ItCurrent, y: ItNext))
          if (I != J)
            Check(!DT.dominates(I, J),
                  "llvm.experimental.noalias.scope.decl dominates another one "
                  "with the same scope",
                  I);
    ItCurrent = ItNext;
  }
}
| 7485 | |
| 7486 | //===----------------------------------------------------------------------===// |
| 7487 | // Implement the public interfaces to this file... |
| 7488 | //===----------------------------------------------------------------------===// |
| 7489 | |
| 7490 | bool llvm::verifyFunction(const Function &f, raw_ostream *OS) { |
| 7491 | Function &F = const_cast<Function &>(f); |
| 7492 | |
| 7493 | // Don't use a raw_null_ostream. Printing IR is expensive. |
| 7494 | Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent()); |
| 7495 | |
| 7496 | // Note that this function's return value is inverted from what you would |
| 7497 | // expect of a function called "verify". |
| 7498 | return !V.verify(F); |
| 7499 | } |
| 7500 | |
| 7501 | bool llvm::verifyModule(const Module &M, raw_ostream *OS, |
| 7502 | bool *BrokenDebugInfo) { |
| 7503 | // Don't use a raw_null_ostream. Printing IR is expensive. |
| 7504 | Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M); |
| 7505 | |
| 7506 | bool Broken = false; |
| 7507 | for (const Function &F : M) |
| 7508 | Broken |= !V.verify(F); |
| 7509 | |
| 7510 | Broken |= !V.verify(); |
| 7511 | if (BrokenDebugInfo) |
| 7512 | *BrokenDebugInfo = V.hasBrokenDebugInfo(); |
| 7513 | // Note that this function's return value is inverted from what you would |
| 7514 | // expect of a function called "verify". |
| 7515 | return Broken; |
| 7516 | } |
| 7517 | |
| 7518 | namespace { |
| 7519 | |
/// Legacy pass-manager wrapper around Verifier: verifies each function as it
/// is visited, and finishes with declaration and module-level checks.
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  // Single Verifier instance shared across the whole module; created in
  // doInitialization.
  std::unique_ptr<Verifier> V;
  // When true, abort compilation as soon as a broken function or module is
  // found.
  bool FatalErrors = true;

  VerifierLegacyPass() : FunctionPass(ID) {
    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID),
        FatalErrors(FatalErrors) {
    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    // Broken debug info is not treated as a hard error here; see
    // doFinalization, which consults hasBrokenDebugInfo() separately.
    V = std::make_unique<Verifier>(
        args: &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/args: false, args&: M);
    return false; // Module not modified.
  }

  bool runOnFunction(Function &F) override {
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error(reason: "Broken function found, compilation aborted!");
    }
    return false; // Function not modified.
  }

  bool doFinalization(Module &M) override {
    bool HasErrors = false;
    // runOnFunction is not invoked for declarations, so verify them here.
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    // Module-level checks (named metadata, compile units, ...).
    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error(reason: "Broken module found, compilation aborted!");
    return false; // Module not modified.
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // The verifier only inspects the IR; it preserves all analyses.
    AU.setPreservesAll();
  }
};
| 7565 | |
| 7566 | } // end anonymous namespace |
| 7567 | |
| 7568 | /// Helper to issue failure from the TBAA verification |
| 7569 | template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) { |
| 7570 | if (Diagnostic) |
| 7571 | return Diagnostic->CheckFailed(Args...); |
| 7572 | } |
| 7573 | |
// Check a TBAA-specific condition: on failure, report the remaining
// arguments via TBAAVerifier::CheckFailed and make the enclosing function
// return false (i.e. "invalid TBAA metadata").
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
| 7581 | |
/// Verify that \p BaseNode can be used as the "base type" in the struct-path
/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
/// struct-type node describing an aggregate data structure (like a struct).
/// Results are memoized per node in TBAABaseNodes.
TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
                                 bool IsNewFormat) {
  if (BaseNode->getNumOperands() < 2) {
    // {true, ~0u} is the "invalid node" summary (see verifyTBAABaseNodeImpl).
    CheckFailed(Args: "Base nodes must have at least two operands", Args: &I, Args&: BaseNode);
    return {true, ~0u};
  }

  // Return the cached summary if this node has been verified before.
  auto Itr = TBAABaseNodes.find(Val: BaseNode);
  if (Itr != TBAABaseNodes.end())
    return Itr->second;

  // First time seeing this node: verify it and cache the result.
  auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
  auto InsertResult = TBAABaseNodes.insert(KV: {BaseNode, Result});
  (void)InsertResult;
  assert(InsertResult.second && "We just checked!");
  return Result;
}
| 7603 | |
/// Worker for \c verifyTBAABaseNode: classifies \p BaseNode as a scalar or
/// a struct-type node and validates its field list. Returns a summary of
/// {Invalid, BitWidth}, where BitWidth is the common bit width of the field
/// offsets (~0u when unknown or not applicable).
TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
                                     bool IsNewFormat) {
  // Canonical "malformed node" result: invalid, unknown offset bit width.
  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};

  if (BaseNode->getNumOperands() == 2) {
    // Scalar nodes can only be accessed at offset 0.
    return isValidScalarTBAANode(MD: BaseNode)
               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
               : InvalidNode;
  }

  // Operand-count sanity check: new-format nodes carry (type, offset, size)
  // triples; old-format nodes carry a name followed by (type, offset) pairs.
  if (IsNewFormat) {
    if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed(Args: "Access tag nodes must have the number of operands that is a "
                  "multiple of 3!" , Args&: BaseNode);
      return InvalidNode;
    }
  } else {
    if (BaseNode->getNumOperands() % 2 != 1) {
      CheckFailed(Args: "Struct tag nodes must have an odd number of operands!" ,
                  Args&: BaseNode);
      return InvalidNode;
    }
  }

  // Check the type size field.
  if (IsNewFormat) {
    auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        MD: BaseNode->getOperand(I: 1));
    if (!TypeSizeNode) {
      CheckFailed(Args: "Type size nodes must be constants!" , Args: &I, Args&: BaseNode);
      return InvalidNode;
    }
  }

  // Check the type name field. In the new format it can be anything.
  if (!IsNewFormat && !isa<MDString>(Val: BaseNode->getOperand(I: 0))) {
    CheckFailed(Args: "Struct tag nodes have a string as their first operand" ,
                Args&: BaseNode);
    return InvalidNode;
  }

  // Accumulate failures across all fields instead of bailing at the first
  // one, so a single verifier run reports every problem in the node.
  bool Failed = false;

  // Offset of the previously visited field, used to enforce that offsets
  // are non-decreasing.
  std::optional<APInt> PrevOffset;
  // Common bit width of all field offsets; ~0u until the first one is seen.
  unsigned BitWidth = ~0u;

  // We've already checked that BaseNode is not a degenerate root node with one
  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    const MDOperand &FieldTy = BaseNode->getOperand(I: Idx);
    const MDOperand &FieldOffset = BaseNode->getOperand(I: Idx + 1);
    if (!isa<MDNode>(Val: FieldTy)) {
      CheckFailed(Args: "Incorrect field entry in struct type node!" , Args: &I, Args&: BaseNode);
      Failed = true;
      continue;
    }

    auto *OffsetEntryCI =
        mdconst::dyn_extract_or_null<ConstantInt>(MD: FieldOffset);
    if (!OffsetEntryCI) {
      CheckFailed(Args: "Offset entries must be constants!" , Args: &I, Args&: BaseNode);
      Failed = true;
      continue;
    }

    // The first constant offset seen fixes the expected bit width for the
    // rest of the fields.
    if (BitWidth == ~0u)
      BitWidth = OffsetEntryCI->getBitWidth();

    if (OffsetEntryCI->getBitWidth() != BitWidth) {
      CheckFailed(
          Args: "Bitwidth between the offsets and struct type entries must match" , Args: &I,
          Args&: BaseNode);
      Failed = true;
      continue;
    }

    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero size bit fields. When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field lexically the latest in struct type metadata node. This
    // mirrors the actual behavior of the alias analysis implementation.
    bool IsAscending =
        !PrevOffset || PrevOffset->ule(RHS: OffsetEntryCI->getValue());

    if (!IsAscending) {
      CheckFailed(Args: "Offsets must be increasing!" , Args: &I, Args&: BaseNode);
      Failed = true;
    }

    PrevOffset = OffsetEntryCI->getValue();

    // The new format adds a per-field member-size entry after the offset.
    if (IsNewFormat) {
      auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
          MD: BaseNode->getOperand(I: Idx + 2));
      if (!MemberSizeNode) {
        CheckFailed(Args: "Member size entries must be constants!" , Args: &I, Args&: BaseNode);
        Failed = true;
        continue;
      }
    }
  }

  return Failed ? InvalidNode
                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
}
| 7714 | |
| 7715 | static bool IsRootTBAANode(const MDNode *MD) { |
| 7716 | return MD->getNumOperands() < 2; |
| 7717 | } |
| 7718 | |
| 7719 | static bool IsScalarTBAANodeImpl(const MDNode *MD, |
| 7720 | SmallPtrSetImpl<const MDNode *> &Visited) { |
| 7721 | if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3) |
| 7722 | return false; |
| 7723 | |
| 7724 | if (!isa<MDString>(Val: MD->getOperand(I: 0))) |
| 7725 | return false; |
| 7726 | |
| 7727 | if (MD->getNumOperands() == 3) { |
| 7728 | auto *Offset = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 2)); |
| 7729 | if (!(Offset && Offset->isZero() && isa<MDString>(Val: MD->getOperand(I: 0)))) |
| 7730 | return false; |
| 7731 | } |
| 7732 | |
| 7733 | auto *Parent = dyn_cast_or_null<MDNode>(Val: MD->getOperand(I: 1)); |
| 7734 | return Parent && Visited.insert(Ptr: Parent).second && |
| 7735 | (IsRootTBAANode(MD: Parent) || IsScalarTBAANodeImpl(MD: Parent, Visited)); |
| 7736 | } |
| 7737 | |
| 7738 | bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) { |
| 7739 | auto ResultIt = TBAAScalarNodes.find(Val: MD); |
| 7740 | if (ResultIt != TBAAScalarNodes.end()) |
| 7741 | return ResultIt->second; |
| 7742 | |
| 7743 | SmallPtrSet<const MDNode *, 4> Visited; |
| 7744 | bool Result = IsScalarTBAANodeImpl(MD, Visited); |
| 7745 | auto InsertResult = TBAAScalarNodes.insert(KV: {MD, Result}); |
| 7746 | (void)InsertResult; |
| 7747 | assert(InsertResult.second && "Just checked!" ); |
| 7748 | |
| 7749 | return Result; |
| 7750 | } |
| 7751 | |
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!" );

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(Val: BaseNode->getOperand(I: 1));

  // Walk the field entries (which verifyTBAABaseNodeImpl has checked to be
  // in non-decreasing offset order) looking for the last field whose start
  // offset is <= \p Offset.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(MD: BaseNode->getOperand(I: Idx + 1));
    if (OffsetEntryCI->getValue().ugt(RHS: Offset)) {
      // Even the first field starts past Offset: the access does not land in
      // any field of this struct node.
      if (Idx == FirstFieldOpNo) {
        CheckFailed(Args: "Could not find TBAA parent in struct type node" , Args: &I,
                    Args&: BaseNode, Args: &Offset);
        return nullptr;
      }

      // The previous field contains Offset. Rebase Offset to be relative to
      // the start of that field before descending into it.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(MD: BaseNode->getOperand(I: PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(Val: BaseNode->getOperand(I: PrevIdx));
    }
  }

  // Offset is at or past the start of the last field: descend into it,
  // rebasing Offset the same way as above.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      MD: BaseNode->getOperand(I: LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(Val: BaseNode->getOperand(I: LastIdx));
}
| 7795 | |
| 7796 | static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) { |
| 7797 | if (!Type || Type->getNumOperands() < 3) |
| 7798 | return false; |
| 7799 | |
| 7800 | // In the new format type nodes shall have a reference to the parent type as |
| 7801 | // its first operand. |
| 7802 | return isa_and_nonnull<MDNode>(Val: Type->getOperand(I: 0)); |
| 7803 | } |
| 7804 | |
/// Verify the TBAA access tag \p MD attached to instruction \p I: checks the
/// operand counts for both the old (struct-path) and new formats, validates
/// the base and access-type nodes, then walks the struct path from the base
/// node towards the access type, checking offsets and bit widths on the way.
/// Returns false (after reporting via CheckFailed) on any malformation.
bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
  CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands" ,
            &I, MD);

  // Only memory-accessing instructions may carry a TBAA access tag.
  CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
                isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
                isa<AtomicCmpXchgInst>(I),
            "This instruction shall not have a TBAA access tag!" , &I);

  // Struct-path tags start with an MDNode and have at least base, access-type
  // and offset operands; anything else is rejected as old-style TBAA.
  bool IsStructPathTBAA =
      isa<MDNode>(Val: MD->getOperand(I: 0)) && MD->getNumOperands() >= 3;

  CheckTBAA(IsStructPathTBAA,
            "Old-style TBAA is no longer allowed, use struct-path TBAA instead" ,
            &I);

  MDNode *BaseNode = dyn_cast_or_null<MDNode>(Val: MD->getOperand(I: 0));
  MDNode *AccessType = dyn_cast_or_null<MDNode>(Val: MD->getOperand(I: 1));

  bool IsNewFormat = isNewFormatTBAATypeNode(Type: AccessType);

  // Arity depends on the format; the one optional extra operand is the
  // immutability flag validated below.
  if (IsNewFormat) {
    CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
              "Access tag metadata must have either 4 or 5 operands" , &I, MD);
  } else {
    CheckTBAA(MD->getNumOperands() < 5,
              "Struct tag metadata must have either 3 or 4 operands" , &I, MD);
  }

  // Check the access size field.
  if (IsNewFormat) {
    auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        MD: MD->getOperand(I: 3));
    CheckTBAA(AccessSizeNode, "Access size field must be a constant" , &I, MD);
  }

  // Check the immutability flag.
  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
    auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
        MD: MD->getOperand(I: ImmutabilityFlagOpNo));
    CheckTBAA(IsImmutableCI,
              "Immutability tag on struct tag metadata must be a constant" , &I,
              MD);
    CheckTBAA(
        IsImmutableCI->isZero() || IsImmutableCI->isOne(),
        "Immutability part of the struct tag metadata must be either 0 or 1" ,
        &I, MD);
  }

  CheckTBAA(BaseNode && AccessType,
            "Malformed struct tag metadata: base and access-type "
            "should be non-null and point to Metadata nodes" ,
            &I, MD, BaseNode, AccessType);

  // In the old format the access type must itself be a valid scalar leaf.
  if (!IsNewFormat) {
    CheckTBAA(isValidScalarTBAANode(AccessType),
              "Access type node must be a valid scalar type" , &I, MD,
              AccessType);
  }

  auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD: MD->getOperand(I: 2));
  CheckTBAA(OffsetCI, "Offset must be constant integer" , &I, MD);

  APInt Offset = OffsetCI->getValue();
  bool SeenAccessTypeInPath = false;

  // Guards the walk below against malformed metadata forming a cycle.
  SmallPtrSet<MDNode *, 4> StructPath;

  // Descend from the base node one field at a time (Offset is rebased at
  // each step by getFieldNodeFromTBAABaseNode) until we reach a root node
  // or, in the new format, the access type itself.
  for (/* empty */; BaseNode && !IsRootTBAANode(MD: BaseNode);
       BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
                                               IsNewFormat)) {
    if (!StructPath.insert(Ptr: BaseNode).second) {
      CheckFailed(Args: "Cycle detected in struct path" , Args: &I, Args&: MD);
      return false;
    }

    bool Invalid;
    unsigned BaseNodeBitWidth;
    std::tie(args&: Invalid, args&: BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
                                                             IsNewFormat);

    // If the base node is invalid in itself, then we've already printed all the
    // errors we wanted to print.
    if (Invalid)
      return false;

    SeenAccessTypeInPath |= BaseNode == AccessType;

    // At a scalar node (or once we reach the access type) there is nothing
    // left to index into, so the remaining offset must be exactly zero.
    if (isValidScalarTBAANode(MD: BaseNode) || BaseNode == AccessType)
      CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access" ,
                &I, MD, &Offset);

    CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
                  (BaseNodeBitWidth == 0 && Offset == 0) ||
                  (IsNewFormat && BaseNodeBitWidth == ~0u),
              "Access bit-width not the same as description bit-width" , &I, MD,
              BaseNodeBitWidth, Offset.getBitWidth());

    if (IsNewFormat && SeenAccessTypeInPath)
      break;
  }

  CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!" , &I,
            MD);
  return true;
}
| 7912 | |
// Legacy pass-manager registration boilerplate for the verifier pass.
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify" , "Module Verifier" , false, false)
| 7915 | |
| 7916 | FunctionPass *llvm::createVerifierPass(bool FatalErrors) { |
| 7917 | return new VerifierLegacyPass(FatalErrors); |
| 7918 | } |
| 7919 | |
// Unique key identifying VerifierAnalysis to the new-PM analysis managers.
AnalysisKey VerifierAnalysis::Key;
| 7921 | VerifierAnalysis::Result VerifierAnalysis::run(Module &M, |
| 7922 | ModuleAnalysisManager &) { |
| 7923 | Result Res; |
| 7924 | Res.IRBroken = llvm::verifyModule(M, OS: &dbgs(), BrokenDebugInfo: &Res.DebugInfoBroken); |
| 7925 | return Res; |
| 7926 | } |
| 7927 | |
| 7928 | VerifierAnalysis::Result VerifierAnalysis::run(Function &F, |
| 7929 | FunctionAnalysisManager &) { |
| 7930 | return { .IRBroken: llvm::verifyFunction(f: F, OS: &dbgs()), .DebugInfoBroken: false }; |
| 7931 | } |
| 7932 | |
| 7933 | PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) { |
| 7934 | auto Res = AM.getResult<VerifierAnalysis>(IR&: M); |
| 7935 | if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken)) |
| 7936 | report_fatal_error(reason: "Broken module found, compilation aborted!" ); |
| 7937 | |
| 7938 | return PreservedAnalyses::all(); |
| 7939 | } |
| 7940 | |
| 7941 | PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) { |
| 7942 | auto res = AM.getResult<VerifierAnalysis>(IR&: F); |
| 7943 | if (res.IRBroken && FatalErrors) |
| 7944 | report_fatal_error(reason: "Broken function found, compilation aborted!" ); |
| 7945 | |
| 7946 | return PreservedAnalyses::all(); |
| 7947 | } |
| 7948 | |