//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or UnknownSize if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::getFixed(Size);
  return std::nullopt;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size. Bails out early unless the root object is passed as the first
/// parameter.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" differs slightly between the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory q[0:19]. So, getObjectSize(q) should return 20.
  //
  // In the context of c3, the "object" refers to the chunk of memory being
  // allocated. So, the "object" has 100 bytes, and q points to the middle of
  // the "object". However, unless p, the root object, is passed as the first
  // parameter, the call to isIdentifiedObject() makes isObjectSmallerThan()
  // bail out early.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we use the query
/// location size and the fact that null pointers cannot alias here.
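/// For example, if \p V is dereferenceable(16) (and null is not a valid
/// location), a query with a precise 4-byte location size yields an extent
/// of 16 bytes: all 16 dereferenceable bytes belong to the object.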
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}

/// Return true if both V1 and V2 are VScale.
static bool areBothVScale(const Value *V1, const Value *V2) {
  return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
         PatternMatch::match(V2, PatternMatch::m_VScale());
}

//===----------------------------------------------------------------------===//
// CaptureAnalysis implementations
//===----------------------------------------------------------------------===//

CaptureAnalysis::~CaptureAnalysis() = default;

CaptureComponents SimpleCaptureAnalysis::getCapturesBefore(const Value *Object,
                                                           const Instruction *I,
                                                           bool OrAt) {
  if (!isIdentifiedFunctionLocal(Object))
    return CaptureComponents::Provenance;

  auto [CacheIt, Inserted] =
      IsCapturedCache.insert({Object, CaptureComponents::Provenance});
  if (!Inserted)
    return CacheIt->second;

  CaptureComponents Ret = PointerMayBeCaptured(
      Object, /*ReturnCaptures=*/false, CaptureComponents::Provenance,
      [](CaptureComponents CC) { return capturesFullProvenance(CC); });
  CacheIt->second = Ret;
  return Ret;
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}

CaptureComponents
EarliestEscapeAnalysis::getCapturesBefore(const Value *Object,
                                          const Instruction *I, bool OrAt) {
  if (!isIdentifiedFunctionLocal(Object))
    return CaptureComponents::Provenance;

  auto Iter = EarliestEscapes.try_emplace(Object);
  if (Iter.second) {
    std::pair<Instruction *, CaptureComponents> EarliestCapture =
        FindEarliestCapture(
            Object, *const_cast<Function *>(DT.getRoot()->getParent()),
            /*ReturnCaptures=*/false, DT, CaptureComponents::Provenance);
    if (EarliestCapture.first)
      Inst2Obj[EarliestCapture.first].push_back(Object);
    Iter.first->second = EarliestCapture;
  }

  auto IsNotCapturedBefore = [&]() {
    // No capturing instruction.
    Instruction *CaptureInst = Iter.first->second.first;
    if (!CaptureInst)
      return true;

    // No context instruction means any use is capturing.
    if (!I)
      return false;

    if (I == CaptureInst) {
      if (OrAt)
        return false;
      return isNotInCycle(I, &DT, LI);
    }

    return !isPotentiallyReachable(CaptureInst, I, nullptr, &DT, LI);
  };
  if (IsNotCapturedBefore())
    return CaptureComponents::None;
  return Iter.first->second.second;
}

void EarliestEscapeAnalysis::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether trunc(V) is non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }

  /// Replace V with zext(NewV).
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(zext(NewV)) == zext(NewV)
    // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
    // The nneg can be preserved from the inner zext here but must be dropped
    // from the outer.
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }

  /// Replace V with sext(NewV).
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(sext(sext(NewV))) == zext<nneg>(sext(NewV))
    // The nneg can be preserved on the outer zext here.
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }

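  // For example, with V of type i32, TruncBits=16, SExtBits=8 and ZExtBits=8,
  // a 32-bit input N evaluates as zext(sext(trunc(N to i16) to i24) to i32),
  // mirroring the zext(sext(trunc(V))) structure this class represents.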
  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (IsNonNegative && !N.isAllNonNegative())
      N = N.intersectWith(
          ConstantRange(APInt::getZero(N.getBitWidth()),
                        APInt::getSignedMinValue(N.getBitWidth())));
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    if (V->getType() != Other.V->getType())
      return false;

    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;
    // If either CastedValue has a nneg zext then the sext/zext bits are
    // interchangeable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);
    return false;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NUW.
  bool IsNUW;
  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNUW, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val)
      : Val(Val), IsNUW(true), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
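    // For example, in i8: ((-100 +nsw 50) *nsw 2) == -100 does not wrap, but
    // the distributed form (-100 *nsw 2) +nsw (50 *nsw 2) wraps on the first
    // multiply.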
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
    return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
  }
};
} // namespace

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
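/// For example, given "%a = add nsw i32 %x, 4" and "%b = shl nsw i32 %a, 1",
/// analyzing %b yields Scale=2 and Offset=8 relative to %x.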
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true, true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNUW = false; // sub nuw x, y is not add nuw x, -y.
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NUW, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), NSW), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
        Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
} // namespace

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP.
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Nowrap flags common to all GEP operations involved in the expression.
  GEPNoWrapFlags NWFlags = GEPNoWrapFlags::all();

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
       << ", inbounds=" << (NWFlags.isInBounds() ? "1" : "0")
       << ", nuw=" << (NWFlags.hasNoUnsignedWrap() ? "1" : "0")
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
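///
/// For example, on a 64-bit target "getelementptr i32, ptr %p, i64 %i"
/// decomposes into Base=%p, Offset=0 and a single variable index %i with
/// Scale=4 (the element size of i32).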
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned IndexSize = DL.getIndexTypeSizeInBits(V->getType());
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(IndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      Value *NewV = Op->getOperand(0);
      // Don't look through casts between address spaces with differing index
      // widths.
      if (DL.getIndexTypeSizeInBits(NewV->getType()) != IndexSize) {
        Decomposed.Base = V;
        return Decomposed;
      }
      V = NewV;
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, which can't be expressed
        // with attributes but return an aliasing pointer. Because other
        // analyses may assume that a nocapture pointer is not returned from
        // such intrinsics (the function would otherwise have to be marked
        // with a returned attribute), it is crucial to use this helper so
        // that we stay in sync with CaptureTracking. Not using it may cause
        // weird miscompilations where two aliasing pointers are assumed to
        // be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track the common nowrap flags for all GEPs we see.
    Decomposed.NWFlags &= GEPOp->getNoWrapFlags();

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(IndexSize);
        continue;
      }

      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to the index size.
      bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
      bool NUW = GEPOp->hasNoUnsignedWrap();
      bool NonNeg = NUSW && NUW;
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), NUW, NUSW);
      Decomposed.Offset += LE.Offset;
      APInt Scale = LE.Scale;
      if (!LE.IsNUW)
        Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // We cannot guarantee no-wrap for the merge.
          LE.IsNSW = LE.IsNUW = false;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
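    //
    // For example, memory reachable only through a readonly noalias argument
    // cannot be written while the function executes, so accesses through it
    // can be reported as Ref rather than ModRef.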
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    if (Call->isVolatile()) {
      // Volatile operations also access inaccessible memory.
      FuncME |= MemoryEffects::inaccessibleMemOnly();
    }
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->doesNotAccessMemory(ArgIdx))
    return ModRefInfo::NoModRef;

  if (Call->onlyWritesMemory(ArgIdx))
    return ModRefInfo::Mod;

  if (Call->onlyReadsMemory(ArgIdx))
    return ModRefInfo::Ref;

  return ModRefInfo::ModRef;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // We can completely ignore inaccessible memory here, because MemoryLocations
  // can only reference accessible memory.
  auto ME = AAQI.AAR.getMemoryEffects(Call, AAQI)
                .getWithoutLoc(IRMemLocation::InaccessibleMem);
  if (ME.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  ModRefInfo ArgMR = ME.getModRef(IRMemLocation::ArgMem);
  ModRefInfo OtherMR = ME.getWithoutLoc(IRMemLocation::ArgMem).getModRef();

  // An identified function-local object that does not escape can only be
  // accessed via call arguments. Reduce OtherMR (which includes accesses to
  // escaped memory) based on that.
  //
  // We model calls that can return twice (setjmp) as clobbering non-escaping
  // objects, to model any accesses that may occur prior to the second return.
  // As an exception, ignore allocas, as setjmp is not required to preserve
  // non-volatile stores for them.
  if (isModOrRefSet(OtherMR) && !isa<Constant>(Object) && Call != Object &&
      (isa<AllocaInst>(Object) || !Call->hasFnAttr(Attribute::ReturnsTwice))) {
    CaptureComponents CC =
        AAQI.CA->getCapturesBefore(Object, Call, /*OrAt=*/false);
    if (capturesNothing(CC))
      OtherMR = ModRefInfo::NoModRef;
    else if (capturesReadProvenanceOnly(CC))
      OtherMR = ModRefInfo::Ref;
  }

  // Refine the modref info for argument memory. We only bother to do this
  // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
  // on the final result.
  if ((ArgMR | OtherMR) != OtherMR) {
    ModRefInfo NewArgMR = ModRefInfo::NoModRef;
    for (const Use &U : Call->data_ops()) {
      const Value *Arg = U;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned ArgIdx = Call->getDataOperandNo(&U);
      MemoryLocation ArgLoc =
          Call->isArgOperand(&U)
              ? MemoryLocation::getForArgument(Call, ArgIdx, TLI)
              : MemoryLocation::getBeforeOrAfter(Arg);
      AliasResult ArgAlias = AAQI.AAR.alias(ArgLoc, Loc, AAQI, Call);
      if (ArgAlias != AliasResult::NoAlias)
        NewArgMR |= ArgMR & AAQI.AAR.getArgModRefInfo(Call, ArgIdx);

      // Exit early if we cannot improve over the original ArgMR.
      if (NewArgMR == ArgMR)
        break;
    }
    ArgMR = NewArgMR;
  }

  ModRefInfo Result = ArgMR | OtherMR;
  if (!isModAndRefSet(Result))
    return Result;

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation would cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // Be conservative.
  return ModRefInfo::ModRef;
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  auto BaseObjectsAlias = [&]() {
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  };

  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    return BaseObjectsAlias();
  }

  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Fall back to base objects if pointers have different index widths.
  if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
    return BaseObjectsAlias();

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(DecompGEP1, DecompGEP2);
    std::swap(V1Size, V2Size);
    std::swap(UnderlyingV1, UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
  // sizes.
  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(-V1Size.getValue()) &&
      isBaseOfObject(DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by the right pointer is nested. Save the offset
          // in the cache. Note that the offset was originally estimated as
          // GEP1-V2, but AliasResult contains the shift that represents
          // GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    } else {
      // We can use getVScaleRange to prove that Off >= (CR.upper * LSize).
      ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
      bool Overflow;
      APInt UpperRange = CR.getUnsignedMax().umul_ov(
          APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
      if (!Overflow && Off.uge(UpperRange))
        return AliasResult::NoAlias;
    }
  }

  // VScale Alias Analysis - Given one scalable offset between accesses and a
  // scalable typesize, we can divide each side by vscale, treating both values
  // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
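  // For example, two accesses of <vscale x 4 x i32> (16*vscale bytes) that
  // are 16*vscale bytes apart cannot overlap: dividing both the dependency
  // distance and the type size by vscale leaves 16 >= 16.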
| 1218 | if (DecompGEP1.VarIndices.size() == 1 && |
| 1219 | DecompGEP1.VarIndices[0].Val.TruncBits == 0 && |
| 1220 | DecompGEP1.Offset.isZero() && |
| 1221 | PatternMatch::match(V: DecompGEP1.VarIndices[0].Val.V, |
| 1222 | P: PatternMatch::m_VScale())) { |
| 1223 | const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0]; |
| 1224 | APInt Scale = |
| 1225 | ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale; |
| 1226 | LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size; |
| 1227 | |
| 1228 | // Check if the offset is known to not overflow, if it does then attempt to |
| 1229 | // prove it with the known values of vscale_range. |
| 1230 | bool Overflows = !DecompGEP1.VarIndices[0].IsNSW; |
| 1231 | if (Overflows) { |
| 1232 | ConstantRange CR = getVScaleRange(F: &F, BitWidth: Scale.getBitWidth()); |
| 1233 | (void)CR.getSignedMax().smul_ov(RHS: Scale, Overflow&: Overflows); |
| 1234 | } |
| 1235 | |
| 1236 | if (!Overflows) { |
| 1237 | // Note that we do not check that the typesize is scalable, as vscale >= 1 |
| 1238 | // so noalias still holds so long as the dependency distance is at least |
| 1239 | // as big as the typesize. |
| 1240 | if (VLeftSize.hasValue() && |
| 1241 | Scale.abs().uge(RHS: VLeftSize.getValue().getKnownMinValue())) |
| 1242 | return AliasResult::NoAlias; |
| 1243 | } |
| 1244 | } |
| 1245 | |
| 1246 | // If the difference between pointers is Offset +<nuw> Indices then we know |
| 1247 | // that the addition does not wrap the pointer index type (add nuw) and the |
| 1248 | // constant Offset is a lower bound on the distance between the pointers. We |
| 1249 | // can then prove NoAlias via Offset u>= VLeftSize. |
| 1250 | // + + + |
| 1251 | // | BaseOffset | +<nuw> Indices | |
| 1252 | // ---------------->|-------------------->| |
| 1253 | // |-->V2Size | |-------> V1Size |
| 1254 | // LHS RHS |
| 1255 | if (!DecompGEP1.VarIndices.empty() && |
| 1256 | DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() && |
| 1257 | !V2Size.isScalable() && DecompGEP1.Offset.uge(RHS: V2Size.getValue())) |
| 1258 | return AliasResult::NoAlias; |
| 1259 | |
| 1260 | // Bail on analysing scalable LocationSize |
| 1261 | if (V1Size.isScalable() || V2Size.isScalable()) |
| 1262 | return AliasResult::MayAlias; |
| 1263 | |
| 1264 | // We need to know both access sizes for all the following heuristics. Don't |
| 1265 | // try to reason about sizes larger than the index space. |
| 1266 | unsigned BW = DecompGEP1.Offset.getBitWidth(); |
| 1267 | if (!V1Size.hasValue() || !V2Size.hasValue() || |
| 1268 | !isUIntN(N: BW, x: V1Size.getValue()) || !isUIntN(N: BW, x: V2Size.getValue())) |
| 1269 | return AliasResult::MayAlias; |
| 1270 | |
| 1271 | APInt GCD; |
| 1272 | ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset); |
| 1273 | for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) { |
| 1274 | const VariableGEPIndex &Index = DecompGEP1.VarIndices[i]; |
| 1275 | const APInt &Scale = Index.Scale; |
| 1276 | APInt ScaleForGCD = Scale; |
| 1277 | if (!Index.IsNSW) |
| 1278 | ScaleForGCD = |
| 1279 | APInt::getOneBitSet(numBits: Scale.getBitWidth(), BitNo: Scale.countr_zero()); |
| 1280 | |
| 1281 | if (i == 0) |
| 1282 | GCD = ScaleForGCD.abs(); |
| 1283 | else |
| 1284 | GCD = APIntOps::GreatestCommonDivisor(A: GCD, B: ScaleForGCD.abs()); |
| 1285 | |
| 1286 | ConstantRange CR = computeConstantRange(V: Index.Val.V, /* ForSigned */ false, |
| 1287 | UseInstrInfo: true, AC: &AC, CtxI: Index.CxtI); |
| 1288 | KnownBits Known = computeKnownBits(V: Index.Val.V, DL, AC: &AC, CxtI: Index.CxtI, DT); |
| 1289 | CR = CR.intersectWith( |
| 1290 | CR: ConstantRange::fromKnownBits(Known, /* Signed */ IsSigned: true), |
| 1291 | Type: ConstantRange::Signed); |
| 1292 | CR = Index.Val.evaluateWith(N: CR).sextOrTrunc(BitWidth: OffsetRange.getBitWidth()); |
| 1293 | |
| 1294 | assert(OffsetRange.getBitWidth() == Scale.getBitWidth() && |
| 1295 | "Bit widths are normalized to MaxIndexSize" ); |
| 1296 | if (Index.IsNSW) |
| 1297 | CR = CR.smul_sat(Other: ConstantRange(Scale)); |
| 1298 | else |
| 1299 | CR = CR.smul_fast(Other: ConstantRange(Scale)); |
| 1300 | |
| 1301 | if (Index.IsNegated) |
| 1302 | OffsetRange = OffsetRange.sub(Other: CR); |
| 1303 | else |
| 1304 | OffsetRange = OffsetRange.add(Other: CR); |
| 1305 | } |
| 1306 | |
| 1307 | // We now have accesses at two offsets from the same base: |
| 1308 | // 1. (...)*GCD + DecompGEP1.Offset with size V1Size |
| 1309 | // 2. 0 with size V2Size |
| 1310 | // Using arithmetic modulo GCD, the accesses are at |
| 1311 | // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits |
| 1312 | // into the range [V2Size..GCD), then we know they cannot overlap. |
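| | // For example (illustrative numbers): with GCD == 8, Offset == 4 and
| | // V1Size == V2Size == 4, the first access lies in [4..8) modulo 8 and the
| | // second in [0..4), so the accesses cannot overlap.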
| 1313 | APInt ModOffset = DecompGEP1.Offset.srem(RHS: GCD); |
| 1314 | if (ModOffset.isNegative()) |
| 1315 | ModOffset += GCD; // We want mod, not rem. |
| 1316 | if (ModOffset.uge(RHS: V2Size.getValue()) && |
| 1317 | (GCD - ModOffset).uge(RHS: V1Size.getValue())) |
| 1318 | return AliasResult::NoAlias; |
| 1319 | |
| 1320 | // Compute ranges of potentially accessed bytes for both accesses. If the
| 1321 | // intersection is empty, there can be no overlap.
| 1322 | ConstantRange Range1 = OffsetRange.add( |
| 1323 | Other: ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue()))); |
| 1324 | ConstantRange Range2 = |
| 1325 | ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue())); |
| 1326 | if (Range1.intersectWith(CR: Range2).isEmptySet()) |
| 1327 | return AliasResult::NoAlias; |
| 1328 | |
| 1329 | // Check if abs(V*Scale) >= abs(Scale) holds in the presence of |
| 1330 | // potentially wrapping math. |
| 1331 | auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) { |
| 1332 | if (Var.IsNSW) |
| 1333 | return true; |
| 1334 | |
| 1335 | int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits(); |
| 1336 | // Check whether Scale is small enough that abs(V*Scale) >= abs(Scale)
| 1337 | // holds. The max value of abs(V) is 2^ValOrigBW - 1, so multiplying by a
| 1338 | // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
| 1339 | int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW; |
| 1340 | if (MaxScaleValueBW <= 0) |
| 1341 | return false; |
| 1342 | return Var.Scale.ule( |
| 1343 | RHS: APInt::getMaxValue(numBits: MaxScaleValueBW).zext(width: Var.Scale.getBitWidth())); |
| 1344 | }; |
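| | // For instance (illustrative): if Val is an i32 zero-extended to an i64
| | // index, ValOrigBW == 32 and MaxScaleValueBW == 32, so any Scale u<=
| | // 2^32 - 1 cannot wrap the 64-bit multiplication.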
| 1345 | |
| 1346 | // Try to determine the range of values for VarIndex such that |
| 1347 | // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex. |
| 1348 | std::optional<APInt> MinAbsVarIndex; |
| 1349 | if (DecompGEP1.VarIndices.size() == 1) { |
| 1350 | // VarIndex = Scale*V. |
| 1351 | const VariableGEPIndex &Var = DecompGEP1.VarIndices[0]; |
| 1352 | if (Var.Val.TruncBits == 0 && |
| 1353 | isKnownNonZero(V: Var.Val.V, Q: SimplifyQuery(DL, DT, &AC, Var.CxtI))) { |
| 1354 | // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the |
| 1355 | // presence of potentially wrapping math. |
| 1356 | if (MultiplyByScaleNoWrap(Var)) { |
| 1357 | // If V != 0 then abs(VarIndex) >= abs(Scale). |
| 1358 | MinAbsVarIndex = Var.Scale.abs(); |
| 1359 | } |
| 1360 | } |
| 1361 | } else if (DecompGEP1.VarIndices.size() == 2) { |
| 1362 | // VarIndex = Scale*V0 + (-Scale)*V1. |
| 1363 | // If V0 != V1 then abs(VarIndex) >= abs(Scale). |
| 1364 | // Check that MayBeCrossIteration is false, to avoid reasoning about |
| 1365 | // inequality of values across loop iterations. |
| 1366 | const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0]; |
| 1367 | const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1]; |
| 1368 | if (Var0.hasNegatedScaleOf(Other: Var1) && Var0.Val.TruncBits == 0 && |
| 1369 | Var0.Val.hasSameCastsAs(Other: Var1.Val) && !AAQI.MayBeCrossIteration && |
| 1370 | MultiplyByScaleNoWrap(Var0) && MultiplyByScaleNoWrap(Var1) && |
| 1371 | isKnownNonEqual(V1: Var0.Val.V, V2: Var1.Val.V, |
| 1372 | SQ: SimplifyQuery(DL, DT, &AC, /*CxtI=*/Var0.CxtI |
| 1373 | ? Var0.CxtI |
| 1374 | : Var1.CxtI))) |
| 1375 | MinAbsVarIndex = Var0.Scale.abs(); |
| 1376 | } |
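| | // A typical instance of the two-index case (hypothetical IR):
| | //   %gep0 = getelementptr i32, ptr %p, i64 %i
| | //   %gep1 = getelementptr i32, ptr %p, i64 %j
| | // with %i != %j known: the difference decomposes to 4*%i - 4*%j, so the
| | // two pointers are at least 4 bytes apart.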
| 1377 | |
| 1378 | if (MinAbsVarIndex) { |
| 1379 | // The constant offset will have at least +/-MinAbsVarIndex added to it.
| 1380 | APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex; |
| 1381 | APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex; |
| 1382 | // We know that Offset <= OffsetLo || Offset >= OffsetHi |
| 1383 | if (OffsetLo.isNegative() && (-OffsetLo).uge(RHS: V1Size.getValue()) && |
| 1384 | OffsetHi.isNonNegative() && OffsetHi.uge(RHS: V2Size.getValue())) |
| 1385 | return AliasResult::NoAlias; |
| 1386 | } |
| 1387 | |
| 1388 | if (constantOffsetHeuristic(GEP: DecompGEP1, V1Size, V2Size, AC: &AC, DT, AAQI)) |
| 1389 | return AliasResult::NoAlias; |
| 1390 | |
| 1391 | // Statically, we can see that the base objects are the same, but the |
| 1392 | // pointers have dynamic offsets which we can't resolve. And none of our |
| 1393 | // little tricks above worked. |
| 1394 | return AliasResult::MayAlias; |
| 1395 | } |
| 1396 | |
| 1397 | static AliasResult MergeAliasResults(AliasResult A, AliasResult B) { |
| 1398 | // If the results agree, take it. |
| 1399 | if (A == B) |
| 1400 | return A; |
| 1401 | // A mix of PartialAlias and MustAlias is PartialAlias. |
| 1402 | if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) || |
| 1403 | (B == AliasResult::PartialAlias && A == AliasResult::MustAlias)) |
| 1404 | return AliasResult::PartialAlias; |
| 1405 | // Otherwise, we don't know anything. |
| 1406 | return AliasResult::MayAlias; |
| 1407 | } |
| 1408 | |
| 1409 | /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction |
| 1410 | /// against another. |
| 1411 | AliasResult |
| 1412 | BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize, |
| 1413 | const Value *V2, LocationSize V2Size, |
| 1414 | AAQueryInfo &AAQI) { |
| 1415 | // If the values are Selects with the same condition, we can do a more precise |
| 1416 | // check: just check for aliases between the values on corresponding arms. |
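| | // For example (hypothetical IR): given %a = select i1 %c, ptr %p, ptr %q
| | // and %b = select i1 %c, ptr %x, ptr %y, it suffices to check
| | // alias(%p, %x) and alias(%q, %y), as both selects take the same arm.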
| 1417 | if (const SelectInst *SI2 = dyn_cast<SelectInst>(Val: V2)) |
| 1418 | if (isValueEqualInPotentialCycles(V1: SI->getCondition(), V2: SI2->getCondition(), |
| 1419 | AAQI)) { |
| 1420 | AliasResult Alias = |
| 1421 | AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize), |
| 1422 | LocB: MemoryLocation(SI2->getTrueValue(), V2Size), AAQI); |
| 1423 | if (Alias == AliasResult::MayAlias) |
| 1424 | return AliasResult::MayAlias; |
| 1425 | AliasResult ThisAlias = |
| 1426 | AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize), |
| 1427 | LocB: MemoryLocation(SI2->getFalseValue(), V2Size), AAQI); |
| 1428 | return MergeAliasResults(A: ThisAlias, B: Alias); |
| 1429 | } |
| 1430 | |
| 1431 | // If both arms of the Select node NoAlias or MustAlias V2, then return
| 1432 | // NoAlias / MustAlias. Otherwise, return MayAlias.
| 1433 | AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize), |
| 1434 | LocB: MemoryLocation(V2, V2Size), AAQI); |
| 1435 | if (Alias == AliasResult::MayAlias) |
| 1436 | return AliasResult::MayAlias; |
| 1437 | |
| 1438 | AliasResult ThisAlias = |
| 1439 | AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize), |
| 1440 | LocB: MemoryLocation(V2, V2Size), AAQI); |
| 1441 | return MergeAliasResults(A: ThisAlias, B: Alias); |
| 1442 | } |
| 1443 | |
| 1444 | /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against |
| 1445 | /// another. |
| 1446 | AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize, |
| 1447 | const Value *V2, LocationSize V2Size, |
| 1448 | AAQueryInfo &AAQI) { |
| 1449 | if (!PN->getNumIncomingValues()) |
| 1450 | return AliasResult::NoAlias; |
| 1451 | // If the values are PHIs in the same block, we can do a more precise |
| 1452 | // as well as efficient check: just check for aliases between the values |
| 1453 | // on corresponding edges. Don't do this if we are analyzing across |
| 1454 | // iterations, as we may pick a different phi entry in different iterations. |
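| | // For example (hypothetical IR): for two phis in the same block,
| | //   %p = phi ptr [ %a, %bb1 ], [ %b, %bb2 ]
| | //   %q = phi ptr [ %x, %bb1 ], [ %y, %bb2 ]
| | // it suffices to check alias(%a, %x) and alias(%b, %y).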
| 1455 | if (const PHINode *PN2 = dyn_cast<PHINode>(Val: V2)) |
| 1456 | if (PN2->getParent() == PN->getParent() && !AAQI.MayBeCrossIteration) { |
| 1457 | std::optional<AliasResult> Alias; |
| 1458 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
| 1459 | AliasResult ThisAlias = AAQI.AAR.alias( |
| 1460 | LocA: MemoryLocation(PN->getIncomingValue(i), PNSize), |
| 1461 | LocB: MemoryLocation( |
| 1462 | PN2->getIncomingValueForBlock(BB: PN->getIncomingBlock(i)), V2Size), |
| 1463 | AAQI); |
| 1464 | if (Alias) |
| 1465 | *Alias = MergeAliasResults(A: *Alias, B: ThisAlias); |
| 1466 | else |
| 1467 | Alias = ThisAlias; |
| 1468 | if (*Alias == AliasResult::MayAlias) |
| 1469 | break; |
| 1470 | } |
| 1471 | return *Alias; |
| 1472 | } |
| 1473 | |
| 1474 | SmallVector<Value *, 4> V1Srcs; |
| 1475 | // If a phi operand recurses back to the phi, we can still determine NoAlias |
| 1476 | // if we don't alias the underlying objects of the other phi operands, as we |
| 1477 | // know that the recursive phi needs to be based on them in some way. |
| 1478 | bool isRecursive = false; |
| 1479 | auto CheckForRecPhi = [&](Value *PV) { |
| 1480 | if (!EnableRecPhiAnalysis) |
| 1481 | return false; |
| 1482 | if (getUnderlyingObject(V: PV) == PN) { |
| 1483 | isRecursive = true; |
| 1484 | return true; |
| 1485 | } |
| 1486 | return false; |
| 1487 | }; |
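| | // The canonical recursive case (hypothetical IR) is a pointer induction
| | // variable:
| | //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
| | //   %p.next = getelementptr i8, ptr %p, i64 4
| | // getUnderlyingObject(%p.next) is %p itself, so %base remains the only
| | // non-recursive source.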
| 1488 | |
| 1489 | SmallPtrSet<Value *, 4> UniqueSrc; |
| 1490 | Value *OnePhi = nullptr; |
| 1491 | for (Value *PV1 : PN->incoming_values()) { |
| 1492 | // Skip the phi itself being the incoming value. |
| 1493 | if (PV1 == PN) |
| 1494 | continue; |
| 1495 | |
| 1496 | if (isa<PHINode>(Val: PV1)) { |
| 1497 | if (OnePhi && OnePhi != PV1) { |
| 1498 | // To control potential compile-time explosion, we choose to be
| 1499 | // conservative when we have more than one Phi input. It is important
| 1500 | // that we handle the single phi case as that lets us handle LCSSA |
| 1501 | // phi nodes and (combined with the recursive phi handling) simple |
| 1502 | // pointer induction variable patterns. |
| 1503 | return AliasResult::MayAlias; |
| 1504 | } |
| 1505 | OnePhi = PV1; |
| 1506 | } |
| 1507 | |
| 1508 | if (CheckForRecPhi(PV1)) |
| 1509 | continue; |
| 1510 | |
| 1511 | if (UniqueSrc.insert(Ptr: PV1).second) |
| 1512 | V1Srcs.push_back(Elt: PV1); |
| 1513 | } |
| 1514 | |
| 1515 | if (OnePhi && UniqueSrc.size() > 1) |
| 1516 | // Out of an abundance of caution, allow only the trivial LCSSA and
| 1517 | // recursive phi cases.
| 1518 | return AliasResult::MayAlias; |
| 1519 | |
| 1520 | // If V1Srcs is empty, the phi has no underlying non-phi value. This should
| 1521 | // only be possible in blocks unreachable from the entry block, but return
| 1522 | // MayAlias just in case.
| 1523 | if (V1Srcs.empty()) |
| 1524 | return AliasResult::MayAlias; |
| 1525 | |
| 1526 | // If this PHI node is recursive, indicate that the pointer may be moved |
| 1527 | // across iterations. We can only prove NoAlias if different underlying |
| 1528 | // objects are involved. |
| 1529 | if (isRecursive) |
| 1530 | PNSize = LocationSize::beforeOrAfterPointer(); |
| 1531 | |
| 1532 | // In the recursive alias queries below, we may compare values from two |
| 1533 | // different loop iterations. |
| 1534 | SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true); |
| 1535 | |
| 1536 | AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(V1Srcs[0], PNSize), |
| 1537 | LocB: MemoryLocation(V2, V2Size), AAQI); |
| 1538 | |
| 1539 | // Early exit if the check of the first PHI source against V2 is MayAlias. |
| 1540 | // Other results are not possible. |
| 1541 | if (Alias == AliasResult::MayAlias) |
| 1542 | return AliasResult::MayAlias; |
| 1543 | // With recursive phis, we cannot guarantee that MustAlias/PartialAlias
| 1544 | // remains valid for all elements, so we conservatively return MayAlias.
| 1545 | if (isRecursive && Alias != AliasResult::NoAlias) |
| 1546 | return AliasResult::MayAlias; |
| 1547 | |
| 1548 | // If all sources of the PHI node NoAlias or MustAlias V2, then return
| 1549 | // NoAlias / MustAlias. Otherwise, return MayAlias.
| 1550 | for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) { |
| 1551 | Value *V = V1Srcs[i]; |
| 1552 | |
| 1553 | AliasResult ThisAlias = AAQI.AAR.alias( |
| 1554 | LocA: MemoryLocation(V, PNSize), LocB: MemoryLocation(V2, V2Size), AAQI); |
| 1555 | Alias = MergeAliasResults(A: ThisAlias, B: Alias); |
| 1556 | if (Alias == AliasResult::MayAlias) |
| 1557 | break; |
| 1558 | } |
| 1559 | |
| 1560 | return Alias; |
| 1561 | } |
| 1562 | |
| 1563 | // Return true for an Argument or extractvalue(Argument). These are all known
| 1564 | // not to alias function-local objects and can arise from coerced function
| 1565 | // arguments.
| 1566 | static bool isArgumentOrArgumentLike(const Value *V) { |
| 1567 | if (isa<Argument>(Val: V)) |
| 1568 | return true; |
| 1569 | auto *E = dyn_cast<ExtractValueInst>(Val: V); |
| 1570 | return E && isa<Argument>(Val: E->getOperand(i_nocapture: 0)); |
| 1571 | } |
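| | // For example (hypothetical IR), an aggregate argument coerced into
| | // registers may reach us as
| | //   %p = extractvalue { ptr, ptr } %agg, 0
| | // which still denotes caller-provided memory, never a local object.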
| 1572 | |
| 1573 | /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as |
| 1574 | /// array references. |
| 1575 | AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size, |
| 1576 | const Value *V2, LocationSize V2Size, |
| 1577 | AAQueryInfo &AAQI, |
| 1578 | const Instruction *CtxI) { |
| 1579 | // If either of the memory references is empty, it doesn't matter what the |
| 1580 | // pointer values are. |
| 1581 | if (V1Size.isZero() || V2Size.isZero()) |
| 1582 | return AliasResult::NoAlias; |
| 1583 | |
| 1584 | // Strip off any casts if they exist. |
| 1585 | V1 = V1->stripPointerCastsForAliasAnalysis(); |
| 1586 | V2 = V2->stripPointerCastsForAliasAnalysis(); |
| 1587 | |
| 1588 | // If V1 or V2 is undef, the result is NoAlias because we can always pick a |
| 1589 | // value for undef that aliases nothing in the program. |
| 1590 | if (isa<UndefValue>(Val: V1) || isa<UndefValue>(Val: V2)) |
| 1591 | return AliasResult::NoAlias; |
| 1592 | |
| 1593 | // Are we checking for alias of the same value? |
| 1594 | // Because we look 'through' phi nodes, we could look at "Value" pointers from |
| 1595 | // different iterations. We must therefore make sure that this is not the |
| 1596 | // case. The function isValueEqualInPotentialCycles ensures that this cannot |
| 1597 | // happen by looking at the visited phi nodes and making sure they cannot |
| 1598 | // reach the value. |
| 1599 | if (isValueEqualInPotentialCycles(V1, V2, AAQI)) |
| 1600 | return AliasResult::MustAlias; |
| 1601 | |
| 1602 | // Figure out what objects these things are pointing to if we can. |
| 1603 | const Value *O1 = getUnderlyingObject(V: V1, MaxLookup: MaxLookupSearchDepth); |
| 1604 | const Value *O2 = getUnderlyingObject(V: V2, MaxLookup: MaxLookupSearchDepth); |
| 1605 | |
| 1606 | // Null values in the default address space don't point to any object, so they |
| 1607 | // don't alias any other pointer. |
| 1608 | if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O1)) |
| 1609 | if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace())) |
| 1610 | return AliasResult::NoAlias; |
| 1611 | if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O2)) |
| 1612 | if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace())) |
| 1613 | return AliasResult::NoAlias; |
| 1614 | |
| 1615 | if (O1 != O2) { |
| 1616 | // If V1/V2 point to two different objects, we know that we have no alias. |
| 1617 | if (isIdentifiedObject(V: O1) && isIdentifiedObject(V: O2)) |
| 1618 | return AliasResult::NoAlias; |
| 1619 | |
| 1620 | // Function arguments can't alias with things that are known to be
| 1621 | // unambiguously identified at the function level.
| 1622 | if ((isArgumentOrArgumentLike(V: O1) && isIdentifiedFunctionLocal(V: O2)) || |
| 1623 | (isArgumentOrArgumentLike(V: O2) && isIdentifiedFunctionLocal(V: O1))) |
| 1624 | return AliasResult::NoAlias; |
| 1625 | |
| 1626 | // If one pointer is the result of a call/invoke or load and the other is a |
| 1627 | // non-escaping local object within the same function, then we know the |
| 1628 | // object couldn't escape to a point where the call could return it. |
| 1629 | // |
| 1630 | // Note that if the pointers are in different functions, there are a
| 1631 | // variety of complications. A call with a nocapture argument may still
| 1632 | // temporarily store the nocapture argument's value in a temporary memory
| 1633 | // location if that memory location doesn't escape. Or it may pass a
| 1634 | // nocapture value to other functions as long as they don't capture it.
| 1635 | if (isEscapeSource(V: O1) && |
| 1636 | capturesNothing(CC: AAQI.CA->getCapturesBefore( |
| 1637 | Object: O2, I: dyn_cast<Instruction>(Val: O1), /*OrAt*/ true))) |
| 1638 | return AliasResult::NoAlias; |
| 1639 | if (isEscapeSource(V: O2) && |
| 1640 | capturesNothing(CC: AAQI.CA->getCapturesBefore( |
| 1641 | Object: O1, I: dyn_cast<Instruction>(Val: O2), /*OrAt*/ true))) |
| 1642 | return AliasResult::NoAlias; |
| 1643 | } |
| 1644 | |
| 1645 | // If the size of one access is larger than the entire object on the other |
| 1646 | // side, then we know such behavior is undefined and can assume no alias. |
| 1647 | bool NullIsValidLocation = NullPointerIsDefined(F: &F); |
| 1648 | if ((isObjectSmallerThan( |
| 1649 | V: O2, Size: getMinimalExtentFrom(V: *V1, LocSize: V1Size, DL, NullIsValidLoc: NullIsValidLocation), DL, |
| 1650 | TLI, NullIsValidLoc: NullIsValidLocation)) || |
| 1651 | (isObjectSmallerThan( |
| 1652 | V: O1, Size: getMinimalExtentFrom(V: *V2, LocSize: V2Size, DL, NullIsValidLoc: NullIsValidLocation), DL, |
| 1653 | TLI, NullIsValidLoc: NullIsValidLocation))) |
| 1654 | return AliasResult::NoAlias; |
| 1655 | |
| 1656 | if (EnableSeparateStorageAnalysis) { |
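| | // A separate_storage assumption looks like (hypothetical IR):
| | //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
| | // and asserts that %p and %q point into disjoint allocations.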
| 1657 | for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(V: O1)) { |
| 1658 | if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx) |
| 1659 | continue; |
| 1660 | |
| 1661 | AssumeInst *Assume = cast<AssumeInst>(Val&: Elem); |
| 1662 | OperandBundleUse OBU = Assume->getOperandBundleAt(Index: Elem.Index); |
| 1663 | if (OBU.getTagName() == "separate_storage") {
| 1664 | assert(OBU.Inputs.size() == 2); |
| 1665 | const Value *Hint1 = OBU.Inputs[0].get(); |
| 1666 | const Value *Hint2 = OBU.Inputs[1].get(); |
| 1667 | // This is often a no-op; instcombine rewrites this for us. No-op |
| 1668 | // getUnderlyingObject calls are fast, though. |
| 1669 | const Value *HintO1 = getUnderlyingObject(V: Hint1); |
| 1670 | const Value *HintO2 = getUnderlyingObject(V: Hint2); |
| 1671 | |
| 1672 | DominatorTree *DT = getDT(AAQI); |
| 1673 | auto ValidAssumeForPtrContext = [&](const Value *Ptr) { |
| 1674 | if (const Instruction *PtrI = dyn_cast<Instruction>(Val: Ptr)) { |
| 1675 | return isValidAssumeForContext(I: Assume, CxtI: PtrI, DT, |
| 1676 | /* AllowEphemerals */ true); |
| 1677 | } |
| 1678 | if (const Argument *PtrA = dyn_cast<Argument>(Val: Ptr)) { |
| 1679 | const Instruction *FirstI = |
| 1680 | &*PtrA->getParent()->getEntryBlock().begin(); |
| 1681 | return isValidAssumeForContext(I: Assume, CxtI: FirstI, DT, |
| 1682 | /* AllowEphemerals */ true); |
| 1683 | } |
| 1684 | return false; |
| 1685 | }; |
| 1686 | |
| 1687 | if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) { |
| 1688 | // Note that we go back to V1 and V2 for the |
| 1689 | // ValidAssumeForPtrContext checks; they're dominated by O1 and O2, |
| 1690 | // so strictly more assumptions are valid for them. |
| 1691 | if ((CtxI && isValidAssumeForContext(I: Assume, CxtI: CtxI, DT, |
| 1692 | /* AllowEphemerals */ true)) || |
| 1693 | ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) { |
| 1694 | return AliasResult::NoAlias; |
| 1695 | } |
| 1696 | } |
| 1697 | } |
| 1698 | } |
| 1699 | } |
| 1700 | |
| 1701 | // If one of the accesses may be before the accessed pointer, canonicalize
| 1702 | // this by using unknown after-pointer sizes for both accesses. This is
| 1703 | // equivalent, because regardless of which pointer is lower, one of them
| 1704 | // will always come after the other, as long as the underlying objects aren't
| 1705 | // disjoint. We do this so that the rest of BasicAA does not have to deal |
| 1706 | // with accesses before the base pointer, and to improve cache utilization by |
| 1707 | // merging equivalent states. |
| 1708 | if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) { |
| 1709 | V1Size = LocationSize::afterPointer(); |
| 1710 | V2Size = LocationSize::afterPointer(); |
| 1711 | } |
| 1712 | |
| 1713 | // FIXME: If this depth limit is hit, then we may cache sub-optimal results |
| 1714 | // for recursive queries. For this reason, this limit is chosen to be large |
| 1715 | // enough to be very rarely hit, while still being small enough to avoid |
| 1716 | // stack overflows. |
| 1717 | if (AAQI.Depth >= 512) |
| 1718 | return AliasResult::MayAlias; |
| 1719 | |
| 1720 | // Check the cache before climbing up use-def chains. This also terminates |
| 1721 | // otherwise infinitely recursive queries. Include MayBeCrossIteration in the |
| 1722 | // cache key, because some cases where MayBeCrossIteration==false returns |
| 1723 | // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true. |
| 1724 | AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration}, |
| 1725 | {V2, V2Size, AAQI.MayBeCrossIteration}); |
| 1726 | const bool Swapped = V1 > V2; |
| 1727 | if (Swapped) |
| 1728 | std::swap(a&: Locs.first, b&: Locs.second); |
| 1729 | const auto &Pair = AAQI.AliasCache.try_emplace( |
| 1730 | Key: Locs, Args: AAQueryInfo::CacheEntry{.Result: AliasResult::NoAlias, .NumAssumptionUses: 0}); |
| 1731 | if (!Pair.second) { |
| 1732 | auto &Entry = Pair.first->second; |
| 1733 | if (!Entry.isDefinitive()) { |
| 1734 | // Remember that we used an assumption. This may either be a direct use |
| 1735 | // of an assumption, or a use of an entry that may itself be based on an |
| 1736 | // assumption. |
| 1737 | ++AAQI.NumAssumptionUses; |
| 1738 | if (Entry.isAssumption()) |
| 1739 | ++Entry.NumAssumptionUses; |
| 1740 | } |
| 1741 | // Cache contains sorted {V1,V2} pairs, but we should return the original order.
| 1742 | auto Result = Entry.Result; |
| 1743 | Result.swap(DoSwap: Swapped); |
| 1744 | return Result; |
| 1745 | } |
| 1746 | |
| 1747 | int OrigNumAssumptionUses = AAQI.NumAssumptionUses; |
| 1748 | unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size(); |
| 1749 | AliasResult Result = |
| 1750 | aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2); |
| 1751 | |
| 1752 | auto It = AAQI.AliasCache.find(Val: Locs); |
| 1753 | assert(It != AAQI.AliasCache.end() && "Must be in cache");
| 1754 | auto &Entry = It->second; |
| 1755 | |
| 1756 | // Check whether a NoAlias assumption has been used, but disproven. |
| 1757 | bool AssumptionDisproven = |
| 1758 | Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias; |
| 1759 | if (AssumptionDisproven) |
| 1760 | Result = AliasResult::MayAlias; |
| 1761 | |
| 1762 | // This is a definitive result now, when considered as a root query. |
| 1763 | AAQI.NumAssumptionUses -= Entry.NumAssumptionUses; |
| 1764 | Entry.Result = Result; |
| 1765 | // Cache contains sorted {V1,V2} pairs. |
| 1766 | Entry.Result.swap(DoSwap: Swapped); |
| 1767 | |
| 1768 | // If the assumption has been disproven, remove any results that may have |
| 1769 | // been based on this assumption. Do this after the Entry updates above to |
| 1770 | // avoid iterator invalidation. |
| 1771 | if (AssumptionDisproven) |
| 1772 | while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults) |
| 1773 | AAQI.AliasCache.erase(Val: AAQI.AssumptionBasedResults.pop_back_val()); |
| 1774 | |
| 1775 | // The result may still be based on assumptions higher up in the chain. |
| 1776 | // Remember it, so it can be purged from the cache later. |
| 1777 | if (OrigNumAssumptionUses != AAQI.NumAssumptionUses && |
| 1778 | Result != AliasResult::MayAlias) { |
| 1779 | AAQI.AssumptionBasedResults.push_back(Elt: Locs); |
| 1780 | Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased; |
| 1781 | } else { |
| 1782 | Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive; |
| 1783 | } |
| 1784 | |
| 1785 | // Depth is incremented before this function is called, so Depth==1 indicates |
| 1786 | // a root query. |
| 1787 | if (AAQI.Depth == 1) { |
| 1788 | // Any remaining assumption based results must be based on proven |
| 1789 | // assumptions, so convert them to definitive results. |
| 1790 | for (const auto &Loc : AAQI.AssumptionBasedResults) { |
| 1791 | auto It = AAQI.AliasCache.find(Val: Loc); |
| 1792 | if (It != AAQI.AliasCache.end()) |
| 1793 | It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive; |
| 1794 | } |
| 1795 | AAQI.AssumptionBasedResults.clear(); |
| 1796 | AAQI.NumAssumptionUses = 0; |
| 1797 | } |
| 1798 | return Result; |
| 1799 | } |
| 1800 | |
| 1801 | AliasResult BasicAAResult::aliasCheckRecursive( |
| 1802 | const Value *V1, LocationSize V1Size, |
| 1803 | const Value *V2, LocationSize V2Size, |
| 1804 | AAQueryInfo &AAQI, const Value *O1, const Value *O2) { |
| 1805 | if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(Val: V1)) { |
| 1806 | AliasResult Result = aliasGEP(GEP1: GV1, V1Size, V2, V2Size, UnderlyingV1: O1, UnderlyingV2: O2, AAQI); |
| 1807 | if (Result != AliasResult::MayAlias) |
| 1808 | return Result; |
| 1809 | } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(Val: V2)) { |
| 1810 | AliasResult Result = aliasGEP(GEP1: GV2, V1Size: V2Size, V2: V1, V2Size: V1Size, UnderlyingV1: O2, UnderlyingV2: O1, AAQI); |
| 1811 | Result.swap(); |
| 1812 | if (Result != AliasResult::MayAlias) |
| 1813 | return Result; |
| 1814 | } |
| 1815 | |
| 1816 | if (const PHINode *PN = dyn_cast<PHINode>(Val: V1)) { |
| 1817 | AliasResult Result = aliasPHI(PN, PNSize: V1Size, V2, V2Size, AAQI); |
| 1818 | if (Result != AliasResult::MayAlias) |
| 1819 | return Result; |
| 1820 | } else if (const PHINode *PN = dyn_cast<PHINode>(Val: V2)) { |
| 1821 | AliasResult Result = aliasPHI(PN, PNSize: V2Size, V2: V1, V2Size: V1Size, AAQI); |
| 1822 | Result.swap(); |
| 1823 | if (Result != AliasResult::MayAlias) |
| 1824 | return Result; |
| 1825 | } |
| 1826 | |
| 1827 | if (const SelectInst *S1 = dyn_cast<SelectInst>(Val: V1)) { |
| 1828 | AliasResult Result = aliasSelect(SI: S1, SISize: V1Size, V2, V2Size, AAQI); |
| 1829 | if (Result != AliasResult::MayAlias) |
| 1830 | return Result; |
| 1831 | } else if (const SelectInst *S2 = dyn_cast<SelectInst>(Val: V2)) { |
| 1832 | AliasResult Result = aliasSelect(SI: S2, SISize: V2Size, V2: V1, V2Size: V1Size, AAQI); |
| 1833 | Result.swap(); |
| 1834 | if (Result != AliasResult::MayAlias) |
| 1835 | return Result; |
| 1836 | } |
| 1837 | |
| 1838 | // If both pointers are pointing into the same object and one of them |
| 1839 | // accesses the entire object, then the accesses must overlap in some way. |
| 1840 | if (O1 == O2) { |
| 1841 | bool NullIsValidLocation = NullPointerIsDefined(F: &F); |
| 1842 | if (V1Size.isPrecise() && V2Size.isPrecise() && |
| 1843 | (isObjectSize(V: O1, Size: V1Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation) || |
| 1844 | isObjectSize(V: O2, Size: V2Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation))) |
| 1845 | return AliasResult::PartialAlias; |
| 1846 | } |
| 1847 | |
| 1848 | return AliasResult::MayAlias; |
| 1849 | } |
| 1850 | |
| 1851 | /// Check whether two Values can be considered equivalent. |
| 1852 | /// |
| 1853 | /// If the values may come from different cycle iterations, this will also
| 1854 | /// check that the values are not part of a cycle. We have to do this because
| 1855 | /// we are looking through phi nodes; that is, we say
| 1856 | /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
| 1857 | bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V, |
| 1858 | const Value *V2, |
| 1859 | const AAQueryInfo &AAQI) { |
| 1860 | if (V != V2) |
| 1861 | return false; |
| 1862 | |
| 1863 | if (!AAQI.MayBeCrossIteration) |
| 1864 | return true; |
| 1865 | |
| 1866 | // Non-instructions and instructions in the entry block cannot be part of |
| 1867 | // a loop. |
| 1868 | const Instruction *Inst = dyn_cast<Instruction>(Val: V); |
| 1869 | if (!Inst || Inst->getParent()->isEntryBlock()) |
| 1870 | return true; |
| 1871 | |
| 1872 | return isNotInCycle(I: Inst, DT: getDT(AAQI), /*LI*/ nullptr); |
| 1873 | } |
| 1874 | |
| 1875 | /// Computes the symbolic difference between two de-composed GEPs. |
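| | /// For example (illustrative): (4*%x + 8) - (4*%x + 4) simplifies to a
| | /// constant offset of 4 with no variable indices, whereas
| | /// (4*%x + 8) - (4*%y + 4) keeps 4*%x and adds a negated 4*%y entry.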
| 1876 | void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP, |
| 1877 | const DecomposedGEP &SrcGEP, |
| 1878 | const AAQueryInfo &AAQI) { |
| 1879 | // Drop nuw flag from GEP if subtraction of constant offsets overflows in an |
| 1880 | // unsigned sense. |
| 1881 | if (DestGEP.Offset.ult(RHS: SrcGEP.Offset)) |
| 1882 | DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap(); |
| 1883 | |
| 1884 | DestGEP.Offset -= SrcGEP.Offset; |
| 1885 | for (const VariableGEPIndex &Src : SrcGEP.VarIndices) { |
| 1886 | // Find V in Dest. This is N^2, but pointer indices almost never have more
| 1887 | // than a few variable indices.
| 1888 | bool Found = false; |
| 1889 | for (auto I : enumerate(First&: DestGEP.VarIndices)) { |
| 1890 | VariableGEPIndex &Dest = I.value(); |
| 1891 | if ((!isValueEqualInPotentialCycles(V: Dest.Val.V, V2: Src.Val.V, AAQI) && |
| 1892 | !areBothVScale(V1: Dest.Val.V, V2: Src.Val.V)) || |
| 1893 | !Dest.Val.hasSameCastsAs(Other: Src.Val)) |
| 1894 | continue; |
| 1895 | |
| 1896 | // Normalize IsNegated if we're going to lose the NSW flag anyway. |
| 1897 | if (Dest.IsNegated) { |
| 1898 | Dest.Scale = -Dest.Scale; |
| 1899 | Dest.IsNegated = false; |
| 1900 | Dest.IsNSW = false; |
| 1901 | } |
| 1902 | |
| 1903 | // If we found it, subtract off Scale V's from the entry in Dest. If it |
| 1904 | // goes to zero, remove the entry. |
| 1905 | if (Dest.Scale != Src.Scale) { |
| 1906 | // Drop nuw flag from GEP if subtraction of V's Scale overflows in an |
| 1907 | // unsigned sense. |
| 1908 | if (Dest.Scale.ult(RHS: Src.Scale)) |
| 1909 | DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap(); |
| 1910 | |
| 1911 | Dest.Scale -= Src.Scale; |
| 1912 | Dest.IsNSW = false; |
| 1913 | } else { |
| 1914 | DestGEP.VarIndices.erase(CI: DestGEP.VarIndices.begin() + I.index()); |
| 1915 | } |
| 1916 | Found = true; |
| 1917 | break; |
| 1918 | } |
| 1919 | |
| 1920 | // If we didn't consume this entry, add it to the end of the Dest list. |
| 1921 | if (!Found) { |
| 1922 | VariableGEPIndex Entry = {.Val: Src.Val, .Scale: Src.Scale, .CxtI: Src.CxtI, .IsNSW: Src.IsNSW, |
| 1923 | /* IsNegated */ true}; |
| 1924 | DestGEP.VarIndices.push_back(Elt: Entry); |
| 1925 | |
| 1926 | // Drop nuw flag when we have unconsumed variable indices from SrcGEP. |
| 1927 | DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap(); |
| 1928 | } |
| 1929 | } |
| 1930 | } |
| 1931 | |
| 1932 | bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP, |
| 1933 | LocationSize MaybeV1Size, |
| 1934 | LocationSize MaybeV2Size, |
| 1935 | AssumptionCache *AC, |
| 1936 | DominatorTree *DT, |
| 1937 | const AAQueryInfo &AAQI) { |
| 1938 | if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() || |
| 1939 | !MaybeV2Size.hasValue()) |
| 1940 | return false; |
| 1941 | |
| 1942 | const uint64_t V1Size = MaybeV1Size.getValue(); |
| 1943 | const uint64_t V2Size = MaybeV2Size.getValue(); |
| 1944 | |
| 1945 | const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1]; |
| 1946 | |
| 1947 | if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Other: Var1.Val) || |
| 1948 | !Var0.hasNegatedScaleOf(Other: Var1) || |
| 1949 | Var0.Val.V->getType() != Var1.Val.V->getType()) |
| 1950 | return false; |
| 1951 | |
| 1952 | // We'll strip off the extensions of Var0 and Var1 and do another round
| 1953 | // of GetLinearExpression decomposition. For example, if Var0 is
| 1954 | // zext(%x + 1), we should get V1 == %x and V1Offset == 1.
| 1955 | |
| 1956 | LinearExpression E0 = |
| 1957 | GetLinearExpression(Val: CastedValue(Var0.Val.V), DL, Depth: 0, AC, DT); |
| 1958 | LinearExpression E1 = |
| 1959 | GetLinearExpression(Val: CastedValue(Var1.Val.V), DL, Depth: 0, AC, DT); |
| 1960 | if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(Other: E1.Val) || |
| 1961 | !isValueEqualInPotentialCycles(V: E0.Val.V, V2: E1.Val.V, AAQI)) |
| 1962 | return false; |
| 1963 | |
| 1964 | // We have a hit - Var0 and Var1 only differ by a constant offset! |
| 1965 | |
| 1966 | // If we've been sext'ed then zext'd, the maximum difference between Var0 and
| 1967 | // Var1 is possible to calculate, but we're just interested in the absolute
| 1968 | // minimum difference between the two. The minimum distance may occur due to
| 1969 | // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
| 1970 | // the minimum distance between %i and %i + 5 is 3.
| 1971 | APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff; |
| 1972 | MinDiff = APIntOps::umin(A: MinDiff, B: Wrapped); |
| 1973 | APInt MinDiffBytes = |
| 1974 | MinDiff.zextOrTrunc(width: Var0.Scale.getBitWidth()) * Var0.Scale.abs(); |
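| | // For example (illustrative): with Var0 == zext(%x + 1), Var1 == zext(%x)
| | // and a scale of 4, MinDiff is 1 and MinDiffBytes is 4, so two 4-byte
| | // accesses with a zero GEP offset cannot overlap.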
| 1975 | |
| 1976 | // We can't definitely say whether GEP1 is before or after V2 due to wrapping |
| 1977 | // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other |
| 1978 | // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and |
| 1979 | // V2Size can fit in the MinDiffBytes gap. |
| 1980 | return MinDiffBytes.uge(RHS: V1Size + GEP.Offset.abs()) && |
| 1981 | MinDiffBytes.uge(RHS: V2Size + GEP.Offset.abs()); |
| 1982 | } |
| 1983 | |
| 1984 | //===----------------------------------------------------------------------===// |
| 1985 | // BasicAliasAnalysis Pass |
| 1986 | //===----------------------------------------------------------------------===// |
| 1987 | |
| 1988 | AnalysisKey BasicAA::Key; |
| 1989 | |
| 1990 | BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) { |
| 1991 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(IR&: F); |
| 1992 | auto &AC = AM.getResult<AssumptionAnalysis>(IR&: F); |
| 1993 | auto *DT = &AM.getResult<DominatorTreeAnalysis>(IR&: F); |
| 1994 | return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT); |
| 1995 | } |
| 1996 | |
| 1997 | BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {} |
| 1998 | |
| 1999 | char BasicAAWrapperPass::ID = 0; |
| 2000 | |
| 2001 | void BasicAAWrapperPass::anchor() {} |
| 2002 | |
| 2003 | INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
| 2004 | "Basic Alias Analysis (stateless AA impl)", true, true)
| 2005 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) |
| 2006 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) |
| 2007 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) |
| 2008 | INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
| 2009 | "Basic Alias Analysis (stateless AA impl)", true, true)
| 2010 | |
| 2011 | FunctionPass *llvm::createBasicAAWrapperPass() { |
| 2012 | return new BasicAAWrapperPass(); |
| 2013 | } |
| 2014 | |
| 2015 | bool BasicAAWrapperPass::runOnFunction(Function &F) { |
| 2016 | auto &ACT = getAnalysis<AssumptionCacheTracker>(); |
| 2017 | auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>(); |
| 2018 | auto &DTWP = getAnalysis<DominatorTreeWrapperPass>(); |
| 2019 | |
| 2020 | Result.reset(p: new BasicAAResult(F.getDataLayout(), F, |
| 2021 | TLIWP.getTLI(F), ACT.getAssumptionCache(F), |
| 2022 | &DTWP.getDomTree())); |
| 2023 | |
| 2024 | return false; |
| 2025 | } |
| 2026 | |
| 2027 | void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { |
| 2028 | AU.setPreservesAll(); |
| 2029 | AU.addRequiredTransitive<AssumptionCacheTracker>(); |
| 2030 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); |
| 2031 | AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); |
| 2032 | } |
| 2033 | |