1//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the primary stateless implementation of the
10// Alias Analysis interface that implements identities (two different
11// globals cannot alias, etc), but does no stateful analysis.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/BasicAliasAnalysis.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ScopeExit.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/Statistic.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/CFG.h"
24#include "llvm/Analysis/CaptureTracking.h"
25#include "llvm/Analysis/MemoryBuiltins.h"
26#include "llvm/Analysis/MemoryLocation.h"
27#include "llvm/Analysis/TargetLibraryInfo.h"
28#include "llvm/Analysis/ValueTracking.h"
29#include "llvm/IR/Argument.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/ConstantRange.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GetElementPtrTypeIterator.h"
39#include "llvm/IR/GlobalAlias.h"
40#include "llvm/IR/GlobalVariable.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/Operator.h"
47#include "llvm/IR/PatternMatch.h"
48#include "llvm/IR/Type.h"
49#include "llvm/IR/User.h"
50#include "llvm/IR/Value.h"
51#include "llvm/InitializePasses.h"
52#include "llvm/Pass.h"
53#include "llvm/Support/Casting.h"
54#include "llvm/Support/CommandLine.h"
55#include "llvm/Support/Compiler.h"
56#include "llvm/Support/KnownBits.h"
57#include "llvm/Support/SaveAndRestore.h"
58#include <cassert>
59#include <cstdint>
60#include <cstdlib>
61#include <optional>
62#include <utility>
63
64#define DEBUG_TYPE "basicaa"
65
66using namespace llvm;
67
/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(Val: true));

/// Enable the separate-storage analysis (consumed by alias queries elsewhere
/// in this file).
static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(Val: true));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
81
82bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
83 FunctionAnalysisManager::Invalidator &Inv) {
84 // We don't care if this analysis itself is preserved, it has no state. But
85 // we need to check that the analyses it depends on have been. Note that we
86 // may be created without handles to some analyses and in that case don't
87 // depend on them.
88 if (Inv.invalidate<AssumptionAnalysis>(IR&: Fn, PA) ||
89 (DT_ && Inv.invalidate<DominatorTreeAnalysis>(IR&: Fn, PA)) ||
90 Inv.invalidate<TargetLibraryAnalysis>(IR&: Fn, PA))
91 return true;
92
93 // Otherwise this analysis result remains valid.
94 return false;
95}
96
97//===----------------------------------------------------------------------===//
98// Useful predicates
99//===----------------------------------------------------------------------===//
100
101/// Returns the size of the object specified by V or UnknownSize if unknown.
102static std::optional<TypeSize> getObjectSize(const Value *V,
103 const DataLayout &DL,
104 const TargetLibraryInfo &TLI,
105 bool NullIsValidLoc,
106 bool RoundToAlign = false) {
107 ObjectSizeOpts Opts;
108 Opts.RoundToAlign = RoundToAlign;
109 Opts.NullIsUnknownSize = NullIsValidLoc;
110 if (std::optional<TypeSize> Size = getBaseObjectSize(Ptr: V, DL, TLI: &TLI, Opts)) {
111 // FIXME: Remove this check, only exists to preserve previous behavior.
112 if (Size->isScalable())
113 return std::nullopt;
114 return Size;
115 }
116 return std::nullopt;
117}
118
119/// Returns true if we can prove that the object specified by V is smaller than
120/// Size. Bails out early unless the root object is passed as the first
121/// parameter.
122static bool isObjectSmallerThan(const Value *V, TypeSize Size,
123 const DataLayout &DL,
124 const TargetLibraryInfo &TLI,
125 bool NullIsValidLoc) {
126 // Note that the meanings of the "object" are slightly different in the
127 // following contexts:
128 // c1: llvm::getObjectSize()
129 // c2: llvm.objectsize() intrinsic
130 // c3: isObjectSmallerThan()
131 // c1 and c2 share the same meaning; however, the meaning of "object" in c3
132 // refers to the "entire object".
133 //
134 // Consider this example:
135 // char *p = (char*)malloc(100)
136 // char *q = p+80;
137 //
138 // In the context of c1 and c2, the "object" pointed by q refers to the
139 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
140 //
141 // In the context of c3, the "object" refers to the chunk of memory being
142 // allocated. So, the "object" has 100 bytes, and q points to the middle the
143 // "object". However, unless p, the root object, is passed as the first
144 // parameter, the call to isIdentifiedObject() makes isObjectSmallerThan()
145 // bail out early.
146 if (!isIdentifiedObject(V))
147 return false;
148
149 // This function needs to use the aligned object size because we allow
150 // reads a bit past the end given sufficient alignment.
151 std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
152 /*RoundToAlign*/ true);
153
154 return ObjectSize && TypeSize::isKnownLT(LHS: *ObjectSize, RHS: Size);
155}
156
157/// Return the minimal extent from \p V to the end of the underlying object,
158/// assuming the result is used in an aliasing query. E.g., we do use the query
159/// location size and the fact that null pointers cannot alias here.
160static TypeSize getMinimalExtentFrom(const Value &V,
161 const LocationSize &LocSize,
162 const DataLayout &DL,
163 bool NullIsValidLoc) {
164 // If we have dereferenceability information we know a lower bound for the
165 // extent as accesses for a lower offset would be valid. We need to exclude
166 // the "or null" part if null is a valid pointer. We can ignore frees, as an
167 // access after free would be undefined behavior.
168 bool CanBeNull, CanBeFreed;
169 uint64_t DerefBytes =
170 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
171 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
172 // If queried with a precise location size, we assume that location size to be
173 // accessed, thus valid.
174 if (LocSize.isPrecise())
175 DerefBytes = std::max(a: DerefBytes, b: LocSize.getValue().getKnownMinValue());
176 return TypeSize::getFixed(ExactSize: DerefBytes);
177}
178
179/// Returns true if we can prove that the object specified by V has size Size.
180static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
181 const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
182 std::optional<TypeSize> ObjectSize =
183 getObjectSize(V, DL, TLI, NullIsValidLoc);
184 return ObjectSize && *ObjectSize == Size;
185}
186
187/// Return true if both V1 and V2 are VScale
188static bool areBothVScale(const Value *V1, const Value *V2) {
189 return PatternMatch::match(V: V1, P: PatternMatch::m_VScale()) &&
190 PatternMatch::match(V: V2, P: PatternMatch::m_VScale());
191}
192
193//===----------------------------------------------------------------------===//
194// CaptureAnalysis implementations
195//===----------------------------------------------------------------------===//
196
197CaptureAnalysis::~CaptureAnalysis() = default;
198
// Determine which components of \p Object may be captured. This simple
// implementation ignores the context instruction \p I and \p OrAt: the cached
// answer is whether the object is captured anywhere in the function.
CaptureComponents SimpleCaptureAnalysis::getCapturesBefore(const Value *Object,
                                                           const Instruction *I,
                                                           bool OrAt) {
  // Only identified function-local objects (allocas and the like) can be
  // analyzed; everything else is conservatively treated as fully captured.
  if (!isIdentifiedFunctionLocal(V: Object))
    return CaptureComponents::Provenance;

  // Insert a conservative placeholder first; if an entry already exists,
  // reuse the previously computed result.
  auto [CacheIt, Inserted] =
      IsCapturedCache.insert(KV: {Object, CaptureComponents::Provenance});
  if (!Inserted)
    return CacheIt->second;

  // Captures via the return value are deliberately ignored here
  // (ReturnCaptures=false); the walk stops early once full provenance
  // capture is established.
  CaptureComponents Ret = PointerMayBeCaptured(
      V: Object, /*ReturnCaptures=*/false, Mask: CaptureComponents::Provenance,
      StopFn: [](CaptureComponents CC) { return capturesFullProvenance(CC); });
  // Overwrite the placeholder with the real answer.
  CacheIt->second = Ret;
  return Ret;
}
216
217static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
218 const LoopInfo *LI) {
219 BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
220 SmallVector<BasicBlock *> Succs(successors(BB));
221 return Succs.empty() ||
222 !isPotentiallyReachableFromMany(Worklist&: Succs, StopBB: BB, ExclusionSet: nullptr, DT, LI);
223}
224
// Determine which components of \p Object may be captured before instruction
// \p I, based on a cached "earliest capture" point per object. \p OrAt
// controls whether a capture at \p I itself counts as "before".
CaptureComponents
EarliestEscapeAnalysis::getCapturesBefore(const Value *Object,
                                          const Instruction *I, bool OrAt) {
  // Only identified function-local objects can be analyzed; everything else
  // is conservatively treated as fully captured.
  if (!isIdentifiedFunctionLocal(V: Object))
    return CaptureComponents::Provenance;

  // Populate the cache entry on the first query for this object.
  auto Iter = EarliestEscapes.try_emplace(Key: Object);
  if (Iter.second) {
    std::pair<Instruction *, CaptureComponents> EarliestCapture =
        FindEarliestCapture(V: Object, F&: *DT.getRoot()->getParent(),
                            /*ReturnCaptures=*/false, DT,
                            Mask: CaptureComponents::Provenance);
    if (EarliestCapture.first)
      // Record the reverse mapping so removeInstruction() can invalidate the
      // cached result if the capturing instruction is later deleted.
      Inst2Obj[EarliestCapture.first].push_back(NewVal: Object);
    Iter.first->second = EarliestCapture;
  }

  auto IsNotCapturedBefore = [&]() {
    // No capturing instruction.
    Instruction *CaptureInst = Iter.first->second.first;
    if (!CaptureInst)
      return true;

    // No context instruction means any use is capturing.
    if (!I)
      return false;

    if (I == CaptureInst) {
      // Capture at I itself counts if OrAt is set, or if the capture can
      // come back around to I through a cycle.
      if (OrAt)
        return false;
      return isNotInCycle(I, DT: &DT, LI);
    }

    // Otherwise the capture only matters if the capturing instruction can
    // reach I.
    return !isPotentiallyReachable(From: CaptureInst, To: I, ExclusionSet: nullptr, DT: &DT, LI);
  };
  if (IsNotCapturedBefore())
    return CaptureComponents::None;
  return Iter.first->second.second;
}
264
265void EarliestEscapeAnalysis::removeInstruction(Instruction *I) {
266 auto Iter = Inst2Obj.find(Val: I);
267 if (Iter != Inst2Obj.end()) {
268 for (const Value *Obj : Iter->second)
269 EarliestEscapes.erase(Val: Obj);
270 Inst2Obj.erase(Val: I);
271 }
272}
273
274//===----------------------------------------------------------------------===//
275// GetElementPtr Instruction Decomposition and Analysis
276//===----------------------------------------------------------------------===//
277
278namespace {
279/// Represents zext(sext(trunc(V))).
/// Represents zext(sext(trunc(V))).
///
/// The cast amounts are tracked symbolically so that two decomposed values
/// can be compared for equivalent cast stacks (hasSameCastsAs) and so that
/// constants and ranges can be evaluated through the casts (evaluateWith).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether trunc(V) is non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}

  /// Bit width of the value after the full cast stack has been applied.
  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  /// Same cast stack applied to a different underlying value. \p
  /// PreserveNonNeg states whether the non-negative flag is still valid for
  /// \p NewV.
  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // The zext is fully swallowed by the existing truncation:
      // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(zext(NewV)) == zext(NewV)
    // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
    // The nneg can be preserved from the inner zext here but must be dropped
    // from the outer.
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // The sext is fully swallowed by the existing truncation:
      // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
    // The nneg can be preserved on the outer zext here
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }

  /// Evaluate the cast stack on a constant: trunc first, then sext, then
  /// zext, mirroring zext(sext(trunc(V))).
  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(width: N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(width: N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(width: N.getBitWidth() + ZExtBits);
    return N;
  }

  /// Evaluate the cast stack on a constant range, additionally clamping to
  /// the non-negative half of the domain when IsNonNegative is set.
  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(BitWidth: N.getBitWidth() - TruncBits);
    if (IsNonNegative && !N.isAllNonNegative())
      N = N.intersectWith(
          CR: ConstantRange(APInt::getZero(numBits: N.getBitWidth()),
                        APInt::getSignedMinValue(numBits: N.getBitWidth())));
    if (SExtBits) N = N.signExtend(BitWidth: N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(BitWidth: N.getBitWidth() + ZExtBits);
    return N;
  }

  /// Whether the cast stack distributes over a binop with the given nowrap
  /// flags.
  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  /// Whether two casted values have equivalent cast stacks (and hence
  /// comparable results).
  bool hasSameCastsAs(const CastedValue &Other) const {
    if (V->getType() != Other.V->getType())
      return false;

    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;
    // If either CastedValue has a nneg zext then the sext/zext bits are
    // interchangable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);
    return false;
  }
};
385
/// Represents zext(sext(trunc(V))) * Scale + Offset.
/// Scale and Offset have the post-cast bit width of Val.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NUW.
  bool IsNUW;
  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNUW, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}

  /// Identity expression: Val * 1 + 0, which trivially carries both nowrap
  /// flags.
  LinearExpression(const CastedValue &Val)
      : Val(Val), IsNUW(true), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  /// Multiply the whole expression by \p Other, propagating nowrap flags
  /// conservatively.
  LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
    return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
  }
};
416}
417
418/// Analyzes the specified value as a linear expression: "A*V + B", where A and
419/// B are constant integers.
// Analyzes \p Val as a linear expression "A*V + B" (see LinearExpression),
// recursing through constants, add/sub/mul/shl/disjoint-or, and zext/sext
// casts. Returns the trivial expression Val*1 + 0 when it cannot decompose
// any further.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  // A constant folds to scale 0 with the (cast-adjusted) constant as offset.
  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val: Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(N: Const->getValue()), true, true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val: Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Val: BOp->getOperand(i_nocapture: 1))) {
      APInt RHS = Val.evaluateWith(N: RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(Val: BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      // The surrounding casts must distribute over this binop, otherwise the
      // decomposition would not be sound.
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(Val: BOp)->isDisjoint())
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
                                Depth: Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
                                Depth: Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNUW = false; // sub nuw x, y is not add nuw x, -y.
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        // Scale the whole sub-expression; mul() handles nowrap propagation.
        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
                                Depth: Depth + 1, AC, DT)
                .mul(Other: RHS, MulIsNUW: NUW, MulIsNSW: NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: NSW), DL,
                                Depth: Depth + 1, AC, DT);
        // shl by C is a multiply by 2^C on both scale and offset.
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  // Fold an outer zext/sext into the tracked cast stack and recurse on the
  // inner operand.
  if (const auto *ZExt = dyn_cast<ZExtInst>(Val: Val.V))
    return GetLinearExpression(
        Val: Val.withZExtOfValue(NewV: ZExt->getOperand(i_nocapture: 0), ZExtNonNegative: ZExt->hasNonNeg()), DL,
        Depth: Depth + 1, AC, DT);

  if (isa<SExtInst>(Val: Val.V))
    return GetLinearExpression(
        Val: Val.withSExtOfValue(NewV: cast<CastInst>(Val: Val.V)->getOperand(i_nocapture: 0)),
        DL, Depth: Depth + 1, AC, DT);

  return Val;
}
515
516namespace {
517// A linear transformation of a Value; this class represents
518// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
// One of these exists per variable (non-constant) index of a decomposed GEP.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  /// Whether this index cancels \p Other, i.e. the effective scales are
  /// exact negatives of each other once IsNegated is taken into account.
  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(OS&: dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
554}
555
556// Represents the internal structure of a GEP, decomposed into a base pointer,
557// constant offsets, and variable scaled indices.
558struct BasicAAResult::DecomposedGEP {
559 // Base pointer of the GEP
560 const Value *Base;
561 // Total constant offset from base.
562 APInt Offset;
563 // Scaled variable (non-constant) indices.
564 SmallVector<VariableGEPIndex, 4> VarIndices;
565 // Nowrap flags common to all GEP operations involved in expression.
566 GEPNoWrapFlags NWFlags = GEPNoWrapFlags::all();
567
568 void dump() const {
569 print(OS&: dbgs());
570 dbgs() << "\n";
571 }
572 void print(raw_ostream &OS) const {
573 OS << ", inbounds=" << (NWFlags.isInBounds() ? "1" : "0")
574 << ", nuw=" << (NWFlags.hasNoUnsignedWrap() ? "1" : "0")
575 << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
576 << ", VarIndices=[";
577 for (size_t i = 0; i < VarIndices.size(); i++) {
578 if (i != 0)
579 OS << ", ";
580 VarIndices[i].print(OS);
581 }
582 OS << "])";
583 }
584};
585
586
587/// If V is a symbolic pointer expression, decompose it into a base pointer
588/// with a constant offset and a number of scaled symbolic offsets.
589///
590/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
591/// in the VarIndices vector) are Value*'s that are known to be scaled by the
592/// specified amount, but which may have other unrepresented high bits. As
593/// such, the gep cannot necessarily be reconstructed from its decomposed form.
// Decompose \p V into a base pointer plus a constant offset and scaled
// variable indices (see DecomposedGEP), walking through GEPs, no-op casts,
// single-arg PHIs, and non-interposable aliases.
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(Val: V);

  // All offsets are accumulated at the index width of the starting pointer.
  unsigned IndexSize = DL.getIndexTypeSizeInBits(Ty: V->getType());
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(IndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(Val: V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      Value *NewV = Op->getOperand(i: 0);
      // Don't look through casts between address spaces with differing index
      // widths.
      if (DL.getIndexTypeSizeInBits(Ty: NewV->getType()) != IndexSize) {
        Decomposed.Base = V;
        return Decomposed;
      }
      V = NewV;
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Val: Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(Val: V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(i: 0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(Val: V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning aliasing pointer.
        // Because some analysis may assume that nocaptured pointer is not
        // returned from some special intrinsic (because function would have to
        // be marked with returns attribute), it is crucial to use this function
        // because it should be in sync with CaptureTracking. Not using it may
        // cause weird miscompilations where 2 aliasing pointers are assumed to
        // noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, MustPreserveNullness: false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track the common nowrap flags for all GEPs we see.
    Decomposed.NWFlags &= GEPOp->getNoWrapFlags();

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEP: GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Val: Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(Ty: STy)->getElementOffset(Idx: FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Val: Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(width: IndexSize);
        continue;
      }

      // Variable index with a scalable stride also cannot be analyzed.
      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
      bool NUW = GEPOp->hasNoUnsignedWrap();
      bool NonNeg = NUSW && NUW;
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          Val: CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, Depth: 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(Other: APInt(IndexSize, TypeSize), MulIsNUW: NUW, MulIsNSW: NUSW);
      Decomposed.Offset += LE.Offset;
      APInt Scale = LE.Scale;
      // If the scaling lost NUW, the common flags must drop it too.
      if (!LE.IsNUW)
        Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(V1: Decomposed.VarIndices[i].Val.V, V2: LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(Other: LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // We cannot guarantee no-wrap for the merge.
          LE.IsNSW = LE.IsNUW = false;
          Decomposed.VarIndices.erase(CI: Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // A zero combined scale contributes nothing; drop the index entirely.
      if (!!Scale) {
        VariableGEPIndex Entry = {.Val: LE.Val, .Scale: Scale, .CxtI: CxtI, .IsNSW: LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Elt: Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(i_nocapture: 0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
756
// Compute a mask of possible memory effects for \p Loc by walking its
// underlying objects: locations rooted only in constant globals and/or
// readonly+noalias arguments cannot be modified, so Mod (and possibly Ref)
// can be excluded. Falls back to ModRef whenever the walk is inconclusive.
ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  // Visited is a member used as scratch space; it must be left empty for the
  // next query, hence the scope_exit below.
  assert(Visited.empty() && "Visited must be cleared after use!");
  llvm::scope_exit _([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Elt: Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(V: Worklist.pop_back_val());
    // Deduplicate objects so select/phi fan-out cannot revisit them.
    if (!Visited.insert(Ptr: V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(Val: V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(Val: V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Val: V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(Val: V)) {
      Worklist.push_back(Elt: SI->getTrueValue());
      Worklist.push_back(Elt: SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(Val: V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(C&: Worklist, R: PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}
827
828static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
829 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: Call);
830 return II && II->getIntrinsicID() == IID;
831}
832
833/// Returns the behavior when calling the given call site.
834MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
835 AAQueryInfo &AAQI) {
836 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
837
838 if (const Function *F = dyn_cast<Function>(Val: Call->getCalledOperand())) {
839 MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
840 // Operand bundles on the call may also read or write memory, in addition
841 // to the behavior of the called function.
842 if (Call->hasReadingOperandBundles())
843 FuncME |= MemoryEffects::readOnly();
844 if (Call->hasClobberingOperandBundles())
845 FuncME |= MemoryEffects::writeOnly();
846 if (Call->isVolatile()) {
847 // Volatile operations also access inaccessible memory.
848 FuncME |= MemoryEffects::inaccessibleMemOnly();
849 }
850 Min &= FuncME;
851 }
852
853 return Min;
854}
855
856/// Returns the behavior when calling the given function. For use when the call
857/// site is not known.
858MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
859 switch (F->getIntrinsicID()) {
860 case Intrinsic::experimental_guard:
861 case Intrinsic::experimental_deoptimize:
862 // These intrinsics can read arbitrary memory, and additionally modref
863 // inaccessible memory to model control dependence.
864 return MemoryEffects::readOnly() |
865 MemoryEffects::inaccessibleMemOnly(MR: ModRefInfo::ModRef);
866 }
867
868 return F->getMemoryEffects();
869}
870
871ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
872 unsigned ArgIdx) {
873 if (Call->doesNotAccessMemory(OpNo: ArgIdx))
874 return ModRefInfo::NoModRef;
875
876 if (Call->onlyWritesMemory(OpNo: ArgIdx))
877 return ModRefInfo::Mod;
878
879 if (Call->onlyReadsMemory(OpNo: ArgIdx))
880 return ModRefInfo::Ref;
881
882 return ModRefInfo::ModRef;
883}
884
885#ifndef NDEBUG
886static const Function *getParent(const Value *V) {
887 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
888 if (!inst->getParent())
889 return nullptr;
890 return inst->getParent()->getParent();
891 }
892
893 if (const Argument *arg = dyn_cast<Argument>(V))
894 return arg->getParent();
895
896 return nullptr;
897}
898
899static bool notDifferentParent(const Value *O1, const Value *O2) {
900
901 const Function *F1 = getParent(O1);
902 const Function *F2 = getParent(O2);
903
904 return !F1 || !F2 || F1 == F2;
905}
906#endif
907
908AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
909 const MemoryLocation &LocB, AAQueryInfo &AAQI,
910 const Instruction *CtxI) {
911 assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
912 "BasicAliasAnalysis doesn't support interprocedural queries.");
913 return aliasCheck(V1: LocA.Ptr, V1Size: LocA.Size, V2: LocB.Ptr, V2Size: LocB.Size, AAQI, CtxI);
914}
915
/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
///
/// \param Call the call site being queried.
/// \param Loc  the memory location that may be accessed by the call.
/// \returns the refined mod/ref behavior of \p Call with respect to \p Loc.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(V: Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Val: Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Val: Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Kind: Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Val: Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, IID: Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // We can completely ignore inaccessible memory here, because MemoryLocations
  // can only reference accessible memory.
  auto ME = AAQI.AAR.getMemoryEffects(Call, AAQI)
                .getWithoutLoc(Loc: IRMemLocation::InaccessibleMem);
  if (ME.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  // Split the remaining effects by memory location kind; each component is
  // refined independently below and recombined at the end.
  ModRefInfo ArgMR = ME.getModRef(Loc: IRMemLocation::ArgMem);
  ModRefInfo ErrnoMR = ME.getModRef(Loc: IRMemLocation::ErrnoMem);
  ModRefInfo OtherMR = ME.getModRef(Loc: IRMemLocation::Other);

  // An identified function-local object that does not escape can only be
  // accessed via call arguments. Reduce OtherMR (which includes accesses to
  // escaped memory) based on that.
  //
  // We model calls that can return twice (setjmp) as clobbering non-escaping
  // objects, to model any accesses that may occur prior to the second return.
  // As an exception, ignore allocas, as setjmp is not required to preserve
  // non-volatile stores for them.
  if (isModOrRefSet(MRI: OtherMR) && !isa<Constant>(Val: Object) && Call != Object &&
      (isa<AllocaInst>(Val: Object) || !Call->hasFnAttr(Kind: Attribute::ReturnsTwice))) {
    CaptureComponents CC =
        AAQI.CA->getCapturesBefore(Object, I: Call, /*OrAt=*/false);
    if (capturesNothing(CC))
      OtherMR = ModRefInfo::NoModRef;
    else if (capturesReadProvenanceOnly(CC))
      // Only the pointer's provenance may have been read; the call cannot
      // write through it.
      OtherMR = ModRefInfo::Ref;
  }

  // Refine the modref info for argument memory. We only bother to do this
  // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
  // on the final result.
  if ((ArgMR | OtherMR) != OtherMR) {
    ModRefInfo NewArgMR = ModRefInfo::NoModRef;
    // Accumulate the effect of every pointer operand (arguments and operand
    // bundles) that may alias Loc.
    for (const Use &U : Call->data_ops()) {
      const Value *Arg = U;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned ArgIdx = Call->getDataOperandNo(U: &U);
      MemoryLocation ArgLoc =
          Call->isArgOperand(U: &U)
              ? MemoryLocation::getForArgument(Call, ArgIdx, TLI)
              : MemoryLocation::getBeforeOrAfter(Ptr: Arg);
      AliasResult ArgAlias = AAQI.AAR.alias(LocA: ArgLoc, LocB: Loc, AAQI, CtxI: Call);
      if (ArgAlias != AliasResult::NoAlias)
        NewArgMR |= ArgMR & AAQI.AAR.getArgModRefInfo(Call, ArgIdx);

      // Exit early if we cannot improve over the original ArgMR.
      if (NewArgMR == ArgMR)
        break;
    }
    ArgMR = NewArgMR;
  }

  ModRefInfo Result = ArgMR | OtherMR;

  // Refine accesses to errno memory.
  if ((ErrnoMR | Result) != Result) {
    if (AAQI.AAR.aliasErrno(Loc, M: Call->getModule()) != AliasResult::NoAlias) {
      // Exclusion conditions do not hold, this memory location may alias errno.
      Result |= ErrnoMR;
    }
  }

  // Only a full ModRef result can still be improved by the special cases
  // below (which return NoModRef or Ref).
  if (!isModAndRefSet(MRI: Result))
    return Result;

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(V: Call, TLI: &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: Call), LocB: Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, IID: Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}
1057
1058ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1059 const CallBase *Call2,
1060 AAQueryInfo &AAQI) {
1061 // Guard intrinsics are marked as arbitrarily writing so that proper control
1062 // dependencies are maintained but they never mods any particular memory
1063 // location.
1064 //
1065 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1066 // heap state at the point the guard is issued needs to be consistent in case
1067 // the guard invokes the "deopt" continuation.
1068
1069 // NB! This function is *not* commutative, so we special case two
1070 // possibilities for guard intrinsics.
1071
1072 if (isIntrinsicCall(Call: Call1, IID: Intrinsic::experimental_guard))
1073 return isModSet(MRI: getMemoryEffects(Call: Call2, AAQI).getModRef())
1074 ? ModRefInfo::Ref
1075 : ModRefInfo::NoModRef;
1076
1077 if (isIntrinsicCall(Call: Call2, IID: Intrinsic::experimental_guard))
1078 return isModSet(MRI: getMemoryEffects(Call: Call1, AAQI).getModRef())
1079 ? ModRefInfo::Mod
1080 : ModRefInfo::NoModRef;
1081
1082 // Be conservative.
1083 return ModRefInfo::ModRef;
1084}
1085
/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  // Fall back to a size-less query on the two underlying objects; can only
  // conclude NoAlias or MayAlias.
  auto BaseObjectsAlias = [&]() {
    AliasResult BaseAlias =
        AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: UnderlyingV1),
                       LocB: MemoryLocation::getBeforeOrAfter(Ptr: UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  };

  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(Val: V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    return BaseObjectsAlias();
  }

  // Decompose both pointers into a base pointer plus a constant offset and a
  // set of scaled variable indices.
  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(V: GEP1, DL, AC: &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V: V2, DL, AC: &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Fall back to base objects if pointers have different index widths.
  if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
    return BaseObjectsAlias();

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(a&: DecompGEP1, b&: DecompGEP2);
    std::swap(a&: V1Size, b&: V2Size);
    std::swap(a&: UnderlyingV1, b&: UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DestGEP&: DecompGEP1, SrcGEP: DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully support scalable location
  // size

  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(RHS: V2Size.getValue()) &&
      isBaseOfObject(V: DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(RHS: -V1Size.getValue()) &&
      isBaseOfObject(V: DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(LocA: MemoryLocation(DecompGEP1.Base, V1Size),
                          LocB: MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: DecompGEP1.Base),
                     LocB: MemoryLocation::getBeforeOrAfter(Ptr: DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(a&: VLeftSize, b&: VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(RHS: LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(RHS: LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(DoSwap: Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    } else {
      // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
      ConstantRange CR = getVScaleRange(F: &F, BitWidth: Off.getBitWidth());
      bool Overflow;
      APInt UpperRange = CR.getUnsignedMax().umul_ov(
          RHS: APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
      if (!Overflow && Off.uge(RHS: UpperRange))
        return AliasResult::NoAlias;
    }
  }

  // VScale Alias Analysis - Given one scalable offset between accesses and a
  // scalable typesize, we can divide each side by vscale, treating both values
  // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(V: DecompGEP1.VarIndices[0].Val.V,
                          P: PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;

    // Check if the offset is known to not overflow, if it does then attempt to
    // prove it with the known values of vscale_range.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    if (Overflows) {
      ConstantRange CR = getVScaleRange(F: &F, BitWidth: Scale.getBitWidth());
      (void)CR.getSignedMax().smul_ov(RHS: Scale, Overflow&: Overflows);
    }

    if (!Overflows) {
      // Note that we do not check that the typesize is scalable, as vscale >= 1
      // so noalias still holds so long as the dependency distance is at least
      // as big as the typesize.
      if (VLeftSize.hasValue() &&
          Scale.abs().uge(RHS: VLeftSize.getValue().getKnownMinValue()))
        return AliasResult::NoAlias;
    }
  }

  // If the difference between pointers is Offset +<nuw> Indices then we know
  // that the addition does not wrap the pointer index type (add nuw) and the
  // constant Offset is a lower bound on the distance between the pointers. We
  // can then prove NoAlias via Offset u>= VLeftSize.
  //    +                +                     +
  //    | BaseOffset     | +<nuw> Indices      |
  //    ---------------->|-------------------->|
  //    |-->V2Size       |                     |-------> V1Size
  //   LHS                                    RHS
  if (!DecompGEP1.VarIndices.empty() &&
      DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
      !V2Size.isScalable() && DecompGEP1.Offset.uge(RHS: V2Size.getValue()))
    return AliasResult::NoAlias;

  // Bail on analysing scalable LocationSize
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // We need to know both access sizes for all the following heuristics. Don't
  // try to reason about sizes larger than the index space.
  unsigned BW = DecompGEP1.Offset.getBitWidth();
  if (!V1Size.hasValue() || !V2Size.hasValue() ||
      !isUIntN(N: BW, x: V1Size.getValue()) || !isUIntN(N: BW, x: V2Size.getValue()))
    return AliasResult::MayAlias;

  // Compute the GCD of all variable-index scales (for a modular-arithmetic
  // disjointness check) and a conservative range for the total byte offset.
  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    // Without NSW, only the lowest set bit of the scale is guaranteed to
    // divide the contribution of this index.
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(numBits: Scale.getBitWidth(), BitNo: Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(A: GCD, B: ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(V: Index.Val.V, /* ForSigned */ false,
                                            UseInstrInfo: true, AC: &AC, CtxI: Index.CxtI);
    KnownBits Known = computeKnownBits(V: Index.Val.V, DL, AC: &AC, CxtI: Index.CxtI, DT);
    CR = CR.intersectWith(
        CR: ConstantRange::fromKnownBits(Known, /* Signed */ IsSigned: true),
        Type: ConstantRange::Signed);
    CR = Index.Val.evaluateWith(N: CR).sextOrTrunc(BitWidth: OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(Other: ConstantRange(Scale));
    else
      CR = CR.smul_fast(Other: ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(Other: CR);
    else
      OffsetRange = OffsetRange.add(Other: CR);
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(RHS: GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(RHS: V2Size.getValue()) &&
      (GCD - ModOffset).uge(RHS: V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // interseciton is empty, there can be no overlap.
  ConstantRange Range1 = OffsetRange.add(
      Other: ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(CR: Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
  // potentially wrapping math.
  auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
    if (Var.IsNSW)
      return true;

    int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
    // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
    // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
    // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
    int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
    if (MaxScaleValueBW <= 0)
      return false;
    return Var.Scale.ule(
        RHS: APInt::getMaxValue(numBits: MaxScaleValueBW).zext(width: Var.Scale.getBitWidth()));
  };

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(V: Var.Val.V, Q: SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Other: Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Other: Var1.Val) && !AAQI.MayBeCrossIteration &&
        MultiplyByScaleNoWrap(Var0) && MultiplyByScaleNoWrap(Var1) &&
        isKnownNonEqual(V1: Var0.Val.V, V2: Var1.Val.V,
                        SQ: SimplifyQuery(DL, DT, &AC, /*CxtI=*/Var0.CxtI
                                           ? Var0.CxtI
                                           : Var1.CxtI)))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(RHS: V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(RHS: V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(GEP: DecompGEP1, V1Size, V2Size, AC: &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}
1410
1411static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1412 // If the results agree, take it.
1413 if (A == B)
1414 return A;
1415 // A mix of PartialAlias and MustAlias is PartialAlias.
1416 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1417 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1418 return AliasResult::PartialAlias;
1419 // Otherwise, we don't know anything.
1420 return AliasResult::MayAlias;
1421}
1422
1423/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1424/// against another.
1425AliasResult
1426BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1427 const Value *V2, LocationSize V2Size,
1428 AAQueryInfo &AAQI) {
1429 // If the values are Selects with the same condition, we can do a more precise
1430 // check: just check for aliases between the values on corresponding arms.
1431 if (const SelectInst *SI2 = dyn_cast<SelectInst>(Val: V2))
1432 if (isValueEqualInPotentialCycles(V1: SI->getCondition(), V2: SI2->getCondition(),
1433 AAQI)) {
1434 AliasResult Alias =
1435 AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize),
1436 LocB: MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1437 if (Alias == AliasResult::MayAlias)
1438 return AliasResult::MayAlias;
1439 AliasResult ThisAlias =
1440 AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize),
1441 LocB: MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1442 return MergeAliasResults(A: ThisAlias, B: Alias);
1443 }
1444
1445 // If both arms of the Select node NoAlias or MustAlias V2, then returns
1446 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1447 AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize),
1448 LocB: MemoryLocation(V2, V2Size), AAQI);
1449 if (Alias == AliasResult::MayAlias)
1450 return AliasResult::MayAlias;
1451
1452 AliasResult ThisAlias =
1453 AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize),
1454 LocB: MemoryLocation(V2, V2Size), AAQI);
1455 return MergeAliasResults(A: ThisAlias, B: Alias);
1456}
1457
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
///
/// \param PN     the phi node on the first side of the query.
/// \param PNSize size of the access through \p PN.
/// \param V2     the other pointer value.
/// \param V2Size size of the access through \p V2.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // A phi with no incoming values can only occur in unreachable code; it
  // cannot alias anything.
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges. Don't do this if we are analyzing across
  // iterations, as we may pick a different phi entry in different iterations.
  if (const PHINode *PN2 = dyn_cast<PHINode>(Val: V2))
    if (PN2->getParent() == PN->getParent() && !AAQI.MayBeCrossIteration) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            LocA: MemoryLocation(PN->getIncomingValue(i), PNSize),
            LocB: MemoryLocation(
                PN2->getIncomingValueForBlock(BB: PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(A: *Alias, B: ThisAlias);
        else
          Alias = ThisAlias;
        // MayAlias cannot improve further; stop merging.
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(V: PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  // Gather the unique non-recursive incoming values; track any nested phi so
  // we can bail on all but the trivial single-phi (LCSSA-like) case.
  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(Val: PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conserviate when we have more than one Phi input.  It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(Ptr: PV1).second)
      V1Srcs.push_back(Elt: PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(V1Srcs[0], PNSize),
                                     LocB: MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid to all elements and needs to conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        LocA: MemoryLocation(V, PNSize), LocB: MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(A: ThisAlias, B: Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}
1576
1577// Return true for an Argument or extractvalue(Argument). These are all known
1578// to not alias with FunctionLocal objects and can come up from coerced function
1579// arguments.
1580static bool isArgumentOrArgumentLike(const Value *V) {
1581 if (isa<Argument>(Val: V))
1582 return true;
1583 auto *E = dyn_cast<ExtractValueInst>(Val: V);
1584 return E && isa<Argument>(Val: E->getOperand(i_nocapture: 0));
1585}
1586
/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
///
/// \p V1/\p V2 are the queried pointers with access sizes \p V1Size and
/// \p V2Size. \p CtxI, if non-null, is a context instruction that may allow
/// dominating llvm.assume "separate_storage" bundles to be used.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(Val: V1) || isa<UndefValue>(Val: V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V: V1, MaxLookup: MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V: V2, MaxLookup: MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O1))
    if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O2))
    if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(V: O1) && isIdentifiedObject(V: O2))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambigously identified at the function level.
    if ((isArgumentOrArgumentLike(V: O1) && isIdentifiedFunctionLocal(V: O2)) ||
        (isArgumentOrArgumentLike(V: O2) && isIdentifiedFunctionLocal(V: O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporary store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(V: O1) &&
        capturesNothing(CC: AAQI.CA->getCapturesBefore(
            Object: O2, I: dyn_cast<Instruction>(Val: O1), /*OrAt*/ true)))
      return AliasResult::NoAlias;
    if (isEscapeSource(V: O2) &&
        capturesNothing(CC: AAQI.CA->getCapturesBefore(
            Object: O1, I: dyn_cast<Instruction>(Val: O2), /*OrAt*/ true)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(F: &F);
  if ((isObjectSmallerThan(
          V: O2, Size: getMinimalExtentFrom(V: *V1, LocSize: V1Size, DL, NullIsValidLoc: NullIsValidLocation), DL,
          TLI, NullIsValidLoc: NullIsValidLocation)) ||
      (isObjectSmallerThan(
          V: O1, Size: getMinimalExtentFrom(V: *V2, LocSize: V2Size, DL, NullIsValidLoc: NullIsValidLocation), DL,
          TLI, NullIsValidLoc: NullIsValidLocation)))
    return AliasResult::NoAlias;

  // Try to disprove aliasing via llvm.assume "separate_storage" operand
  // bundles registered in the assumption cache for O1.
  if (EnableSeparateStorageAnalysis) {
    for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(V: O1)) {
      if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(Val&: Elem);
      OperandBundleUse OBU = Assume->getOperandBundleAt(Index: Elem.Index);
      if (OBU.getTagName() == "separate_storage") {
        assert(OBU.Inputs.size() == 2);
        const Value *Hint1 = OBU.Inputs[0].get();
        const Value *Hint2 = OBU.Inputs[1].get();
        // This is often a no-op; instcombine rewrites this for us. No-op
        // getUnderlyingObject calls are fast, though.
        const Value *HintO1 = getUnderlyingObject(V: Hint1);
        const Value *HintO2 = getUnderlyingObject(V: Hint2);

        DominatorTree *DT = getDT(AAQI);
        // An assume can be used for a pointer if it is valid at the point the
        // pointer is defined (for instructions), or at function entry (for
        // arguments). Other kinds of pointers are rejected.
        auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
          if (const Instruction *PtrI = dyn_cast<Instruction>(Val: Ptr)) {
            return isValidAssumeForContext(I: Assume, CxtI: PtrI, DT,
                                           /* AllowEphemerals */ true);
          }
          if (const Argument *PtrA = dyn_cast<Argument>(Val: Ptr)) {
            const Instruction *FirstI =
                &*PtrA->getParent()->getEntryBlock().begin();
            return isValidAssumeForContext(I: Assume, CxtI: FirstI, DT,
                                           /* AllowEphemerals */ true);
          }
          return false;
        };

        if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
          // Note that we go back to V1 and V2 for the
          // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
          // so strictly more assumptions are valid for them.
          if ((CtxI && isValidAssumeForContext(I: Assume, CxtI: CtxI, DT,
                                               /* AllowEphemerals */ true)) ||
              ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
            return AliasResult::NoAlias;
          }
        }
      }
    }
  }

  // If one the accesses may be before the accessed pointer, canonicalize this
  // by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always came after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
  // cache key, because some cases where MayBeCrossIteration==false returns
  // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  // The cache key is order-normalized by pointer value; remember whether we
  // swapped so the final result can be swapped back for the caller.
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(a&: Locs.first, b&: Locs.second);
  // Seed the cache with a tentative NoAlias entry. If the recursive walk
  // below re-reaches this same query, the tentative entry acts as an
  // assumption whose validity is checked once this (sub-)query completes.
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Key: Locs, Args: AAQueryInfo::CacheEntry{.Result: AliasResult::NoAlias, .NumAssumptionUses: 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption. This may either be a direct use
      // of an assumption, or a use of an entry that may itself be based on an
      // assumption.
      ++AAQI.NumAssumptionUses;
      if (Entry.isAssumption())
        ++Entry.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(DoSwap: Swapped);
    return Result;
  }

  // Snapshot the assumption state so we can tell afterwards whether this
  // result depended on any (still unproven) assumptions.
  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Val: Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(DoSwap: Swapped);

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(Val: AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias) {
    AAQI.AssumptionBasedResults.push_back(Elt: Locs);
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
  } else {
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
  }

  // Depth is incremented before this function is called, so Depth==1 indicates
  // a root query.
  if (AAQI.Depth == 1) {
    // Any remaining assumption based results must be based on proven
    // assumptions, so convert them to definitive results.
    for (const auto &Loc : AAQI.AssumptionBasedResults) {
      auto It = AAQI.AliasCache.find(Val: Loc);
      if (It != AAQI.AliasCache.end())
        It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
    }
    AAQI.AssumptionBasedResults.clear();
    AAQI.NumAssumptionUses = 0;
  }
  return Result;
}
1814
/// Dispatch an alias query to the subroutine matching the shape of the
/// pointer expressions: GEPs first, then PHIs, then selects. Each subroutine
/// expects the "interesting" value in its first position, so when it is V2
/// the query is issued swapped and the result swapped back. \p O1/\p O2 are
/// the underlying objects of \p V1/\p V2 as computed by the caller.
AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(Val: V1)) {
    AliasResult Result = aliasGEP(GEP1: GV1, V1Size, V2, V2Size, UnderlyingV1: O1, UnderlyingV2: O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(Val: V2)) {
    // Query with operands swapped; swap the (possibly Partial) result back.
    AliasResult Result = aliasGEP(GEP1: GV2, V1Size: V2Size, V2: V1, V2Size: V1Size, UnderlyingV1: O2, UnderlyingV2: O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(Val: V1)) {
    AliasResult Result = aliasPHI(PN, PNSize: V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(Val: V2)) {
    AliasResult Result = aliasPHI(PN, PNSize: V2Size, V2: V1, V2Size: V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(Val: V1)) {
    AliasResult Result = aliasSelect(SI: S1, SISize: V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(Val: V2)) {
    AliasResult Result = aliasSelect(SI: S2, SISize: V2Size, V2: V1, V2Size: V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(F: &F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(V: O1, Size: V1Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation) ||
         isObjectSize(V: O2, Size: V2Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  // No rule applied; be conservative.
  return AliasResult::MayAlias;
}
1864
1865AliasResult BasicAAResult::aliasErrno(const MemoryLocation &Loc,
1866 const Module *M) {
1867 // There cannot be any alias with errno if the given memory location is an
1868 // identified function-local object, or the size of the memory access is
1869 // larger than the integer size.
1870 if (Loc.Size.hasValue() &&
1871 Loc.Size.getValue().getKnownMinValue() * 8 > TLI.getIntSize())
1872 return AliasResult::NoAlias;
1873
1874 if (isIdentifiedFunctionLocal(V: getUnderlyingObject(V: Loc.Ptr)))
1875 return AliasResult::NoAlias;
1876 return AliasResult::MayAlias;
1877}
1878
1879/// Check whether two Values can be considered equivalent.
1880///
1881/// If the values may come from different cycle iterations, this will also
1882/// check that the values are not part of cycle. We have to do this because we
1883/// are looking through phi nodes, that is we say
1884/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1885bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1886 const Value *V2,
1887 const AAQueryInfo &AAQI) {
1888 if (V != V2)
1889 return false;
1890
1891 if (!AAQI.MayBeCrossIteration)
1892 return true;
1893
1894 // Non-instructions and instructions in the entry block cannot be part of
1895 // a loop.
1896 const Instruction *Inst = dyn_cast<Instruction>(Val: V);
1897 if (!Inst || Inst->getParent()->isEntryBlock())
1898 return true;
1899
1900 return isNotInCycle(I: Inst, DT: getDT(AAQI), /*LI*/ nullptr);
1901}
1902
/// Computes the symbolic difference between two de-composed GEPs, i.e.
/// updates \p DestGEP in place to represent DestGEP - SrcGEP. Matching
/// variable indices cancel (or partially cancel by scale); unmatched SrcGEP
/// indices are appended with IsNegated set. Wrap flags are conservatively
/// dropped whenever a subtraction step could wrap.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  // Drop nuw flag from GEP if subtraction of constant offsets overflows in an
  // unsigned sense.
  if (DestGEP.Offset.ult(RHS: SrcGEP.Offset))
    DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(First&: DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      // A match requires the same value (vscale counts as equal to vscale)
      // and the same sequence of casts applied to it.
      if ((!isValueEqualInPotentialCycles(V: Dest.Val.V, V2: Src.Val.V, AAQI) &&
           !areBothVScale(V1: Dest.Val.V, V2: Src.Val.V)) ||
          !Dest.Val.hasSameCastsAs(Other: Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        // Drop nuw flag from GEP if subtraction of V's Scale overflows in an
        // unsigned sense.
        if (Dest.Scale.ult(RHS: Src.Scale))
          DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        // Scales cancel exactly; the variable index disappears entirely.
        DestGEP.VarIndices.erase(CI: DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {.Val: Src.Val, .Scale: Src.Scale, .CxtI: Src.CxtI, .IsNSW: Src.IsNSW,
                                /* IsNegated */ true};
      DestGEP.VarIndices.push_back(Elt: Entry);

      // Drop nuw flag when we have unconsumed variable indices from SrcGEP.
      DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
    }
  }
}
1959
/// Heuristic for a GEP whose two variable indices use negated scales of the
/// same value and differ only by a constant offset (e.g. %x and %x + 1
/// scaled by the same stride). Returns true (NoAlias is justified) when both
/// access sizes fit inside the minimum possible distance between the two
/// pointers, accounting for wrapping arithmetic.
bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  // Only applicable with exactly two variable indices and known access sizes.
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  // The two indices must apply identical casts to same-typed values and have
  // scales that are negations of each other.
  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Other: Var1.Val) ||
      !Var0.hasNegatedScaleOf(Other: Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(Val: CastedValue(Var0.Val.V), DL, Depth: 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(Val: CastedValue(Var1.Val.V), DL, Depth: 0, AC, DT);
  // Both expressions must reduce to the same underlying value with the same
  // inner scale; only their constant offsets may differ.
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(Other: E1.Val) ||
      !isValueEqualInPotentialCycles(V: E0.Val.V, V2: E1.Val.V, AAQI))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(A: MinDiff, B: Wrapped);
  // Convert the index-space difference into bytes using the (absolute) scale.
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(width: Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(RHS: V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(RHS: V2Size + GEP.Offset.abs());
}
2011
//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

// Unique identity token keying BasicAA in the new pass manager.
AnalysisKey BasicAA::Key;
2017
2018BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
2019 auto &TLI = AM.getResult<TargetLibraryAnalysis>(IR&: F);
2020 auto &AC = AM.getResult<AssumptionAnalysis>(IR&: F);
2021 auto *DT = &AM.getResult<DominatorTreeAnalysis>(IR&: F);
2022 return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
2023}
2024
// Legacy pass manager wrapper; the actual result is built in runOnFunction.
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {}

// Pass identification token for the legacy pass manager.
char BasicAAWrapperPass::ID = 0;

// Out-of-line anchor pinning the class's vtable to this translation unit.
void BasicAAWrapperPass::anchor() {}
2030
// Register the legacy pass under "basic-aa" together with the analyses it
// transitively depends on (see getAnalysisUsage below).
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)
2038
2039FunctionPass *llvm::createBasicAAWrapperPass() {
2040 return new BasicAAWrapperPass();
2041}
2042
2043bool BasicAAWrapperPass::runOnFunction(Function &F) {
2044 auto &ACT = getAnalysis<AssumptionCacheTracker>();
2045 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
2046 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
2047
2048 Result.reset(p: new BasicAAResult(F.getDataLayout(), F,
2049 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
2050 &DTWP.getDomTree()));
2051
2052 return false;
2053}
2054
2055void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2056 AU.setPreservesAll();
2057 AU.addRequiredTransitive<AssumptionCacheTracker>();
2058 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2059 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
2060}
2061