1//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the primary stateless implementation of the
10// Alias Analysis interface that implements identities (two different
11// globals cannot alias, etc), but does no stateful analysis.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/BasicAliasAnalysis.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ScopeExit.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/Statistic.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/CFG.h"
24#include "llvm/Analysis/CaptureTracking.h"
25#include "llvm/Analysis/MemoryBuiltins.h"
26#include "llvm/Analysis/MemoryLocation.h"
27#include "llvm/Analysis/TargetLibraryInfo.h"
28#include "llvm/Analysis/ValueTracking.h"
29#include "llvm/IR/Argument.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/ConstantRange.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/CycleInfo.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Dominators.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/GetElementPtrTypeIterator.h"
40#include "llvm/IR/GlobalAlias.h"
41#include "llvm/IR/GlobalVariable.h"
42#include "llvm/IR/InstrTypes.h"
43#include "llvm/IR/Instruction.h"
44#include "llvm/IR/Instructions.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/Operator.h"
48#include "llvm/IR/PatternMatch.h"
49#include "llvm/IR/Type.h"
50#include "llvm/IR/User.h"
51#include "llvm/IR/Value.h"
52#include "llvm/InitializePasses.h"
53#include "llvm/Pass.h"
54#include "llvm/Support/Casting.h"
55#include "llvm/Support/CommandLine.h"
56#include "llvm/Support/Compiler.h"
57#include "llvm/Support/KnownBits.h"
58#include "llvm/Support/SaveAndRestore.h"
59#include <cassert>
60#include <cstdint>
61#include <cstdlib>
62#include <optional>
63#include <utility>
64
65#define DEBUG_TYPE "basicaa"
66
67using namespace llvm;
68
/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(Val: true));

/// Enable the separate-storage analysis (controls whether separate-storage
/// assumptions are consulted; see the use sites below in this file).
static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(Val: true));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. Hitting the limit reduces the precision of basic alias
/// analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
82
83bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
84 FunctionAnalysisManager::Invalidator &Inv) {
85 // We don't care if this analysis itself is preserved, it has no state. But
86 // we need to check that the analyses it depends on have been. Note that we
87 // may be created without handles to some analyses and in that case don't
88 // depend on them.
89 if (Inv.invalidate<AssumptionAnalysis>(IR&: Fn, PA) ||
90 (DT_ && Inv.invalidate<DominatorTreeAnalysis>(IR&: Fn, PA)) ||
91 Inv.invalidate<TargetLibraryAnalysis>(IR&: Fn, PA))
92 return true;
93
94 // Otherwise this analysis result remains valid.
95 return false;
96}
97
98//===----------------------------------------------------------------------===//
99// Useful predicates
100//===----------------------------------------------------------------------===//
101
102/// Returns the size of the object specified by V or UnknownSize if unknown.
103static std::optional<TypeSize> getObjectSize(const Value *V,
104 const DataLayout &DL,
105 const TargetLibraryInfo &TLI,
106 bool NullIsValidLoc,
107 bool RoundToAlign = false) {
108 ObjectSizeOpts Opts;
109 Opts.RoundToAlign = RoundToAlign;
110 Opts.NullIsUnknownSize = NullIsValidLoc;
111 if (std::optional<TypeSize> Size = getBaseObjectSize(Ptr: V, DL, TLI: &TLI, Opts)) {
112 // FIXME: Remove this check, only exists to preserve previous behavior.
113 if (Size->isScalable())
114 return std::nullopt;
115 return Size;
116 }
117 return std::nullopt;
118}
119
120/// Returns true if we can prove that the object specified by V is smaller than
121/// Size. Bails out early unless the root object is passed as the first
122/// parameter.
123static bool isObjectSmallerThan(const Value *V, TypeSize Size,
124 const DataLayout &DL,
125 const TargetLibraryInfo &TLI,
126 bool NullIsValidLoc) {
127 // Note that the meanings of the "object" are slightly different in the
128 // following contexts:
129 // c1: llvm::getObjectSize()
130 // c2: llvm.objectsize() intrinsic
131 // c3: isObjectSmallerThan()
132 // c1 and c2 share the same meaning; however, the meaning of "object" in c3
133 // refers to the "entire object".
134 //
135 // Consider this example:
136 // char *p = (char*)malloc(100)
137 // char *q = p+80;
138 //
139 // In the context of c1 and c2, the "object" pointed by q refers to the
140 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
141 //
142 // In the context of c3, the "object" refers to the chunk of memory being
143 // allocated. So, the "object" has 100 bytes, and q points to the middle the
144 // "object". However, unless p, the root object, is passed as the first
145 // parameter, the call to isIdentifiedObject() makes isObjectSmallerThan()
146 // bail out early.
147 if (!isIdentifiedObject(V))
148 return false;
149
150 // This function needs to use the aligned object size because we allow
151 // reads a bit past the end given sufficient alignment.
152 std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
153 /*RoundToAlign*/ true);
154
155 return ObjectSize && TypeSize::isKnownLT(LHS: *ObjectSize, RHS: Size);
156}
157
158/// Return the minimal extent from \p V to the end of the underlying object,
159/// assuming the result is used in an aliasing query. E.g., we do use the query
160/// location size and the fact that null pointers cannot alias here.
161static TypeSize getMinimalExtentFrom(const Value &V,
162 const LocationSize &LocSize,
163 const DataLayout &DL,
164 bool NullIsValidLoc) {
165 // If we have dereferenceability information we know a lower bound for the
166 // extent as accesses for a lower offset would be valid. We need to exclude
167 // the "or null" part if null is a valid pointer. We can ignore frees, as an
168 // access after free would be undefined behavior.
169 bool CanBeNull, CanBeFreed;
170 uint64_t DerefBytes =
171 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
172 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
173 // If queried with a precise location size, we assume that location size to be
174 // accessed, thus valid.
175 if (LocSize.isPrecise())
176 DerefBytes = std::max(a: DerefBytes, b: LocSize.getValue().getKnownMinValue());
177 return TypeSize::getFixed(ExactSize: DerefBytes);
178}
179
180/// Returns true if we can prove that the object specified by V has size Size.
181static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
182 const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
183 std::optional<TypeSize> ObjectSize =
184 getObjectSize(V, DL, TLI, NullIsValidLoc);
185 return ObjectSize && *ObjectSize == Size;
186}
187
188/// Return true if both V1 and V2 are VScale
189static bool areBothVScale(const Value *V1, const Value *V2) {
190 return PatternMatch::match(V: V1, P: PatternMatch::m_VScale()) &&
191 PatternMatch::match(V: V2, P: PatternMatch::m_VScale());
192}
193
194//===----------------------------------------------------------------------===//
195// CaptureAnalysis implementations
196//===----------------------------------------------------------------------===//
197
198CaptureAnalysis::~CaptureAnalysis() = default;
199
CaptureComponents SimpleCaptureAnalysis::getCapturesBefore(const Value *Object,
                                                           const Instruction *I,
                                                           bool OrAt) {
  // Only identified function-local objects can be reasoned about; everything
  // else is conservatively reported as fully captured. Note that this simple
  // analysis ignores the context instruction I and OrAt entirely: the answer
  // is a whole-function property of Object.
  if (!isIdentifiedFunctionLocal(V: Object))
    return CaptureComponents::Provenance;

  // Seed the cache with the conservative answer before computing, then reuse
  // any existing entry.
  auto [CacheIt, Inserted] =
      IsCapturedCache.insert(KV: {Object, CaptureComponents::Provenance});
  if (!Inserted)
    return CacheIt->second;

  // Compute whether Object's provenance may be captured anywhere in the
  // function (captures via return are excluded), stopping early once full
  // provenance capture has been observed, and cache the result.
  CaptureComponents Ret = PointerMayBeCaptured(
      V: Object, /*ReturnCaptures=*/false, Mask: CaptureComponents::Provenance,
      StopFn: [](CaptureComponents CC) { return capturesFullProvenance(CC); });
  CacheIt->second = Ret;
  return Ret;
}
217
218static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
219 const LoopInfo *LI, const CycleInfo *CI) {
220 if (CI)
221 return !CI->getCycle(Block: I->getParent());
222
223 BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
224 SmallVector<BasicBlock *> Succs(successors(BB));
225 return Succs.empty() ||
226 !isPotentiallyReachableFromMany(Worklist&: Succs, StopBB: BB, ExclusionSet: nullptr, DT, LI);
227}
228
CaptureComponents
EarliestEscapeAnalysis::getCapturesBefore(const Value *Object,
                                          const Instruction *I, bool OrAt) {
  // Only identified function-local objects can be reasoned about; everything
  // else is conservatively reported as fully captured.
  if (!isIdentifiedFunctionLocal(V: Object))
    return CaptureComponents::Provenance;

  // Lazily compute and cache the earliest capturing instruction (if any) for
  // Object. Also record the reverse mapping so that removeInstruction() can
  // drop the cache entry when the capturing instruction is erased.
  auto Iter = EarliestEscapes.try_emplace(Key: Object);
  if (Iter.second) {
    std::pair<Instruction *, CaptureComponents> EarliestCapture =
        FindEarliestCapture(V: Object, F&: *DT.getRoot()->getParent(),
                            /*ReturnCaptures=*/false, DT,
                            Mask: CaptureComponents::Provenance);
    if (EarliestCapture.first)
      Inst2Obj[EarliestCapture.first].push_back(NewVal: Object);
    Iter.first->second = EarliestCapture;
  }

  auto IsNotCapturedBefore = [&]() {
    // No capturing instruction.
    Instruction *CaptureInst = Iter.first->second.first;
    if (!CaptureInst)
      return true;

    // No context instruction means any use is capturing.
    if (!I)
      return false;

    if (I == CaptureInst) {
      // A capture at I itself counts when OrAt is set.
      if (OrAt)
        return false;
      // Otherwise a capture at I is only visible "before" I if I may execute
      // again, i.e. if it sits inside a cycle.
      return isNotInCycle(I, DT: &DT, LI, CI);
    }

    // The capture only matters if the capturing instruction can reach I.
    return !isPotentiallyReachable(From: CaptureInst, To: I, ExclusionSet: nullptr, DT: &DT, LI, CI);
  };
  if (IsNotCapturedBefore())
    return CaptureComponents::None;
  // Captured before I: report the cached capture components.
  return Iter.first->second.second;
}
268
269void EarliestEscapeAnalysis::removeInstruction(Instruction *I) {
270 auto Iter = Inst2Obj.find(Val: I);
271 if (Iter != Inst2Obj.end()) {
272 for (const Value *Obj : Iter->second)
273 EarliestEscapes.erase(Val: Obj);
274 Inst2Obj.erase(Val: I);
275 }
276}
277
278//===----------------------------------------------------------------------===//
279// GetElementPtr Instruction Decomposition and Analysis
280//===----------------------------------------------------------------------===//
281
namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether trunc(V) is non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}

  /// Bit width of the full expression, i.e. of V after applying the
  /// truncation and both extensions.
  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  /// Same cast stack applied to a different value. \p PreserveNonNeg controls
  /// whether the nneg fact remains valid for the new value.
  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(zext(NewV)) == zext(NewV)
    // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
    // The nneg can be preserved from the inner zext here but must be dropped
    // from the outer.
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
    // The nneg can be preserved on the outer zext here
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }

  /// Apply the cast stack (trunc, then sext, then zext) to the constant \p N.
  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(width: N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(width: N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(width: N.getBitWidth() + ZExtBits);
    return N;
  }

  /// Apply the cast stack to the range \p N, additionally intersecting with
  /// the non-negative half-range when the nneg fact is known.
  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(BitWidth: N.getBitWidth() - TruncBits);
    if (IsNonNegative && !N.isAllNonNegative())
      N = N.intersectWith(
          CR: ConstantRange(APInt::getZero(numBits: N.getBitWidth()),
                        APInt::getSignedMinValue(numBits: N.getBitWidth())));
    if (SExtBits) N = N.signExtend(BitWidth: N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(BitWidth: N.getBitWidth() + ZExtBits);
    return N;
  }

  /// Whether the cast stack distributes over a binary operation carrying the
  /// given wrap flags.
  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  /// Whether \p Other applies an equivalent cast stack to a value of the same
  /// type, so the two casted values are directly comparable.
  bool hasSameCastsAs(const CastedValue &Other) const {
    if (V->getType() != Other.V->getType())
      return false;

    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;
    // If either CastedValue has a nneg zext then the sext/zext bits are
    // interchangable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);
    return false;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NUW.
  bool IsNUW;
  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNUW, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}

  /// Identity expression: Val * 1 + 0.
  LinearExpression(const CastedValue &Val)
      : Val(Val), IsNUW(true), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  /// Multiply the whole expression by \p Other, re-deriving the nowrap flags
  /// of the result from the flags of this expression and of the multiply.
  LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
    return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
  }
};
}
421
/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Recurses through add/sub/mul/shl with a constant RHS, disjoint or, and
/// zext/sext casts (up to a fixed depth), folding the constants into Scale and
/// Offset while tracking whether every operation involved is NUW/NSW. \p AC
/// and \p DT are only threaded through to recursive calls here.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  // A constant is the degenerate expression 0 * Val + Const.
  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val: Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(N: Const->getValue()), true, true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val: Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Val: BOp->getOperand(i_nocapture: 1))) {
      APInt RHS = Val.evaluateWith(N: RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(Val: BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(Val: BOp)->isDisjoint())
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
                                Depth: Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
                                Depth: Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNUW = false; // sub nuw x, y is not add nuw x, -y.
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
                                Depth: Depth + 1, AC, DT)
                .mul(Other: RHS, MulIsNUW: NUW, MulIsNSW: NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        // shl x, C == x * 2^C; fold the shift into both Scale and Offset.
        E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: NSW), DL,
                                Depth: Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  // Look through zext/sext by folding the extension into the cast stack of
  // the casted value.
  if (const auto *ZExt = dyn_cast<ZExtInst>(Val: Val.V))
    return GetLinearExpression(
        Val: Val.withZExtOfValue(NewV: ZExt->getOperand(i_nocapture: 0), ZExtNonNegative: ZExt->hasNonNeg()), DL,
        Depth: Depth + 1, AC, DT);

  if (isa<SExtInst>(Val: Val.V))
    return GetLinearExpression(
        Val: Val.withSExtOfValue(NewV: cast<CastInst>(Val: Val.V)->getOperand(i_nocapture: 0)),
        DL, Depth: Depth + 1, AC, DT);

  return Val;
}
519
namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  /// Whether the effective scale of \p Other is the exact negation of this
  /// index's effective scale (taking the IsNegated flags into account).
  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    // One side is already negated, so equal raw scales cancel out.
    return Scale == Other.Scale;
  }

  /// Debug dump to dbgs(), followed by a newline.
  void dump() const {
    print(OS&: dbgs());
    dbgs() << "\n";
  }
  /// Print as "(V=..., zextbits=..., sextbits=..., truncbits=..., scale=...,
  /// nsw=..., negated=...)".
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
}
559
560// Represents the internal structure of a GEP, decomposed into a base pointer,
561// constant offsets, and variable scaled indices.
562struct BasicAAResult::DecomposedGEP {
563 // Base pointer of the GEP
564 const Value *Base;
565 // Total constant offset from base.
566 APInt Offset;
567 // Scaled variable (non-constant) indices.
568 SmallVector<VariableGEPIndex, 4> VarIndices;
569 // Nowrap flags common to all GEP operations involved in expression.
570 GEPNoWrapFlags NWFlags = GEPNoWrapFlags::all();
571
572 void dump() const {
573 print(OS&: dbgs());
574 dbgs() << "\n";
575 }
576 void print(raw_ostream &OS) const {
577 OS << ", inbounds=" << (NWFlags.isInBounds() ? "1" : "0")
578 << ", nuw=" << (NWFlags.hasNoUnsignedWrap() ? "1" : "0")
579 << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
580 << ", VarIndices=[";
581 for (size_t i = 0; i < VarIndices.size(); i++) {
582 if (i != 0)
583 OS << ", ";
584 VarIndices[i].print(OS);
585 }
586 OS << "])";
587 }
588};
589
590
/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(Val: V);

  // All offsets are accumulated in the index type width of the starting
  // pointer.
  unsigned IndexSize = DL.getIndexTypeSizeInBits(Ty: V->getType());
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(IndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(Val: V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      Value *NewV = Op->getOperand(i: 0);
      // Don't look through casts between address spaces with differing index
      // widths.
      if (DL.getIndexTypeSizeInBits(Ty: NewV->getType()) != IndexSize) {
        Decomposed.Base = V;
        return Decomposed;
      }
      V = NewV;
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Val: Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(Val: V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(i: 0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(Val: V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning aliasing pointer.
        // Because some analysis may assume that nocaptured pointer is not
        // returned from some special intrinsic (because function would have to
        // be marked with returns attribute), it is crucial to use this function
        // because it should be in sync with CaptureTracking. Not using it may
        // cause weird miscompilations where 2 aliasing pointers are assumed to
        // noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, MustPreserveNullness: false)) {
          V = RP;
          continue;
        }
      }

      // Not a pointer expression we can look through: this is the base.
      Decomposed.Base = V;
      return Decomposed;
    }

    // Track the common nowrap flags for all GEPs we see.
    Decomposed.NWFlags &= GEPOp->getNoWrapFlags();

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEP: GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Val: Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(Ty: STy)->getElementOffset(Idx: FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Val: Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(width: IndexSize);
        continue;
      }

      // Variable index over a scalable element type: give up as well.
      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
      bool NUW = GEPOp->hasNoUnsignedWrap();
      bool NonNeg = NUSW && NUW;
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          Val: CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, Depth: 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(Other: APInt(IndexSize, TypeSize), MulIsNUW: NUW, MulIsNSW: NUSW);
      Decomposed.Offset += LE.Offset;
      APInt Scale = LE.Scale;
      if (!LE.IsNUW)
        Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(V1: Decomposed.VarIndices[i].Val.V, V2: LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(Other: LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // We cannot guarantee no-wrap for the merge.
          LE.IsNSW = LE.IsNUW = false;
          Decomposed.VarIndices.erase(CI: Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // A zero scale contributes nothing; otherwise record the variable index.
      if (!!Scale) {
        VariableGEPIndex Entry = {.Val: LE.Val, .Scale: Scale, .CxtI: CxtI, .IsNSW: LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Elt: Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(i_nocapture: 0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
760
ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  // Visited is a member (presumably to avoid reallocating it per query —
  // confirm in the header); ensure it is cleared on every exit path.
  llvm::scope_exit _([&] { Visited.clear(); });

  // MaxLookup doubles as a budget on worklist iterations and as the limit on
  // phi operand counts below.
  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Elt: Loc.Ptr);
  // Start from "no effects possible" and accumulate effects per underlying
  // object; any unanalyzable object forces the conservative ModRef answer.
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(V: Worklist.pop_back_val());
    if (!Visited.insert(Ptr: V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(Val: V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(Val: V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Val: V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(Val: V)) {
      Worklist.push_back(Elt: SI->getTrueValue());
      Worklist.push_back(Elt: SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(Val: V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(C&: Worklist, R: PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}
831
832static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
833 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: Call);
834 return II && II->getIntrinsicID() == IID;
835}
836
837/// Returns the behavior when calling the given call site.
838MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
839 AAQueryInfo &AAQI) {
840 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
841
842 if (const Function *F = dyn_cast<Function>(Val: Call->getCalledOperand())) {
843 MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
844 // Operand bundles on the call may also read or write memory, in addition
845 // to the behavior of the called function.
846 if (Call->hasReadingOperandBundles())
847 FuncME |= MemoryEffects::readOnly();
848 if (Call->hasClobberingOperandBundles())
849 FuncME |= MemoryEffects::writeOnly();
850 if (Call->isVolatile()) {
851 // Volatile operations also access inaccessible memory.
852 FuncME |= MemoryEffects::inaccessibleMemOnly();
853 }
854 Min &= FuncME;
855 }
856
857 return Min;
858}
859
860/// Returns the behavior when calling the given function. For use when the call
861/// site is not known.
862MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
863 switch (F->getIntrinsicID()) {
864 case Intrinsic::experimental_guard:
865 case Intrinsic::experimental_deoptimize:
866 // These intrinsics can read arbitrary memory, and additionally modref
867 // inaccessible memory to model control dependence.
868 return MemoryEffects::readOnly() |
869 MemoryEffects::inaccessibleMemOnly(MR: ModRefInfo::ModRef);
870 }
871
872 return F->getMemoryEffects();
873}
874
875ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
876 unsigned ArgIdx) {
877 if (Call->doesNotAccessMemory(OpNo: ArgIdx))
878 return ModRefInfo::NoModRef;
879
880 if (Call->onlyWritesMemory(OpNo: ArgIdx))
881 return ModRefInfo::Mod;
882
883 if (Call->onlyReadsMemory(OpNo: ArgIdx))
884 return ModRefInfo::Ref;
885
886 return ModRefInfo::ModRef;
887}
888
889#ifndef NDEBUG
890static const Function *getParent(const Value *V) {
891 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
892 if (!inst->getParent())
893 return nullptr;
894 return inst->getParent()->getParent();
895 }
896
897 if (const Argument *arg = dyn_cast<Argument>(V))
898 return arg->getParent();
899
900 return nullptr;
901}
902
903static bool notDifferentParent(const Value *O1, const Value *O2) {
904
905 const Function *F1 = getParent(O1);
906 const Function *F2 = getParent(O2);
907
908 return !F1 || !F2 || F1 == F2;
909}
910#endif
911
/// Entry point for an alias query between two memory locations. Both pointers
/// must belong to the same function (checked in debug builds), since BasicAA
/// only reasons about intraprocedural properties; the real work happens in
/// aliasCheck().
AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(V1: LocA.Ptr, V1Size: LocA.Size, V2: LocB.Ptr, V2Size: LocB.Size, AAQI, CtxI);
}
919
/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
///
/// \param Call the call site being queried.
/// \param Loc  the memory location that may be accessed by the call.
/// \returns a refined mod/ref result for \p Call with respect to \p Loc.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(V: Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Val: Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Val: Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Kind: Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Val: Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, IID: Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // We can completely ignore inaccessible memory here, because MemoryLocations
  // can only reference accessible memory.
  auto ME = AAQI.AAR.getMemoryEffects(Call, AAQI)
                .getWithoutLoc(Loc: IRMemLocation::InaccessibleMem);
  if (ME.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  // Split the effects per memory-location kind so each component can be
  // refined independently below.
  ModRefInfo ArgMR = ME.getModRef(Loc: IRMemLocation::ArgMem);
  ModRefInfo ErrnoMR = ME.getModRef(Loc: IRMemLocation::ErrnoMem);
  ModRefInfo OtherMR = ME.getModRef(Loc: IRMemLocation::Other);

  // An identified function-local object that does not escape can only be
  // accessed via call arguments. Reduce OtherMR (which includes accesses to
  // escaped memory) based on that.
  //
  // We model calls that can return twice (setjmp) as clobbering non-escaping
  // objects, to model any accesses that may occur prior to the second return.
  // As an exception, ignore allocas, as setjmp is not required to preserve
  // non-volatile stores for them.
  if (isModOrRefSet(MRI: OtherMR) && !isa<Constant>(Val: Object) && Call != Object &&
      (isa<AllocaInst>(Val: Object) || !Call->hasFnAttr(Kind: Attribute::ReturnsTwice))) {
    // Only captures happening strictly before the call are relevant here.
    CaptureComponents CC =
        AAQI.CA->getCapturesBefore(Object, I: Call, /*OrAt=*/false);
    if (capturesNothing(CC))
      OtherMR = ModRefInfo::NoModRef;
    else if (capturesReadProvenanceOnly(CC))
      OtherMR = ModRefInfo::Ref;
  }

  // Refine the modref info for argument memory. We only bother to do this
  // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
  // on the final result.
  if ((ArgMR | OtherMR) != OtherMR) {
    ModRefInfo NewArgMR = ModRefInfo::NoModRef;
    for (const Use &U : Call->data_ops()) {
      const Value *Arg = U;
      // Non-pointer operands cannot access memory.
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned ArgIdx = Call->getDataOperandNo(U: &U);
      MemoryLocation ArgLoc =
          Call->isArgOperand(U: &U)
              ? MemoryLocation::getForArgument(Call, ArgIdx, TLI)
              : MemoryLocation::getBeforeOrAfter(Ptr: Arg);
      AliasResult ArgAlias = AAQI.AAR.alias(LocA: ArgLoc, LocB: Loc, AAQI, CtxI: Call);
      if (ArgAlias != AliasResult::NoAlias)
        NewArgMR |= ArgMR & AAQI.AAR.getArgModRefInfo(Call, ArgIdx);

      // Exit early if we cannot improve over the original ArgMR.
      if (NewArgMR == ArgMR)
        break;
    }
    ArgMR = NewArgMR;
  }

  ModRefInfo Result = ArgMR | OtherMR;

  // Refine accesses to errno memory.
  if ((ErrnoMR | Result) != Result) {
    if (AAQI.AAR.aliasErrno(Loc, M: Call->getModule()) != AliasResult::NoAlias) {
      // Exclusion conditions do not hold, this memory location may alias errno.
      Result |= ErrnoMR;
    }
  }

  // If the result is already only Mod or only Ref, no special case below can
  // tighten it further.
  if (!isModAndRefSet(MRI: Result))
    return Result;

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(V: Call, TLI: &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: Call), LocB: Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, IID: Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}
1061
1062ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1063 const CallBase *Call2,
1064 AAQueryInfo &AAQI) {
1065 // Guard intrinsics are marked as arbitrarily writing so that proper control
1066 // dependencies are maintained but they never mods any particular memory
1067 // location.
1068 //
1069 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1070 // heap state at the point the guard is issued needs to be consistent in case
1071 // the guard invokes the "deopt" continuation.
1072
1073 // NB! This function is *not* commutative, so we special case two
1074 // possibilities for guard intrinsics.
1075
1076 if (isIntrinsicCall(Call: Call1, IID: Intrinsic::experimental_guard))
1077 return isModSet(MRI: getMemoryEffects(Call: Call2, AAQI).getModRef())
1078 ? ModRefInfo::Ref
1079 : ModRefInfo::NoModRef;
1080
1081 if (isIntrinsicCall(Call: Call2, IID: Intrinsic::experimental_guard))
1082 return isModSet(MRI: getMemoryEffects(Call: Call1, AAQI).getModRef())
1083 ? ModRefInfo::Mod
1084 : ModRefInfo::NoModRef;
1085
1086 // Be conservative.
1087 return ModRefInfo::ModRef;
1088}
1089
/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  // Fallback query comparing only the underlying objects with unknown
  // size/offset; can only yield NoAlias or MayAlias.
  auto BaseObjectsAlias = [&]() {
    AliasResult BaseAlias =
        AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: UnderlyingV1),
                       LocB: MemoryLocation::getBeforeOrAfter(Ptr: UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  };

  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(Val: V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    return BaseObjectsAlias();
  }

  // Decompose both pointers into a base plus a constant offset and a list of
  // scaled variable indices.
  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(V: GEP1, DL, AC: &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V: V2, DL, AC: &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Fall back to base objects if pointers have different index widths.
  if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
    return BaseObjectsAlias();

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(a&: DecompGEP1, b&: DecompGEP2);
    std::swap(a&: V1Size, b&: V2Size);
    std::swap(a&: UnderlyingV1, b&: UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DestGEP&: DecompGEP1, SrcGEP: DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully support scalable location
  // size

  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(RHS: V2Size.getValue()) &&
      isBaseOfObject(V: DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(RHS: -V1Size.getValue()) &&
      isBaseOfObject(V: DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(LocA: MemoryLocation(DecompGEP1.Base, V1Size),
                          LocB: MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: DecompGEP1.Base),
                     LocB: MemoryLocation::getBeforeOrAfter(Ptr: DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // From here on the bases MustAlias: the result depends only on the offset
  // difference accumulated in DecompGEP1.
  //
  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // + +
      // | BaseOffset |
      // ---------------->|
      // |-->V1Size |-------> V2Size
      // GEP1 V2
      std::swap(a&: VLeftSize, b&: VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(RHS: LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(RHS: LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(DoSwap: Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    } else {
      // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
      ConstantRange CR = getVScaleRange(F: &F, BitWidth: Off.getBitWidth());
      bool Overflow;
      APInt UpperRange = CR.getUnsignedMax().umul_ov(
          RHS: APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
      if (!Overflow && Off.uge(RHS: UpperRange))
        return AliasResult::NoAlias;
    }
  }

  // VScale Alias Analysis - Given one scalable offset between accesses and a
  // scalable typesize, we can divide each side by vscale, treating both values
  // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(V: DecompGEP1.VarIndices[0].Val.V,
                          P: PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;

    // Check if the offset is known to not overflow, if it does then attempt to
    // prove it with the known values of vscale_range.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    if (Overflows) {
      ConstantRange CR = getVScaleRange(F: &F, BitWidth: Scale.getBitWidth());
      (void)CR.getSignedMax().smul_ov(RHS: Scale, Overflow&: Overflows);
    }

    if (!Overflows) {
      // Note that we do not check that the typesize is scalable, as vscale >= 1
      // so noalias still holds so long as the dependency distance is at least
      // as big as the typesize.
      if (VLeftSize.hasValue() &&
          Scale.abs().uge(RHS: VLeftSize.getValue().getKnownMinValue()))
        return AliasResult::NoAlias;
    }
  }

  // If the difference between pointers is Offset +<nuw> Indices then we know
  // that the addition does not wrap the pointer index type (add nuw) and the
  // constant Offset is a lower bound on the distance between the pointers. We
  // can then prove NoAlias via Offset u>= VLeftSize.
  // + + +
  // | BaseOffset | +<nuw> Indices |
  // ---------------->|-------------------->|
  // |-->V2Size | |-------> V1Size
  // LHS RHS
  if (!DecompGEP1.VarIndices.empty() &&
      DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
      !V2Size.isScalable() && DecompGEP1.Offset.uge(RHS: V2Size.getValue()))
    return AliasResult::NoAlias;

  // Bail on analysing scalable LocationSize
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // We need to know both access sizes for all the following heuristics. Don't
  // try to reason about sizes larger than the index space.
  unsigned BW = DecompGEP1.Offset.getBitWidth();
  if (!V1Size.hasValue() || !V2Size.hasValue() ||
      !isUIntN(N: BW, x: V1Size.getValue()) || !isUIntN(N: BW, x: V2Size.getValue()))
    return AliasResult::MayAlias;

  // Accumulate (a) the GCD of all variable index scales, and (b) a constant
  // range for the total offset, across all variable indices.
  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;

    KnownBits Known = computeKnownBits(V: Index.Val.V, DL, AC: &AC, CxtI: Index.CxtI, DT);

    // Without NSW only the trailing zero bits of the scale survive wrapping
    // multiplication, so fall back to the largest power of two dividing it.
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(numBits: Scale.getBitWidth(), BitNo: Scale.countr_zero());

    // If V has known trailing zeros, V is a multiple of 2^VarTZ, so
    // V*Scale is a multiple of ScaleForGCD * 2^VarTZ. Shift ScaleForGCD
    // left to account for this (trailing zeros compose additively through
    // multiplication, even in Z/2^n).
    unsigned VarTZ = Known.countMinTrailingZeros();
    if (VarTZ > 0) {
      unsigned MaxShift =
          Scale.getBitWidth() - ScaleForGCD.getSignificantBits();
      ScaleForGCD <<= std::min(a: VarTZ, b: MaxShift);
    }

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(A: GCD, B: ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(V: Index.Val.V, /* ForSigned */ false,
                                            UseInstrInfo: true, AC: &AC, CtxI: Index.CxtI);
    CR = CR.intersectWith(
        CR: ConstantRange::fromKnownBits(Known, /* Signed */ IsSigned: true),
        Type: ConstantRange::Signed);
    CR = Index.Val.evaluateWith(N: CR).sextOrTrunc(BitWidth: OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(Other: ConstantRange(Scale));
    else
      CR = CR.smul_fast(Other: ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(Other: CR);
    else
      OffsetRange = OffsetRange.add(Other: CR);
  }

  // We now have accesses at two offsets from the same base:
  // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
  // 2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(RHS: GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(RHS: V2Size.getValue()) &&
      (GCD - ModOffset).uge(RHS: V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // interseciton is empty, there can be no overlap.
  ConstantRange Range1 = OffsetRange.add(
      Other: ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(CR: Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
  // potentially wrapping math.
  auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
    if (Var.IsNSW)
      return true;

    int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
    // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
    // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
    // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
    int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
    if (MaxScaleValueBW <= 0)
      return false;
    return Var.Scale.ule(
        RHS: APInt::getMaxValue(numBits: MaxScaleValueBW).zext(width: Var.Scale.getBitWidth()));
  };

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(V: Var.Val.V, Q: SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Other: Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Other: Var1.Val) && !AAQI.MayBeCrossIteration &&
        MultiplyByScaleNoWrap(Var0) && MultiplyByScaleNoWrap(Var1) &&
        isKnownNonEqual(V1: Var0.Val.V, V2: Var1.Val.V,
                        SQ: SimplifyQuery(DL, DT, &AC, /*CxtI=*/Var0.CxtI
                                           ? Var0.CxtI
                                           : Var1.CxtI)))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(RHS: V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(RHS: V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(GEP: DecompGEP1, V1Size, V2Size, AC: &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}
1427
1428static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1429 // If the results agree, take it.
1430 if (A == B)
1431 return A;
1432 // A mix of PartialAlias and MustAlias is PartialAlias.
1433 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1434 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1435 return AliasResult::PartialAlias;
1436 // Otherwise, we don't know anything.
1437 return AliasResult::MayAlias;
1438}
1439
1440/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1441/// against another.
1442AliasResult
1443BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1444 const Value *V2, LocationSize V2Size,
1445 AAQueryInfo &AAQI) {
1446 // If the values are Selects with the same condition, we can do a more precise
1447 // check: just check for aliases between the values on corresponding arms.
1448 if (const SelectInst *SI2 = dyn_cast<SelectInst>(Val: V2))
1449 if (isValueEqualInPotentialCycles(V1: SI->getCondition(), V2: SI2->getCondition(),
1450 AAQI)) {
1451 AliasResult Alias =
1452 AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize),
1453 LocB: MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1454 if (Alias == AliasResult::MayAlias)
1455 return AliasResult::MayAlias;
1456 AliasResult ThisAlias =
1457 AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize),
1458 LocB: MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1459 return MergeAliasResults(A: ThisAlias, B: Alias);
1460 }
1461
1462 // If both arms of the Select node NoAlias or MustAlias V2, then returns
1463 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1464 AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize),
1465 LocB: MemoryLocation(V2, V2Size), AAQI);
1466 if (Alias == AliasResult::MayAlias)
1467 return AliasResult::MayAlias;
1468
1469 AliasResult ThisAlias =
1470 AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize),
1471 LocB: MemoryLocation(V2, V2Size), AAQI);
1472 return MergeAliasResults(A: ThisAlias, B: Alias);
1473}
1474
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // A degenerate PHI with no incoming values references no memory.
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges. Don't do this if we are analyzing across
  // iterations, as we may pick a different phi entry in different iterations.
  if (const PHINode *PN2 = dyn_cast<PHINode>(Val: V2))
    if (PN2->getParent() == PN->getParent() && !AAQI.MayBeCrossIteration) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            LocA: MemoryLocation(PN->getIncomingValue(i), PNSize),
            LocB: MemoryLocation(
                PN2->getIncomingValueForBlock(BB: PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(A: *Alias, B: ThisAlias);
        else
          Alias = ThisAlias;
        // MayAlias cannot improve with further merging; stop early.
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(V: PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  // Collect the distinct non-phi incoming values; bail out if more than one
  // distinct phi feeds this one.
  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(Val: PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conserviate when we have more than one Phi input. It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(Ptr: PV1).second)
      V1Srcs.push_back(Elt: PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(V1Srcs[0], PNSize),
                                     LocB: MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid to all elements and needs to conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        LocA: MemoryLocation(V, PNSize), LocB: MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(A: ThisAlias, B: Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}
1593
1594// Return true for an Argument or extractvalue(Argument). These are all known
1595// to not alias with FunctionLocal objects and can come up from coerced function
1596// arguments.
1597static bool isArgumentOrArgumentLike(const Value *V) {
1598 if (isa<Argument>(Val: V))
1599 return true;
1600 auto *E = dyn_cast<ExtractValueInst>(Val: V);
1601 return E && isa<Argument>(Val: E->getOperand(i_nocapture: 0));
1602}
1603
/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
///
/// Returns NoAlias / MustAlias / PartialAlias when provable and MayAlias
/// otherwise. Results are memoized in AAQI.AliasCache, including tentative
/// assumption-based results produced while breaking recursive (phi) queries.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(Val: V1) || isa<UndefValue>(Val: V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V: V1, MaxLookup: MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V: V2, MaxLookup: MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O1))
    if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O2))
    if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(V: O1) && isIdentifiedObject(V: O2))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isArgumentOrArgumentLike(V: O1) && isIdentifiedFunctionLocal(V: O2)) ||
        (isArgumentOrArgumentLike(V: O2) && isIdentifiedFunctionLocal(V: O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporary store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(V: O1) &&
        capturesNothing(CC: AAQI.CA->getCapturesBefore(
            Object: O2, I: dyn_cast<Instruction>(Val: O1), /*OrAt*/ true)))
      return AliasResult::NoAlias;
    if (isEscapeSource(V: O2) &&
        capturesNothing(CC: AAQI.CA->getCapturesBefore(
            Object: O1, I: dyn_cast<Instruction>(Val: O2), /*OrAt*/ true)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(F: &F);
  if ((isObjectSmallerThan(
          V: O2, Size: getMinimalExtentFrom(V: *V1, LocSize: V1Size, DL, NullIsValidLoc: NullIsValidLocation), DL,
          TLI, NullIsValidLoc: NullIsValidLocation)) ||
      (isObjectSmallerThan(
          V: O1, Size: getMinimalExtentFrom(V: *V2, LocSize: V2Size, DL, NullIsValidLoc: NullIsValidLocation), DL,
          TLI, NullIsValidLoc: NullIsValidLocation)))
    return AliasResult::NoAlias;

  // Honor llvm.assume operand bundles of the form
  // "separate_storage"(p1, p2): if the underlying objects match the hinted
  // objects (in either order) and the assume is valid at the query context,
  // the two locations cannot alias.
  if (EnableSeparateStorageAnalysis) {
    for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(V: O1)) {
      if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(Val&: Elem);
      OperandBundleUse OBU = Assume->getOperandBundleAt(Index: Elem.Index);
      if (OBU.getTagName() == "separate_storage") {
        assert(OBU.Inputs.size() == 2);
        const Value *Hint1 = OBU.Inputs[0].get();
        const Value *Hint2 = OBU.Inputs[1].get();
        // This is often a no-op; instcombine rewrites this for us. No-op
        // getUnderlyingObject calls are fast, though.
        const Value *HintO1 = getUnderlyingObject(V: Hint1);
        const Value *HintO2 = getUnderlyingObject(V: Hint2);

        DominatorTree *DT = getDT(AAQI);
        // An assume justifies the NoAlias answer if it is valid at the
        // definition point of the queried pointer itself (instruction or
        // function entry for arguments).
        auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
          if (const Instruction *PtrI = dyn_cast<Instruction>(Val: Ptr)) {
            return isValidAssumeForContext(I: Assume, CxtI: PtrI, DT,
                                           /* AllowEphemerals */ true);
          }
          if (const Argument *PtrA = dyn_cast<Argument>(Val: Ptr)) {
            const Instruction *FirstI =
                &*PtrA->getParent()->getEntryBlock().begin();
            return isValidAssumeForContext(I: Assume, CxtI: FirstI, DT,
                                           /* AllowEphemerals */ true);
          }
          return false;
        };

        if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
          // Note that we go back to V1 and V2 for the
          // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
          // so strictly more assumptions are valid for them.
          if ((CtxI && isValidAssumeForContext(I: Assume, CxtI: CtxI, DT,
                                               /* AllowEphemerals */ true)) ||
              ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
            return AliasResult::NoAlias;
          }
        }
      }
    }
  }

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
  // cache key, because some cases where MayBeCrossIteration==false returns
  // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  // Canonicalize the pair order so (A,B) and (B,A) share one cache entry.
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(a&: Locs.first, b&: Locs.second);
  // Insert a tentative NoAlias entry; a recursive query hitting this entry
  // uses it as an assumption, which is validated below.
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Key: Locs, Args: AAQueryInfo::CacheEntry{.Result: AliasResult::NoAlias, .NumAssumptionUses: 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption. This may either be a direct use
      // of an assumption, or a use of an entry that may itself be based on an
      // assumption.
      ++AAQI.NumAssumptionUses;
      if (Entry.isAssumption())
        ++Entry.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(DoSwap: Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Val: Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(DoSwap: Swapped);

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(Val: AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias) {
    AAQI.AssumptionBasedResults.push_back(Elt: Locs);
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
  } else {
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
  }

  // Depth is incremented before this function is called, so Depth==1 indicates
  // a root query.
  if (AAQI.Depth == 1) {
    // Any remaining assumption based results must be based on proven
    // assumptions, so convert them to definitive results.
    for (const auto &Loc : AAQI.AssumptionBasedResults) {
      auto It = AAQI.AliasCache.find(Val: Loc);
      if (It != AAQI.AliasCache.end())
        It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
    }
    AAQI.AssumptionBasedResults.clear();
    AAQI.NumAssumptionUses = 0;
  }
  return Result;
}
1831
/// Dispatch to the specialized alias routines based on the shape of V1/V2:
/// GEPs first, then PHIs, then selects. Each routine expects its special
/// value as the first argument, so when only V2 has the shape we swap the
/// operands for the call and swap the (asymmetric) result back afterwards.
/// Falls through to the next check whenever a routine returns MayAlias.
AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(Val: V1)) {
    AliasResult Result = aliasGEP(GEP1: GV1, V1Size, V2, V2Size, UnderlyingV1: O1, UnderlyingV2: O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(Val: V2)) {
    AliasResult Result = aliasGEP(GEP1: GV2, V1Size: V2Size, V2: V1, V2Size: V1Size, UnderlyingV1: O2, UnderlyingV2: O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(Val: V1)) {
    AliasResult Result = aliasPHI(PN, PNSize: V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(Val: V2)) {
    AliasResult Result = aliasPHI(PN, PNSize: V2Size, V2: V1, V2Size: V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(Val: V1)) {
    AliasResult Result = aliasSelect(SI: S1, SISize: V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(Val: V2)) {
    AliasResult Result = aliasSelect(SI: S2, SISize: V2Size, V2: V1, V2Size: V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(F: &F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(V: O1, Size: V1Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation) ||
         isObjectSize(V: O2, Size: V2Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}
1881
1882AliasResult BasicAAResult::aliasErrno(const MemoryLocation &Loc,
1883 const Module *M) {
1884 // There cannot be any alias with errno if the given memory location is an
1885 // identified function-local object, or the size of the memory access is
1886 // larger than the integer size.
1887 if (Loc.Size.hasValue() &&
1888 Loc.Size.getValue().getKnownMinValue() * 8 > TLI.getIntSize())
1889 return AliasResult::NoAlias;
1890
1891 if (isIdentifiedFunctionLocal(V: getUnderlyingObject(V: Loc.Ptr)))
1892 return AliasResult::NoAlias;
1893 return AliasResult::MayAlias;
1894}
1895
1896/// Check whether two Values can be considered equivalent.
1897///
1898/// If the values may come from different cycle iterations, this will also
1899/// check that the values are not part of cycle. We have to do this because we
1900/// are looking through phi nodes, that is we say
1901/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1902bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1903 const Value *V2,
1904 const AAQueryInfo &AAQI) {
1905 if (V != V2)
1906 return false;
1907
1908 if (!AAQI.MayBeCrossIteration)
1909 return true;
1910
1911 // Non-instructions and instructions in the entry block cannot be part of
1912 // a loop.
1913 const Instruction *Inst = dyn_cast<Instruction>(Val: V);
1914 if (!Inst || Inst->getParent()->isEntryBlock())
1915 return true;
1916
1917 return isNotInCycle(I: Inst, DT: getDT(AAQI), /*LI=*/nullptr, /*CI=*/nullptr);
1918}
1919
/// Computes the symbolic difference between two de-composed GEPs
/// (DestGEP -= SrcGEP): subtracts the constant offsets and cancels matching
/// variable indices, marking any leftover SrcGEP index as negated. No-wrap
/// flags on DestGEP are conservatively dropped whenever a subtraction could
/// wrap in the corresponding sense.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  // Drop nuw flag from GEP if subtraction of constant offsets overflows in an
  // unsigned sense.
  if (DestGEP.Offset.ult(RHS: SrcGEP.Offset))
    DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(First&: DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      // Candidates must be the same value (or both vscale) modulo cycles, and
      // must carry identical casts, or they do not cancel.
      if ((!isValueEqualInPotentialCycles(V: Dest.Val.V, V2: Src.Val.V, AAQI) &&
           !areBothVScale(V1: Dest.Val.V, V2: Src.Val.V)) ||
          !Dest.Val.hasSameCastsAs(Other: Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        // Drop nuw flag from GEP if subtraction of V's Scale overflows in an
        // unsigned sense.
        if (Dest.Scale.ult(RHS: Src.Scale))
          DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(CI: DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {.Val: Src.Val, .Scale: Src.Scale, .CxtI: Src.CxtI, .IsNSW: Src.IsNSW,
                                /* IsNegated */ true};
      DestGEP.VarIndices.push_back(Elt: Entry);

      // Drop nuw flag when we have unconsumed variable indices from SrcGEP.
      DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
    }
  }
}
1976
/// Heuristic for GEPs whose variable part is exactly two indices with
/// negated scales over the same underlying value (e.g. {zext(%x+1), -zext(%x)}).
/// In that case the two pointers differ by a constant, and if both access
/// sizes fit in the minimum possible gap (accounting for wrapping), the
/// locations cannot alias. Returns true when NoAlias is proven.
bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  // The heuristic only applies with exactly two variable indices and known
  // access sizes.
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  // The two indices must have identical casts, negated scales, and matching
  // types for the cancellation argument to hold.
  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Other: Var1.Val) ||
      !Var0.hasNegatedScaleOf(Other: Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(Val: CastedValue(Var0.Val.V), DL, Depth: 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(Val: CastedValue(Var1.Val.V), DL, Depth: 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(Other: E1.Val) ||
      !isValueEqualInPotentialCycles(V: E0.Val.V, V2: E1.Val.V, AAQI))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(A: MinDiff, B: Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(width: Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(RHS: V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(RHS: V2Size + GEP.Offset.abs());
}
2028
2029//===----------------------------------------------------------------------===//
2030// BasicAliasAnalysis Pass
2031//===----------------------------------------------------------------------===//
2032
// Unique key identifying the BasicAA analysis in the new pass manager's
// analysis caches.
AnalysisKey BasicAA::Key;
2034
2035BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
2036 auto &TLI = AM.getResult<TargetLibraryAnalysis>(IR&: F);
2037 auto &AC = AM.getResult<AssumptionAnalysis>(IR&: F);
2038 auto *DT = &AM.getResult<DominatorTreeAnalysis>(IR&: F);
2039 return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
2040}
2041
// Legacy pass manager wrapper boilerplate.
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {}

// Address of this ID is used as the legacy pass's unique identifier.
char BasicAAWrapperPass::ID = 0;

// Out-of-line virtual anchor; pins the vtable to this translation unit.
void BasicAAWrapperPass::anchor() {}
2047
// Register the legacy pass and declare its analysis dependencies (assumption
// cache, dominator tree, target library info) for the legacy pass manager.
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)
2055
/// Factory for the legacy-pass-manager BasicAA wrapper; caller takes
/// ownership of the returned pass.
FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}
2059
/// Legacy pass manager entry point: rebuild the BasicAAResult for \p F from
/// the required analyses. Returns false because the IR is never modified.
bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  // Replace any result left over from a previous function.
  Result.reset(p: new BasicAAResult(F.getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}
2071
2072void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2073 AU.setPreservesAll();
2074 AU.addRequiredTransitive<AssumptionCacheTracker>();
2075 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2076 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
2077}
2078