1//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the primary stateless implementation of the
10// Alias Analysis interface that implements identities (two different
11// globals cannot alias, etc), but does no stateful analysis.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/BasicAliasAnalysis.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ScopeExit.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/Statistic.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/CFG.h"
24#include "llvm/Analysis/CaptureTracking.h"
25#include "llvm/Analysis/MemoryBuiltins.h"
26#include "llvm/Analysis/MemoryLocation.h"
27#include "llvm/Analysis/TargetLibraryInfo.h"
28#include "llvm/Analysis/ValueTracking.h"
29#include "llvm/IR/Argument.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/ConstantRange.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GetElementPtrTypeIterator.h"
39#include "llvm/IR/GlobalAlias.h"
40#include "llvm/IR/GlobalVariable.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/Operator.h"
47#include "llvm/IR/PatternMatch.h"
48#include "llvm/IR/Type.h"
49#include "llvm/IR/User.h"
50#include "llvm/IR/Value.h"
51#include "llvm/InitializePasses.h"
52#include "llvm/Pass.h"
53#include "llvm/Support/Casting.h"
54#include "llvm/Support/CommandLine.h"
55#include "llvm/Support/Compiler.h"
56#include "llvm/Support/KnownBits.h"
57#include "llvm/Support/SaveAndRestore.h"
58#include <cassert>
59#include <cstdint>
60#include <cstdlib>
61#include <optional>
62#include <utility>
63
64#define DEBUG_TYPE "basicaa"
65
66using namespace llvm;
67
/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));
74
/// SearchLimitReached / SearchTimes show how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
81
bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}
95
96//===----------------------------------------------------------------------===//
97// Useful predicates
98//===----------------------------------------------------------------------===//
99
/// Returns the size of the object specified by V, or std::nullopt if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (std::optional<TypeSize> Size = getBaseObjectSize(V, DL, &TLI, Opts)) {
    // FIXME: Remove this check, only exists to preserve previous behavior.
    if (Size->isScalable())
      return std::nullopt;
    return Size;
  }
  return std::nullopt;
}
117
118/// Returns true if we can prove that the object specified by V is smaller than
119/// Size. Bails out early unless the root object is passed as the first
120/// parameter.
121static bool isObjectSmallerThan(const Value *V, TypeSize Size,
122 const DataLayout &DL,
123 const TargetLibraryInfo &TLI,
124 bool NullIsValidLoc) {
125 // Note that the meanings of the "object" are slightly different in the
126 // following contexts:
127 // c1: llvm::getObjectSize()
128 // c2: llvm.objectsize() intrinsic
129 // c3: isObjectSmallerThan()
130 // c1 and c2 share the same meaning; however, the meaning of "object" in c3
131 // refers to the "entire object".
132 //
133 // Consider this example:
134 // char *p = (char*)malloc(100)
135 // char *q = p+80;
136 //
137 // In the context of c1 and c2, the "object" pointed by q refers to the
138 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
139 //
  // In the context of c3, the "object" refers to the chunk of memory being
  // allocated. So, the "object" has 100 bytes, and q points to the middle of
  // the "object". However, unless p, the root object, is passed as the first
  // parameter, the call to isIdentifiedObject() makes isObjectSmallerThan()
  // bail out early.
145 if (!isIdentifiedObject(V))
146 return false;
147
  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}
155
156/// Return the minimal extent from \p V to the end of the underlying object,
157/// assuming the result is used in an aliasing query. E.g., we do use the query
158/// location size and the fact that null pointers cannot alias here.
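/// For example (illustrative): for an argument declared dereferenceable(16)
/// queried with a precise 8-byte location, the minimal extent is
/// max(16, 8) = 16 bytes; if the argument were only
/// dereferenceable_or_null(16) and null were a valid location, the
/// dereferenceable bytes would be dropped and only the precise 8-byte
/// location size would remain.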
159static TypeSize getMinimalExtentFrom(const Value &V,
160 const LocationSize &LocSize,
161 const DataLayout &DL,
162 bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}
177
178/// Returns true if we can prove that the object specified by V has size Size.
179static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
180 const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
181 std::optional<TypeSize> ObjectSize =
182 getObjectSize(V, DL, TLI, NullIsValidLoc);
183 return ObjectSize && *ObjectSize == Size;
184}
185
/// Return true if both V1 and V2 are vscale calls.
static bool areBothVScale(const Value *V1, const Value *V2) {
  return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
         PatternMatch::match(V2, PatternMatch::m_VScale());
}
191
192//===----------------------------------------------------------------------===//
193// CaptureAnalysis implementations
194//===----------------------------------------------------------------------===//
195
196CaptureAnalysis::~CaptureAnalysis() = default;
197
198CaptureComponents SimpleCaptureAnalysis::getCapturesBefore(const Value *Object,
199 const Instruction *I,
200 bool OrAt) {
201 if (!isIdentifiedFunctionLocal(V: Object))
202 return CaptureComponents::Provenance;
203
204 auto [CacheIt, Inserted] =
205 IsCapturedCache.insert(KV: {Object, CaptureComponents::Provenance});
206 if (!Inserted)
207 return CacheIt->second;
208
209 CaptureComponents Ret = PointerMayBeCaptured(
210 V: Object, /*ReturnCaptures=*/false, Mask: CaptureComponents::Provenance,
211 StopFn: [](CaptureComponents CC) { return capturesFullProvenance(CC); });
212 CacheIt->second = Ret;
213 return Ret;
214}
215
216static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
217 const LoopInfo *LI) {
218 BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
219 SmallVector<BasicBlock *> Succs(successors(BB));
220 return Succs.empty() ||
221 !isPotentiallyReachableFromMany(Worklist&: Succs, StopBB: BB, ExclusionSet: nullptr, DT, LI);
222}
223
224CaptureComponents
225EarliestEscapeAnalysis::getCapturesBefore(const Value *Object,
226 const Instruction *I, bool OrAt) {
227 if (!isIdentifiedFunctionLocal(V: Object))
228 return CaptureComponents::Provenance;
229
230 auto Iter = EarliestEscapes.try_emplace(Key: Object);
231 if (Iter.second) {
232 std::pair<Instruction *, CaptureComponents> EarliestCapture =
233 FindEarliestCapture(V: Object, F&: *DT.getRoot()->getParent(),
234 /*ReturnCaptures=*/false, DT,
235 Mask: CaptureComponents::Provenance);
236 if (EarliestCapture.first)
237 Inst2Obj[EarliestCapture.first].push_back(NewVal: Object);
238 Iter.first->second = EarliestCapture;
239 }
240
241 auto IsNotCapturedBefore = [&]() {
242 // No capturing instruction.
243 Instruction *CaptureInst = Iter.first->second.first;
244 if (!CaptureInst)
245 return true;
246
247 // No context instruction means any use is capturing.
248 if (!I)
249 return false;
250
251 if (I == CaptureInst) {
252 if (OrAt)
253 return false;
254 return isNotInCycle(I, DT: &DT, LI);
255 }
256
257 return !isPotentiallyReachable(From: CaptureInst, To: I, ExclusionSet: nullptr, DT: &DT, LI);
258 };
259 if (IsNotCapturedBefore())
260 return CaptureComponents::None;
261 return Iter.first->second.second;
262}
263
264void EarliestEscapeAnalysis::removeInstruction(Instruction *I) {
265 auto Iter = Inst2Obj.find(Val: I);
266 if (Iter != Inst2Obj.end()) {
267 for (const Value *Obj : Iter->second)
268 EarliestEscapes.erase(Val: Obj);
269 Inst2Obj.erase(Val: I);
270 }
271}
272
273//===----------------------------------------------------------------------===//
274// GetElementPtr Instruction Decomposition and Analysis
275//===----------------------------------------------------------------------===//
276
277namespace {
278/// Represents zext(sext(trunc(V))).
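/// Illustrative example: for an i32 value V with TruncBits = 16, SExtBits = 8
/// and ZExtBits = 8, this represents zext i24 (sext i16 (trunc i32 V)) to i32,
/// so getBitWidth() returns 32 - 16 + 8 + 8 = 32.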
279struct CastedValue {
280 const Value *V;
281 unsigned ZExtBits = 0;
282 unsigned SExtBits = 0;
283 unsigned TruncBits = 0;
284 /// Whether trunc(V) is non-negative.
285 bool IsNonNegative = false;
286
287 explicit CastedValue(const Value *V) : V(V) {}
288 explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
289 unsigned TruncBits, bool IsNonNegative)
290 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
291 IsNonNegative(IsNonNegative) {}
292
293 unsigned getBitWidth() const {
294 return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
295 SExtBits;
296 }
297
298 CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
299 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
300 IsNonNegative && PreserveNonNeg);
301 }
302
303 /// Replace V with zext(NewV)
304 CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
305 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
306 NewV->getType()->getPrimitiveSizeInBits();
307 if (ExtendBy <= TruncBits)
308 // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
309 // The nneg can be preserved on the outer zext here.
310 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
311 IsNonNegative);
312
313 // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
314 ExtendBy -= TruncBits;
315 // zext<nneg>(zext(NewV)) == zext(NewV)
316 // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
317 // The nneg can be preserved from the inner zext here but must be dropped
318 // from the outer.
319 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
320 ZExtNonNegative);
321 }
322
323 /// Replace V with sext(NewV)
324 CastedValue withSExtOfValue(const Value *NewV) const {
325 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
326 NewV->getType()->getPrimitiveSizeInBits();
327 if (ExtendBy <= TruncBits)
328 // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
329 // The nneg can be preserved on the outer zext here
330 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
331 IsNonNegative);
332
333 // zext(sext(sext(NewV)))
334 ExtendBy -= TruncBits;
335 // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
336 // The nneg can be preserved on the outer zext here
337 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
338 }
339
340 APInt evaluateWith(APInt N) const {
341 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
342 "Incompatible bit width");
343 if (TruncBits) N = N.trunc(width: N.getBitWidth() - TruncBits);
344 if (SExtBits) N = N.sext(width: N.getBitWidth() + SExtBits);
345 if (ZExtBits) N = N.zext(width: N.getBitWidth() + ZExtBits);
346 return N;
347 }
348
349 ConstantRange evaluateWith(ConstantRange N) const {
350 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
351 "Incompatible bit width");
352 if (TruncBits) N = N.truncate(BitWidth: N.getBitWidth() - TruncBits);
353 if (IsNonNegative && !N.isAllNonNegative())
354 N = N.intersectWith(
355 CR: ConstantRange(APInt::getZero(numBits: N.getBitWidth()),
356 APInt::getSignedMinValue(numBits: N.getBitWidth())));
357 if (SExtBits) N = N.signExtend(BitWidth: N.getBitWidth() + SExtBits);
358 if (ZExtBits) N = N.zeroExtend(BitWidth: N.getBitWidth() + ZExtBits);
359 return N;
360 }
361
362 bool canDistributeOver(bool NUW, bool NSW) const {
363 // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
364 // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
365 // trunc(x op y) == trunc(x) op trunc(y)
366 return (!ZExtBits || NUW) && (!SExtBits || NSW);
367 }
368
369 bool hasSameCastsAs(const CastedValue &Other) const {
370 if (V->getType() != Other.V->getType())
371 return false;
372
373 if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
374 TruncBits == Other.TruncBits)
375 return true;
    // If either CastedValue has a nneg zext then the sext/zext bits are
    // interchangeable for that value.
378 if (IsNonNegative || Other.IsNonNegative)
379 return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
380 TruncBits == Other.TruncBits);
381 return false;
382 }
383};
384
385/// Represents zext(sext(trunc(V))) * Scale + Offset.
386struct LinearExpression {
387 CastedValue Val;
388 APInt Scale;
389 APInt Offset;
390
391 /// True if all operations in this expression are NUW.
392 bool IsNUW;
393 /// True if all operations in this expression are NSW.
394 bool IsNSW;
395
396 LinearExpression(const CastedValue &Val, const APInt &Scale,
397 const APInt &Offset, bool IsNUW, bool IsNSW)
398 : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}
399
400 LinearExpression(const CastedValue &Val)
401 : Val(Val), IsNUW(true), IsNSW(true) {
402 unsigned BitWidth = Val.getBitWidth();
403 Scale = APInt(BitWidth, 1);
404 Offset = APInt(BitWidth, 0);
405 }
406
407 LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
408 // The check for zero offset is necessary, because generally
409 // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
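    // Illustrative i8 counterexample: X = 100, Y = -90, Z = 2. X + Y = 10 and
    // 10 * 2 = 20 both stay in range, but the distributed form needs
    // X * 2 = 200, which already overflows the signed i8 range.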
410 bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
411 bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
412 return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
413 }
414};
415}
416
417/// Analyzes the specified value as a linear expression: "A*V + B", where A and
418/// B are constant integers.
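/// Illustrative example: for "shl nsw (add nsw %x, 3), 1" over i64 this
/// returns Scale = 2 and Offset = 6 with IsNSW set, i.e. the expression is
/// understood as 2*%x + 6.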
419static LinearExpression GetLinearExpression(
420 const CastedValue &Val, const DataLayout &DL, unsigned Depth,
421 AssumptionCache *AC, DominatorTree *DT) {
422 // Limit our recursion depth.
423 if (Depth == 6)
424 return Val;
425
426 if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val: Val.V))
427 return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
428 Val.evaluateWith(N: Const->getValue()), true, true);
429
430 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val: Val.V)) {
431 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Val: BOp->getOperand(i_nocapture: 1))) {
432 APInt RHS = Val.evaluateWith(N: RHSC->getValue());
433 // The only non-OBO case we deal with is or, and only limited to the
434 // case where it is both nuw and nsw.
435 bool NUW = true, NSW = true;
436 if (isa<OverflowingBinaryOperator>(Val: BOp)) {
437 NUW &= BOp->hasNoUnsignedWrap();
438 NSW &= BOp->hasNoSignedWrap();
439 }
440 if (!Val.canDistributeOver(NUW, NSW))
441 return Val;
442
443 // While we can distribute over trunc, we cannot preserve nowrap flags
444 // in that case.
445 if (Val.TruncBits)
446 NUW = NSW = false;
447
448 LinearExpression E(Val);
449 switch (BOp->getOpcode()) {
450 default:
451 // We don't understand this instruction, so we can't decompose it any
452 // further.
453 return Val;
454 case Instruction::Or:
455 // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
456 if (!cast<PossiblyDisjointInst>(Val: BOp)->isDisjoint())
457 return Val;
458
459 [[fallthrough]];
460 case Instruction::Add: {
461 E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
462 Depth: Depth + 1, AC, DT);
463 E.Offset += RHS;
464 E.IsNUW &= NUW;
465 E.IsNSW &= NSW;
466 break;
467 }
468 case Instruction::Sub: {
469 E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
470 Depth: Depth + 1, AC, DT);
471 E.Offset -= RHS;
472 E.IsNUW = false; // sub nuw x, y is not add nuw x, -y.
473 E.IsNSW &= NSW;
474 break;
475 }
476 case Instruction::Mul:
477 E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: false), DL,
478 Depth: Depth + 1, AC, DT)
479 .mul(Other: RHS, MulIsNUW: NUW, MulIsNSW: NSW);
480 break;
481 case Instruction::Shl:
482 // We're trying to linearize an expression of the kind:
483 // shl i8 -128, 36
484 // where the shift count exceeds the bitwidth of the type.
485 // We can't decompose this further (the expression would return
486 // a poison value).
487 if (RHS.getLimitedValue() > Val.getBitWidth())
488 return Val;
489
490 E = GetLinearExpression(Val: Val.withValue(NewV: BOp->getOperand(i_nocapture: 0), PreserveNonNeg: NSW), DL,
491 Depth: Depth + 1, AC, DT);
492 E.Offset <<= RHS.getLimitedValue();
493 E.Scale <<= RHS.getLimitedValue();
494 E.IsNUW &= NUW;
495 E.IsNSW &= NSW;
496 break;
497 }
498 return E;
499 }
500 }
501
502 if (const auto *ZExt = dyn_cast<ZExtInst>(Val: Val.V))
503 return GetLinearExpression(
504 Val: Val.withZExtOfValue(NewV: ZExt->getOperand(i_nocapture: 0), ZExtNonNegative: ZExt->hasNonNeg()), DL,
505 Depth: Depth + 1, AC, DT);
506
507 if (isa<SExtInst>(Val: Val.V))
508 return GetLinearExpression(
509 Val: Val.withSExtOfValue(NewV: cast<CastInst>(Val: Val.V)->getOperand(i_nocapture: 0)),
510 DL, Depth: Depth + 1, AC, DT);
511
512 return Val;
513}
514
515namespace {
516// A linear transformation of a Value; this class represents
517// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
518struct VariableGEPIndex {
519 CastedValue Val;
520 APInt Scale;
521
522 // Context instruction to use when querying information about this index.
523 const Instruction *CxtI;
524
525 /// True if all operations in this expression are NSW.
526 bool IsNSW;
527
528 /// True if the index should be subtracted rather than added. We don't simply
529 /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
530 /// non-wrapping, while X + INT_MIN*(-1) wraps.
531 bool IsNegated;
532
533 bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
534 if (IsNegated == Other.IsNegated)
535 return Scale == -Other.Scale;
536 return Scale == Other.Scale;
537 }
538
539 void dump() const {
540 print(OS&: dbgs());
541 dbgs() << "\n";
542 }
543 void print(raw_ostream &OS) const {
544 OS << "(V=" << Val.V->getName()
545 << ", zextbits=" << Val.ZExtBits
546 << ", sextbits=" << Val.SExtBits
547 << ", truncbits=" << Val.TruncBits
548 << ", scale=" << Scale
549 << ", nsw=" << IsNSW
550 << ", negated=" << IsNegated << ")";
551 }
552};
553}
554
555// Represents the internal structure of a GEP, decomposed into a base pointer,
556// constant offsets, and variable scaled indices.
557struct BasicAAResult::DecomposedGEP {
558 // Base pointer of the GEP
559 const Value *Base;
560 // Total constant offset from base.
561 APInt Offset;
562 // Scaled variable (non-constant) indices.
563 SmallVector<VariableGEPIndex, 4> VarIndices;
564 // Nowrap flags common to all GEP operations involved in expression.
565 GEPNoWrapFlags NWFlags = GEPNoWrapFlags::all();
566
567 void dump() const {
568 print(OS&: dbgs());
569 dbgs() << "\n";
570 }
571 void print(raw_ostream &OS) const {
572 OS << ", inbounds=" << (NWFlags.isInBounds() ? "1" : "0")
573 << ", nuw=" << (NWFlags.hasNoUnsignedWrap() ? "1" : "0")
574 << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
575 << ", VarIndices=[";
576 for (size_t i = 0; i < VarIndices.size(); i++) {
577 if (i != 0)
578 OS << ", ";
579 VarIndices[i].print(OS);
580 }
581 OS << "])";
582 }
583};
584
585
586/// If V is a symbolic pointer expression, decompose it into a base pointer
587/// with a constant offset and a number of scaled symbolic offsets.
588///
589/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
590/// in the VarIndices vector) are Value*'s that are known to be scaled by the
591/// specified amount, but which may have other unrepresented high bits. As
592/// such, the gep cannot necessarily be reconstructed from its decomposed form.
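///
/// Illustrative example: "getelementptr inbounds i32, ptr %base, i64 %i"
/// decomposes into Base = %base, Offset = 0 and a single variable index
/// (%i, scale 4), assuming a 64-bit index type and a 4-byte i32.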
593BasicAAResult::DecomposedGEP
594BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
595 AssumptionCache *AC, DominatorTree *DT) {
596 // Limit recursion depth to limit compile time in crazy cases.
597 unsigned MaxLookup = MaxLookupSearchDepth;
598 SearchTimes++;
599 const Instruction *CxtI = dyn_cast<Instruction>(Val: V);
600
601 unsigned IndexSize = DL.getIndexTypeSizeInBits(Ty: V->getType());
602 DecomposedGEP Decomposed;
603 Decomposed.Offset = APInt(IndexSize, 0);
604 do {
605 // See if this is a bitcast or GEP.
606 const Operator *Op = dyn_cast<Operator>(Val: V);
607 if (!Op) {
608 // The only non-operator case we can handle are GlobalAliases.
609 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: V)) {
610 if (!GA->isInterposable()) {
611 V = GA->getAliasee();
612 continue;
613 }
614 }
615 Decomposed.Base = V;
616 return Decomposed;
617 }
618
619 if (Op->getOpcode() == Instruction::BitCast ||
620 Op->getOpcode() == Instruction::AddrSpaceCast) {
621 Value *NewV = Op->getOperand(i: 0);
622 // Don't look through casts between address spaces with differing index
623 // widths.
624 if (DL.getIndexTypeSizeInBits(Ty: NewV->getType()) != IndexSize) {
625 Decomposed.Base = V;
626 return Decomposed;
627 }
628 V = NewV;
629 continue;
630 }
631
632 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Val: Op);
633 if (!GEPOp) {
634 if (const auto *PHI = dyn_cast<PHINode>(Val: V)) {
635 // Look through single-arg phi nodes created by LCSSA.
636 if (PHI->getNumIncomingValues() == 1) {
637 V = PHI->getIncomingValue(i: 0);
638 continue;
639 }
640 } else if (const auto *Call = dyn_cast<CallBase>(Val: V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics, like launder.invariant.group, that can't be expressed
        // with attributes but that return a pointer aliasing their argument.
        // Because some analyses assume that a nocapture pointer is not
        // returned from such a special intrinsic (the function would have to
        // be marked with a returned attribute otherwise), it is crucial to use
        // this helper, which is kept in sync with CaptureTracking. Not using
        // it may cause miscompiles where two aliasing pointers are assumed
        // not to alias.
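        // For illustration (IR sketch):
        //   %q = call ptr @llvm.launder.invariant.group.p0(ptr %p)
        // returns %q, which aliases %p even though no attribute on the call
        // expresses that relationship.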
650 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, MustPreserveNullness: false)) {
651 V = RP;
652 continue;
653 }
654 }
655
656 Decomposed.Base = V;
657 return Decomposed;
658 }
659
660 // Track the common nowrap flags for all GEPs we see.
661 Decomposed.NWFlags &= GEPOp->getNoWrapFlags();
662
663 assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
664
665 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
666 gep_type_iterator GTI = gep_type_begin(GEP: GEPOp);
667 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
668 I != E; ++I, ++GTI) {
669 const Value *Index = *I;
670 // Compute the (potentially symbolic) offset in bytes for this index.
671 if (StructType *STy = GTI.getStructTypeOrNull()) {
672 // For a struct, add the member offset.
673 unsigned FieldNo = cast<ConstantInt>(Val: Index)->getZExtValue();
674 if (FieldNo == 0)
675 continue;
676
677 Decomposed.Offset += DL.getStructLayout(Ty: STy)->getElementOffset(Idx: FieldNo);
678 continue;
679 }
680
681 // For an array/pointer, add the element offset, explicitly scaled.
682 if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Val: Index)) {
683 if (CIdx->isZero())
684 continue;
685
686 // Don't attempt to analyze GEPs if the scalable index is not zero.
687 TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
688 if (AllocTypeSize.isScalable()) {
689 Decomposed.Base = V;
690 return Decomposed;
691 }
692
693 Decomposed.Offset += AllocTypeSize.getFixedValue() *
694 CIdx->getValue().sextOrTrunc(width: IndexSize);
695 continue;
696 }
697
698 TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
699 if (AllocTypeSize.isScalable()) {
700 Decomposed.Base = V;
701 return Decomposed;
702 }
703
704 // If the integer type is smaller than the index size, it is implicitly
705 // sign extended or truncated to index size.
706 bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
707 bool NUW = GEPOp->hasNoUnsignedWrap();
708 bool NonNeg = NUSW && NUW;
709 unsigned Width = Index->getType()->getIntegerBitWidth();
710 unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
711 unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
712 LinearExpression LE = GetLinearExpression(
713 Val: CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, Depth: 0, AC, DT);
714
715 // Scale by the type size.
716 unsigned TypeSize = AllocTypeSize.getFixedValue();
717 LE = LE.mul(Other: APInt(IndexSize, TypeSize), MulIsNUW: NUW, MulIsNSW: NUSW);
718 Decomposed.Offset += LE.Offset;
719 APInt Scale = LE.Scale;
720 if (!LE.IsNUW)
721 Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();
722
723 // If we already had an occurrence of this index variable, merge this
724 // scale into it. For example, we want to handle:
725 // A[x][x] -> x*16 + x*4 -> x*20
726 // This also ensures that 'x' only appears in the index list once.
727 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
728 if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
729 areBothVScale(V1: Decomposed.VarIndices[i].Val.V, V2: LE.Val.V)) &&
730 Decomposed.VarIndices[i].Val.hasSameCastsAs(Other: LE.Val)) {
731 Scale += Decomposed.VarIndices[i].Scale;
732 // We cannot guarantee no-wrap for the merge.
733 LE.IsNSW = LE.IsNUW = false;
734 Decomposed.VarIndices.erase(CI: Decomposed.VarIndices.begin() + i);
735 break;
736 }
737 }
738
739 if (!!Scale) {
740 VariableGEPIndex Entry = {.Val: LE.Val, .Scale: Scale, .CxtI: CxtI, .IsNSW: LE.IsNSW,
741 /* IsNegated */ false};
742 Decomposed.VarIndices.push_back(Elt: Entry);
743 }
744 }
745
746 // Analyze the base pointer next.
747 V = GEPOp->getOperand(i_nocapture: 0);
748 } while (--MaxLookup);
749
750 // If the chain of expressions is too deep, just return early.
751 Decomposed.Base = V;
752 SearchLimitReached++;
753 return Decomposed;
754}
755
756ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
757 AAQueryInfo &AAQI,
758 bool IgnoreLocals) {
759 assert(Visited.empty() && "Visited must be cleared after use!");
760 llvm::scope_exit _([&] { Visited.clear(); });
761
762 unsigned MaxLookup = 8;
763 SmallVector<const Value *, 16> Worklist;
764 Worklist.push_back(Elt: Loc.Ptr);
765 ModRefInfo Result = ModRefInfo::NoModRef;
766
767 do {
768 const Value *V = getUnderlyingObject(V: Worklist.pop_back_val());
769 if (!Visited.insert(Ptr: V).second)
770 continue;
771
772 // Ignore allocas if we were instructed to do so.
773 if (IgnoreLocals && isa<AllocaInst>(Val: V))
774 continue;
775
776 // If the location points to memory that is known to be invariant for
777 // the life of the underlying SSA value, then we can exclude Mod from
778 // the set of valid memory effects.
779 //
780 // An argument that is marked readonly and noalias is known to be
781 // invariant while that function is executing.
782 if (const Argument *Arg = dyn_cast<Argument>(Val: V)) {
783 if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
784 Result |= ModRefInfo::Ref;
785 continue;
786 }
787 }
788
789 // A global constant can't be mutated.
790 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Val: V)) {
791 // Note: this doesn't require GV to be "ODR" because it isn't legal for a
792 // global to be marked constant in some modules and non-constant in
793 // others. GV may even be a declaration, not a definition.
794 if (!GV->isConstant())
795 return ModRefInfo::ModRef;
796 continue;
797 }
798
799 // If both select values point to local memory, then so does the select.
800 if (const SelectInst *SI = dyn_cast<SelectInst>(Val: V)) {
801 Worklist.push_back(Elt: SI->getTrueValue());
802 Worklist.push_back(Elt: SI->getFalseValue());
803 continue;
804 }
805
806 // If all values incoming to a phi node point to local memory, then so does
807 // the phi.
808 if (const PHINode *PN = dyn_cast<PHINode>(Val: V)) {
809 // Don't bother inspecting phi nodes with many operands.
810 if (PN->getNumIncomingValues() > MaxLookup)
811 return ModRefInfo::ModRef;
812 append_range(C&: Worklist, R: PN->incoming_values());
813 continue;
814 }
815
816 // Otherwise be conservative.
817 return ModRefInfo::ModRef;
818 } while (!Worklist.empty() && --MaxLookup);
819
820 // If we hit the maximum number of instructions to examine, be conservative.
821 if (!Worklist.empty())
822 return ModRefInfo::ModRef;
823
824 return Result;
825}
826
827static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
828 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: Call);
829 return II && II->getIntrinsicID() == IID;
830}
831
832/// Returns the behavior when calling the given call site.
833MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
834 AAQueryInfo &AAQI) {
835 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
836
837 if (const Function *F = dyn_cast<Function>(Val: Call->getCalledOperand())) {
838 MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
839 // Operand bundles on the call may also read or write memory, in addition
840 // to the behavior of the called function.
841 if (Call->hasReadingOperandBundles())
842 FuncME |= MemoryEffects::readOnly();
843 if (Call->hasClobberingOperandBundles())
844 FuncME |= MemoryEffects::writeOnly();
845 if (Call->isVolatile()) {
846 // Volatile operations also access inaccessible memory.
847 FuncME |= MemoryEffects::inaccessibleMemOnly();
848 }
849 Min &= FuncME;
850 }
851
852 return Min;
853}
854
855/// Returns the behavior when calling the given function. For use when the call
856/// site is not known.
857MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
858 switch (F->getIntrinsicID()) {
859 case Intrinsic::experimental_guard:
860 case Intrinsic::experimental_deoptimize:
861 // These intrinsics can read arbitrary memory, and additionally modref
862 // inaccessible memory to model control dependence.
863 return MemoryEffects::readOnly() |
864 MemoryEffects::inaccessibleMemOnly(MR: ModRefInfo::ModRef);
865 }
866
867 return F->getMemoryEffects();
868}
869
870ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
871 unsigned ArgIdx) {
872 if (Call->doesNotAccessMemory(OpNo: ArgIdx))
873 return ModRefInfo::NoModRef;
874
875 if (Call->onlyWritesMemory(OpNo: ArgIdx))
876 return ModRefInfo::Mod;
877
878 if (Call->onlyReadsMemory(OpNo: ArgIdx))
879 return ModRefInfo::Ref;
880
881 return ModRefInfo::ModRef;
882}
883
884#ifndef NDEBUG
885static const Function *getParent(const Value *V) {
886 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
887 if (!inst->getParent())
888 return nullptr;
889 return inst->getParent()->getParent();
890 }
891
892 if (const Argument *arg = dyn_cast<Argument>(V))
893 return arg->getParent();
894
895 return nullptr;
896}
897
898static bool notDifferentParent(const Value *O1, const Value *O2) {
899
900 const Function *F1 = getParent(O1);
901 const Function *F2 = getParent(O2);
902
903 return !F1 || !F2 || F1 == F2;
904}
905#endif
906
907AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
908 const MemoryLocation &LocB, AAQueryInfo &AAQI,
909 const Instruction *CtxI) {
910 assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
911 "BasicAliasAnalysis doesn't support interprocedural queries.");
912 return aliasCheck(V1: LocA.Ptr, V1Size: LocA.Size, V2: LocB.Ptr, V2Size: LocB.Size, AAQI, CtxI);
913}
914
915/// Checks to see if the specified callsite can clobber the specified memory
916/// object.
917///
918/// Since we only look at local properties of this function, we really can't
919/// say much about this query. We do, however, use simple "address taken"
920/// analysis on local objects.
921ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
922 const MemoryLocation &Loc,
923 AAQueryInfo &AAQI) {
924 assert(notDifferentParent(Call, Loc.Ptr) &&
925 "AliasAnalysis query involving multiple functions!");
926
927 const Value *Object = getUnderlyingObject(V: Loc.Ptr);
928
929 // Calls marked 'tail' cannot read or write allocas from the current frame
930 // because the current frame might be destroyed by the time they run. However,
931 // a tail call may use an alloca with byval. Calling with byval copies the
932 // contents of the alloca into argument registers or stack slots, so there is
933 // no lifetime issue.
934 if (isa<AllocaInst>(Val: Object))
935 if (const CallInst *CI = dyn_cast<CallInst>(Val: Call))
936 if (CI->isTailCall() &&
937 !CI->getAttributes().hasAttrSomewhere(Kind: Attribute::ByVal))
938 return ModRefInfo::NoModRef;
939
940 // Stack restore is able to modify unescaped dynamic allocas. Assume it may
941 // modify them even though the alloca is not escaped.
942 if (auto *AI = dyn_cast<AllocaInst>(Val: Object))
943 if (!AI->isStaticAlloca() && isIntrinsicCall(Call, IID: Intrinsic::stackrestore))
944 return ModRefInfo::Mod;
945
946 // We can completely ignore inaccessible memory here, because MemoryLocations
947 // can only reference accessible memory.
948 auto ME = AAQI.AAR.getMemoryEffects(Call, AAQI)
949 .getWithoutLoc(Loc: IRMemLocation::InaccessibleMem);
950 if (ME.doesNotAccessMemory())
951 return ModRefInfo::NoModRef;
952
953 ModRefInfo ArgMR = ME.getModRef(Loc: IRMemLocation::ArgMem);
954 ModRefInfo ErrnoMR = ME.getModRef(Loc: IRMemLocation::ErrnoMem);
955 ModRefInfo OtherMR = ME.getModRef(Loc: IRMemLocation::Other);
956
957 // An identified function-local object that does not escape can only be
958 // accessed via call arguments. Reduce OtherMR (which includes accesses to
959 // escaped memory) based on that.
960 //
961 // We model calls that can return twice (setjmp) as clobbering non-escaping
962 // objects, to model any accesses that may occur prior to the second return.
963 // As an exception, ignore allocas, as setjmp is not required to preserve
964 // non-volatile stores for them.
965 if (isModOrRefSet(MRI: OtherMR) && !isa<Constant>(Val: Object) && Call != Object &&
966 (isa<AllocaInst>(Val: Object) || !Call->hasFnAttr(Kind: Attribute::ReturnsTwice))) {
967 CaptureComponents CC =
968 AAQI.CA->getCapturesBefore(Object, I: Call, /*OrAt=*/false);
969 if (capturesNothing(CC))
970 OtherMR = ModRefInfo::NoModRef;
971 else if (capturesReadProvenanceOnly(CC))
972 OtherMR = ModRefInfo::Ref;
973 }
974
975 // Refine the modref info for argument memory. We only bother to do this
976 // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
977 // on the final result.
978 if ((ArgMR | OtherMR) != OtherMR) {
979 ModRefInfo NewArgMR = ModRefInfo::NoModRef;
980 for (const Use &U : Call->data_ops()) {
981 const Value *Arg = U;
982 if (!Arg->getType()->isPointerTy())
983 continue;
984 unsigned ArgIdx = Call->getDataOperandNo(U: &U);
985 MemoryLocation ArgLoc =
986 Call->isArgOperand(U: &U)
987 ? MemoryLocation::getForArgument(Call, ArgIdx, TLI)
988 : MemoryLocation::getBeforeOrAfter(Ptr: Arg);
989 AliasResult ArgAlias = AAQI.AAR.alias(LocA: ArgLoc, LocB: Loc, AAQI, CtxI: Call);
990 if (ArgAlias != AliasResult::NoAlias)
991 NewArgMR |= ArgMR & AAQI.AAR.getArgModRefInfo(Call, ArgIdx);
992
993 // Exit early if we cannot improve over the original ArgMR.
994 if (NewArgMR == ArgMR)
995 break;
996 }
997 ArgMR = NewArgMR;
998 }
999
1000 ModRefInfo Result = ArgMR | OtherMR;
1001
1002 // Refine accesses to errno memory.
1003 if ((ErrnoMR | Result) != Result) {
1004 if (AAQI.AAR.aliasErrno(Loc, M: Call->getModule()) != AliasResult::NoAlias) {
1005 // Exclusion conditions do not hold, this memory location may alias errno.
1006 Result |= ErrnoMR;
1007 }
1008 }
1009
1010 if (!isModAndRefSet(MRI: Result))
1011 return Result;
1012
1013 // If the call is malloc/calloc like, we can assume that it doesn't
1014 // modify any IR visible value. This is only valid because we assume these
1015 // routines do not read values visible in the IR. TODO: Consider special
1016 // casing realloc and strdup routines which access only their arguments as
1017 // well. Or alternatively, replace all of this with inaccessiblememonly once
1018 // that's implemented fully.
1019 if (isMallocOrCallocLikeFn(V: Call, TLI: &TLI)) {
1020 // Be conservative if the accessed pointer may alias the allocation -
1021 // fallback to the generic handling below.
1022 if (AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: Call), LocB: Loc, AAQI) ==
1023 AliasResult::NoAlias)
1024 return ModRefInfo::NoModRef;
1025 }
1026
1027 // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1028 // writing so that proper control dependencies are maintained but they never
1029 // mod any particular memory location visible to the IR.
1030 // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1031 // intrinsic is now modeled as reading memory. This prevents hoisting the
1032 // invariant.start intrinsic over stores. Consider:
1033 // *ptr = 40;
1034 // *ptr = 50;
1035 // invariant_start(ptr)
1036 // int val = *ptr;
1037 // print(val);
1038 //
1039 // This cannot be transformed to:
1040 //
1041 // *ptr = 40;
1042 // invariant_start(ptr)
1043 // *ptr = 50;
1044 // int val = *ptr;
1045 // print(val);
1046 //
1047 // The transformation will cause the second store to be ignored (based on
1048 // rules of invariant.start) and print 40, while the first program always
1049 // prints 50.
1050 if (isIntrinsicCall(Call, IID: Intrinsic::invariant_start))
1051 return ModRefInfo::Ref;
1052
1053 // Be conservative.
1054 return ModRefInfo::ModRef;
1055}
1056
1057ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1058 const CallBase *Call2,
1059 AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
1063 //
1064 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1065 // heap state at the point the guard is issued needs to be consistent in case
1066 // the guard invokes the "deopt" continuation.
1067
1068 // NB! This function is *not* commutative, so we special case two
1069 // possibilities for guard intrinsics.
1070
1071 if (isIntrinsicCall(Call: Call1, IID: Intrinsic::experimental_guard))
1072 return isModSet(MRI: getMemoryEffects(Call: Call2, AAQI).getModRef())
1073 ? ModRefInfo::Ref
1074 : ModRefInfo::NoModRef;
1075
1076 if (isIntrinsicCall(Call: Call2, IID: Intrinsic::experimental_guard))
1077 return isModSet(MRI: getMemoryEffects(Call: Call1, AAQI).getModRef())
1078 ? ModRefInfo::Mod
1079 : ModRefInfo::NoModRef;
1080
1081 // Be conservative.
1082 return ModRefInfo::ModRef;
1083}
1084
1085/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1086/// another pointer.
1087///
1088/// We know that V1 is a GEP, but we don't know anything about V2.
1089/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1090/// V2.
1091AliasResult BasicAAResult::aliasGEP(
1092 const GEPOperator *GEP1, LocationSize V1Size,
1093 const Value *V2, LocationSize V2Size,
1094 const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1095 auto BaseObjectsAlias = [&]() {
1096 AliasResult BaseAlias =
1097 AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: UnderlyingV1),
1098 LocB: MemoryLocation::getBeforeOrAfter(Ptr: UnderlyingV2), AAQI);
1099 return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1100 : AliasResult::MayAlias;
1101 };
1102
1103 if (!V1Size.hasValue() && !V2Size.hasValue()) {
1104 // TODO: This limitation exists for compile-time reasons. Relax it if we
1105 // can avoid exponential pathological cases.
1106 if (!isa<GEPOperator>(Val: V2))
1107 return AliasResult::MayAlias;
1108
1109 // If both accesses have unknown size, we can only check whether the base
1110 // objects don't alias.
1111 return BaseObjectsAlias();
1112 }
1113
1114 DominatorTree *DT = getDT(AAQI);
1115 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(V: GEP1, DL, AC: &AC, DT);
1116 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V: V2, DL, AC: &AC, DT);
1117
1118 // Bail if we were not able to decompose anything.
1119 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1120 return AliasResult::MayAlias;
1121
1122 // Fall back to base objects if pointers have different index widths.
1123 if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
1124 return BaseObjectsAlias();
1125
1126 // Swap GEP1 and GEP2 if GEP2 has more variable indices.
1127 if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
1128 std::swap(a&: DecompGEP1, b&: DecompGEP2);
1129 std::swap(a&: V1Size, b&: V2Size);
1130 std::swap(a&: UnderlyingV1, b&: UnderlyingV2);
1131 }
1132
1133 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1134 // symbolic difference.
1135 subtractDecomposedGEPs(DestGEP&: DecompGEP1, SrcGEP: DecompGEP2, AAQI);
1136
1137 // If an inbounds GEP would have to start from an out of bounds address
1138 // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
  // sizes.
1141
1142 if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
1143 V2Size.hasValue() && !V2Size.isScalable() &&
1144 DecompGEP1.Offset.sge(RHS: V2Size.getValue()) &&
1145 isBaseOfObject(V: DecompGEP2.Base))
1146 return AliasResult::NoAlias;
1147
1148 // Symmetric case to above.
1149 if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
1150 V1Size.hasValue() && !V1Size.isScalable() &&
1151 DecompGEP1.Offset.sle(RHS: -V1Size.getValue()) &&
1152 isBaseOfObject(V: DecompGEP1.Base))
1153 return AliasResult::NoAlias;
1154
1155 // For GEPs with identical offsets, we can preserve the size and AAInfo
1156 // when performing the alias check on the underlying objects.
1157 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1158 return AAQI.AAR.alias(LocA: MemoryLocation(DecompGEP1.Base, V1Size),
1159 LocB: MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
1160
1161 // Do the base pointers alias?
1162 AliasResult BaseAlias =
1163 AAQI.AAR.alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: DecompGEP1.Base),
1164 LocB: MemoryLocation::getBeforeOrAfter(Ptr: DecompGEP2.Base), AAQI);
1165
1166 // If we get a No or May, then return it immediately, no amount of analysis
1167 // will improve this situation.
1168 if (BaseAlias != AliasResult::MustAlias) {
1169 assert(BaseAlias == AliasResult::NoAlias ||
1170 BaseAlias == AliasResult::MayAlias);
1171 return BaseAlias;
1172 }
1173
1174 // If there is a constant difference between the pointers, but the difference
1175 // is less than the size of the associated memory object, then we know
1176 // that the objects are partially overlapping. If the difference is
1177 // greater, we know they do not overlap.
1178 if (DecompGEP1.VarIndices.empty()) {
1179 APInt &Off = DecompGEP1.Offset;
1180
1181 // Initialize for Off >= 0 (V2 <= GEP1) case.
1182 LocationSize VLeftSize = V2Size;
1183 LocationSize VRightSize = V1Size;
1184 const bool Swapped = Off.isNegative();
1185
1186 if (Swapped) {
1187 // Swap if we have the situation where:
1188 // + +
1189 // | BaseOffset |
1190 // ---------------->|
1191 // |-->V1Size |-------> V2Size
1192 // GEP1 V2
1193 std::swap(a&: VLeftSize, b&: VRightSize);
1194 Off = -Off;
1195 }
1196
1197 if (!VLeftSize.hasValue())
1198 return AliasResult::MayAlias;
1199
1200 const TypeSize LSize = VLeftSize.getValue();
1201 if (!LSize.isScalable()) {
1202 if (Off.ult(RHS: LSize)) {
1203 // Conservatively drop processing if a phi was visited and/or offset is
1204 // too big.
1205 AliasResult AR = AliasResult::PartialAlias;
1206 if (VRightSize.hasValue() && !VRightSize.isScalable() &&
1207 Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(RHS: LSize)) {
1208 // Memory referenced by right pointer is nested. Save the offset in
1209 // cache. Note that originally offset estimated as GEP1-V2, but
1210 // AliasResult contains the shift that represents GEP1+Offset=V2.
1211 AR.setOffset(-Off.getSExtValue());
1212 AR.swap(DoSwap: Swapped);
1213 }
1214 return AR;
1215 }
1216 return AliasResult::NoAlias;
1217 } else {
1218 // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
1219 ConstantRange CR = getVScaleRange(F: &F, BitWidth: Off.getBitWidth());
1220 bool Overflow;
1221 APInt UpperRange = CR.getUnsignedMax().umul_ov(
1222 RHS: APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
1223 if (!Overflow && Off.uge(RHS: UpperRange))
1224 return AliasResult::NoAlias;
1225 }
1226 }
1227
1228 // VScale Alias Analysis - Given one scalable offset between accesses and a
1229 // scalable typesize, we can divide each side by vscale, treating both values
1230 // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
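  // Illustrative example: accesses at %p and at %p + 16 * vscale, where the
  // access at %p is 16 * vscale bytes wide (e.g. a <vscale x 4 x i32> load),
  // cannot overlap: for every value of vscale the distance is at least the
  // size of the lower access.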
1231 if (DecompGEP1.VarIndices.size() == 1 &&
1232 DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
1233 DecompGEP1.Offset.isZero() &&
1234 PatternMatch::match(V: DecompGEP1.VarIndices[0].Val.V,
1235 P: PatternMatch::m_VScale())) {
1236 const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
1237 APInt Scale =
1238 ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
1239 LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;
1240
1241 // Check if the offset is known to not overflow, if it does then attempt to
1242 // prove it with the known values of vscale_range.
1243 bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
1244 if (Overflows) {
1245 ConstantRange CR = getVScaleRange(F: &F, BitWidth: Scale.getBitWidth());
1246 (void)CR.getSignedMax().smul_ov(RHS: Scale, Overflow&: Overflows);
1247 }
1248
1249 if (!Overflows) {
1250 // Note that we do not check that the typesize is scalable, as vscale >= 1
1251 // so noalias still holds so long as the dependency distance is at least
1252 // as big as the typesize.
1253 if (VLeftSize.hasValue() &&
1254 Scale.abs().uge(RHS: VLeftSize.getValue().getKnownMinValue()))
1255 return AliasResult::NoAlias;
1256 }
1257 }
1258
1259 // If the difference between pointers is Offset +<nuw> Indices then we know
1260 // that the addition does not wrap the pointer index type (add nuw) and the
1261 // constant Offset is a lower bound on the distance between the pointers. We
1262 // can then prove NoAlias via Offset u>= VLeftSize.
1263 // + + +
1264 // | BaseOffset | +<nuw> Indices |
1265 // ---------------->|-------------------->|
1266 // |-->V2Size | |-------> V1Size
1267 // LHS RHS
1268 if (!DecompGEP1.VarIndices.empty() &&
1269 DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
1270 !V2Size.isScalable() && DecompGEP1.Offset.uge(RHS: V2Size.getValue()))
1271 return AliasResult::NoAlias;
1272
1273 // Bail on analysing scalable LocationSize
1274 if (V1Size.isScalable() || V2Size.isScalable())
1275 return AliasResult::MayAlias;
1276
1277 // We need to know both access sizes for all the following heuristics. Don't
1278 // try to reason about sizes larger than the index space.
1279 unsigned BW = DecompGEP1.Offset.getBitWidth();
1280 if (!V1Size.hasValue() || !V2Size.hasValue() ||
1281 !isUIntN(N: BW, x: V1Size.getValue()) || !isUIntN(N: BW, x: V2Size.getValue()))
1282 return AliasResult::MayAlias;
1283
1284 APInt GCD;
1285 ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1286 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1287 const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1288 const APInt &Scale = Index.Scale;
1289 APInt ScaleForGCD = Scale;
1290 if (!Index.IsNSW)
1291 ScaleForGCD =
1292 APInt::getOneBitSet(numBits: Scale.getBitWidth(), BitNo: Scale.countr_zero());
1293
1294 if (i == 0)
1295 GCD = ScaleForGCD.abs();
1296 else
1297 GCD = APIntOps::GreatestCommonDivisor(A: GCD, B: ScaleForGCD.abs());
1298
1299 ConstantRange CR = computeConstantRange(V: Index.Val.V, /* ForSigned */ false,
1300 UseInstrInfo: true, AC: &AC, CtxI: Index.CxtI);
1301 KnownBits Known = computeKnownBits(V: Index.Val.V, DL, AC: &AC, CxtI: Index.CxtI, DT);
1302 CR = CR.intersectWith(
1303 CR: ConstantRange::fromKnownBits(Known, /* Signed */ IsSigned: true),
1304 Type: ConstantRange::Signed);
1305 CR = Index.Val.evaluateWith(N: CR).sextOrTrunc(BitWidth: OffsetRange.getBitWidth());
1306
1307 assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1308 "Bit widths are normalized to MaxIndexSize");
1309 if (Index.IsNSW)
1310 CR = CR.smul_sat(Other: ConstantRange(Scale));
1311 else
1312 CR = CR.smul_fast(Other: ConstantRange(Scale));
1313
1314 if (Index.IsNegated)
1315 OffsetRange = OffsetRange.sub(Other: CR);
1316 else
1317 OffsetRange = OffsetRange.add(Other: CR);
1318 }
1319
1320 // We now have accesses at two offsets from the same base:
1321 // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1322 // 2. 0 with size V2Size
1323 // Using arithmetic modulo GCD, the accesses are at
1324 // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1325 // into the range [V2Size..GCD), then we know they cannot overlap.
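  // Illustrative example: with GCD = 8, DecompGEP1.Offset = 4 and both access
  // sizes equal to 4, the first access covers [4, 8) modulo 8 and the second
  // covers [0, 4), so the two cannot overlap and NoAlias is returned.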
1326 APInt ModOffset = DecompGEP1.Offset.srem(RHS: GCD);
1327 if (ModOffset.isNegative())
1328 ModOffset += GCD; // We want mod, not rem.
1329 if (ModOffset.uge(RHS: V2Size.getValue()) &&
1330 (GCD - ModOffset).uge(RHS: V1Size.getValue()))
1331 return AliasResult::NoAlias;
1332
1333 // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
1335 ConstantRange Range1 = OffsetRange.add(
1336 Other: ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1337 ConstantRange Range2 =
1338 ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1339 if (Range1.intersectWith(CR: Range2).isEmptySet())
1340 return AliasResult::NoAlias;
1341
1342 // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1343 // potentially wrapping math.
1344 auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1345 if (Var.IsNSW)
1346 return true;
1347
1348 int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1349 // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1350 // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1351 // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
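    // Illustrative example: if Val.V is an i8 zero-extended into a 64-bit
    // index, abs(V) <= 255, so any Scale below 2^56 keeps V*Scale from
    // wrapping the 64-bit index, and abs(V*Scale) >= abs(Scale) holds.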
1352 int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1353 if (MaxScaleValueBW <= 0)
1354 return false;
1355 return Var.Scale.ule(
1356 RHS: APInt::getMaxValue(numBits: MaxScaleValueBW).zext(width: Var.Scale.getBitWidth()));
1357 };
1358
1359 // Try to determine the range of values for VarIndex such that
1360 // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1361 std::optional<APInt> MinAbsVarIndex;
1362 if (DecompGEP1.VarIndices.size() == 1) {
1363 // VarIndex = Scale*V.
1364 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1365 if (Var.Val.TruncBits == 0 &&
1366 isKnownNonZero(V: Var.Val.V, Q: SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
1367 // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1368 // presence of potentially wrapping math.
1369 if (MultiplyByScaleNoWrap(Var)) {
1370 // If V != 0 then abs(VarIndex) >= abs(Scale).
1371 MinAbsVarIndex = Var.Scale.abs();
1372 }
1373 }
1374 } else if (DecompGEP1.VarIndices.size() == 2) {
1375 // VarIndex = Scale*V0 + (-Scale)*V1.
1376 // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1377 // Check that MayBeCrossIteration is false, to avoid reasoning about
1378 // inequality of values across loop iterations.
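    // For instance, if the two values are a phi %i and %i + 1, they are never
    // equal within a single iteration, but %i + 1 in iteration k takes the
    // same numeric value as %i in iteration k + 1, so the inequality does not
    // carry across iterations.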
1379 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1380 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1381 if (Var0.hasNegatedScaleOf(Other: Var1) && Var0.Val.TruncBits == 0 &&
1382 Var0.Val.hasSameCastsAs(Other: Var1.Val) && !AAQI.MayBeCrossIteration &&
1383 MultiplyByScaleNoWrap(Var0) && MultiplyByScaleNoWrap(Var1) &&
1384 isKnownNonEqual(V1: Var0.Val.V, V2: Var1.Val.V,
1385 SQ: SimplifyQuery(DL, DT, &AC, /*CxtI=*/Var0.CxtI
1386 ? Var0.CxtI
1387 : Var1.CxtI)))
1388 MinAbsVarIndex = Var0.Scale.abs();
1389 }
1390
1391 if (MinAbsVarIndex) {
1392 // The constant offset will have added at least +/-MinAbsVarIndex to it.
1393 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1394 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1395 // We know that Offset <= OffsetLo || Offset >= OffsetHi
1396 if (OffsetLo.isNegative() && (-OffsetLo).uge(RHS: V1Size.getValue()) &&
1397 OffsetHi.isNonNegative() && OffsetHi.uge(RHS: V2Size.getValue()))
1398 return AliasResult::NoAlias;
1399 }
1400
1401 if (constantOffsetHeuristic(GEP: DecompGEP1, V1Size, V2Size, AC: &AC, DT, AAQI))
1402 return AliasResult::NoAlias;
1403
1404 // Statically, we can see that the base objects are the same, but the
1405 // pointers have dynamic offsets which we can't resolve. And none of our
1406 // little tricks above worked.
1407 return AliasResult::MayAlias;
1408}
1409
1410static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1411 // If the results agree, take it.
1412 if (A == B)
1413 return A;
1414 // A mix of PartialAlias and MustAlias is PartialAlias.
1415 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1416 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1417 return AliasResult::PartialAlias;
1418 // Otherwise, we don't know anything.
1419 return AliasResult::MayAlias;
1420}
1421
1422/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1423/// against another.
1424AliasResult
1425BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1426 const Value *V2, LocationSize V2Size,
1427 AAQueryInfo &AAQI) {
1428 // If the values are Selects with the same condition, we can do a more precise
1429 // check: just check for aliases between the values on corresponding arms.
1430 if (const SelectInst *SI2 = dyn_cast<SelectInst>(Val: V2))
1431 if (isValueEqualInPotentialCycles(V1: SI->getCondition(), V2: SI2->getCondition(),
1432 AAQI)) {
1433 AliasResult Alias =
1434 AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize),
1435 LocB: MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1436 if (Alias == AliasResult::MayAlias)
1437 return AliasResult::MayAlias;
1438 AliasResult ThisAlias =
1439 AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize),
1440 LocB: MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1441 return MergeAliasResults(A: ThisAlias, B: Alias);
1442 }
1443
  // If both arms of the Select are NoAlias or MustAlias with V2, return
  // NoAlias / MustAlias accordingly. Otherwise, return MayAlias.
1446 AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(SI->getTrueValue(), SISize),
1447 LocB: MemoryLocation(V2, V2Size), AAQI);
1448 if (Alias == AliasResult::MayAlias)
1449 return AliasResult::MayAlias;
1450
1451 AliasResult ThisAlias =
1452 AAQI.AAR.alias(LocA: MemoryLocation(SI->getFalseValue(), SISize),
1453 LocB: MemoryLocation(V2, V2Size), AAQI);
1454 return MergeAliasResults(A: ThisAlias, B: Alias);
1455}
1456
1457/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1458/// another.
1459AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1460 const Value *V2, LocationSize V2Size,
1461 AAQueryInfo &AAQI) {
1462 if (!PN->getNumIncomingValues())
1463 return AliasResult::NoAlias;
1464 // If the values are PHIs in the same block, we can do a more precise
1465 // as well as efficient check: just check for aliases between the values
1466 // on corresponding edges. Don't do this if we are analyzing across
1467 // iterations, as we may pick a different phi entry in different iterations.
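// For example (illustrative IR):
//   %phi1 = phi ptr [ %a, %bb1 ], [ %b, %bb2 ]
//   %phi2 = phi ptr [ %x, %bb1 ], [ %y, %bb2 ]
// Within a single iteration %phi1/%phi2 are either %a/%x or %b/%y, so it
// suffices to check alias(%a, %x) and alias(%b, %y).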
1468 if (const PHINode *PN2 = dyn_cast<PHINode>(Val: V2))
1469 if (PN2->getParent() == PN->getParent() && !AAQI.MayBeCrossIteration) {
1470 std::optional<AliasResult> Alias;
1471 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1472 AliasResult ThisAlias = AAQI.AAR.alias(
1473 LocA: MemoryLocation(PN->getIncomingValue(i), PNSize),
1474 LocB: MemoryLocation(
1475 PN2->getIncomingValueForBlock(BB: PN->getIncomingBlock(i)), V2Size),
1476 AAQI);
1477 if (Alias)
1478 *Alias = MergeAliasResults(A: *Alias, B: ThisAlias);
1479 else
1480 Alias = ThisAlias;
1481 if (*Alias == AliasResult::MayAlias)
1482 break;
1483 }
1484 return *Alias;
1485 }
1486
1487 SmallVector<Value *, 4> V1Srcs;
1488 // If a phi operand recurses back to the phi, we can still determine NoAlias
1489 // if we don't alias the underlying objects of the other phi operands, as we
1490 // know that the recursive phi needs to be based on them in some way.
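// A typical recursive phi is a pointer induction variable, e.g.
// (illustrative IR):
//   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
//   %p.next = getelementptr i8, ptr %p, i64 4
// Here the underlying object of %p.next is the phi itself, so proving the
// other pointer NoAlias with %base (using a conservative location size, see
// below) is sufficient.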
1491 bool isRecursive = false;
1492 auto CheckForRecPhi = [&](Value *PV) {
1493 if (!EnableRecPhiAnalysis)
1494 return false;
1495 if (getUnderlyingObject(V: PV) == PN) {
1496 isRecursive = true;
1497 return true;
1498 }
1499 return false;
1500 };
1501
1502 SmallPtrSet<Value *, 4> UniqueSrc;
1503 Value *OnePhi = nullptr;
1504 for (Value *PV1 : PN->incoming_values()) {
1505 // Skip the phi itself being the incoming value.
1506 if (PV1 == PN)
1507 continue;
1508
1509 if (isa<PHINode>(Val: PV1)) {
1510 if (OnePhi && OnePhi != PV1) {
1511 // To control potential compile time explosion, we choose to be
1512 // conservative when we have more than one Phi input. It is important
1513 // that we handle the single phi case as that lets us handle LCSSA
1514 // phi nodes and (combined with the recursive phi handling) simple
1515 // pointer induction variable patterns.
1516 return AliasResult::MayAlias;
1517 }
1518 OnePhi = PV1;
1519 }
1520
1521 if (CheckForRecPhi(PV1))
1522 continue;
1523
1524 if (UniqueSrc.insert(Ptr: PV1).second)
1525 V1Srcs.push_back(Elt: PV1);
1526 }
1527
1528 if (OnePhi && UniqueSrc.size() > 1)
1529 // Out of an abundance of caution, allow only the trivial lcssa and
1530 // recursive phi cases.
1531 return AliasResult::MayAlias;
1532
1533 // If V1Srcs is empty then that means that the phi has no underlying non-phi
1534 // value. This should only be possible in blocks unreachable from the entry
1535 // block, but return MayAlias just in case.
1536 if (V1Srcs.empty())
1537 return AliasResult::MayAlias;
1538
1539 // If this PHI node is recursive, indicate that the pointer may be moved
1540 // across iterations. We can only prove NoAlias if different underlying
1541 // objects are involved.
1542 if (isRecursive)
1543 PNSize = LocationSize::beforeOrAfterPointer();
1544
1545 // In the recursive alias queries below, we may compare values from two
1546 // different loop iterations.
1547 SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);
1548
1549 AliasResult Alias = AAQI.AAR.alias(LocA: MemoryLocation(V1Srcs[0], PNSize),
1550 LocB: MemoryLocation(V2, V2Size), AAQI);
1551
1552 // Early exit if the check of the first PHI source against V2 is MayAlias.
1553 // Other results are not possible.
1554 if (Alias == AliasResult::MayAlias)
1555 return AliasResult::MayAlias;
1556 // With recursive phis we cannot guarantee that MustAlias/PartialAlias
1557 // remains valid for all elements, so we conservatively return MayAlias.
1558 if (isRecursive && Alias != AliasResult::NoAlias)
1559 return AliasResult::MayAlias;
1560
1561 // If all sources of the PHI node NoAlias or MustAlias V2, return
1562 // NoAlias / MustAlias respectively. Otherwise, return MayAlias.
1563 for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1564 Value *V = V1Srcs[i];
1565
1566 AliasResult ThisAlias = AAQI.AAR.alias(
1567 LocA: MemoryLocation(V, PNSize), LocB: MemoryLocation(V2, V2Size), AAQI);
1568 Alias = MergeAliasResults(A: ThisAlias, B: Alias);
1569 if (Alias == AliasResult::MayAlias)
1570 break;
1571 }
1572
1573 return Alias;
1574}
1575
1576 // Return true for an Argument or extractvalue(Argument). These are all known
1577 // not to alias identified function-local objects and can arise from coerced
1578 // function arguments.
1579static bool isArgumentOrArgumentLike(const Value *V) {
1580 if (isa<Argument>(Val: V))
1581 return true;
1582 auto *E = dyn_cast<ExtractValueInst>(Val: V);
1583 return E && isa<Argument>(Val: E->getOperand(i_nocapture: 0));
1584}
1585
1586/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1587/// array references.
1588AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1589 const Value *V2, LocationSize V2Size,
1590 AAQueryInfo &AAQI,
1591 const Instruction *CtxI) {
1592 // If either of the memory references is empty, it doesn't matter what the
1593 // pointer values are.
1594 if (V1Size.isZero() || V2Size.isZero())
1595 return AliasResult::NoAlias;
1596
1597 // Strip off any casts if they exist.
1598 V1 = V1->stripPointerCastsForAliasAnalysis();
1599 V2 = V2->stripPointerCastsForAliasAnalysis();
1600
1601 // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1602 // value for undef that aliases nothing in the program.
1603 if (isa<UndefValue>(Val: V1) || isa<UndefValue>(Val: V2))
1604 return AliasResult::NoAlias;
1605
1606 // Are we checking for alias of the same value?
1607 // Because we look 'through' phi nodes, we could look at "Value" pointers from
1608 // different iterations. We must therefore make sure that this is not the
1609 // case. The function isValueEqualInPotentialCycles ensures that this cannot
1610 // happen by checking that, when values may be compared across iterations,
1611 // the value is not defined inside a cycle.
1612 if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1613 return AliasResult::MustAlias;
1614
1615 // Figure out what objects these things are pointing to if we can.
1616 const Value *O1 = getUnderlyingObject(V: V1, MaxLookup: MaxLookupSearchDepth);
1617 const Value *O2 = getUnderlyingObject(V: V2, MaxLookup: MaxLookupSearchDepth);
1618
1619 // Null values in the default address space don't point to any object, so they
1620 // don't alias any other pointer.
1621 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O1))
1622 if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace()))
1623 return AliasResult::NoAlias;
1624 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(Val: O2))
1625 if (!NullPointerIsDefined(F: &F, AS: CPN->getType()->getAddressSpace()))
1626 return AliasResult::NoAlias;
1627
1628 if (O1 != O2) {
1629 // If V1/V2 point to two different objects, we know that we have no alias.
1630 if (isIdentifiedObject(V: O1) && isIdentifiedObject(V: O2))
1631 return AliasResult::NoAlias;
1632
1633 // Function arguments can't alias with things that are known to be
1634 // unambiguously identified at the function level.
1635 if ((isArgumentOrArgumentLike(V: O1) && isIdentifiedFunctionLocal(V: O2)) ||
1636 (isArgumentOrArgumentLike(V: O2) && isIdentifiedFunctionLocal(V: O1)))
1637 return AliasResult::NoAlias;
1638
1639 // If one pointer is the result of a call/invoke or load and the other is a
1640 // non-escaping local object within the same function, then we know the
1641 // object couldn't escape to a point where the call could return it.
1642 //
1643 // Note that if the pointers are in different functions, there are a
1644 // variety of complications. A call with a nocapture argument may still
1645 // temporarily store the nocapture argument's value in a temporary memory
1646 // location if that memory location doesn't escape. Or it may pass a
1647 // nocapture value to other functions as long as they don't capture it.
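// For illustration: if O2 is a local alloca that getCapturesBefore proves
// is not captured before (or at) the call or load that produces O1, then O1
// cannot be a pointer into O2's allocation, so the two cannot alias.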
1648 if (isEscapeSource(V: O1) &&
1649 capturesNothing(CC: AAQI.CA->getCapturesBefore(
1650 Object: O2, I: dyn_cast<Instruction>(Val: O1), /*OrAt*/ true)))
1651 return AliasResult::NoAlias;
1652 if (isEscapeSource(V: O2) &&
1653 capturesNothing(CC: AAQI.CA->getCapturesBefore(
1654 Object: O1, I: dyn_cast<Instruction>(Val: O2), /*OrAt*/ true)))
1655 return AliasResult::NoAlias;
1656 }
1657
1658 // If the size of one access is larger than the entire object on the other
1659 // side, then we know such behavior is undefined and can assume no alias.
1660 bool NullIsValidLocation = NullPointerIsDefined(F: &F);
1661 if ((isObjectSmallerThan(
1662 V: O2, Size: getMinimalExtentFrom(V: *V1, LocSize: V1Size, DL, NullIsValidLoc: NullIsValidLocation), DL,
1663 TLI, NullIsValidLoc: NullIsValidLocation)) ||
1664 (isObjectSmallerThan(
1665 V: O1, Size: getMinimalExtentFrom(V: *V2, LocSize: V2Size, DL, NullIsValidLoc: NullIsValidLocation), DL,
1666 TLI, NullIsValidLoc: NullIsValidLocation)))
1667 return AliasResult::NoAlias;
1668
1669 if (EnableSeparateStorageAnalysis) {
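// Look for llvm.assume calls that carry a "separate_storage" operand bundle
// naming the underlying objects, e.g. (illustrative IR):
//   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
// which asserts that %p and %q point into disjoint storage.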
1670 for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(V: O1)) {
1671 if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
1672 continue;
1673
1674 AssumeInst *Assume = cast<AssumeInst>(Val&: Elem);
1675 OperandBundleUse OBU = Assume->getOperandBundleAt(Index: Elem.Index);
1676 if (OBU.getTagName() == "separate_storage") {
1677 assert(OBU.Inputs.size() == 2);
1678 const Value *Hint1 = OBU.Inputs[0].get();
1679 const Value *Hint2 = OBU.Inputs[1].get();
1680 // This is often a no-op; instcombine rewrites this for us. No-op
1681 // getUnderlyingObject calls are fast, though.
1682 const Value *HintO1 = getUnderlyingObject(V: Hint1);
1683 const Value *HintO2 = getUnderlyingObject(V: Hint2);
1684
1685 DominatorTree *DT = getDT(AAQI);
1686 auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
1687 if (const Instruction *PtrI = dyn_cast<Instruction>(Val: Ptr)) {
1688 return isValidAssumeForContext(I: Assume, CxtI: PtrI, DT,
1689 /* AllowEphemerals */ true);
1690 }
1691 if (const Argument *PtrA = dyn_cast<Argument>(Val: Ptr)) {
1692 const Instruction *FirstI =
1693 &*PtrA->getParent()->getEntryBlock().begin();
1694 return isValidAssumeForContext(I: Assume, CxtI: FirstI, DT,
1695 /* AllowEphemerals */ true);
1696 }
1697 return false;
1698 };
1699
1700 if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1701 // Note that we go back to V1 and V2 for the
1702 // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
1703 // so strictly more assumptions are valid for them.
1704 if ((CtxI && isValidAssumeForContext(I: Assume, CxtI: CtxI, DT,
1705 /* AllowEphemerals */ true)) ||
1706 ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1707 return AliasResult::NoAlias;
1708 }
1709 }
1710 }
1711 }
1712 }
1713
1714 // If one of the accesses may be before the accessed pointer, canonicalize this
1715 // by using unknown after-pointer sizes for both accesses. This is
1716 // equivalent, because regardless of which pointer is lower, one of them
1717 // will always come after the other, as long as the underlying objects aren't
1718 // disjoint. We do this so that the rest of BasicAA does not have to deal
1719 // with accesses before the base pointer, and to improve cache utilization by
1720 // merging equivalent states.
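// For example, the recursive-phi handling in aliasPHI may have produced a
// beforeOrAfterPointer() size; after this step both sizes are afterPointer(),
// which the code below can treat uniformly.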
1721 if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1722 V1Size = LocationSize::afterPointer();
1723 V2Size = LocationSize::afterPointer();
1724 }
1725
1726 // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1727 // for recursive queries. For this reason, this limit is chosen to be large
1728 // enough to be very rarely hit, while still being small enough to avoid
1729 // stack overflows.
1730 if (AAQI.Depth >= 512)
1731 return AliasResult::MayAlias;
1732
1733 // Check the cache before climbing up use-def chains. This also terminates
1734 // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1735 // cache key, because a query that returns MustAlias or NoAlias under
1736 // MayBeCrossIteration==false may become MayAlias when MayBeCrossIteration==true.
1737 AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1738 {V2, V2Size, AAQI.MayBeCrossIteration});
1739 const bool Swapped = V1 > V2;
1740 if (Swapped)
1741 std::swap(a&: Locs.first, b&: Locs.second);
1742 const auto &Pair = AAQI.AliasCache.try_emplace(
1743 Key: Locs, Args: AAQueryInfo::CacheEntry{.Result: AliasResult::NoAlias, .NumAssumptionUses: 0});
1744 if (!Pair.second) {
1745 auto &Entry = Pair.first->second;
1746 if (!Entry.isDefinitive()) {
1747 // Remember that we used an assumption. This may either be a direct use
1748 // of an assumption, or a use of an entry that may itself be based on an
1749 // assumption.
1750 ++AAQI.NumAssumptionUses;
1751 if (Entry.isAssumption())
1752 ++Entry.NumAssumptionUses;
1753 }
1754 // Cache contains sorted {V1,V2} pairs but we should return original order.
1755 auto Result = Entry.Result;
1756 Result.swap(DoSwap: Swapped);
1757 return Result;
1758 }
1759
1760 int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1761 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1762 AliasResult Result =
1763 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1764
1765 auto It = AAQI.AliasCache.find(Val: Locs);
1766 assert(It != AAQI.AliasCache.end() && "Must be in cache");
1767 auto &Entry = It->second;
1768
1769 // Check whether a NoAlias assumption has been used, but disproven.
1770 bool AssumptionDisproven =
1771 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1772 if (AssumptionDisproven)
1773 Result = AliasResult::MayAlias;
1774
1775 // This is a definitive result now, when considered as a root query.
1776 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1777 Entry.Result = Result;
1778 // Cache contains sorted {V1,V2} pairs.
1779 Entry.Result.swap(DoSwap: Swapped);
1780
1781 // If the assumption has been disproven, remove any results that may have
1782 // been based on this assumption. Do this after the Entry updates above to
1783 // avoid iterator invalidation.
1784 if (AssumptionDisproven)
1785 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1786 AAQI.AliasCache.erase(Val: AAQI.AssumptionBasedResults.pop_back_val());
1787
1788 // The result may still be based on assumptions higher up in the chain.
1789 // Remember it, so it can be purged from the cache later.
1790 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1791 Result != AliasResult::MayAlias) {
1792 AAQI.AssumptionBasedResults.push_back(Elt: Locs);
1793 Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
1794 } else {
1795 Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1796 }
1797
1798 // Depth is incremented before this function is called, so Depth==1 indicates
1799 // a root query.
1800 if (AAQI.Depth == 1) {
1801 // Any remaining assumption based results must be based on proven
1802 // assumptions, so convert them to definitive results.
1803 for (const auto &Loc : AAQI.AssumptionBasedResults) {
1804 auto It = AAQI.AliasCache.find(Val: Loc);
1805 if (It != AAQI.AliasCache.end())
1806 It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1807 }
1808 AAQI.AssumptionBasedResults.clear();
1809 AAQI.NumAssumptionUses = 0;
1810 }
1811 return Result;
1812}
1813
1814AliasResult BasicAAResult::aliasCheckRecursive(
1815 const Value *V1, LocationSize V1Size,
1816 const Value *V2, LocationSize V2Size,
1817 AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1818 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(Val: V1)) {
1819 AliasResult Result = aliasGEP(GEP1: GV1, V1Size, V2, V2Size, UnderlyingV1: O1, UnderlyingV2: O2, AAQI);
1820 if (Result != AliasResult::MayAlias)
1821 return Result;
1822 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(Val: V2)) {
1823 AliasResult Result = aliasGEP(GEP1: GV2, V1Size: V2Size, V2: V1, V2Size: V1Size, UnderlyingV1: O2, UnderlyingV2: O1, AAQI);
1824 Result.swap();
1825 if (Result != AliasResult::MayAlias)
1826 return Result;
1827 }
1828
1829 if (const PHINode *PN = dyn_cast<PHINode>(Val: V1)) {
1830 AliasResult Result = aliasPHI(PN, PNSize: V1Size, V2, V2Size, AAQI);
1831 if (Result != AliasResult::MayAlias)
1832 return Result;
1833 } else if (const PHINode *PN = dyn_cast<PHINode>(Val: V2)) {
1834 AliasResult Result = aliasPHI(PN, PNSize: V2Size, V2: V1, V2Size: V1Size, AAQI);
1835 Result.swap();
1836 if (Result != AliasResult::MayAlias)
1837 return Result;
1838 }
1839
1840 if (const SelectInst *S1 = dyn_cast<SelectInst>(Val: V1)) {
1841 AliasResult Result = aliasSelect(SI: S1, SISize: V1Size, V2, V2Size, AAQI);
1842 if (Result != AliasResult::MayAlias)
1843 return Result;
1844 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(Val: V2)) {
1845 AliasResult Result = aliasSelect(SI: S2, SISize: V2Size, V2: V1, V2Size: V1Size, AAQI);
1846 Result.swap();
1847 if (Result != AliasResult::MayAlias)
1848 return Result;
1849 }
1850
1851 // If both pointers are pointing into the same object and one of them
1852 // accesses the entire object, then the accesses must overlap in some way.
1853 if (O1 == O2) {
1854 bool NullIsValidLocation = NullPointerIsDefined(F: &F);
1855 if (V1Size.isPrecise() && V2Size.isPrecise() &&
1856 (isObjectSize(V: O1, Size: V1Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation) ||
1857 isObjectSize(V: O2, Size: V2Size.getValue(), DL, TLI, NullIsValidLoc: NullIsValidLocation)))
1858 return AliasResult::PartialAlias;
1859 }
1860
1861 return AliasResult::MayAlias;
1862}
1863
1864AliasResult BasicAAResult::aliasErrno(const MemoryLocation &Loc,
1865 const Module *M) {
1866 // There cannot be any alias with errno if the given memory location is an
1867 // identified function-local object, or the size of the memory access is
1868 // larger than the integer size.
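// Note that Loc.Size is measured in bytes while TLI.getIntSize() returns the
// bit width of 'int' (the type of errno), hence the multiplication by 8.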
1869 if (Loc.Size.hasValue() &&
1870 Loc.Size.getValue().getKnownMinValue() * 8 > TLI.getIntSize())
1871 return AliasResult::NoAlias;
1872
1873 if (isIdentifiedFunctionLocal(V: getUnderlyingObject(V: Loc.Ptr)))
1874 return AliasResult::NoAlias;
1875 return AliasResult::MayAlias;
1876}
1877
1878/// Check whether two Values can be considered equivalent.
1879///
1880/// If the values may come from different cycle iterations, this will also
1881 /// check that the values are not part of a cycle. We have to do this because
1882 /// we are looking through phi nodes, that is, we say
1883/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
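/// For illustration: if V is defined inside a loop, the same Value may hold
/// different addresses in different iterations, so V == V2 alone is not enough
/// to conclude equality once MayBeCrossIteration is set.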
1884bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1885 const Value *V2,
1886 const AAQueryInfo &AAQI) {
1887 if (V != V2)
1888 return false;
1889
1890 if (!AAQI.MayBeCrossIteration)
1891 return true;
1892
1893 // Non-instructions and instructions in the entry block cannot be part of
1894 // a loop.
1895 const Instruction *Inst = dyn_cast<Instruction>(Val: V);
1896 if (!Inst || Inst->getParent()->isEntryBlock())
1897 return true;
1898
1899 return isNotInCycle(I: Inst, DT: getDT(AAQI), /*LI*/ nullptr);
1900}
1901
1902 /// Computes the symbolic difference between two decomposed GEPs.
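/// For illustration: subtracting { Offset = 4, 2*%i } from
/// { Offset = 16, 2*%i + 3*%j } yields { Offset = 12, 3*%j }: the matching
/// 2*%i terms cancel, while an index present only in SrcGEP would instead be
/// appended to DestGEP with IsNegated set.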
1903void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1904 const DecomposedGEP &SrcGEP,
1905 const AAQueryInfo &AAQI) {
1906 // Drop nuw flag from GEP if subtraction of constant offsets overflows in an
1907 // unsigned sense.
1908 if (DestGEP.Offset.ult(RHS: SrcGEP.Offset))
1909 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1910
1911 DestGEP.Offset -= SrcGEP.Offset;
1912 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1913 // Find V in Dest. This is N^2, but pointer indices almost never have more
1914 // than a few variable indexes.
1915 bool Found = false;
1916 for (auto I : enumerate(First&: DestGEP.VarIndices)) {
1917 VariableGEPIndex &Dest = I.value();
1918 if ((!isValueEqualInPotentialCycles(V: Dest.Val.V, V2: Src.Val.V, AAQI) &&
1919 !areBothVScale(V1: Dest.Val.V, V2: Src.Val.V)) ||
1920 !Dest.Val.hasSameCastsAs(Other: Src.Val))
1921 continue;
1922
1923 // Normalize IsNegated if we're going to lose the NSW flag anyway.
1924 if (Dest.IsNegated) {
1925 Dest.Scale = -Dest.Scale;
1926 Dest.IsNegated = false;
1927 Dest.IsNSW = false;
1928 }
1929
1930 // If we found it, subtract off Scale V's from the entry in Dest. If it
1931 // goes to zero, remove the entry.
1932 if (Dest.Scale != Src.Scale) {
1933 // Drop nuw flag from GEP if subtraction of V's Scale overflows in an
1934 // unsigned sense.
1935 if (Dest.Scale.ult(RHS: Src.Scale))
1936 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1937
1938 Dest.Scale -= Src.Scale;
1939 Dest.IsNSW = false;
1940 } else {
1941 DestGEP.VarIndices.erase(CI: DestGEP.VarIndices.begin() + I.index());
1942 }
1943 Found = true;
1944 break;
1945 }
1946
1947 // If we didn't consume this entry, add it to the end of the Dest list.
1948 if (!Found) {
1949 VariableGEPIndex Entry = {.Val: Src.Val, .Scale: Src.Scale, .CxtI: Src.CxtI, .IsNSW: Src.IsNSW,
1950 /* IsNegated */ true};
1951 DestGEP.VarIndices.push_back(Elt: Entry);
1952
1953 // Drop nuw flag when we have unconsumed variable indices from SrcGEP.
1954 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1955 }
1956 }
1957}
1958
1959bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1960 LocationSize MaybeV1Size,
1961 LocationSize MaybeV2Size,
1962 AssumptionCache *AC,
1963 DominatorTree *DT,
1964 const AAQueryInfo &AAQI) {
1965 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1966 !MaybeV2Size.hasValue())
1967 return false;
1968
1969 const uint64_t V1Size = MaybeV1Size.getValue();
1970 const uint64_t V2Size = MaybeV2Size.getValue();
1971
1972 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1973
1974 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Other: Var1.Val) ||
1975 !Var0.hasNegatedScaleOf(Other: Var1) ||
1976 Var0.Val.V->getType() != Var1.Val.V->getType())
1977 return false;
1978
1979 // We'll strip off the extensions of Var0 and Var1 and do another round
1980 // of GetLinearExpression decomposition. For example, if Var0 is
1981 // zext(%x + 1), we should get E0.Val == %x and E0.Offset == 1.
1982
1983 LinearExpression E0 =
1984 GetLinearExpression(Val: CastedValue(Var0.Val.V), DL, Depth: 0, AC, DT);
1985 LinearExpression E1 =
1986 GetLinearExpression(Val: CastedValue(Var1.Val.V), DL, Depth: 0, AC, DT);
1987 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(Other: E1.Val) ||
1988 !isValueEqualInPotentialCycles(V: E0.Val.V, V2: E1.Val.V, AAQI))
1989 return false;
1990
1991 // We have a hit - Var0 and Var1 only differ by a constant offset!
1992
1993 // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1994 // Var1 could be computed, but we're only interested in the absolute
1995 // minimum difference between the two. The minimum distance may occur due to
1996 // wrapping; consider "add i3 %i, 5": if %i == 7 then (7 + 5) mod 8 == 4, so
1997 // the minimum distance between %i and %i + 5 is 3.
1998 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1999 MinDiff = APIntOps::umin(A: MinDiff, B: Wrapped);
2000 APInt MinDiffBytes =
2001 MinDiff.zextOrTrunc(width: Var0.Scale.getBitWidth()) * Var0.Scale.abs();
2002
2003 // We can't say definitively whether GEP1 is before or after V2 due to wrapping
2004 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
2005 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
2006 // V2Size can fit in the MinDiffBytes gap.
2007 return MinDiffBytes.uge(RHS: V1Size + GEP.Offset.abs()) &&
2008 MinDiffBytes.uge(RHS: V2Size + GEP.Offset.abs());
2009}
2010
2011//===----------------------------------------------------------------------===//
2012// BasicAliasAnalysis Pass
2013//===----------------------------------------------------------------------===//
2014
2015AnalysisKey BasicAA::Key;
2016
2017BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
2018 auto &TLI = AM.getResult<TargetLibraryAnalysis>(IR&: F);
2019 auto &AC = AM.getResult<AssumptionAnalysis>(IR&: F);
2020 auto *DT = &AM.getResult<DominatorTreeAnalysis>(IR&: F);
2021 return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
2022}
2023
2024BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {}
2025
2026char BasicAAWrapperPass::ID = 0;
2027
2028void BasicAAWrapperPass::anchor() {}
2029
2030INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
2031 "Basic Alias Analysis (stateless AA impl)", true, true)
2032INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2033INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2034INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2035INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
2036 "Basic Alias Analysis (stateless AA impl)", true, true)
2037
2038FunctionPass *llvm::createBasicAAWrapperPass() {
2039 return new BasicAAWrapperPass();
2040}
2041
2042bool BasicAAWrapperPass::runOnFunction(Function &F) {
2043 auto &ACT = getAnalysis<AssumptionCacheTracker>();
2044 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
2045 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
2046
2047 Result.reset(p: new BasicAAResult(F.getDataLayout(), F,
2048 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
2049 &DTWP.getDomTree()));
2050
2051 return false;
2052}
2053
2054void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2055 AU.setPreservesAll();
2056 AU.addRequiredTransitive<AssumptionCacheTracker>();
2057 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2058 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
2059}
2060