//==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic AliasAnalysis interface, which is used as
// the common interface by all clients and implementations of alias analysis.
//
// This file also implements the default version of the AliasAnalysis interface
// that is to be used when no other implementation is specified. This does some
// simple tests that detect obvious cases: two different global pointers cannot
// alias, a global cannot alias a malloc, two different mallocs cannot alias,
// etc.
//
// This alias analysis implementation really isn't very good for anything, but
// it is very fast, and makes a nice clean default implementation. Because it
// handles lots of little corner cases, other, more complex, alias analysis
// implementations may choose to rely on this pass to resolve these simple and
// easy cases.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include <cassert>
#include <functional>
#include <iterator>

#define DEBUG_TYPE "aa"

using namespace llvm;

STATISTIC(NumNoAlias, "Number of NoAlias results");
STATISTIC(NumMayAlias, "Number of MayAlias results");
STATISTIC(NumMustAlias, "Number of MustAlias results");

/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
static cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden,
                                    cl::init(false));

#ifndef NDEBUG
/// Print a trace of alias analysis queries and their results.
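/// Each query prints an indented "Start" line on entry and a matching "End"
/// line with the result, nested by the recursive query depth (see
/// AAResults::alias below).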
static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false));
#else
static const bool EnableAATrace = false;
#endif

AAResults::AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}

AAResults::AAResults(AAResults &&Arg)
    : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {}

AAResults::~AAResults() {}

bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
                           FunctionAnalysisManager::Invalidator &Inv) {
  // AAResults preserves the AAManager by default, due to the stateless nature
  // of AliasAnalysis. There is no need to check whether it has been preserved
  // explicitly. Check if any module dependency was invalidated and caused the
  // AAManager to be invalidated. Invalidate ourselves in that case.
  auto PAC = PA.getChecker<AAManager>();
  if (!PAC.preservedWhenStateless())
    return true;

  // Check if any of the function dependencies were invalidated, and invalidate
  // ourselves in that case.
  for (AnalysisKey *ID : AADeps)
    if (Inv.invalidate(ID, F, PA))
      return true;

  // Everything we depend on is still fine, so are we. Nothing to invalidate.
  return false;
}

//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//
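//
// A hedged usage sketch (client code, not part of this file; the value names
// LoadA/LoadB are hypothetical): a transform typically queries the
// aggregation like
//
//   AAResults &AA = ...; // e.g. from AAManager or AAResultsWrapperPass
//   if (AA.alias(MemoryLocation::get(LoadA), MemoryLocation::get(LoadB)) ==
//       AliasResult::NoAlias)
//     ...; // the two loads provably access disjoint memory
//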

AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB) {
  SimpleAAQueryInfo AAQIP(*this);
  return alias(LocA, LocB, AAQIP, nullptr);
}

AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
                             const Instruction *CtxI) {
  assert(LocA.Ptr->getType()->isPointerTy() &&
         LocB.Ptr->getType()->isPointerTy() &&
         "Can only call alias() on pointers");
  AliasResult Result = AliasResult::MayAlias;

  if (EnableAATrace) {
    for (unsigned I = 0; I < AAQI.Depth; ++I)
      dbgs() << "  ";
    dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
           << *LocB.Ptr << " @ " << LocB.Size << "\n";
  }

  AAQI.Depth++;
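  // Delegate to each registered AA implementation in order; the first one to
  // return a definitive (non-MayAlias) answer short-circuits the chain.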
  for (const auto &AA : AAs) {
    Result = AA->alias(LocA, LocB, AAQI, CtxI);
    if (Result != AliasResult::MayAlias)
      break;
  }
  AAQI.Depth--;

  if (EnableAATrace) {
    for (unsigned I = 0; I < AAQI.Depth; ++I)
      dbgs() << "  ";
    dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
           << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
  }

  if (AAQI.Depth == 0) {
    if (Result == AliasResult::NoAlias)
      ++NumNoAlias;
    else if (Result == AliasResult::MustAlias)
      ++NumMustAlias;
    else
      ++NumMayAlias;
  }
  return Result;
}

ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
                                        bool IgnoreLocals) {
  SimpleAAQueryInfo AAQIP(*this);
  return getModRefInfoMask(Loc, AAQIP, IgnoreLocals);
}

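// ModRefInfo is a bitmask lattice: ModRef (both bits set) at the top and
// NoModRef (no bits set) at the bottom. Intersecting each AA's answer with
// "&=" can only clear bits, i.e. move the result down the lattice, so the
// loops below may stop as soon as the bottom is reached.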
ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI, bool IgnoreLocals) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfoMask(Loc, AAQI, IgnoreLocals);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  return Result;
}

ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getArgModRefInfo(Call, ArgIdx);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  return Result;
}

ModRefInfo AAResults::getModRefInfo(const Instruction *I,
                                    const CallBase *Call2) {
  SimpleAAQueryInfo AAQIP(*this);
  return getModRefInfo(I, Call2, AAQIP);
}

ModRefInfo AAResults::getModRefInfo(const Instruction *I, const CallBase *Call2,
                                    AAQueryInfo &AAQI) {
  // We may have two calls.
  if (const auto *Call1 = dyn_cast<CallBase>(I)) {
    // Check if the two calls modify the same memory.
    return getModRefInfo(Call1, Call2, AAQI);
  }
  // If this is a fence, just return ModRef.
  if (I->isFenceLike())
    return ModRefInfo::ModRef;
  // Otherwise, check if the call modifies or references the
  // location this memory access defines. The best we can say
  // is that if the call references what this instruction
  // defines, it must be clobbered by this location.
  const MemoryLocation DefLoc = MemoryLocation::get(I);
  ModRefInfo MR = getModRefInfo(Call2, DefLoc, AAQI);
  if (isModOrRefSet(MR))
    return ModRefInfo::ModRef;
  return ModRefInfo::NoModRef;
}

ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call, Loc, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Apply the ModRef mask. This ensures that if Loc is a constant memory
  // location, we take into account the fact that the call definitely could not
  // modify the memory location.
  if (!isNoModRef(Result))
    Result &= getModRefInfoMask(Loc);

  return Result;
}

ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
                                    const CallBase *Call2, AAQueryInfo &AAQI) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call1, Call2, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If Call1 or Call2 are readnone, they don't interact.
  auto Call1B = getMemoryEffects(Call1, AAQI);
  if (Call1B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  auto Call2B = getMemoryEffects(Call2, AAQI);
  if (Call2B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  // If they both only read from memory, there is no dependence.
  if (Call1B.onlyReadsMemory() && Call2B.onlyReadsMemory())
    return ModRefInfo::NoModRef;

  // If Call1 only reads memory, the only dependence on Call2 can be
  // from Call1 reading memory written by Call2.
  if (Call1B.onlyReadsMemory())
    Result &= ModRefInfo::Ref;
  else if (Call1B.onlyWritesMemory())
    Result &= ModRefInfo::Mod;

  // If Call2 only accesses memory through arguments, accumulate the mod/ref
  // information from Call1's references to the memory referenced by
  // Call2's arguments.
  if (Call2B.onlyAccessesArgPointees()) {
    if (!Call2B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
      auto Call2ArgLoc =
          MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);

      // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
      // dependence of Call1 on that location is the inverse:
      // - If Call2 modifies the location, a dependence exists if Call1 reads
      //   or writes.
      // - If Call2 only reads the location, a dependence exists if Call1
      //   writes.
      ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
      ModRefInfo ArgMask = ModRefInfo::NoModRef;
      if (isModSet(ArgModRefC2))
        ArgMask = ModRefInfo::ModRef;
      else if (isRefSet(ArgModRefC2))
        ArgMask = ModRefInfo::Mod;

      // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use the
      // ArgMask above to update the dependence info.
      ArgMask &= getModRefInfo(Call1, Call2ArgLoc, AAQI);

      R = (R | ArgMask) & Result;
      if (R == Result)
        break;
    }

    return R;
  }

  // If Call1 only accesses memory through arguments, check if Call2 references
  // any of the memory referenced by Call1's arguments. If not, return
  // NoModRef.
  if (Call1B.onlyAccessesArgPointees()) {
    if (!Call1B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
      auto Call1ArgLoc =
          MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);

      // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
      // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
      // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
      ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx);
      ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc, AAQI);
      if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) ||
          (isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
        R = (R | ArgModRefC1) & Result;

      if (R == Result)
        break;
    }

    return R;
  }

  return Result;
}

ModRefInfo AAResults::getModRefInfo(const Instruction *I1,
                                    const Instruction *I2) {
  SimpleAAQueryInfo AAQIP(*this);
  return getModRefInfo(I1, I2, AAQIP);
}

ModRefInfo AAResults::getModRefInfo(const Instruction *I1,
                                    const Instruction *I2, AAQueryInfo &AAQI) {
  // Early-exit if either instruction does not read or write memory.
  if (!I1->mayReadOrWriteMemory() || !I2->mayReadOrWriteMemory())
    return ModRefInfo::NoModRef;

  if (const auto *Call2 = dyn_cast<CallBase>(I2))
    return getModRefInfo(I1, Call2, AAQI);

  // FIXME: We can have a more precise result.
  ModRefInfo MR = getModRefInfo(I1, MemoryLocation::getOrNone(I2), AAQI);
  return isModOrRefSet(MR) ? ModRefInfo::ModRef : ModRefInfo::NoModRef;
}

MemoryEffects AAResults::getMemoryEffects(const CallBase *Call,
                                          AAQueryInfo &AAQI) {
  MemoryEffects Result = MemoryEffects::unknown();

  for (const auto &AA : AAs) {
    Result &= AA->getMemoryEffects(Call, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result.doesNotAccessMemory())
      return Result;
  }

  return Result;
}

MemoryEffects AAResults::getMemoryEffects(const CallBase *Call) {
  SimpleAAQueryInfo AAQI(*this);
  return getMemoryEffects(Call, AAQI);
}

MemoryEffects AAResults::getMemoryEffects(const Function *F) {
  MemoryEffects Result = MemoryEffects::unknown();

  for (const auto &AA : AAs) {
    Result &= AA->getMemoryEffects(F);

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result.doesNotAccessMemory())
      return Result;
  }

  return Result;
}

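// Print an AliasResult in human-readable form (used, for example, by the
// -aa-trace output above); a partial alias with a known 4-byte offset prints
// as "PartialAlias (off 4)".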
raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
  switch (AR) {
  case AliasResult::NoAlias:
    OS << "NoAlias";
    break;
  case AliasResult::MustAlias:
    OS << "MustAlias";
    break;
  case AliasResult::MayAlias:
    OS << "MayAlias";
    break;
  case AliasResult::PartialAlias:
    OS << "PartialAlias";
    if (AR.hasOffset())
      OS << " (off " << AR.getOffset() << ")";
    break;
  }
  return OS;
}

//===----------------------------------------------------------------------===//
// Helper method implementation
//===----------------------------------------------------------------------===//

ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Be conservative in the face of atomic.
  if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
    return ModRefInfo::ModRef;

  // If the load address doesn't alias the given address, it doesn't read
  // or write the specified memory.
  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI, L);
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }
  // Otherwise, a load just reads.
  return ModRefInfo::Ref;
}

ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Be conservative in the face of atomic.
  if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI, S);
    // If the store address cannot alias the pointer in question, then the
    // specified memory cannot be modified by the store.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;

    // Examine the ModRef mask. If Mod isn't present, then return NoModRef.
    // This ensures that if Loc is a constant memory location, we take into
    // account the fact that the store definitely could not modify the memory
    // location.
    if (!isModSet(getModRefInfoMask(Loc)))
      return ModRefInfo::NoModRef;
  }

  // Otherwise, a store just writes.
  return ModRefInfo::Mod;
}

ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // All we know about a fence instruction is what we get from the ModRef
  // mask: if Loc is a constant memory location, the fence definitely could
  // not modify it.
  if (Loc.Ptr)
    return getModRefInfoMask(Loc);
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI, V);
    // If the va_arg address cannot alias the pointer in question, then the
    // specified memory cannot be accessed by the va_arg.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;

    // If the pointer is a pointer to invariant memory, then it could not have
    // been modified by this va_arg.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a va_arg reads and writes.
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to invariant memory,
    // then it could not have been modified by this catchpad.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a catchpad reads and writes.
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to invariant memory,
    // then it could not have been modified by this catchret.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a catchret reads and writes.
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Acquire/Release cmpxchg has properties that matter for arbitrary
  // addresses.
  if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI, CX);
    // If the cmpxchg address does not alias the location, it does not access
    // it.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Acquire/Release atomicrmw has properties that matter for arbitrary
  // addresses.
  if (isStrongerThanMonotonic(RMW->getOrdering()))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI, RMW);
    // If the atomicrmw address does not alias the location, it does not access
    // it.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  return ModRefInfo::ModRef;
}

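// Dispatch on the instruction's opcode to the type-specific overloads above.
// The C-style casts are safe: each case is only reached when the opcode
// identifies exactly that instruction class.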
ModRefInfo AAResults::getModRefInfo(const Instruction *I,
                                    const std::optional<MemoryLocation> &OptLoc,
                                    AAQueryInfo &AAQIP) {
  if (OptLoc == std::nullopt) {
    if (const auto *Call = dyn_cast<CallBase>(I))
      return getMemoryEffects(Call, AAQIP).getModRef();
  }

  const MemoryLocation &Loc = OptLoc.value_or(MemoryLocation());

  switch (I->getOpcode()) {
  case Instruction::VAArg:
    return getModRefInfo((const VAArgInst *)I, Loc, AAQIP);
  case Instruction::Load:
    return getModRefInfo((const LoadInst *)I, Loc, AAQIP);
  case Instruction::Store:
    return getModRefInfo((const StoreInst *)I, Loc, AAQIP);
  case Instruction::Fence:
    return getModRefInfo((const FenceInst *)I, Loc, AAQIP);
  case Instruction::AtomicCmpXchg:
    return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);
  case Instruction::AtomicRMW:
    return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP);
  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke:
    return getModRefInfo((const CallBase *)I, Loc, AAQIP);
  case Instruction::CatchPad:
    return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP);
  case Instruction::CatchRet:
    return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP);
  default:
    assert(!I->mayReadOrWriteMemory() &&
           "Unhandled memory access instruction!");
    return ModRefInfo::NoModRef;
  }
}

/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         AAQueryInfo &AAQI) {
  if (!DT)
    return ModRefInfo::ModRef;

  const Value *Object = getUnderlyingObject(MemLoc.Ptr);
  if (!isIdentifiedFunctionLocal(Object))
    return ModRefInfo::ModRef;

  const auto *Call = dyn_cast<CallBase>(I);
  if (!Call || Call == Object)
    return ModRefInfo::ModRef;

  if (capturesAnything(PointerMayBeCapturedBefore(
          Object, /* ReturnCaptures */ true, I, DT,
          /* include Object */ true, CaptureComponents::Provenance)))
    return ModRefInfo::ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = ModRefInfo::NoModRef;
  // Only report Ref if no May result is found and all operands are processed.
  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments. If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy())
      continue;

    // Make sure we still check captures(ret: address, provenance) and
    // captures(address) arguments, as these wouldn't be treated as a capture
    // at the call-site.
    CaptureInfo Captures = Call->getCaptureInfo(ArgNo);
    if (capturesAnyProvenance(Captures.getOtherComponents()))
      continue;

    AliasResult AR =
        alias(MemoryLocation::getBeforeOrAfter(*CI),
              MemoryLocation::getBeforeOrAfter(Object), AAQI, Call);
    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking. If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (AR == AliasResult::NoAlias)
      continue;
    if (Call->doesNotAccessMemory(ArgNo))
      continue;
    if (Call->onlyReadsMemory(ArgNo)) {
      R = ModRefInfo::Ref;
      continue;
    }
    return ModRefInfo::ModRef;
  }
  return R;
}

/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc,
                                   ModRefInfo::Mod);
}

/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod/ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                          const Instruction &I2,
                                          const MemoryLocation &Loc,
                                          const ModRefInfo Mode) {
  assert(I1.getParent() == I2.getParent() &&
         "Instructions not in same basic block!");
  BasicBlock::const_iterator I = I1.getIterator();
  BasicBlock::const_iterator E = I2.getIterator();
  ++E; // Convert from inclusive to exclusive range.

  for (; I != E; ++I) // Check every instruction in range
    if (isModOrRefSet(getModRefInfo(&*I, Loc) & Mode))
      return true;
  return false;
}

// Provide a definition for the root virtual destructor.
AAResults::Concept::~Concept() = default;

// Provide a definition for the static object used to identify passes.
AnalysisKey AAManager::Key;

ExternalAAWrapperPass::ExternalAAWrapperPass() : ImmutablePass(ID) {}

ExternalAAWrapperPass::ExternalAAWrapperPass(CallbackT CB, bool RunEarly)
    : ImmutablePass(ID), CB(std::move(CB)), RunEarly(RunEarly) {}

char ExternalAAWrapperPass::ID = 0;

INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}

AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {}

char AAResultsWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)

/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F)));

  // Add any target-specific alias analyses that should be run early.
  auto *ExtWrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>();
  if (ExtWrapperPass && ExtWrapperPass->RunEarly && ExtWrapperPass->CB) {
    LLVM_DEBUG(dbgs() << "AAResults register Early ExternalAA: "
                      << ExtWrapperPass->getPassName() << "\n");
    ExtWrapperPass->CB(*this, F, *AAR);
  }

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA) {
    LLVM_DEBUG(dbgs() << "AAResults register BasicAA\n");
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());
  }

  // Populate the results with the currently available AAs.
  if (auto *WrapperPass =
          getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>()) {
    LLVM_DEBUG(dbgs() << "AAResults register ScopedNoAliasAA\n");
    AAR->addAAResult(WrapperPass->getResult());
  }
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>()) {
    LLVM_DEBUG(dbgs() << "AAResults register TypeBasedAA\n");
    AAR->addAAResult(WrapperPass->getResult());
  }
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>()) {
    LLVM_DEBUG(dbgs() << "AAResults register GlobalsAA\n");
    AAR->addAAResult(WrapperPass->getResult());
  }
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>()) {
    LLVM_DEBUG(dbgs() << "AAResults register SCEVAA\n");
    AAR->addAAResult(WrapperPass->getResult());
  }

  // If available, run an external AA providing callback over the results as
  // well.
  if (ExtWrapperPass && !ExtWrapperPass->RunEarly && ExtWrapperPass->CB) {
    LLVM_DEBUG(dbgs() << "AAResults register Late ExternalAA: "
                      << ExtWrapperPass->getPassName() << "\n");
    ExtWrapperPass->CB(*this, F, *AAR);
  }

  // Analyses don't mutate the IR, so return false.
  return false;
}

void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<BasicAAWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<ExternalAAWrapperPass>();
}

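// Build the aggregation for F: seed it with TLI, then invoke each registered
// result getter so it can append its AA result to R and record any analysis
// dependencies (getters are installed via AAManager's registration helpers,
// e.g. registerFunctionAnalysis<>).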
AAManager::Result AAManager::run(Function &F, FunctionAnalysisManager &AM) {
  Result R(AM.getResult<TargetLibraryAnalysis>(F));
  for (auto &Getter : ResultGetters)
    (*Getter)(F, AM, R);
  return R;
}

bool llvm::isNoAliasCall(const Value *V) {
  if (const auto *Call = dyn_cast<CallBase>(V))
    return Call->hasRetAttr(Attribute::NoAlias);
  return false;
}

static bool isNoAliasOrByValArgument(const Value *V) {
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr() || A->hasByValAttr();
  return false;
}

bool llvm::isIdentifiedObject(const Value *V) {
  if (isa<AllocaInst>(V))
    return true;
  if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
    return true;
  if (isNoAliasCall(V))
    return true;
  if (isNoAliasOrByValArgument(V))
    return true;
  return false;
}

bool llvm::isIdentifiedFunctionLocal(const Value *V) {
  return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V);
}

bool llvm::isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here:
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

bool llvm::isEscapeSource(const Value *V) {
  if (auto *CB = dyn_cast<CallBase>(V)) {
    if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CB, true))
      return false;

    // The return value of a function with a captures(ret: address, provenance)
    // attribute is not necessarily an escape source. The return value may
    // alias with a non-escaping object.
    return !CB->hasArgumentWithAdditionalReturnCaptureComponents();
  }

  // The load case works because isNotCapturedBefore considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNotCapturedBefore considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via
  // platform-specific means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  // Capture tracking considers insertions into aggregates and vectors as
  // captures. As such, extractions from aggregates and vectors are escape
  // sources.
  if (isa<ExtractValueInst, ExtractElementInst>(V))
    return true;

  // Same for inttoptr constant expressions.
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::IntToPtr)
      return true;

  return false;
}

bool llvm::isNotVisibleOnUnwind(const Value *Object,
                                bool &RequiresNoCaptureBeforeUnwind) {
  RequiresNoCaptureBeforeUnwind = false;

  // Alloca goes out of scope on unwind.
  if (isa<AllocaInst>(Object))
    return true;

  // Byval goes out of scope on unwind.
  if (auto *A = dyn_cast<Argument>(Object))
    return A->hasByValAttr() || A->hasAttribute(Attribute::DeadOnUnwind);

  // A noalias return is not accessible from any other code. If the pointer
  // does not escape prior to the unwind, then the caller cannot access the
  // memory either.
  if (isNoAliasCall(Object)) {
    RequiresNoCaptureBeforeUnwind = true;
    return true;
  }

  return false;
}

// We don't consider globals as writable: While the physical memory is
// writable, we may not have provenance to perform the write.
bool llvm::isWritableObject(const Value *Object,
                            bool &ExplicitlyDereferenceableOnly) {
  ExplicitlyDereferenceableOnly = false;

  // TODO: Alloca might not be writable after its lifetime ends.
  // See https://github.com/llvm/llvm-project/issues/51838.
  if (isa<AllocaInst>(Object))
    return true;

  if (auto *A = dyn_cast<Argument>(Object)) {
    // Also require noalias, otherwise writability at function entry cannot be
    // generalized to writability at other program points, even if the pointer
    // does not escape.
    if (A->hasAttribute(Attribute::Writable) && A->hasNoAliasAttr()) {
      ExplicitlyDereferenceableOnly = true;
      return true;
    }

    return A->hasByValAttr();
  }

  // TODO: Noalias shouldn't imply writability, this should check for an
  // allocator function instead.
  return isNoAliasCall(Object);
}