1//==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the generic AliasAnalysis interface which is used as the
10// common interface used by all clients and implementations of alias analysis.
11//
12// This file also implements the default version of the AliasAnalysis interface
13// that is to be used when no other implementation is specified. This does some
14// simple tests that detect obvious cases: two different global pointers cannot
15// alias, a global cannot alias a malloc, two different mallocs cannot alias,
16// etc.
17//
18// This alias analysis implementation really isn't very good for anything, but
19// it is very fast, and makes a nice clean default implementation. Because it
20// handles lots of little corner cases, other, more complex, alias analysis
21// implementations may choose to rely on this pass to resolve these simple and
22// easy cases.
23//
24//===----------------------------------------------------------------------===//
25
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/ADT/Statistic.h"
28#include "llvm/Analysis/BasicAliasAnalysis.h"
29#include "llvm/Analysis/CaptureTracking.h"
30#include "llvm/Analysis/GlobalsModRef.h"
31#include "llvm/Analysis/MemoryLocation.h"
32#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
33#include "llvm/Analysis/ScopedNoAliasAA.h"
34#include "llvm/Analysis/TargetLibraryInfo.h"
35#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
36#include "llvm/Analysis/ValueTracking.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/Attributes.h"
39#include "llvm/IR/BasicBlock.h"
40#include "llvm/IR/Instruction.h"
41#include "llvm/IR/Instructions.h"
42#include "llvm/IR/Type.h"
43#include "llvm/IR/Value.h"
44#include "llvm/InitializePasses.h"
45#include "llvm/Pass.h"
46#include "llvm/Support/AtomicOrdering.h"
47#include "llvm/Support/Casting.h"
48#include "llvm/Support/CommandLine.h"
49#include <cassert>
50#include <functional>
51#include <iterator>
52
53#define DEBUG_TYPE "aa"
54
55using namespace llvm;
56
57STATISTIC(NumNoAlias, "Number of NoAlias results");
58STATISTIC(NumMayAlias, "Number of MayAlias results");
59STATISTIC(NumMustAlias, "Number of MustAlias results");
60
61/// Allow disabling BasicAA from the AA results. This is particularly useful
62/// when testing to isolate a single AA implementation.
63static cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden,
64 cl::init(Val: false));
65
66#ifndef NDEBUG
67/// Print a trace of alias analysis queries and their results.
68static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false));
69#else
70static const bool EnableAATrace = false;
71#endif
72
// Construct an empty aggregation over the given TargetLibraryInfo; concrete
// AA implementations are registered afterwards via addAAResult().
AAResults::AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}

// Move construction transfers the registered AAs and dependency keys; the
// TLI reference is shared with the source.
AAResults::AAResults(AAResults &&Arg)
    : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {}

AAResults::~AAResults() = default;
79
80bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
81 FunctionAnalysisManager::Invalidator &Inv) {
82 // AAResults preserves the AAManager by default, due to the stateless nature
83 // of AliasAnalysis. There is no need to check whether it has been preserved
84 // explicitly. Check if any module dependency was invalidated and caused the
85 // AAManager to be invalidated. Invalidate ourselves in that case.
86 auto PAC = PA.getChecker<AAManager>();
87 if (!PAC.preservedWhenStateless())
88 return true;
89
90 // Check if any of the function dependencies were invalidated, and invalidate
91 // ourselves in that case.
92 for (AnalysisKey *ID : AADeps)
93 if (Inv.invalidate(ID, IR&: F, PA))
94 return true;
95
96 // Everything we depend on is still fine, so are we. Nothing to invalidate.
97 return false;
98}
99
100//===----------------------------------------------------------------------===//
101// Default chaining methods
102//===----------------------------------------------------------------------===//
103
104AliasResult AAResults::alias(const MemoryLocation &LocA,
105 const MemoryLocation &LocB) {
106 SimpleAAQueryInfo AAQIP(*this);
107 return alias(LocA, LocB, AAQI&: AAQIP, CtxI: nullptr);
108}
109
110AliasResult AAResults::alias(const MemoryLocation &LocA,
111 const MemoryLocation &LocB, AAQueryInfo &AAQI,
112 const Instruction *CtxI) {
113 assert(LocA.Ptr->getType()->isPointerTy() &&
114 LocB.Ptr->getType()->isPointerTy() &&
115 "Can only call alias() on pointers");
116 AliasResult Result = AliasResult::MayAlias;
117
118 if (EnableAATrace) {
119 for (unsigned I = 0; I < AAQI.Depth; ++I)
120 dbgs() << " ";
121 dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
122 << *LocB.Ptr << " @ " << LocB.Size << "\n";
123 }
124
125 AAQI.Depth++;
126 for (const auto &AA : AAs) {
127 Result = AA->alias(LocA, LocB, AAQI, CtxI);
128 if (Result != AliasResult::MayAlias)
129 break;
130 }
131 AAQI.Depth--;
132
133 if (EnableAATrace) {
134 for (unsigned I = 0; I < AAQI.Depth; ++I)
135 dbgs() << " ";
136 dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
137 << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
138 }
139
140 if (AAQI.Depth == 0) {
141 if (Result == AliasResult::NoAlias)
142 ++NumNoAlias;
143 else if (Result == AliasResult::MustAlias)
144 ++NumMustAlias;
145 else
146 ++NumMayAlias;
147 }
148 return Result;
149}
150
151AliasResult AAResults::aliasErrno(const MemoryLocation &Loc, const Module *M) {
152 AliasResult Result = AliasResult::MayAlias;
153
154 for (const auto &AA : AAs) {
155 Result = AA->aliasErrno(Loc, M);
156 if (Result != AliasResult::MayAlias)
157 break;
158 }
159
160 return Result;
161}
162
163ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
164 bool IgnoreLocals) {
165 SimpleAAQueryInfo AAQIP(*this);
166 return getModRefInfoMask(Loc, AAQI&: AAQIP, IgnoreLocals);
167}
168
169ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
170 AAQueryInfo &AAQI, bool IgnoreLocals) {
171 ModRefInfo Result = ModRefInfo::ModRef;
172
173 for (const auto &AA : AAs) {
174 Result &= AA->getModRefInfoMask(Loc, AAQI, IgnoreLocals);
175
176 // Early-exit the moment we reach the bottom of the lattice.
177 if (isNoModRef(MRI: Result))
178 return ModRefInfo::NoModRef;
179 }
180
181 return Result;
182}
183
184ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
185 ModRefInfo Result = ModRefInfo::ModRef;
186
187 for (const auto &AA : AAs) {
188 Result &= AA->getArgModRefInfo(Call, ArgIdx);
189
190 // Early-exit the moment we reach the bottom of the lattice.
191 if (isNoModRef(MRI: Result))
192 return ModRefInfo::NoModRef;
193 }
194
195 return Result;
196}
197
198ModRefInfo AAResults::getModRefInfo(const Instruction *I,
199 const CallBase *Call2) {
200 SimpleAAQueryInfo AAQIP(*this);
201 return getModRefInfo(I, Call2, AAQIP);
202}
203
204ModRefInfo AAResults::getModRefInfo(const Instruction *I, const CallBase *Call2,
205 AAQueryInfo &AAQI) {
206 // We may have two calls.
207 if (const auto *Call1 = dyn_cast<CallBase>(Val: I)) {
208 // Check if the two calls modify the same memory.
209 return getModRefInfo(Call1, Call2, AAQI);
210 }
211 // If this is a fence, just return ModRef.
212 if (I->isFenceLike())
213 return ModRefInfo::ModRef;
214 // Otherwise, check if the call modifies or references the
215 // location this memory access defines. The best we can say
216 // is that if the call references what this instruction
217 // defines, it must be clobbered by this location.
218 const MemoryLocation DefLoc = MemoryLocation::get(Inst: I);
219 ModRefInfo MR = getModRefInfo(Call: Call2, Loc: DefLoc, AAQI);
220 if (isModOrRefSet(MRI: MR))
221 return ModRefInfo::ModRef;
222 return ModRefInfo::NoModRef;
223}
224
225ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
226 const MemoryLocation &Loc,
227 AAQueryInfo &AAQI) {
228 ModRefInfo Result = ModRefInfo::ModRef;
229
230 for (const auto &AA : AAs) {
231 Result &= AA->getModRefInfo(Call, Loc, AAQI);
232
233 // Early-exit the moment we reach the bottom of the lattice.
234 if (isNoModRef(MRI: Result))
235 return ModRefInfo::NoModRef;
236 }
237
238 // Apply the ModRef mask. This ensures that if Loc is a constant memory
239 // location, we take into account the fact that the call definitely could not
240 // modify the memory location.
241 if (!isNoModRef(MRI: Result))
242 Result &= getModRefInfoMask(Loc);
243
244 return Result;
245}
246
247ModRefInfo
248getModRefInfoInaccessibleAndTargetMemLoc(const MemoryEffects CallUse,
249 const MemoryEffects CallDef) {
250
251 ModRefInfo Result = ModRefInfo::NoModRef;
252 auto addModRefInfoForLoc = [&](IRMemLocation L) {
253 ModRefInfo UseMR = CallUse.getModRef(Loc: L);
254 if (UseMR == ModRefInfo::NoModRef)
255 return;
256 ModRefInfo DefMR = CallDef.getModRef(Loc: L);
257 if (DefMR == ModRefInfo::NoModRef)
258 return;
259 if (DefMR == ModRefInfo::Ref && DefMR == UseMR)
260 return;
261 Result |= UseMR;
262 };
263
264 addModRefInfoForLoc(IRMemLocation::InaccessibleMem);
265 for (auto Loc : MemoryEffects::targetMemLocations())
266 addModRefInfoForLoc(Loc);
267 return Result;
268}
269
270ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
271 const CallBase *Call2, AAQueryInfo &AAQI) {
272 ModRefInfo Result = ModRefInfo::ModRef;
273
274 for (const auto &AA : AAs) {
275 Result &= AA->getModRefInfo(Call1, Call2, AAQI);
276
277 // Early-exit the moment we reach the bottom of the lattice.
278 if (isNoModRef(MRI: Result))
279 return ModRefInfo::NoModRef;
280 }
281
282 // Try to refine the mod-ref info further using other API entry points to the
283 // aggregate set of AA results.
284
285 // If Call1 or Call2 are readnone, they don't interact.
286 auto Call1B = getMemoryEffects(Call: Call1, AAQI);
287 if (Call1B.doesNotAccessMemory())
288 return ModRefInfo::NoModRef;
289
290 auto Call2B = getMemoryEffects(Call: Call2, AAQI);
291 if (Call2B.doesNotAccessMemory())
292 return ModRefInfo::NoModRef;
293
294 // If they both only read from memory, there is no dependence.
295 if (Call1B.onlyReadsMemory() && Call2B.onlyReadsMemory())
296 return ModRefInfo::NoModRef;
297
298 // If Call1 only reads memory, the only dependence on Call2 can be
299 // from Call1 reading memory written by Call2.
300 if (Call1B.onlyReadsMemory())
301 Result &= ModRefInfo::Ref;
302 else if (Call1B.onlyWritesMemory())
303 Result &= ModRefInfo::Mod;
304
305 // If Call2 only access memory through arguments, accumulate the mod/ref
306 // information from Call1's references to the memory referenced by
307 // Call2's arguments.
308 if (Call2B.onlyAccessesArgPointees()) {
309 if (!Call2B.doesAccessArgPointees())
310 return ModRefInfo::NoModRef;
311 ModRefInfo R = ModRefInfo::NoModRef;
312 for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
313 const Value *Arg = *I;
314 if (!Arg->getType()->isPointerTy())
315 continue;
316 unsigned Call2ArgIdx = std::distance(first: Call2->arg_begin(), last: I);
317 auto Call2ArgLoc =
318 MemoryLocation::getForArgument(Call: Call2, ArgIdx: Call2ArgIdx, TLI);
319
320 // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
321 // dependence of Call1 on that location is the inverse:
322 // - If Call2 modifies location, dependence exists if Call1 reads or
323 // writes.
324 // - If Call2 only reads location, dependence exists if Call1 writes.
325 ModRefInfo ArgModRefC2 = getArgModRefInfo(Call: Call2, ArgIdx: Call2ArgIdx);
326 ModRefInfo ArgMask = ModRefInfo::NoModRef;
327 if (isModSet(MRI: ArgModRefC2))
328 ArgMask = ModRefInfo::ModRef;
329 else if (isRefSet(MRI: ArgModRefC2))
330 ArgMask = ModRefInfo::Mod;
331
332 // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use
333 // above ArgMask to update dependence info.
334 ArgMask &= getModRefInfo(Call: Call1, Loc: Call2ArgLoc, AAQI);
335
336 R = (R | ArgMask) & Result;
337 if (R == Result)
338 break;
339 }
340
341 return R;
342 }
343
344 // If Call1 only accesses memory through arguments, check if Call2 references
345 // any of the memory referenced by Call1's arguments. If not, return NoModRef.
346 if (Call1B.onlyAccessesArgPointees()) {
347 if (!Call1B.doesAccessArgPointees())
348 return ModRefInfo::NoModRef;
349 ModRefInfo R = ModRefInfo::NoModRef;
350 for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
351 const Value *Arg = *I;
352 if (!Arg->getType()->isPointerTy())
353 continue;
354 unsigned Call1ArgIdx = std::distance(first: Call1->arg_begin(), last: I);
355 auto Call1ArgLoc =
356 MemoryLocation::getForArgument(Call: Call1, ArgIdx: Call1ArgIdx, TLI);
357
358 // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
359 // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
360 // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
361 ModRefInfo ArgModRefC1 = getArgModRefInfo(Call: Call1, ArgIdx: Call1ArgIdx);
362 ModRefInfo ModRefC2 = getModRefInfo(Call: Call2, Loc: Call1ArgLoc, AAQI);
363 if ((isModSet(MRI: ArgModRefC1) && isModOrRefSet(MRI: ModRefC2)) ||
364 (isRefSet(MRI: ArgModRefC1) && isModSet(MRI: ModRefC2)))
365 R = (R | ArgModRefC1) & Result;
366
367 if (R == Result)
368 break;
369 }
370
371 return R;
372 }
373
374 // If only Inaccessible and Target Memory Location have set ModRefInfo
375 // then check the relation between the same locations.
376 if (Call1B.onlyAccessesInaccessibleOrTargetMem() &&
377 Call2B.onlyAccessesInaccessibleOrTargetMem())
378 return getModRefInfoInaccessibleAndTargetMemLoc(CallUse: Call1B, CallDef: Call2B);
379
380 return Result;
381}
382
383ModRefInfo AAResults::getModRefInfo(const Instruction *I1,
384 const Instruction *I2) {
385 SimpleAAQueryInfo AAQIP(*this);
386 return getModRefInfo(I1, I2, AAQI&: AAQIP);
387}
388
389ModRefInfo AAResults::getModRefInfo(const Instruction *I1,
390 const Instruction *I2, AAQueryInfo &AAQI) {
391 // Early-exit if either instruction does not read or write memory.
392 if (!I1->mayReadOrWriteMemory() || !I2->mayReadOrWriteMemory())
393 return ModRefInfo::NoModRef;
394
395 if (const auto *Call2 = dyn_cast<CallBase>(Val: I2))
396 return getModRefInfo(I: I1, Call2, AAQI);
397
398 // FIXME: We can have a more precise result.
399 ModRefInfo MR = getModRefInfo(I: I1, OptLoc: MemoryLocation::getOrNone(Inst: I2), AAQIP&: AAQI);
400 return isModOrRefSet(MRI: MR) ? ModRefInfo::ModRef : ModRefInfo::NoModRef;
401}
402
403MemoryEffects AAResults::getMemoryEffects(const CallBase *Call,
404 AAQueryInfo &AAQI) {
405 MemoryEffects Result = MemoryEffects::unknown();
406
407 for (const auto &AA : AAs) {
408 Result &= AA->getMemoryEffects(Call, AAQI);
409
410 // Early-exit the moment we reach the bottom of the lattice.
411 if (Result.doesNotAccessMemory())
412 return Result;
413 }
414
415 return Result;
416}
417
418MemoryEffects AAResults::getMemoryEffects(const CallBase *Call) {
419 SimpleAAQueryInfo AAQI(*this);
420 return getMemoryEffects(Call, AAQI);
421}
422
423MemoryEffects AAResults::getMemoryEffects(const Function *F) {
424 MemoryEffects Result = MemoryEffects::unknown();
425
426 for (const auto &AA : AAs) {
427 Result &= AA->getMemoryEffects(F);
428
429 // Early-exit the moment we reach the bottom of the lattice.
430 if (Result.doesNotAccessMemory())
431 return Result;
432 }
433
434 return Result;
435}
436
437raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
438 switch (AR) {
439 case AliasResult::NoAlias:
440 OS << "NoAlias";
441 break;
442 case AliasResult::MustAlias:
443 OS << "MustAlias";
444 break;
445 case AliasResult::MayAlias:
446 OS << "MayAlias";
447 break;
448 case AliasResult::PartialAlias:
449 OS << "PartialAlias";
450 if (AR.hasOffset())
451 OS << " (off " << AR.getOffset() << ")";
452 break;
453 }
454 return OS;
455}
456
457//===----------------------------------------------------------------------===//
458// Helper method implementation
459//===----------------------------------------------------------------------===//
460
461ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
462 const MemoryLocation &Loc,
463 AAQueryInfo &AAQI) {
464 // Be conservative in the face of atomic.
465 if (isStrongerThan(AO: L->getOrdering(), Other: AtomicOrdering::Unordered))
466 return ModRefInfo::ModRef;
467
468 // If the load address doesn't alias the given address, it doesn't read
469 // or write the specified memory.
470 if (Loc.Ptr) {
471 AliasResult AR = alias(LocA: MemoryLocation::get(LI: L), LocB: Loc, AAQI, CtxI: L);
472 if (AR == AliasResult::NoAlias)
473 return ModRefInfo::NoModRef;
474 }
475 // Otherwise, a load just reads.
476 return ModRefInfo::Ref;
477}
478
479ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
480 const MemoryLocation &Loc,
481 AAQueryInfo &AAQI) {
482 // Be conservative in the face of atomic.
483 if (isStrongerThan(AO: S->getOrdering(), Other: AtomicOrdering::Unordered))
484 return ModRefInfo::ModRef;
485
486 if (Loc.Ptr) {
487 AliasResult AR = alias(LocA: MemoryLocation::get(SI: S), LocB: Loc, AAQI, CtxI: S);
488 // If the store address cannot alias the pointer in question, then the
489 // specified memory cannot be modified by the store.
490 if (AR == AliasResult::NoAlias)
491 return ModRefInfo::NoModRef;
492
493 // Examine the ModRef mask. If Mod isn't present, then return NoModRef.
494 // This ensures that if Loc is a constant memory location, we take into
495 // account the fact that the store definitely could not modify the memory
496 // location.
497 if (!isModSet(MRI: getModRefInfoMask(Loc)))
498 return ModRefInfo::NoModRef;
499 }
500
501 // Otherwise, a store just writes.
502 return ModRefInfo::Mod;
503}
504
505ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
506 const MemoryLocation &Loc,
507 AAQueryInfo &AAQI) {
508 // All we know about a fence instruction is what we get from the ModRef
509 // mask: if Loc is a constant memory location, the fence definitely could
510 // not modify it.
511 if (Loc.Ptr)
512 return getModRefInfoMask(Loc);
513 return ModRefInfo::ModRef;
514}
515
516ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
517 const MemoryLocation &Loc,
518 AAQueryInfo &AAQI) {
519 if (Loc.Ptr) {
520 AliasResult AR = alias(LocA: MemoryLocation::get(VI: V), LocB: Loc, AAQI, CtxI: V);
521 // If the va_arg address cannot alias the pointer in question, then the
522 // specified memory cannot be accessed by the va_arg.
523 if (AR == AliasResult::NoAlias)
524 return ModRefInfo::NoModRef;
525
526 // If the pointer is a pointer to invariant memory, then it could not have
527 // been modified by this va_arg.
528 return getModRefInfoMask(Loc, AAQI);
529 }
530
531 // Otherwise, a va_arg reads and writes.
532 return ModRefInfo::ModRef;
533}
534
535ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
536 const MemoryLocation &Loc,
537 AAQueryInfo &AAQI) {
538 if (Loc.Ptr) {
539 // If the pointer is a pointer to invariant memory,
540 // then it could not have been modified by this catchpad.
541 return getModRefInfoMask(Loc, AAQI);
542 }
543
544 // Otherwise, a catchpad reads and writes.
545 return ModRefInfo::ModRef;
546}
547
548ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
549 const MemoryLocation &Loc,
550 AAQueryInfo &AAQI) {
551 if (Loc.Ptr) {
552 // If the pointer is a pointer to invariant memory,
553 // then it could not have been modified by this catchpad.
554 return getModRefInfoMask(Loc, AAQI);
555 }
556
557 // Otherwise, a catchret reads and writes.
558 return ModRefInfo::ModRef;
559}
560
561ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
562 const MemoryLocation &Loc,
563 AAQueryInfo &AAQI) {
564 // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
565 if (isStrongerThanMonotonic(AO: CX->getSuccessOrdering()))
566 return ModRefInfo::ModRef;
567
568 if (Loc.Ptr) {
569 AliasResult AR = alias(LocA: MemoryLocation::get(CXI: CX), LocB: Loc, AAQI, CtxI: CX);
570 // If the cmpxchg address does not alias the location, it does not access
571 // it.
572 if (AR == AliasResult::NoAlias)
573 return ModRefInfo::NoModRef;
574 }
575
576 return ModRefInfo::ModRef;
577}
578
579ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
580 const MemoryLocation &Loc,
581 AAQueryInfo &AAQI) {
582 // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
583 if (isStrongerThanMonotonic(AO: RMW->getOrdering()))
584 return ModRefInfo::ModRef;
585
586 if (Loc.Ptr) {
587 AliasResult AR = alias(LocA: MemoryLocation::get(RMWI: RMW), LocB: Loc, AAQI, CtxI: RMW);
588 // If the atomicrmw address does not alias the location, it does not access
589 // it.
590 if (AR == AliasResult::NoAlias)
591 return ModRefInfo::NoModRef;
592 }
593
594 return ModRefInfo::ModRef;
595}
596
597ModRefInfo AAResults::getModRefInfo(const Instruction *I,
598 const std::optional<MemoryLocation> &OptLoc,
599 AAQueryInfo &AAQIP) {
600 if (OptLoc == std::nullopt) {
601 if (const auto *Call = dyn_cast<CallBase>(Val: I))
602 return getMemoryEffects(Call, AAQI&: AAQIP).getModRef();
603 }
604
605 const MemoryLocation &Loc = OptLoc.value_or(u: MemoryLocation());
606
607 switch (I->getOpcode()) {
608 case Instruction::VAArg:
609 return getModRefInfo(V: (const VAArgInst *)I, Loc, AAQI&: AAQIP);
610 case Instruction::Load:
611 return getModRefInfo(L: (const LoadInst *)I, Loc, AAQI&: AAQIP);
612 case Instruction::Store:
613 return getModRefInfo(S: (const StoreInst *)I, Loc, AAQI&: AAQIP);
614 case Instruction::Fence:
615 return getModRefInfo(S: (const FenceInst *)I, Loc, AAQI&: AAQIP);
616 case Instruction::AtomicCmpXchg:
617 return getModRefInfo(CX: (const AtomicCmpXchgInst *)I, Loc, AAQI&: AAQIP);
618 case Instruction::AtomicRMW:
619 return getModRefInfo(RMW: (const AtomicRMWInst *)I, Loc, AAQI&: AAQIP);
620 case Instruction::Call:
621 case Instruction::CallBr:
622 case Instruction::Invoke:
623 return getModRefInfo(Call: (const CallBase *)I, Loc, AAQI&: AAQIP);
624 case Instruction::CatchPad:
625 return getModRefInfo(CatchPad: (const CatchPadInst *)I, Loc, AAQI&: AAQIP);
626 case Instruction::CatchRet:
627 return getModRefInfo(CatchRet: (const CatchReturnInst *)I, Loc, AAQI&: AAQIP);
628 default:
629 assert(!I->mayReadOrWriteMemory() &&
630 "Unhandled memory access instruction!");
631 return ModRefInfo::NoModRef;
632 }
633}
634
635/// Return information about whether a particular call site modifies
636/// or reads the specified memory location \p MemLoc before instruction \p I
637/// in a BasicBlock.
638/// FIXME: this is really just shoring-up a deficiency in alias analysis.
639/// BasicAA isn't willing to spend linear time determining whether an alloca
640/// was captured before or after this particular call, while we are. However,
641/// with a smarter AA in place, this test is just wasting compile time.
642ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
643 const MemoryLocation &MemLoc,
644 DominatorTree *DT,
645 AAQueryInfo &AAQI) {
646 if (!DT)
647 return ModRefInfo::ModRef;
648
649 const Value *Object = getUnderlyingObject(V: MemLoc.Ptr);
650 if (!isIdentifiedFunctionLocal(V: Object))
651 return ModRefInfo::ModRef;
652
653 const auto *Call = dyn_cast<CallBase>(Val: I);
654 if (!Call || Call == Object)
655 return ModRefInfo::ModRef;
656
657 if (capturesAnything(CC: PointerMayBeCapturedBefore(
658 V: Object, /* ReturnCaptures */ true, I, DT,
659 /* include Object */ IncludeI: true, Mask: CaptureComponents::Provenance)))
660 return ModRefInfo::ModRef;
661
662 unsigned ArgNo = 0;
663 ModRefInfo R = ModRefInfo::NoModRef;
664 // Set flag only if no May found and all operands processed.
665 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
666 CI != CE; ++CI, ++ArgNo) {
667 // Only look at the no-capture or byval pointer arguments. If this
668 // pointer were passed to arguments that were neither of these, then it
669 // couldn't be no-capture.
670 if (!(*CI)->getType()->isPointerTy())
671 continue;
672
673 // Make sure we still check captures(ret: address, provenance) and
674 // captures(address) arguments, as these wouldn't be treated as a capture
675 // at the call-site.
676 CaptureInfo Captures = Call->getCaptureInfo(OpNo: ArgNo);
677 if (capturesAnyProvenance(CC: Captures.getOtherComponents()))
678 continue;
679
680 AliasResult AR =
681 alias(LocA: MemoryLocation::getBeforeOrAfter(Ptr: *CI),
682 LocB: MemoryLocation::getBeforeOrAfter(Ptr: Object), AAQI, CtxI: Call);
683 // If this is a no-capture pointer argument, see if we can tell that it
684 // is impossible to alias the pointer we're checking. If not, we have to
685 // assume that the call could touch the pointer, even though it doesn't
686 // escape.
687 if (AR == AliasResult::NoAlias)
688 continue;
689 if (Call->doesNotAccessMemory(OpNo: ArgNo))
690 continue;
691 if (Call->onlyReadsMemory(OpNo: ArgNo)) {
692 R = ModRefInfo::Ref;
693 continue;
694 }
695 return ModRefInfo::ModRef;
696 }
697 return R;
698}
699
700/// canBasicBlockModify - Return true if it is possible for execution of the
701/// specified basic block to modify the location Loc.
702///
703bool AAResults::canBasicBlockModify(const BasicBlock &BB,
704 const MemoryLocation &Loc) {
705 return canInstructionRangeModRef(I1: BB.front(), I2: BB.back(), Loc, Mode: ModRefInfo::Mod);
706}
707
708/// canInstructionRangeModRef - Return true if it is possible for the
709/// execution of the specified instructions to mod\ref (according to the
710/// mode) the location Loc. The instructions to consider are all
711/// of the instructions in the range of [I1,I2] INCLUSIVE.
712/// I1 and I2 must be in the same basic block.
713bool AAResults::canInstructionRangeModRef(const Instruction &I1,
714 const Instruction &I2,
715 const MemoryLocation &Loc,
716 const ModRefInfo Mode) {
717 assert(I1.getParent() == I2.getParent() &&
718 "Instructions not in same basic block!");
719 BasicBlock::const_iterator I = I1.getIterator();
720 BasicBlock::const_iterator E = I2.getIterator();
721 ++E; // Convert from inclusive to exclusive range.
722
723 for (; I != E; ++I) // Check every instruction in range
724 if (isModOrRefSet(MRI: getModRefInfo(I: &*I, OptLoc: Loc) & Mode))
725 return true;
726 return false;
727}
728
// Provide a definition for the root virtual destructor.
AAResults::Concept::~Concept() = default;

// Provide a definition for the static object used to identify passes.
AnalysisKey AAManager::Key;

// Default-construct the wrapper with no callback installed.
ExternalAAWrapperPass::ExternalAAWrapperPass() : ImmutablePass(ID) {}

// Construct the wrapper with an external AA callback; RunEarly controls
// whether runOnFunction invokes it before or after the standard AAs.
ExternalAAWrapperPass::ExternalAAWrapperPass(CallbackT CB, bool RunEarly)
    : ImmutablePass(ID), CB(std::move(CB)), RunEarly(RunEarly) {}

char ExternalAAWrapperPass::ID = 0;

INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

// Factory used to inject a custom AA callback into the legacy pipeline.
ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}
749
// Legacy-PM function pass that owns and rebuilds the AAResults aggregation
// (see runOnFunction below).
AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {}

char AAResultsWrapperPass::ID = 0;

// Register the pass and declare the individual AA wrapper passes it may
// probe, so the legacy pass manager knows about the dependencies.
INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)
764
765/// Run the wrapper pass to rebuild an aggregation over known AA passes.
766///
767/// This is the legacy pass manager's interface to the new-style AA results
768/// aggregation object. Because this is somewhat shoe-horned into the legacy
769/// pass manager, we hard code all the specific alias analyses available into
770/// it. While the particular set enabled is configured via commandline flags,
771/// adding a new alias analysis to LLVM will require adding support for it to
772/// this list.
773bool AAResultsWrapperPass::runOnFunction(Function &F) {
774 // NB! This *must* be reset before adding new AA results to the new
775 // AAResults object because in the legacy pass manager, each instance
776 // of these will refer to the *same* immutable analyses, registering and
777 // unregistering themselves with them. We need to carefully tear down the
778 // previous object first, in this case replacing it with an empty one, before
779 // registering new results.
780 AAR.reset(
781 p: new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F)));
782
783 // Add any target-specific alias analyses that should be run early.
784 auto *ExtWrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>();
785 if (ExtWrapperPass && ExtWrapperPass->RunEarly && ExtWrapperPass->CB) {
786 LLVM_DEBUG(dbgs() << "AAResults register Early ExternalAA: "
787 << ExtWrapperPass->getPassName() << "\n");
788 ExtWrapperPass->CB(*this, F, *AAR);
789 }
790
791 // BasicAA is always available for function analyses. Also, we add it first
792 // so that it can trump TBAA results when it proves MustAlias.
793 // FIXME: TBAA should have an explicit mode to support this and then we
794 // should reconsider the ordering here.
795 if (!DisableBasicAA) {
796 LLVM_DEBUG(dbgs() << "AAResults register BasicAA\n");
797 AAR->addAAResult(AAResult&: getAnalysis<BasicAAWrapperPass>().getResult());
798 }
799
800 // Populate the results with the currently available AAs.
801 if (auto *WrapperPass =
802 getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>()) {
803 LLVM_DEBUG(dbgs() << "AAResults register ScopedNoAliasAA\n");
804 AAR->addAAResult(AAResult&: WrapperPass->getResult());
805 }
806 if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>()) {
807 LLVM_DEBUG(dbgs() << "AAResults register TypeBasedAA\n");
808 AAR->addAAResult(AAResult&: WrapperPass->getResult());
809 }
810 if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>()) {
811 LLVM_DEBUG(dbgs() << "AAResults register GlobalsAA\n");
812 AAR->addAAResult(AAResult&: WrapperPass->getResult());
813 }
814 if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>()) {
815 LLVM_DEBUG(dbgs() << "AAResults register SCEVAA\n");
816 AAR->addAAResult(AAResult&: WrapperPass->getResult());
817 }
818
819 // If available, run an external AA providing callback over the results as
820 // well.
821 if (ExtWrapperPass && !ExtWrapperPass->RunEarly && ExtWrapperPass->CB) {
822 LLVM_DEBUG(dbgs() << "AAResults register Late ExternalAA: "
823 << ExtWrapperPass->getPassName() << "\n");
824 ExtWrapperPass->CB(*this, F, *AAR);
825 }
826
827 // Analyses don't mutate the IR, so return false.
828 return false;
829}
830
void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  // Hard requirements: BasicAA is always aggregated, and TargetLibraryInfo
  // feeds library-call modeling inside AAResults.
  AU.addRequiredTransitive<BasicAAWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<ExternalAAWrapperPass>();
}
846
847AAManager::Result AAManager::run(Function &F, FunctionAnalysisManager &AM) {
848 Result R(AM.getResult<TargetLibraryAnalysis>(IR&: F));
849 for (auto &Getter : ResultGetters)
850 (*Getter)(F, AM, R);
851 return R;
852}
853
854bool llvm::isNoAliasCall(const Value *V) {
855 if (const auto *Call = dyn_cast<CallBase>(Val: V))
856 return Call->hasRetAttr(Kind: Attribute::NoAlias);
857 return false;
858}
859
860static bool isNoAliasOrByValArgument(const Value *V) {
861 if (const Argument *A = dyn_cast<Argument>(Val: V))
862 return A->hasNoAliasAttr() || A->hasByValAttr();
863 return false;
864}
865
866bool llvm::isIdentifiedObject(const Value *V) {
867 if (isa<AllocaInst>(Val: V))
868 return true;
869 if (isa<GlobalValue>(Val: V) && !isa<GlobalAlias>(Val: V))
870 return true;
871 if (isNoAliasCall(V))
872 return true;
873 if (isNoAliasOrByValArgument(V))
874 return true;
875 return false;
876}
877
878bool llvm::isIdentifiedFunctionLocal(const Value *V) {
879 return isa<AllocaInst>(Val: V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V);
880}
881
882bool llvm::isBaseOfObject(const Value *V) {
883 // TODO: We can handle other cases here
884 // 1) For GC languages, arguments to functions are often required to be
885 // base pointers.
886 // 2) Result of allocation routines are often base pointers. Leverage TLI.
887 return (isa<AllocaInst>(Val: V) || isa<GlobalVariable>(Val: V));
888}
889
890bool llvm::isEscapeSource(const Value *V) {
891 if (auto *CB = dyn_cast<CallBase>(Val: V)) {
892 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call: CB, MustPreserveNullness: true))
893 return false;
894
895 // The return value of a function with a captures(ret: address, provenance)
896 // attribute is not necessarily an escape source. The return value may
897 // alias with a non-escaping object.
898 return !CB->hasArgumentWithAdditionalReturnCaptureComponents();
899 }
900
901 // The load case works because isNotCapturedBefore considers all
902 // stores to be escapes (it passes true for the StoreCaptures argument
903 // to PointerMayBeCaptured).
904 if (isa<LoadInst>(Val: V))
905 return true;
906
907 // The inttoptr case works because isNotCapturedBefore considers all
908 // means of converting or equating a pointer to an int (ptrtoint, ptr store
909 // which could be followed by an integer load, ptr<->int compare) as
910 // escaping, and objects located at well-known addresses via platform-specific
911 // means cannot be considered non-escaping local objects.
912 if (isa<IntToPtrInst>(Val: V))
913 return true;
914
915 // Capture tracking considers insertions into aggregates and vectors as
916 // captures. As such, extractions from aggregates and vectors are escape
917 // sources.
918 if (isa<ExtractValueInst, ExtractElementInst>(Val: V))
919 return true;
920
921 // Same for inttoptr constant expressions.
922 if (auto *CE = dyn_cast<ConstantExpr>(Val: V))
923 if (CE->getOpcode() == Instruction::IntToPtr)
924 return true;
925
926 return false;
927}
928
929bool llvm::isNotVisibleOnUnwind(const Value *Object,
930 bool &RequiresNoCaptureBeforeUnwind) {
931 RequiresNoCaptureBeforeUnwind = false;
932
933 // Alloca goes out of scope on unwind.
934 if (isa<AllocaInst>(Val: Object))
935 return true;
936
937 // Byval goes out of scope on unwind.
938 if (auto *A = dyn_cast<Argument>(Val: Object))
939 return A->hasByValAttr() || A->hasAttribute(Kind: Attribute::DeadOnUnwind);
940
941 // A noalias return is not accessible from any other code. If the pointer
942 // does not escape prior to the unwind, then the caller cannot access the
943 // memory either.
944 if (isNoAliasCall(V: Object)) {
945 RequiresNoCaptureBeforeUnwind = true;
946 return true;
947 }
948
949 return false;
950}
951
952// We don't consider globals as writable: While the physical memory is writable,
953// we may not have provenance to perform the write.
954bool llvm::isWritableObject(const Value *Object,
955 bool &ExplicitlyDereferenceableOnly) {
956 ExplicitlyDereferenceableOnly = false;
957
958 // TODO: Alloca might not be writable after its lifetime ends.
959 // See https://github.com/llvm/llvm-project/issues/51838.
960 if (isa<AllocaInst>(Val: Object))
961 return true;
962
963 if (auto *A = dyn_cast<Argument>(Val: Object)) {
964 // Also require noalias, otherwise writability at function entry cannot be
965 // generalized to writability at other program points, even if the pointer
966 // does not escape.
967 if (A->hasAttribute(Kind: Attribute::Writable) && A->hasNoAliasAttr()) {
968 ExplicitlyDereferenceableOnly = true;
969 return true;
970 }
971
972 return A->hasByValAttr();
973 }
974
975 // TODO: Noalias shouldn't imply writability, this should check for an
976 // allocator function instead.
977 return isNoAliasCall(V: Object);
978}
979