//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstdint>
#include <memory>

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <optional>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"
#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"

DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems not to induce compile-time overhead
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

static cl::opt<unsigned>
    MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
                           cl::Hidden,
                           cl::desc("Maximal number of callees specialized for "
                                    "a call base"),
                           cl::init(UINT32_MAX));

static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only be used in debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::CommaSeparated);
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the CallGraph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));

static cl::opt<bool> CloseWorldAssumption(
    "attributor-assume-closed-world", cl::Hidden,
    cl::desc("Should a closed world be assumed, or not. Default if not set."));

/// Logic operators for the change status enum class.
///
///{
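/// For example, (CHANGED | UNCHANGED) yields CHANGED while
/// (CHANGED & UNCHANGED) yields UNCHANGED.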
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}

bool AA::isGPU(const Module &M) {
  Triple T(M.getTargetTriple());
  return T.isGPU();
}

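/// Return true if \p I is known or assumed to be free of synchronization:
/// nosync call sites, non-convergent calls that do not access memory, nosync
/// intrinsics, and instructions that are neither volatile nor non-relaxed
/// atomics.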
bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;

    bool IsKnownNoSync;
    return AA::hasAssumedIRAttr<Attribute::NoSync>(
        A, &QueryingAA, IRPosition::callsite_function(*CB),
        DepClassTy::OPTIONAL, IsKnownNoSync);
  }

  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}

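/// Return true if \p V can be treated as a single runtime instance for the
/// purpose of analysis; see the AAInstanceInfo class comment for details.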
bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
}

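/// Determine the initial value of \p Obj, either the return value of its
/// allocation function or the initializer of the global variable, and fold a
/// load of type \p Ty at the (optional) range \p RangePtr from it.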
Constant *
AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
                          Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
                          const DataLayout &DL, AA::RangeTy *RangePtr) {
  if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
    return Init;
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV)
    return nullptr;

  bool UsedAssumedInformation = false;
  Constant *Initializer = nullptr;
  if (A.hasGlobalVariableSimplificationCallback(*GV)) {
    auto AssumedGV = A.getAssumedInitializerFromCallBack(
        *GV, &QueryingAA, UsedAssumedInformation);
    Initializer = *AssumedGV;
    if (!Initializer)
      return nullptr;
  } else {
    if (!GV->hasLocalLinkage()) {
      // Externally visible global that's either non-constant,
      // or a constant with an uncertain initializer.
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return nullptr;
    }

    // Globals with local linkage are always initialized.
    assert(!GV->hasLocalLinkage() || GV->hasInitializer());

    if (!Initializer)
      Initializer = GV->getInitializer();
  }

  if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
    int64_t StorageSize = DL.getTypeStoreSize(&Ty);
    if (StorageSize != RangePtr->Size)
      return nullptr;
    APInt Offset = APInt(64, RangePtr->Offset);
    return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
  }

  return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
}

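/// Return true if \p V may be used in \p Scope: constants are valid
/// everywhere, instructions and arguments only within their own function.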
bool AA::isValidInScope(const Value &V, const Function *Scope) {
  if (isa<Constant>(V))
    return true;
  if (auto *I = dyn_cast<Instruction>(&V))
    return I->getFunction() == Scope;
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  return false;
}

bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
                           InformationCache &InfoCache) {
  if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
    return true;
  const Function *Scope = nullptr;
  const Instruction *CtxI = VAC.getCtxI();
  if (CtxI)
    Scope = CtxI->getFunction();
  if (auto *A = dyn_cast<Argument>(VAC.getValue()))
    return A->getParent() == Scope;
  if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
    if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
        return DT->dominates(I, CtxI);
      // Local dominance check mostly for the old PM passes.
      if (CtxI && I->getParent() == CtxI->getParent())
        return llvm::any_of(
            make_range(I->getIterator(), I->getParent()->end()),
            [&](const Instruction &AfterI) { return &AfterI == CtxI; });
    }
  }
  return false;
}

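/// Best-effort conversion of \p V to type \p Ty via constant folding: poison
/// and undef are propagated, null values and pointer casts are handled, and
/// integer or floating point constants may be truncated. Returns nullptr if
/// no conversion is known.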
Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue() && !Ty.isPtrOrPtrVectorTy())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
    }
  }
  return nullptr;
}

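/// Combine two values in the simplified-value lattice, where std::nullopt
/// encodes "not yet determined" and nullptr encodes "unknown, too complex".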
std::optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
                                         const std::optional<Value *> &B,
                                         Type *Ty) {
  if (A == B)
    return A;
  if (!B)
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A)
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  if (isa_and_nonnull<UndefValue>(*A))
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}

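/// Collect potential copies of the memory value accessed through \p I: for
/// loads, the values potentially written to (or initially stored in) the
/// underlying objects; for stores, the instructions that potentially read the
/// stored value. Results are committed to \p PotentialCopies only if all
/// underlying objects and all interfering accesses could be analyzed.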
template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort we want to avoid spurious
  // dependences and potential copies in the provided container.
  SmallVector<const AAPointerInfo *> PIs;
  SmallSetVector<Value *, 8> NewCopies;
  SmallSetVector<Instruction *, 8> NewCopyOrigins;

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());

  auto Pred = [&](Value &Obj) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
    if (isa<UndefValue>(&Obj))
      return true;
    if (isa<ConstantPointerNull>(&Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
                                 AA::Interprocedural) == &Obj)
        return true;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
        !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
      if (!GV->hasLocalLinkage() &&
          !(GV->isConstant() && GV->hasInitializer())) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << Obj << "\n";);
        return false;
      }

    bool NullOnly = true;
    bool NullRequired = false;
    auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
                                        bool IsExact) {
      if (!V || *V == nullptr)
        NullOnly = false;
      else if (isa<UndefValue>(*V))
        /* No op */;
      else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
        NullRequired = !IsExact;
      else
        NullOnly = false;
    };

    auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
                                      Value &V) {
      Value *AdjV = AA::getWithType(V, *I.getType());
      if (!AdjV) {
        LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
                             "cannot be converted to read type: "
                          << *Acc.getRemoteInst() << " : " << *I.getType()
                          << "\n";);
      }
      return AdjV;
    };

    auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad) {
        if (Acc.isWrittenValueYetUndetermined())
          return true;
        if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
          return false;
        if (!Acc.isWrittenValueUnknown())
          if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
        if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
          if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
      }
      return false;
    };

    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
      if (OnlyExact && !IsExact && !NullOnly &&
          !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
                             "one, however found non-null one: "
                          << *Acc.getRemoteInst() << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
          if (!V)
            return false;
          NewCopies.insert(V);
          if (PotentialValueOrigins)
            NewCopyOrigins.insert(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
        if (!V)
          return false;
        NewCopies.insert(V);
        if (PotentialValueOrigins)
          NewCopyOrigins.insert(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.insert(Acc.getRemoteInst());
      }
      return true;
    };

    // If the value has been written to we don't need the initial value of the
    // object.
    bool HasBeenWrittenTo = false;

    AA::RangeTy Range;
    auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
                                         DepClassTy::NONE);
    if (!PI || !PI->forallInterferingAccesses(
                   A, QueryingAA, I,
                   /* FindInterferingWrites */ IsLoad,
                   /* FindInterferingReads */ !IsLoad, CheckAccess,
                   HasBeenWrittenTo, Range, SkipCB)) {
      LLVM_DEBUG(
          dbgs()
          << "Failed to verify all interfering accesses for underlying object: "
          << Obj << "\n");
      return false;
    }

    if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
      const DataLayout &DL = A.getDataLayout();
      Value *InitialValue = AA::getInitialValueForObj(
          A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
      if (!InitialValue) {
        LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
                             "underlying object, abort!\n");
        return false;
      }
      CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
                             "null or undef, abort!\n");
        return false;
      }

      NewCopies.insert(InitialValue);
      if (PotentialValueOrigins)
        NewCopyOrigins.insert(nullptr);
    }

    PIs.push_back(PI);

    return true;
  };

  const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
      QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
  if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Only if we were successful in collecting all potential copies do we record
  // dependences (on non-fixed AAPointerInfo AAs) and modify the given
  // PotentialCopies container.
  for (const auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert_range(NewCopies);
  if (PotentialValueOrigins)
    PotentialValueOrigins->insert_range(NewCopyOrigins);

  return true;
}

bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
      OnlyExact);
}

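/// Determine if \p IRP is assumed readonly, or readnone if \p RequireReadNone
/// is set, by first inspecting IR attributes and then falling back to the
/// AAMemoryLocation and AAMemoryBehavior AAs. \p IsKnown is set if the
/// property is known rather than merely assumed.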
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {
  if (RequireReadNone) {
    if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
            A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
            /* IgnoreSubsumingPositions */ true))
      return true;
  } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
                 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
                 /* IgnoreSubsumingPositions */ true))
    return true;

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto *MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA && MemLocAA->isAssumedReadNone()) {
      IsKnown = MemLocAA->isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto *MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA &&
      (MemBehaviorAA->isAssumedReadNone() ||
       (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
    IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
                              : MemBehaviorAA->isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}

bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}
bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}

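/// Worklist-based check whether \p ToI, or any point in \p ToFn if \p ToI is
/// null, is potentially reachable from \p FromI. This conservatively returns
/// true unless the intra- and inter-procedural reachability AAs, optionally
/// combined with a backwards walk over call sites guarded by \p GoBackwardsCB,
/// can rule reachability out.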
static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       const AA::InstExclusionSetTy *ExclusionSet,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
  DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
    dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
           << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
           << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
           << "]\n";
    if (ExclusionSet)
      for (auto *ES : *ExclusionSet)
        dbgs() << *ES << "\n";
  });

  // We know kernels (generally) cannot be called from within the module. Thus,
  // for reachability we would need to step back from a kernel which would allow
  // us to reach anything anyway. Even if a kernel is invoked from another
  // kernel, values like allocas and shared memory are not accessible. We
  // implicitly check for this situation to avoid costly lookups.
  if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
      !GoBackwardsCB(*FromI.getFunction()) && A.getInfoCache().isKernel(ToFn) &&
      A.getInfoCache().isKernel(*FromI.getFunction())) {
    LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
                         "module; success\n";);
    return false;
  }

  // If we can go arbitrarily backwards we will eventually reach an entry point
  // that can reach ToI. Only if a set of blocks through which we cannot go is
  // provided, or once we track internal functions not accessible from the
  // outside, it makes sense to perform backwards analysis in the absence of a
  // GoBackwardsCB.
  if (!GoBackwardsCB && !ExclusionSet) {
    LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                      << " is not checked backwards and does not have an "
                         "exclusion set, abort\n");
    return true;
  }

  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);

  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
    }

    bool Result = true;
    if (!ToFn.isDeclaration() && ToI) {
      const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      const Instruction &EntryI = ToFn.getEntryBlock().front();
      Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
                                        A, EntryI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << *ToI << " [ToFn]\n");
    }

    if (Result) {
      // The entry of ToFn can reach the instruction ToI; check if the current
      // instruction is known to reach ToFn.
      const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
          QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
      Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
                                        A, *CurFromI, ToFn, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << ToFn.getName() << " [FromFn]\n");
      if (Result)
        return true;
    }

    // TODO: Check assumed nounwind.
    const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    auto ReturnInstCB = [&](Instruction &Ret) {
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, Ret, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << Ret << " [Intra]\n");
      return !Result;
    };

    // Check if we can reach returns.
    bool UsedAssumedInformation = false;
    if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
                                  {Instruction::Ret}, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
      continue;
    }

    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                        << " is not checked backwards, abort\n");
      return true;
    }

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      if (isa<InvokeInst>(CB))
        return false;

      Instruction *Inst = CB->getNextNode();
      Worklist.push_back(Inst);
      return true;
    };

    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }
  return false;
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                    const AbstractAttribute &QueryingAA) {
  if (isa<UndefValue>(Obj))
    return true;
  if (isa<AllocaInst>(Obj)) {
    InformationCache &InfoCache = A.getInfoCache();
    if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      LLVM_DEBUG(
          dbgs() << "[AA] Object '" << Obj
                 << "' is thread local; stack objects are thread local.\n");
      return true;
    }
    bool IsKnownNoCapture;
    bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>(
        A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
        IsKnownNoCapture);
    LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
                      << (IsAssumedNoCapture ? "" : "not") << " thread local; "
                      << (IsAssumedNoCapture ? "non-" : "")
                      << "captured stack object.\n");
    return IsAssumedNoCapture;
  }
  if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
    if (GV->isConstant()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; constant global\n");
      return true;
    }
    if (GV->isThreadLocal()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; thread local global\n");
      return true;
    }
  }

  if (A.getInfoCache().targetIsGPU()) {
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Local) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU local memory\n");
      return true;
    }
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Constant) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU constant memory\n");
      return true;
    }
  }

  LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
  return false;
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                        const AbstractAttribute &QueryingAA) {
  if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;

  SmallSetVector<const Value *, 8> Ptrs;

  auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
    if (!Loc || !Loc->Ptr) {
      LLVM_DEBUG(
          dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
      return false;
    }
    Ptrs.insert(Loc->Ptr);
    return true;
  };

  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
    if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
      return true;
    if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
      if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
        return true;
  } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
    return true;

  return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
                                        ArrayRef<const Value *> Ptrs,
                                        const AbstractAttribute &QueryingAA,
                                        const Instruction *CtxI) {
  for (const Value *Ptr : Ptrs) {
    if (!Ptr) {
      LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
      return true;
    }

    auto Pred = [&](Value &Obj) {
      if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
        return true;
      LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
                        << "'; -> requires barrier\n");
      return false;
    };

    const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
        QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
    if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
      return true;
  }
  return false;
}

/// Return true if \p New is equal or worse than \p Old.
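/// For example, an existing dereferenceable(16) makes a new dereferenceable(8)
/// redundant.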
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}

/// Return true if the information provided by \p Attr was added to the
/// attribute set \p AttrSet. This is only the case if it was not already
/// present in \p AttrSet.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeSet AttrSet, bool ForceReplace,
                             AttrBuilder &AB) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (AttrSet.hasAttribute(Kind))
      return false;
    AB.addAttribute(Kind);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace)
        return false;
    }
    AB.addAttribute(Kind, Attr.getValueAsString());
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (!ForceReplace && Kind == Attribute::Memory) {
      MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
      if (ME == AttrSet.getMemoryEffects())
        return false;
      AB.addMemoryAttr(ME);
      return true;
    }
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
        return false;
    }
    AB.addAttribute(Attr);
    return true;
  }
  if (Attr.isConstantRangeAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (!ForceReplace && AttrSet.hasAttribute(Kind))
      return false;
    AB.addAttribute(Attr);
    return true;
  }

  llvm_unreachable("Expected enum, string, int, or constant range attribute!");
}

Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  std::optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg && *CBCandidateArg)
    return *CBCandidateArg;

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

Attributor::Attributor(SetVector<Function *> &Functions,
                       InformationCache &InfoCache,
                       AttributorConfig Configuration)
    : Allocator(InfoCache.Allocator), Functions(Functions),
      InfoCache(InfoCache), Configuration(Configuration) {
  if (!isClosedWorldModule())
    return;
  for (Function *Fn : Functions)
    if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
                            /*IgnoreCallbackUses=*/false,
                            /*IgnoreAssumeLikeCalls=*/true,
                            /*IgnoreLLVMUsed=*/true,
                            /*IgnoreARCAttachedCall=*/false,
                            /*IgnoreCastedDirectCall=*/true))
      InfoCache.IndirectlyCallableFunctions.push_back(Fn);
}

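/// Collect attributes of kind \p AK for \p IRP that are implied by
/// llvm.assume operand bundles, using the must-be-executed context explorer
/// to ensure the assumes hold at the position.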
bool Attributor::getAttrsFromAssumes(const IRPosition &IRP,
                                     Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs) {
  assert(IRP.getPositionKind() != IRPosition::IRP_INVALID &&
         "Did expect a valid position!");
  MustBeExecutedContextExplorer *Explorer =
      getInfoCache().getMustBeExecutedContextExplorer();
  if (!Explorer)
    return false;

  Value &AssociatedValue = IRP.getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use, if not we don't need to create
  // explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  auto EIt = Explorer->begin(IRP.getCtxI()),
       EEnd = Explorer->end(IRP.getCtxI());
  for (const auto &It : A2K)
    if (Explorer->findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}

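/// Apply \p CB to each attribute description in \p AttrDescs at the position
/// \p IRP, accumulating removals in an AttributeMask and additions in an
/// AttrBuilder. The rewritten attribute list is cached in AttrsMap rather
/// than written back to the IR immediately.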
template <typename DescTy>
ChangeStatus
Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
                          function_ref<bool(const DescTy &, AttributeSet,
                                            AttributeMask &, AttrBuilder &)>
                              CB) {
  if (AttrDescs.empty())
    return ChangeStatus::UNCHANGED;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_INVALID:
    return ChangeStatus::UNCHANGED;
  default:
    break;
  };

  AttributeList AL;
  Value *AttrListAnchor = IRP.getAttrListAnchor();
  auto It = AttrsMap.find(AttrListAnchor);
  if (It == AttrsMap.end())
    AL = IRP.getAttrList();
  else
    AL = It->getSecond();

  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AttrIdx = IRP.getAttrIdx();
  AttributeSet AS = AL.getAttributes(AttrIdx);
  AttributeMask AM;
  AttrBuilder AB(Ctx);

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  for (const DescTy &AttrDesc : AttrDescs)
    if (CB(AttrDesc, AS, AM, AB))
      HasChanged = ChangeStatus::CHANGED;

  if (HasChanged == ChangeStatus::UNCHANGED)
    return ChangeStatus::UNCHANGED;

  AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
  AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
  AttrsMap[AttrListAnchor] = AL;
  return ChangeStatus::CHANGED;
}

bool Attributor::hasAttr(const IRPosition &IRP,
                         ArrayRef<Attribute::AttrKind> AttrKinds,
                         bool IgnoreSubsumingPositions,
                         Attribute::AttrKind ImpliedAttributeKind) {
  bool Implied = false;
  bool HasAttr = false;
  auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind)) {
      Implied |= Kind != ImpliedAttributeKind;
      HasAttr = true;
    }
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
    if (HasAttr)
      break;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
    Implied = true;
  }
  if (!HasAttr) {
    Implied = true;
    SmallVector<Attribute> Attrs;
    for (Attribute::AttrKind AK : AttrKinds)
      if (getAttrsFromAssumes(IRP, AK, Attrs)) {
        HasAttr = true;
        break;
      }
  }

  // Check if we should manifest the implied attribute kind at the IRP.
  if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
    manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
                                       ImpliedAttributeKind)});
  return HasAttr;
}

void Attributor::getAttrs(const IRPosition &IRP,
                          ArrayRef<Attribute::AttrKind> AttrKinds,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions) {
  auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
                           AttributeSet AttrSet, AttributeMask &,
                           AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind))
      Attrs.push_back(AttrSet.getAttribute(Kind));
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  for (Attribute::AttrKind AK : AttrKinds)
    getAttrsFromAssumes(IRP, AK, Attrs);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<Attribute::AttrKind> AttrKinds) {
  auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) {
    if (!AttrSet.hasAttribute(Kind))
      return false;
    AM.addAttribute(Kind);
    return true;
  };
  return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<StringRef> Attrs) {
  auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) -> bool {
    if (!AttrSet.hasAttribute(Attr))
      return false;
    AM.addAttribute(Attr);
    return true;
  };

  return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
}

ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
                                       ArrayRef<Attribute> Attrs,
                                       bool ForceReplace) {
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &AB) {
    return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
  };
  return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
}

const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (auto *Callee =
              dyn_cast_if_present<Function>(CB->getCalledOperand())) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
           "Expected argument for an 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}

std::optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
                               bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
    std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
    if (!SimplifiedV)
      return std::nullopt;
    if (isa_and_nonnull<Constant>(*SimplifiedV))
      return cast<Constant>(*SimplifiedV);
    return nullptr;
  }
  if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
    return C;
  SmallVector<AA::ValueAndContext> Values;
  if (getAssumedSimplifiedValues(IRP, &AA, Values,
                                 AA::ValueScope::Interprocedural,
                                 UsedAssumedInformation)) {
    if (Values.empty())
      return std::nullopt;
    if (auto *C = dyn_cast_or_null<Constant>(
            AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
      return C;
  }
  return nullptr;
}

std::optional<Value *> Attributor::getAssumedSimplified(
    const IRPosition &IRP, const AbstractAttribute *AA,
    bool &UsedAssumedInformation, AA::ValueScope S) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP))
    return CB(IRP, AA, UsedAssumedInformation);

  SmallVector<AA::ValueAndContext> Values;
  if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
    return &IRP.getAssociatedValue();
  if (Values.empty())
    return std::nullopt;
  if (AA)
    if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
      return V;
  if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
      IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
    return nullptr;
  return &IRP.getAssociatedValue();
}

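/// Collect assumed simplified values for \p InitialIRP, consulting outside
/// simplification callbacks first and AAPotentialValues otherwise. If
/// \p RecurseForSelectAndPHI is set, select and PHI values are expanded via a
/// worklist instead of being reported directly.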
1460bool Attributor::getAssumedSimplifiedValues(
1461 const IRPosition &InitialIRP, const AbstractAttribute *AA,
1462 SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1463 bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
  SmallPtrSet<Value *, 8> Seen;
  SmallVector<IRPosition, 8> Worklist;
  Worklist.push_back(InitialIRP);
  while (!Worklist.empty()) {
    const IRPosition &IRP = Worklist.pop_back_val();

    // First check all callbacks provided by outside AAs. If any of them
    // returns a non-null value that is different from the associated value,
    // or std::nullopt, we assume it's simplified.
    int NV = Values.size();
    const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
    for (const auto &CB : SimplificationCBs) {
      std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
      if (!CBResult.has_value())
        continue;
      Value *V = *CBResult;
      if (!V)
        return false;
      if ((S & AA::ValueScope::Interprocedural) ||
          AA::isValidInScope(*V, IRP.getAnchorScope()))
        Values.push_back(AA::ValueAndContext{*V, nullptr});
      else
        return false;
    }
    if (SimplificationCBs.empty()) {
      // If no high-level/outside simplification occurred, use
      // AAPotentialValues.
      const auto *PotentialValuesAA =
          getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
      if (PotentialValuesAA &&
          PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
        UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
      } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
        Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
      } else {
        // TODO: We could visit all returns and add the operands.
        return false;
      }
    }

    if (!RecurseForSelectAndPHI)
      break;

    for (int I = NV, E = Values.size(); I < E; ++I) {
      Value *V = Values[I].getValue();
      if (!isa<PHINode>(V) && !isa<SelectInst>(V))
        continue;
      if (!Seen.insert(V).second)
        continue;
      // Move the last element to this slot.
      Values[I] = Values[E - 1];
      // Eliminate the last slot, adjust the indices.
      Values.pop_back();
      --E;
      --I;
      // Add a new value (select or phi) to the worklist.
      Worklist.push_back(IRPosition::value(*V));
    }
  }
  return true;
}

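// Map an argument of the callee to the corresponding operand at a concrete
// call site: std::nullopt, nullptr, and constants pass through unchanged, a
// matching argument is looked up as the simplified call site operand, and
// anything else cannot be translated.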
std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
    std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
    bool &UsedAssumedInformation) {
  if (!V)
    return V;
  if (*V == nullptr || isa<Constant>(*V))
    return V;
  if (auto *Arg = dyn_cast<Argument>(*V))
    if (CB.getCalledOperand() == Arg->getParent() &&
        CB.arg_size() > Arg->getArgNo())
      if (!Arg->hasPointeeInMemoryValueAttr())
        return getAssumedSimplified(
            IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
            UsedAssumedInformation, AA::Intraprocedural);
  return nullptr;
}

Attributor::~Attributor() {
  // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
  // thus we cannot delete them. We can, and want to, destruct them though.
  for (auto &It : AAMap) {
    AbstractAttribute *AA = It.getSecond();
    AA->~AbstractAttribute();
  }
}

bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  if (!Configuration.UseLiveness)
    return false;
  const IRPosition &IRP = AA.getIRPosition();
  if (!Functions.count(IRP.getAnchorScope()))
    return false;
  return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
                       CheckBBLivenessOnly, DepClass);
}

bool Attributor::isAssumedDead(const Use &U,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  if (!Configuration.UseLiveness)
    return false;
  Instruction *UserI = dyn_cast<Instruction>(U.getUser());
  if (!UserI)
    return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);

  if (auto *CB = dyn_cast<CallBase>(UserI)) {
    // For call site argument uses we can check if the argument is
    // unused/dead.
    if (CB->isArgOperand(&U)) {
      const IRPosition &CSArgPos =
          IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
      return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
                           UsedAssumedInformation, CheckBBLivenessOnly,
                           DepClass);
    }
  } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
    const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
    return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
    BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
    return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
    if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
      const IRPosition IRP = IRPosition::inst(*SI);
      const AAIsDead *IsDeadAA =
          getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
      if (IsDeadAA && IsDeadAA->isRemovableStore()) {
        if (QueryingAA)
          recordDependence(*IsDeadAA, *QueryingAA, DepClass);
        if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
          UsedAssumedInformation = true;
        return true;
      }
    }
  }

  return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
                       UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
}

bool Attributor::isAssumedDead(const Instruction &I,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass,
                               bool CheckForDeadStore) {
  if (!Configuration.UseLiveness)
    return false;
  const IRPosition::CallBaseContext *CBCtx =
      QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;

  if (ManifestAddedBlocks.contains(I.getParent()))
    return false;

  const Function &F = *I.getFunction();
  if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
    FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
                                              QueryingAA, DepClassTy::NONE);

  // Don't use recursive reasoning.
  if (!FnLivenessAA || QueryingAA == FnLivenessAA)
    return false;

  // If we have a context instruction and a liveness AA we use it.
  if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
                          : FnLivenessAA->isAssumedDead(&I)) {
    if (QueryingAA)
      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
    if (!FnLivenessAA->isKnownDead(&I))
      UsedAssumedInformation = true;
    return true;
  }

  if (CheckBBLivenessOnly)
    return false;

  const IRPosition IRP = IRPosition::inst(I, CBCtx);
  const AAIsDead *IsDeadAA =
      getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);

  // Don't use recursive reasoning.
  if (!IsDeadAA || QueryingAA == IsDeadAA)
    return false;

  if (IsDeadAA->isAssumedDead()) {
    if (QueryingAA)
      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA->isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
    if (QueryingAA)
      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA->isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  return false;
}

bool Attributor::isAssumedDead(const IRPosition &IRP,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  if (!Configuration.UseLiveness)
    return false;
  // Don't check liveness for constants, e.g. functions, used as (floating)
  // values since the context instruction and friends are meaningless here.
  if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
      isa<Constant>(IRP.getAssociatedValue())) {
    return false;
  }

  Instruction *CtxI = IRP.getCtxI();
  if (CtxI &&
      isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
                    /* CheckBBLivenessOnly */ true,
                    CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
    return true;

  if (CheckBBLivenessOnly)
    return false;

  // If we haven't succeeded we query the specific liveness info for the IRP.
  const AAIsDead *IsDeadAA;
  if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
    IsDeadAA = getOrCreateAAFor<AAIsDead>(
        IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
        QueryingAA, DepClassTy::NONE);
  else
    IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);

  // Don't use recursive reasoning.
  if (!IsDeadAA || QueryingAA == IsDeadAA)
    return false;

  if (IsDeadAA->isAssumedDead()) {
    if (QueryingAA)
      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA->isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  return false;
}

bool Attributor::isAssumedDead(const BasicBlock &BB,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               DepClassTy DepClass) {
  if (!Configuration.UseLiveness)
    return false;
  const Function &F = *BB.getParent();
  if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
    FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
                                              QueryingAA, DepClassTy::NONE);

  // Don't use recursive reasoning.
  if (!FnLivenessAA || QueryingAA == FnLivenessAA)
    return false;

  if (FnLivenessAA->isAssumedDead(&BB)) {
    if (QueryingAA)
      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
    return true;
  }

  return false;
}

bool Attributor::checkForAllCallees(
    function_ref<bool(ArrayRef<const Function *>)> Pred,
    const AbstractAttribute &QueryingAA, const CallBase &CB) {
  if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
    return Pred(Callee);

  const auto *CallEdgesAA = getAAFor<AACallEdges>(
      QueryingAA, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
  if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
    return false;

  const auto &Callees = CallEdgesAA->getOptimisticEdges();
  return Pred(Callees.getArrayRef());
}

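// Note: only uses whose user is a PHI or not an instruction are cached in the
// Visited set below; uses in other instructions may be enqueued more than
// once, e.g., as potential copies discovered through stores.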
bool canMarkAsVisited(const User *Usr) {
  return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
}

bool Attributor::checkForAllUses(
    function_ref<bool(const Use &, bool &)> Pred,
    const AbstractAttribute &QueryingAA, const Value &V,
    bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
    bool IgnoreDroppableUses,
    function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {

  // Check virtual uses first.
  for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
    if (!CB(*this, &QueryingAA))
      return false;

  if (isa<ConstantData>(V))
    return false;

  // Check the trivial case first as it catches void values.
  if (V.use_empty())
    return true;

  const IRPosition &IRP = QueryingAA.getIRPosition();
  SmallVector<const Use *, 16> Worklist;
  SmallPtrSet<const Use *, 16> Visited;

  auto AddUsers = [&](const Value &V, const Use *OldUse) {
    for (const Use &UU : V.uses()) {
      if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
        LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
                             "rejected by the equivalence call back: "
                          << *UU << "!\n");
        return false;
      }

      Worklist.push_back(&UU);
    }
    return true;
  };

  AddUsers(V, /* OldUse */ nullptr);

  LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
                    << " initial uses to check\n");

  const Function *ScopeFn = IRP.getAnchorScope();
  const auto *LivenessAA =
      ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
                                   DepClassTy::NONE)
              : nullptr;

  while (!Worklist.empty()) {
    const Use *U = Worklist.pop_back_val();
    if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
      continue;
    DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
      if (auto *Fn = dyn_cast<Function>(U->getUser()))
        dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
               << "\n";
      else
        dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
               << "\n";
    });
    bool UsedAssumedInformation = false;
    if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
                      CheckBBLivenessOnly, LivenessDepClass)) {
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] Droppable user, skip!\n");
      continue;
    }

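    // If the value is written to memory, follow the uses of all potential
    // copies (e.g., the loads that read it back) instead of the store itself,
    // provided every copy can be determined exactly.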
    if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
      if (&SI->getOperandUse(0) == U) {
        if (!Visited.insert(U).second)
          continue;
        SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentialCopiesOfStoredValue(
                *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
                /* OnlyExact */ true)) {
          DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                          dbgs()
                              << "[Attributor] Value is stored, continue with "
                              << PotentialCopies.size()
                              << " potential copies instead!\n");
          for (Value *PotentialCopy : PotentialCopies)
            if (!AddUsers(*PotentialCopy, U))
              return false;
          continue;
        }
      }
    }

    bool Follow = false;
    if (!Pred(*U, Follow))
      return false;
    if (!Follow)
      continue;

    User &Usr = *U->getUser();
    AddUsers(Usr, /* OldUse */ nullptr);
  }

  return true;
}

bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const AbstractAttribute &QueryingAA,
                                      bool RequireAllCallSites,
                                      bool &UsedAssumedInformation) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function has to have
  // internal linkage.
  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction) {
    LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
                      << "\n");
    return false;
  }

  return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
                              &QueryingAA, UsedAssumedInformation);
}

bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const Function &Fn,
                                      bool RequireAllCallSites,
                                      const AbstractAttribute *QueryingAA,
                                      bool &UsedAssumedInformation,
                                      bool CheckPotentiallyDead) {
  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << Fn.getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }
  // Check virtual uses first.
  for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
    if (!CB(*this, QueryingAA))
      return false;

  SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use &U = *Uses[u];
    DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
      if (auto *Fn = dyn_cast<Function>(U))
        dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
               << *U.getUser() << "\n";
      else
        dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
               << "\n";
    });
    if (!CheckPotentiallyDead &&
        isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
                      /* CheckBBLivenessOnly */ true)) {
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
      if (CE->isCast() && CE->getType()->isPointerTy()) {
        DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
          dbgs() << "[Attributor] Use, is constant cast expression, add "
                 << CE->getNumUses() << " uses of that expression instead!\n";
        });
        for (const Use &CEU : CE->uses())
          Uses.push_back(&CEU);
        continue;
      }
    }

    AbstractCallSite ACS(&U);
    if (!ACS) {
      LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
                        << " has non call site use " << *U.get() << " in "
                        << *U.getUser() << "\n");
      return false;
    }

    const Use *EffectiveUse =
        ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
    if (!ACS.isCallee(EffectiveUse)) {
      if (!RequireAllCallSites) {
        LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                          << " is not a call of " << Fn.getName()
                          << ", skip use\n");
        continue;
      }
      LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                        << " is an invalid use of " << Fn.getName() << "\n");
      return false;
    }

    // Make sure the arguments that can be matched between the call site and
    // the callee agree on their type. It is unlikely they do not, and it
    // doesn't make sense for all attributes to know/care about this.
    assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
    unsigned MinArgsParams =
        std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
    for (unsigned u = 0; u < MinArgsParams; ++u) {
      Value *CSArgOp = ACS.getCallArgOperand(u);
      if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
        LLVM_DEBUG(
            dbgs() << "[Attributor] Call site / callee argument type mismatch ["
                   << u << "@" << Fn.getName() << ": "
                   << *Fn.getArg(u)->getType() << " vs. "
                   << *ACS.getCallArgOperand(u)->getType() << "\n");
        return false;
      }
    }

    if (Pred(ACS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *ACS.getInstruction() << "\n");
    return false;
  }

  return true;
}

bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
  // TODO: Maintain a cache of Values that are on the pathway from an Argument
  // to an Instruction that would affect the liveness/return state etc.
  return EnableCallSiteSpecific;
}

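// The potential return values are gathered by simplifying the returned
// position; the predicate must then hold for every value the function may
// return.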
bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
                                           const AbstractAttribute &QueryingAA,
                                           AA::ValueScope S,
                                           bool RecurseForSelectAndPHI) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  bool UsedAssumedInformation = false;
  SmallVector<AA::ValueAndContext> Values;
  if (!getAssumedSimplifiedValues(
          IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
          UsedAssumedInformation, RecurseForSelectAndPHI))
    return false;

  return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
    return Pred(*VAC.getValue());
  });
}

static bool checkForAllInstructionsImpl(
    Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
    function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
    const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
    bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
    bool CheckPotentiallyDead = false) {
  for (unsigned Opcode : Opcodes) {
    // Check if we have instructions with this opcode at all first.
    auto *Insts = OpcodeInstMap.lookup(Opcode);
    if (!Insts)
      continue;

    for (Instruction *I : *Insts) {
      // Skip dead instructions.
      if (A && !CheckPotentiallyDead &&
          A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
                           UsedAssumedInformation, CheckBBLivenessOnly)) {
        DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                        dbgs() << "[Attributor] Instruction " << *I
                               << " is potentially dead, skip!\n";);
        continue;
      }

      if (!Pred(*I))
        return false;
    }
  }
  return true;
}

bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
                                         const Function *Fn,
                                         const AbstractAttribute *QueryingAA,
                                         ArrayRef<unsigned> Opcodes,
                                         bool &UsedAssumedInformation,
                                         bool CheckBBLivenessOnly,
                                         bool CheckPotentiallyDead) {
  // Since we need to provide instructions we have to have an exact definition.
  if (!Fn || Fn->isDeclaration())
    return false;

  const IRPosition &QueryIRP = IRPosition::function(*Fn);
  const auto *LivenessAA =
      CheckPotentiallyDead && QueryingAA
          ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
          : nullptr;

  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
  if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
                                   LivenessAA, Opcodes, UsedAssumedInformation,
                                   CheckBBLivenessOnly, CheckPotentiallyDead))
    return false;

  return true;
}

bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
                                         const AbstractAttribute &QueryingAA,
                                         ArrayRef<unsigned> Opcodes,
                                         bool &UsedAssumedInformation,
                                         bool CheckBBLivenessOnly,
                                         bool CheckPotentiallyDead) {
  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
                                 UsedAssumedInformation, CheckBBLivenessOnly,
                                 CheckPotentiallyDead);
}

bool Attributor::checkForAllReadWriteInstructions(
    function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
    bool &UsedAssumedInformation) {
  TimeTraceScope TS("checkForAllReadWriteInstructions");

  const Function *AssociatedFunction =
      QueryingAA.getIRPosition().getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto *LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);

  for (Instruction *I :
       InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
    // Skip dead instructions.
    if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
                      UsedAssumedInformation))
      continue;

    if (!Pred(*I))
      return false;
  }

  return true;
}

void Attributor::runTillFixpoint() {
  TimeTraceScope TimeScope("Attributor::runTillFixpoint");
  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
                    << DG.SyntheticRoot.Deps.size()
                    << " abstract attributes.\n");

  // Now that all abstract attributes are collected and initialized we start
  // the abstract analysis.

  unsigned IterationCounter = 1;
  unsigned MaxIterations =
      Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);

  SmallVector<AbstractAttribute *, 32> ChangedAAs;
  SetVector<AbstractAttribute *> Worklist, InvalidAAs;
  Worklist.insert_range(DG.SyntheticRoot);

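  // Fixpoint loop: update every AA on the worklist, then re-seed the worklist
  // with the changed AAs and everything depending on them, until either no AA
  // changes anymore or the iteration limit is reached.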
  do {
    // Remember the size to determine new attributes.
    size_t NumAAs = DG.SyntheticRoot.Deps.size();
    LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist size: " << Worklist.size() << "\n");

    // For invalid AAs we can fix dependent AAs that have a required dependence,
    // thereby folding long dependence chains in a single step without the need
    // to run updates.
    for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
      AbstractAttribute *InvalidAA = InvalidAAs[u];

      // Check the dependences to fast track invalidation.
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
                             << " has " << InvalidAA->Deps.size()
                             << " required & optional dependences\n");
      for (auto &DepIt : InvalidAA->Deps) {
        AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
        if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
          DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                          dbgs() << " - recompute: " << *DepAA);
          Worklist.insert(DepAA);
          continue;
        }
        DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
                                                << " - invalidate: " << *DepAA);
        DepAA->getState().indicatePessimisticFixpoint();
        assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
        if (!DepAA->getState().isValidState())
          InvalidAAs.insert(DepAA);
        else
          ChangedAAs.push_back(DepAA);
      }
      InvalidAA->Deps.clear();
    }

    // Add all abstract attributes that are potentially dependent on one that
    // changed to the work list.
    for (AbstractAttribute *ChangedAA : ChangedAAs) {
      for (auto &DepIt : ChangedAA->Deps)
        Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
      ChangedAA->Deps.clear();
    }

    LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist+Dependent size: " << Worklist.size()
                      << "\n");

    // Reset the changed and invalid set.
    ChangedAAs.clear();
    InvalidAAs.clear();

    // Update all abstract attributes in the work list and record the ones that
    // changed.
    for (AbstractAttribute *AA : Worklist) {
      const auto &AAState = AA->getState();
      if (!AAState.isAtFixpoint())
        if (updateAA(*AA) == ChangeStatus::CHANGED)
          ChangedAAs.push_back(AA);

      // Use the InvalidAAs vector to propagate invalid states fast transitively
      // without requiring updates.
      if (!AAState.isValidState())
        InvalidAAs.insert(AA);
    }

    // Add attributes to the changed set if they have been created in the last
    // iteration.
    ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
                      DG.SyntheticRoot.end());

    // Reset the work list and repopulate with the changed abstract attributes.
    // Note that dependent ones are added above.
    Worklist.clear();
    Worklist.insert_range(ChangedAAs);
    Worklist.insert_range(QueryAAsAwaitingUpdate);
    QueryAAsAwaitingUpdate.clear();

  } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));

  if (IterationCounter > MaxIterations && !Functions.empty()) {
    auto Remark = [&](OptimizationRemarkMissed ORM) {
      return ORM << "Attributor did not reach a fixpoint after "
                 << ore::NV("Iterations", MaxIterations) << " iterations.";
    };
    Function *F = Functions.front();
    emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
  }

  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
                    << IterationCounter << "/" << MaxIterations
                    << " iterations\n");

  // Reset abstract attributes not settled in a sound fixpoint by now. This
  // happens when we stopped the fixpoint iteration early. Note that only the
  // ones marked as "changed" *and* the ones transitively depending on them
  // need to be reverted to a pessimistic state. Others might not be in a
  // fixpoint state but we can use the optimistic results for them anyway.
  SmallPtrSet<AbstractAttribute *, 32> Visited;
  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
    AbstractAttribute *ChangedAA = ChangedAAs[u];
    if (!Visited.insert(ChangedAA).second)
      continue;

    AbstractState &State = ChangedAA->getState();
    if (!State.isAtFixpoint()) {
      State.indicatePessimisticFixpoint();

      NumAttributesTimedOut++;
    }

    for (auto &DepIt : ChangedAA->Deps)
      ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
    ChangedAA->Deps.clear();
  }

  LLVM_DEBUG({
    if (!Visited.empty())
      dbgs() << "\n[Attributor] Finalized " << Visited.size()
             << " abstract attributes.\n";
  });
}

void Attributor::registerForUpdate(AbstractAttribute &AA) {
  assert(AA.isQueryAA() &&
         "Non-query AAs should not be required to register for updates!");
  QueryAAsAwaitingUpdate.insert(&AA);
}

ChangeStatus Attributor::manifestAttributes() {
  TimeTraceScope TimeScope("Attributor::manifestAttributes");
  size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
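  // NumFinalAAs records how many AAs exist when manifestation starts; creating
  // additional AAs during this phase would be a bug and is diagnosed below.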

  unsigned NumManifested = 0;
  unsigned NumAtFixpoint = 0;
  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
  for (auto &DepAA : DG.SyntheticRoot.Deps) {
    AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
    AbstractState &State = AA->getState();

    // If there is not already a fixpoint reached, we can now take the
    // optimistic state. This is correct because we enforced a pessimistic one
    // on abstract attributes that were transitively dependent on a changed one
    // already above.
    if (!State.isAtFixpoint())
      State.indicateOptimisticFixpoint();

    // We must not manifest Attributes that use CallBase info.
    if (AA->hasCallBaseContext())
      continue;
    // If the state is invalid, we do not try to manifest it.
    if (!State.isValidState())
      continue;

    if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
      continue;

    // Skip dead code.
    bool UsedAssumedInformation = false;
    if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
                      /* CheckBBLivenessOnly */ true))
      continue;
    // Consult the manifest debug counter, which allows skipping the
    // manifestation of individual AAs.
    if (!DebugCounter::shouldExecute(ManifestDBGCounter))
      continue;
    // Manifest the state and record if we changed the IR.
    ChangeStatus LocalChange = AA->manifest(*this);
    if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
      AA->trackStatistics();
    LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
                      << "\n");

    ManifestChange = ManifestChange | LocalChange;

    NumAtFixpoint++;
    NumManifested += (LocalChange == ChangeStatus::CHANGED);
  }

  (void)NumManifested;
  (void)NumAtFixpoint;
  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
                    << " attributes while " << NumAtFixpoint
                    << " were in a valid fixpoint state\n");

  NumAttributesManifested += NumManifested;
  NumAttributesValidFixpoint += NumAtFixpoint;

  (void)NumFinalAAs;
  if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
    auto DepIt = DG.SyntheticRoot.Deps.begin();
    for (unsigned u = 0; u < NumFinalAAs; ++u)
      ++DepIt;
    for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
         ++u, ++DepIt) {
      errs() << "Unexpected abstract attribute: "
             << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
             << cast<AbstractAttribute>(DepIt->getPointer())
                    ->getIRPosition()
                    .getAssociatedValue()
             << "\n";
    }
    llvm_unreachable("Expected the final number of abstract attributes to "
                     "remain unchanged!");
  }

  for (auto &It : AttrsMap) {
    AttributeList &AL = It.getSecond();
    const IRPosition &IRP =
        isa<Function>(It.getFirst())
            ? IRPosition::function(*cast<Function>(It.getFirst()))
            : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
    IRP.setAttrList(AL);
  }

  return ManifestChange;
}

void Attributor::identifyDeadInternalFunctions() {
  // Early exit if we don't intend to delete functions.
  if (!Configuration.DeleteFns)
    return;

  // To avoid triggering an assertion in the lazy call graph we will not delete
  // any internal library functions. We should modify the assertion though and
  // allow internals to be deleted.
  const auto *TLI =
      isModulePass()
          ? nullptr
          : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
  LibFunc LF;

  // Identify dead internal functions and delete them. This happens outside
  // the other fixpoint analysis as we might treat potentially dead functions
  // as live to lower the number of iterations. If they happen to be dead, the
  // below fixpoint loop will identify and eliminate them.

  SmallVector<Function *, 8> InternalFns;
  for (Function *F : Functions)
    if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
      InternalFns.push_back(F);

  SmallPtrSet<Function *, 8> LiveInternalFns;
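  // Iterate to a fixpoint: a function is kept alive as soon as one of its
  // call sites sits in a function that is not known to be dead, and each newly
  // live function can in turn keep others alive.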
  bool FoundLiveInternal = true;
  while (FoundLiveInternal) {
    FoundLiveInternal = false;
    for (Function *&F : InternalFns) {
      if (!F)
        continue;

      bool UsedAssumedInformation = false;
      if (checkForAllCallSites(
              [&](AbstractCallSite ACS) {
                Function *Callee = ACS.getInstruction()->getFunction();
                return ToBeDeletedFunctions.count(Callee) ||
                       (Functions.count(Callee) && Callee->hasLocalLinkage() &&
                        !LiveInternalFns.count(Callee));
              },
              *F, true, nullptr, UsedAssumedInformation)) {
        continue;
      }

      LiveInternalFns.insert(F);
      F = nullptr;
      FoundLiveInternal = true;
    }
  }

  for (Function *F : InternalFns)
    if (F)
      ToBeDeletedFunctions.insert(F);
}

ChangeStatus Attributor::cleanupIR() {
  TimeTraceScope TimeScope("Attributor::cleanupIR");
  // Delete stuff at the end to avoid invalid references and to get a nice
  // order.
  LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
                    << ToBeDeletedFunctions.size() << " functions and "
                    << ToBeDeletedBlocks.size() << " blocks and "
                    << ToBeDeletedInsts.size() << " instructions and "
                    << ToBeChangedValues.size() << " values and "
                    << ToBeChangedUses.size() << " uses. To insert "
                    << ToBeChangedToUnreachableInsts.size()
                    << " unreachables.\n"
                    << "Preserve manifest added " << ManifestAddedBlocks.size()
                    << " blocks\n");

  SmallVector<WeakTrackingVH, 32> DeadInsts;
  SmallVector<Instruction *, 32> TerminatorsToFold;

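  // Replace a single use and do the necessary bookkeeping: forward NewV
  // through pending replacements, collect instructions that become trivially
  // dead, strip attributes invalidated by the replacement, and queue
  // terminators for folding.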
  auto ReplaceUse = [&](Use *U, Value *NewV) {
    Value *OldV = U->get();

    // If we plan to replace NewV we need to update it at this point.
    do {
      const auto &Entry = ToBeChangedValues.lookup(NewV);
      if (!get<0>(Entry))
        break;
      NewV = get<0>(Entry);
    } while (true);

    Instruction *I = dyn_cast<Instruction>(U->getUser());
    assert((!I || isRunOn(*I->getFunction())) &&
           "Cannot replace an instruction outside the current SCC!");

    // Do not replace uses in returns if the value is a must-tail call we will
    // not delete.
    if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
      if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
        if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
          return;
      // If we rewrite a return and the new value is not an argument, strip the
      // `returned` attribute as it is wrong now.
      if (!isa<Argument>(NewV))
        for (auto &Arg : RI->getFunction()->args())
          Arg.removeAttr(Attribute::Returned);
    }

    LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
                      << " instead of " << *OldV << "\n");
    U->set(NewV);

    if (Instruction *I = dyn_cast<Instruction>(OldV)) {
      CGModifiedFunctions.insert(I->getFunction());
      if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
          isInstructionTriviallyDead(I))
        DeadInsts.push_back(I);
    }
    if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
      auto *CB = cast<CallBase>(U->getUser());
      if (CB->isArgOperand(U)) {
        unsigned Idx = CB->getArgOperandNo(U);
        CB->removeParamAttr(Idx, Attribute::NoUndef);
        auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
        if (Callee && Callee->arg_size() > Idx)
          Callee->removeParamAttr(Idx, Attribute::NoUndef);
      }
    }
    if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
      Instruction *UserI = cast<Instruction>(U->getUser());
      if (isa<UndefValue>(NewV)) {
        ToBeChangedToUnreachableInsts.insert(UserI);
      } else {
        TerminatorsToFold.push_back(UserI);
      }
    }
  };

  for (auto &It : ToBeChangedUses) {
    Use *U = It.first;
    Value *NewV = It.second;
    ReplaceUse(U, NewV);
  }

  SmallVector<Use *, 4> Uses;
  for (auto &It : ToBeChangedValues) {
    Value *OldV = It.first;
    auto [NewV, Done] = It.second;
    Uses.clear();
    for (auto &U : OldV->uses())
      if (Done || !U.getUser()->isDroppable())
        Uses.push_back(&U);
    for (Use *U : Uses) {
      if (auto *I = dyn_cast<Instruction>(U->getUser()))
        if (!isRunOn(*I->getFunction()))
          continue;
      ReplaceUse(U, NewV);
    }
  }

  for (const auto &V : InvokeWithDeadSuccessor)
    if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
      assert(isRunOn(*II->getFunction()) &&
             "Cannot replace an invoke outside the current SCC!");
      bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
      bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
      bool Invoke2CallAllowed =
          !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
      assert((UnwindBBIsDead || NormalBBIsDead) &&
             "Invoke does not have dead successors!");
      BasicBlock *BB = II->getParent();
      BasicBlock *NormalDestBB = II->getNormalDest();
      if (UnwindBBIsDead) {
        Instruction *NormalNextIP = &NormalDestBB->front();
        if (Invoke2CallAllowed) {
          changeToCall(II);
          NormalNextIP = BB->getTerminator();
        }
        if (NormalBBIsDead)
          ToBeChangedToUnreachableInsts.insert(NormalNextIP);
      } else {
        assert(NormalBBIsDead && "Broken invariant!");
        if (!NormalDestBB->getUniquePredecessor())
          NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
        ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
      }
    }
  for (Instruction *I : TerminatorsToFold) {
    assert(isRunOn(*I->getFunction()) &&
           "Cannot replace a terminator outside the current SCC!");
    CGModifiedFunctions.insert(I->getFunction());
    ConstantFoldTerminator(I->getParent());
  }
  for (const auto &V : ToBeChangedToUnreachableInsts)
    if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
                        << "\n");
      assert(isRunOn(*I->getFunction()) &&
             "Cannot replace an instruction outside the current SCC!");
      CGModifiedFunctions.insert(I->getFunction());
      changeToUnreachable(I);
    }

  for (const auto &V : ToBeDeletedInsts) {
    if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
      assert((!isa<CallBase>(I) || isa<IntrinsicInst>(I) ||
              isRunOn(*I->getFunction())) &&
             "Cannot delete an instruction outside the current SCC!");
      I->dropDroppableUses();
      CGModifiedFunctions.insert(I->getFunction());
      if (!I->getType()->isVoidTy())
        I->replaceAllUsesWith(UndefValue::get(I->getType()));
      if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
        DeadInsts.push_back(I);
      else
        I->eraseFromParent();
    }
  }

  llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });

  LLVM_DEBUG({
    dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
    for (auto &I : DeadInsts)
      if (I)
        dbgs() << " - " << *I << "\n";
  });

  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);

  if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
    SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
    ToBeDeletedBBs.reserve(NumDeadBlocks);
    for (BasicBlock *BB : ToBeDeletedBlocks) {
      assert(isRunOn(*BB->getParent()) &&
             "Cannot delete a block outside the current SCC!");
      CGModifiedFunctions.insert(BB->getParent());
      // Do not delete BBs added during manifests of AAs.
      if (ManifestAddedBlocks.contains(BB))
        continue;
      ToBeDeletedBBs.push_back(BB);
    }
    // We actually do not delete the blocks but squash them into a single
    // unreachable; untangling branches that jump here is something we would
    // need to do in a more generic way.
    detachDeadBlocks(ToBeDeletedBBs, nullptr);
  }

  identifyDeadInternalFunctions();

  // Rewrite the functions as requested during manifest.
  ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);

  for (Function *Fn : CGModifiedFunctions)
    if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
      Configuration.CGUpdater.reanalyzeFunction(*Fn);

  for (Function *Fn : ToBeDeletedFunctions) {
    if (!Functions.count(Fn))
      continue;
    Configuration.CGUpdater.removeFunction(*Fn);
  }

  if (!ToBeChangedUses.empty())
    ManifestChange = ChangeStatus::CHANGED;

  if (!ToBeChangedToUnreachableInsts.empty())
    ManifestChange = ChangeStatus::CHANGED;

  if (!ToBeDeletedFunctions.empty())
    ManifestChange = ChangeStatus::CHANGED;

  if (!ToBeDeletedBlocks.empty())
    ManifestChange = ChangeStatus::CHANGED;

  if (!ToBeDeletedInsts.empty())
    ManifestChange = ChangeStatus::CHANGED;

  if (!InvokeWithDeadSuccessor.empty())
    ManifestChange = ChangeStatus::CHANGED;

  if (!DeadInsts.empty())
    ManifestChange = ChangeStatus::CHANGED;

  NumFnDeleted += ToBeDeletedFunctions.size();

  LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
                    << " functions after manifest.\n");

#ifdef EXPENSIVE_CHECKS
  for (Function *F : Functions) {
    if (ToBeDeletedFunctions.count(F))
      continue;
    assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
  }
#endif

  return ManifestChange;
}

ChangeStatus Attributor::run() {
  TimeTraceScope TimeScope("Attributor::run");
  AttributorCallGraph ACallGraph(*this);

  if (PrintCallGraph)
    ACallGraph.populateAll();

  Phase = AttributorPhase::UPDATE;
  runTillFixpoint();

  // Dump graphs on demand.
  if (DumpDepGraph)
    DG.dumpGraph();

  if (ViewDepGraph)
    DG.viewGraph();

  if (PrintDependencies)
    DG.print();

  Phase = AttributorPhase::MANIFEST;
  ChangeStatus ManifestChange = manifestAttributes();

  Phase = AttributorPhase::CLEANUP;
  ChangeStatus CleanupChange = cleanupIR();

  if (PrintCallGraph)
    ACallGraph.print();

  return ManifestChange | CleanupChange;
}

ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
  TimeTraceScope TimeScope("updateAA", [&]() {
    return AA.getName().str() +
           std::to_string(AA.getIRPosition().getPositionKind());
  });
  assert(Phase == AttributorPhase::UPDATE &&
         "We can update AA only in the update stage!");

  // Use a new dependence vector for this update.
  DependenceVector DV;
  DependenceStack.push_back(&DV);

  auto &AAState = AA.getState();
  ChangeStatus CS = ChangeStatus::UNCHANGED;
  bool UsedAssumedInformation = false;
  if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
                     /* CheckBBLivenessOnly */ true))
    CS = AA.update(*this);

  if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
    // If the AA did not rely on outside information but changed, we run it
    // again to see if it found a fixpoint. Most AAs do but we don't require
    // them to. Hence, it might take the AA multiple iterations to get to a
    // fixpoint even if it does not rely on outside information, which is fine.
    ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
    if (CS == ChangeStatus::CHANGED)
      RerunCS = AA.update(*this);

    // If the attribute did not change during the run or rerun, and it still
    // did not query any non-fix information, the state will not change and we
    // can indicate that right at this point.
    if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
      AAState.indicateOptimisticFixpoint();
  }

  if (!AAState.isAtFixpoint())
    rememberDependences();

  // Verify the stack was used properly, that is, we pop the dependence vector
  // we put there earlier.
  DependenceVector *PoppedDV = DependenceStack.pop_back_val();
  (void)PoppedDV;
  assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");

  return CS;
}

void Attributor::createShallowWrapper(Function &F) {
  assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
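  // The wrapper takes over F's name, linkage, comdat, metadata, and
  // attributes; F itself becomes anonymous and internal, and the wrapper
  // simply tail-calls it with its own arguments.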

  Module &M = *F.getParent();
  LLVMContext &Ctx = M.getContext();
  FunctionType *FnTy = F.getFunctionType();

  Function *Wrapper =
      Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
  F.setName(""); // Make the inner function anonymous.
  M.getFunctionList().insert(F.getIterator(), Wrapper);

  F.setLinkage(GlobalValue::InternalLinkage);

  F.replaceAllUsesWith(Wrapper);
  assert(F.use_empty() && "Uses remained after wrapper was created!");

  // Move the COMDAT section to the wrapper.
  // TODO: Check if we need to keep it for F as well.
  Wrapper->setComdat(F.getComdat());
  F.setComdat(nullptr);

  // Copy all metadata and attributes but keep them on F as well.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  F.getAllMetadata(MDs);
  for (auto MDIt : MDs)
    Wrapper->addMetadata(MDIt.first, *MDIt.second);
  Wrapper->setAttributes(F.getAttributes());

  // Create the call in the wrapper.
  BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);

  SmallVector<Value *, 8> Args;
  Argument *FArgIt = F.arg_begin();
  for (Argument &Arg : Wrapper->args()) {
    Args.push_back(&Arg);
    Arg.setName((FArgIt++)->getName());
  }

  CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
  CI->setTailCall(true);
  CI->addFnAttr(Attribute::NoInline);
  ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);

  NumFnShallowWrappersCreated++;
}

bool Attributor::isInternalizable(Function &F) {
  if (F.isDeclaration() || F.hasLocalLinkage() ||
      GlobalValue::isInterposableLinkage(F.getLinkage()))
    return false;
  return true;
}

Function *Attributor::internalizeFunction(Function &F, bool Force) {
  if (!AllowDeepWrapper && !Force)
    return nullptr;
  if (!isInternalizable(F))
    return nullptr;

  SmallPtrSet<Function *, 2> FnSet = {&F};
  DenseMap<Function *, Function *> InternalizedFns;
  internalizeFunctions(FnSet, InternalizedFns);

  return InternalizedFns[&F];
}

bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
                                      DenseMap<Function *, Function *> &FnMap) {
  for (Function *F : FnSet)
    if (!Attributor::isInternalizable(*F))
      return false;

  FnMap.clear();
  // Generate the internalized version of each function.
  for (Function *F : FnSet) {
    Module &M = *F->getParent();
    FunctionType *FnTy = F->getFunctionType();

    // Create a copy of the current function.
    Function *Copied =
        Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
                         F->getName() + ".internalized");
    ValueToValueMapTy VMap;
    auto *NewFArgIt = Copied->arg_begin();
    for (auto &Arg : F->args()) {
      auto ArgName = Arg.getName();
      NewFArgIt->setName(ArgName);
      VMap[&Arg] = &(*NewFArgIt++);
    }
    SmallVector<ReturnInst *, 8> Returns;

    // Copy the body of the original function to the new one.
    CloneFunctionInto(Copied, F, VMap,
                      CloneFunctionChangeType::LocalChangesOnly, Returns);

    // Set the linkage and visibility late as CloneFunctionInto has some
    // implicit requirements.
    Copied->setVisibility(GlobalValue::DefaultVisibility);
    Copied->setLinkage(GlobalValue::PrivateLinkage);

    // Copy metadata.
    SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
    F->getAllMetadata(MDs);
    for (auto MDIt : MDs)
      if (!Copied->hasMetadata())
        Copied->addMetadata(MDIt.first, *MDIt.second);

    M.getFunctionList().insert(F->getIterator(), Copied);
    Copied->setDSOLocal(true);
    FnMap[F] = Copied;
  }

  // Replace all uses of the old function with the new internalized function
  // unless the caller is a function that was just internalized.
  for (Function *F : FnSet) {
    auto &InternalizedFn = FnMap[F];
    auto IsNotInternalized = [&](Use &U) -> bool {
      if (auto *CB = dyn_cast<CallBase>(U.getUser()))
        return !FnMap.lookup(CB->getCaller());
      return false;
    };
    F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
  }

  return true;
}

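// A signature rewrite is only valid if every call site can be updated in
// lockstep with the callee. The checks below conservatively reject anything
// unusual: var-args, must-tail calls, callback calls, mismatched operand
// counts or types, and arguments with complex passing semantics.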
2854bool Attributor::isValidFunctionSignatureRewrite(
2855 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2856
2857 if (!Configuration.RewriteSignatures)
2858 return false;
2859
2860 Function *Fn = Arg.getParent();
2861 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2862 // Forbid the call site to cast the function return type. If we need to
2863 // rewrite these functions we need to re-create a cast for the new call site
2864 // (if the old had uses).
2865 if (!ACS.getCalledFunction() ||
2866 ACS.getInstruction()->getType() !=
2867 ACS.getCalledFunction()->getReturnType())
2868 return false;
2869 if (cast<CallBase>(Val: ACS.getInstruction())->getCalledOperand()->getType() !=
2870 Fn->getType())
2871 return false;
2872 if (ACS.getNumArgOperands() != Fn->arg_size())
2873 return false;
2874 // Forbid must-tail calls for now.
2875 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2876 };
2877
2878 // Avoid var-arg functions for now.
2879 if (Fn->isVarArg()) {
2880 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2881 return false;
2882 }
2883
2884 // Avoid functions with complicated argument passing semantics.
2885 AttributeList FnAttributeList = Fn->getAttributes();
2886 if (FnAttributeList.hasAttrSomewhere(Kind: Attribute::Nest) ||
2887 FnAttributeList.hasAttrSomewhere(Kind: Attribute::StructRet) ||
2888 FnAttributeList.hasAttrSomewhere(Kind: Attribute::InAlloca) ||
2889 FnAttributeList.hasAttrSomewhere(Kind: Attribute::Preallocated)) {
2890 LLVM_DEBUG(
2891 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2892 return false;
2893 }
2894
2895 // Avoid callbacks for now.
2896 bool UsedAssumedInformation = false;
2897 if (!checkForAllCallSites(Pred: CallSiteCanBeChanged, Fn: *Fn, RequireAllCallSites: true, QueryingAA: nullptr,
2898 UsedAssumedInformation,
2899 /* CheckPotentiallyDead */ true)) {
2900 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2901 return false;
2902 }
2903
2904 auto InstPred = [](Instruction &I) {
2905 if (auto *CI = dyn_cast<CallInst>(Val: &I))
2906 return !CI->isMustTailCall();
2907 return true;
2908 };
2909
2910 // Forbid must-tail calls for now.
2911 // TODO:
2912 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F: *Fn);
2913 if (!checkForAllInstructionsImpl(A: nullptr, OpcodeInstMap, Pred: InstPred, QueryingAA: nullptr,
2914 LivenessAA: nullptr, Opcodes: {Instruction::Call},
2915 UsedAssumedInformation)) {
2916 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2917 return false;
2918 }
2919
2920 return true;
2921}
2922
2923bool Attributor::registerFunctionSignatureRewrite(
2924 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2925 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2926 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2927 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2928 << Arg.getParent()->getName() << " with "
2929 << ReplacementTypes.size() << " replacements\n");
2930 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2931 "Cannot register an invalid rewrite");
2932
2933 Function *Fn = Arg.getParent();
2934 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2935 ArgumentReplacementMap[Fn];
2936 if (ARIs.empty())
2937 ARIs.resize(N: Fn->arg_size());
2938
2939 // If we have a replacement already with less than or equal new arguments,
2940 // ignore this request.
2941 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2942 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2943 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2944 return false;
2945 }
2946
2947 // If we have a replacement already but we like the new one better, delete
2948 // the old.
2949 ARI.reset();
2950
2951 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2952 << Arg.getParent()->getName() << " with "
2953 << ReplacementTypes.size() << " replacements\n");
2954
2955 // Remember the replacement.
2956 ARI.reset(p: new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2957 std::move(CalleeRepairCB),
2958 std::move(ACSRepairCB)));
2959
2960 return true;
2961}
2962
2963bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2964 bool Result = true;
2965#ifndef NDEBUG
2966 if (SeedAllowList.size() != 0)
2967 Result = llvm::is_contained(SeedAllowList, AA.getName());
2968 Function *Fn = AA.getAnchorScope();
2969 if (FunctionSeedAllowList.size() != 0 && Fn)
2970 Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2971#endif
2972 return Result;
2973}
2974
ChangeStatus Attributor::rewriteFunctionSignatures(
    SmallSetVector<Function *, 8> &ModifiedFns) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  for (auto &It : ArgumentReplacementMap) {
    Function *OldFn = It.getFirst();

    // Deleted functions do not require rewrites.
    if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
      continue;

    const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
        It.getSecond();
    assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");

    SmallVector<Type *, 16> NewArgumentTypes;
    SmallVector<AttributeSet, 16> NewArgumentAttributes;

    // Collect replacement argument types and copy over existing attributes.
    AttributeList OldFnAttributeList = OldFn->getAttributes();
    for (Argument &Arg : OldFn->args()) {
      if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
              ARIs[Arg.getArgNo()]) {
        NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
                                ARI->ReplacementTypes.end());
        NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
                                     AttributeSet());
      } else {
        NewArgumentTypes.push_back(Arg.getType());
        NewArgumentAttributes.push_back(
            OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
      }
    }

    uint64_t LargestVectorWidth = 0;
    for (auto *I : NewArgumentTypes)
      if (auto *VT = dyn_cast<llvm::VectorType>(I))
        LargestVectorWidth =
            std::max(LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());

    FunctionType *OldFnTy = OldFn->getFunctionType();
    Type *RetTy = OldFnTy->getReturnType();

    // Construct the new function type using the new argument types.
    FunctionType *NewFnTy =
        FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());

    LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
                      << "' from " << *OldFn->getFunctionType() << " to "
                      << *NewFnTy << "\n");

    // Create the new function body and insert it into the module.
    Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
                                       OldFn->getAddressSpace(), "");
    Functions.insert(NewFn);
    OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
    NewFn->takeName(OldFn);
    NewFn->copyAttributesFrom(OldFn);

    // Patch the pointer to LLVM function in debug info descriptor.
    NewFn->setSubprogram(OldFn->getSubprogram());
    OldFn->setSubprogram(nullptr);

    // Recompute the parameter attributes list based on the new arguments for
    // the function.
    LLVMContext &Ctx = OldFn->getContext();
    NewFn->setAttributes(AttributeList::get(
        Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
        NewArgumentAttributes));
    AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);

    // Remove argmem from the memory effects if we have no more pointer
    // arguments, or they are readnone.
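    // E.g., a function left without any (non-readnone) pointer arguments has
    // its argmem component cleared, so `memory(readwrite)` effectively
    // becomes `memory(readwrite, argmem: none)` (illustrative).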
    MemoryEffects ME = NewFn->getMemoryEffects();
    int ArgNo = -1;
    if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
          ++ArgNo;
          return !T->isPtrOrPtrVectorTy() ||
                 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
        })) {
      NewFn->setMemoryEffects(ME - MemoryEffects::argMemOnly());
    }

    // Since we have now created the new function, splice the body of the old
    // function right into the new function, leaving the old rotting hulk of
    // the function empty.
    NewFn->splice(NewFn->begin(), OldFn);

    // Set of all "call-like" instructions that invoke the old function mapped
    // to their new replacements.
    SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;

    // Callback to create a new "call-like" instruction for a given one.
    auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
      CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
      const AttributeList &OldCallAttributeList = OldCB->getAttributes();

      // Collect the new argument operands for the replacement call site.
      SmallVector<Value *, 16> NewArgOperands;
      SmallVector<AttributeSet, 16> NewArgOperandAttributes;
      for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
        unsigned NewFirstArgNum = NewArgOperands.size();
        (void)NewFirstArgNum; // only used inside assert.
        if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
                ARIs[OldArgNum]) {
          if (ARI->ACSRepairCB)
            ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
          assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
                     NewArgOperands.size() &&
                 "ACS repair callback did not provide as many operands as new "
                 "types were registered!");
          // TODO: Expose the attribute set to the ACS repair callback.
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttrs(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
                                   II->getUnwindDest(), NewArgOperands,
                                   OperandBundleDefs, "", OldCB->getIterator());
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands,
                                       OperandBundleDefs, "",
                                       OldCB->getIterator());
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttrs(),
          OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));

      AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
                                                    LargestVectorWidth);

      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    bool UsedAssumedInformation = false;
    bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
                                        true, nullptr, UsedAssumedInformation,
                                        /* CheckPotentiallyDead */ true);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments.
    Argument *OldFnArgIt = OldFn->arg_begin();
    Argument *NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
              ARIs[OldArgNum]) {
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        if (ARI->ReplacementTypes.empty())
          OldFnArgIt->replaceAllUsesWith(
              PoisonValue::get(OldFnArgIt->getType()));
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      assert(OldCB.getType() == NewCB.getType() &&
             "Cannot handle call sites with different types!");
      ModifiedFns.insert(OldCB.getFunction());
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // Replace the function in the call graph (if any).
    Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);

    // If the old function was modified and needed to be reanalyzed, the new
    // one does now.
    if (ModifiedFns.remove(OldFn))
      ModifiedFns.insert(NewFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}

void InformationCache::initializeInformationCache(const Function &CF,
                                                  FunctionInfo &FI) {
  // As we do not modify the function here we can remove the const
  // without breaking implicit assumptions. At the end of the day, we could
  // initialize the cache eagerly which would look the same to the users.
  Function &F = const_cast<Function &>(CF);

  FI.IsKernel = F.hasFnAttribute("kernel");

  // Walk all instructions to find interesting instructions that might be
  // queried by abstract attributes during their initialization or update.
  // This has to happen before we create attributes.

  DenseMap<const Value *, std::optional<short>> AssumeUsesMap;

  // Add \p V to the assume uses map which tracks the number of uses outside
  // of "visited" assumes. If no outside uses are left the value is added to
  // the assume-only values.
  auto AddToAssumeUsesMap = [&](const Value &V) -> void {
    SmallVector<const Instruction *> Worklist;
    if (auto *I = dyn_cast<Instruction>(&V))
      Worklist.push_back(I);
    while (!Worklist.empty()) {
      const Instruction *I = Worklist.pop_back_val();
      std::optional<short> &NumUses = AssumeUsesMap[I];
      if (!NumUses)
        NumUses = I->getNumUses();
      NumUses = *NumUses - /* this assume */ 1;
      if (*NumUses != 0)
        continue;
      AssumeOnlyValues.insert(I);
      for (const Value *Op : I->operands())
        if (auto *OpI = dyn_cast<Instruction>(Op))
          Worklist.push_back(OpI);
    }
  };
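
  // Illustrative example: in
  //   %c = icmp eq i32 %x, 0
  //   call void @llvm.assume(i1 %c)
  // the icmp feeding the assume has no other uses, so it (and, transitively,
  // any of its otherwise unused instruction operands) ends up in
  // AssumeOnlyValues.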

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are
    // interesting to concrete attributes we only cache the ones identified in
    // the following switch.
    // Note: There are no concrete attributes now so this is initially empty.
    switch (I.getOpcode()) {
    default:
      assert(!isa<CallBase>(&I) &&
             "New call base instruction type needs to be known in the "
             "Attributor.");
      break;
    case Instruction::Call:
      // Calls are interesting on their own, additionally:
      // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
      // For `must-tail` calls we remember the caller and callee.
      if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
        AssumeOnlyValues.insert(Assume);
        fillMapFromAssume(*Assume, KnowledgeMap);
        AddToAssumeUsesMap(*Assume->getArgOperand(0));
      } else if (cast<CallInst>(I).isMustTailCall()) {
        FI.ContainsMustTailCall = true;
        if (auto *Callee = dyn_cast_if_present<Function>(
                cast<CallInst>(I).getCalledOperand()))
          getFunctionInfo(*Callee).CalledViaMustTail = true;
      }
      [[fallthrough]];
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::AtomicRMW:
    case Instruction::AtomicCmpXchg:
    case Instruction::Br:
    case Instruction::Resume:
    case Instruction::Ret:
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
    case Instruction::Alloca:
    case Instruction::AddrSpaceCast:
      IsInterestingOpcode = true;
    }
    if (IsInterestingOpcode) {
      auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
      if (!Insts)
        Insts = new (Allocator) InstructionVectorTy();
      Insts->push_back(&I);
    }
    if (I.mayReadOrWriteMemory())
      FI.RWInsts.push_back(&I);
  }

  if (F.hasFnAttribute(Attribute::AlwaysInline) &&
      isInlineViable(F).isSuccess())
    InlineableFunctions.insert(&F);
}

InformationCache::FunctionInfo::~FunctionInfo() {
  // The instruction vectors are allocated using a BumpPtrAllocator; we need
  // to destroy them manually.
  for (auto &It : OpcodeInstMap)
    It.getSecond()->~InstructionVectorTy();
}

ArrayRef<Function *>
InformationCache::getIndirectlyCallableFunctions(Attributor &A) const {
  assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
  return IndirectlyCallableFunctions;
}

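// On typical GPU targets (e.g., AMDGPU and NVPTX) address space 0 is the
// flat/generic address space; for other targets we conservatively report
// that no flat address space is known.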
std::optional<unsigned> InformationCache::getFlatAddressSpace() const {
  if (TargetTriple.isGPU())
    return 0;
  return std::nullopt;
}

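// Record that \p ToAA depends on \p FromAA: once \p FromAA changes, \p ToAA
// is scheduled for another update (see rememberDependences). Dependences are
// only tracked while an update is in flight, i.e., while the dependence stack
// is non-empty, and only if \p FromAA has not reached a fixpoint yet.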
void Attributor::recordDependence(const AbstractAttribute &FromAA,
                                  const AbstractAttribute &ToAA,
                                  DepClassTy DepClass) {
  if (DepClass == DepClassTy::NONE)
    return;
  // If we are outside of an update, thus before the actual fixpoint iteration
  // started (= when we create AAs), we do not track dependences because we
  // will put all AAs into the initial worklist anyway.
  if (DependenceStack.empty())
    return;
  if (FromAA.getState().isAtFixpoint())
    return;
  DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
}

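// Commit the dependences recorded during the current update step into the
// dependence sets of the queried AAs so that changes propagate to dependent
// AAs in later iterations.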
void Attributor::rememberDependences() {
  assert(!DependenceStack.empty() && "No dependences to remember!");

  for (DepInfo &DI : *DependenceStack.back()) {
    assert((DI.DepClass == DepClassTy::REQUIRED ||
            DI.DepClass == DepClassTy::OPTIONAL) &&
           "Expected required or optional dependence (1 bit)!");
    auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
    DepAAs.insert(AbstractAttribute::DepTy(
        const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
  }
}

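// Seed an abstract attribute of type \p AAType for \p IRP unless the IR
// attribute \p AK is already present (or assumed) or \p AAType is excluded by
// the allowed set of the current configuration. Typical usage, as seen below:
//
//   checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);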
template <Attribute::AttrKind AK, typename AAType>
void Attributor::checkAndQueryIRAttr(const IRPosition &IRP, AttributeSet Attrs,
                                     bool SkipHasAttrCheck) {
  bool IsKnown;
  if (SkipHasAttrCheck || !Attrs.hasAttribute(AK))
    if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
      if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
                                    IsKnown))
        getOrCreateAAFor<AAType>(IRP);
}

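// Seed all default abstract attributes for \p F: liveness and simplification
// queries plus the attribute kinds checked below for the function itself, its
// return value, its arguments, and the call sites it contains.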
void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;
  if (F.isDeclaration())
    return;

  // In non-module runs we need to look at the call sites of a function to
  // determine if it is part of a must-tail call edge. This will influence
  // what attributes we can derive.
  InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
  if (!isModulePass() && !FI.CalledViaMustTail) {
    for (const Use &U : F.uses())
      if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
        if (CB->isCallee(&U) && CB->isMustTailCall())
          FI.CalledViaMustTail = true;
  }

  IRPosition FPos = IRPosition::function(F);
  bool IsIPOAmendable = isFunctionIPOAmendable(F);
  auto Attrs = F.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might contain instructions that cause "undefined
  // behavior".
  getOrCreateAAFor<AAUndefinedBehavior>(FPos);

  // Every function might be applicable for Heap-To-Stack conversion.
  if (EnableHeapToStack)
    getOrCreateAAFor<AAHeapToStack>(FPos);

  // Every function might be "must-progress".
  checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);

  // Every function might be "no-free".
  checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);

  // Every function might be "will-return".
  checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);

  // Every function might be marked "nosync".
  checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);

  // Everything that is visible from the outside (= function, argument, and
  // return positions) cannot be changed if the function is not IPO amendable.
  // We can, however, analyze the code inside.
  if (IsIPOAmendable) {

    // Every function can be nounwind.
    checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);

    // Every function might be "no-return".
    checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);

    // Every function might be "no-recurse".
    checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);

    // Every function can be "non-convergent".
    if (Attrs.hasFnAttr(Attribute::Convergent))
      getOrCreateAAFor<AANonConvergent>(FPos);

    // Every function might be "readnone/readonly/writeonly/...".
    getOrCreateAAFor<AAMemoryBehavior>(FPos);

    // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
    getOrCreateAAFor<AAMemoryLocation>(FPos);

    // Every function can track active assumptions.
    getOrCreateAAFor<AAAssumptionInfo>(FPos);

    // If we're not using a dynamic mode for float, there's nothing worthwhile
    // to infer. This misses the edge case of denormal-fp-math="dynamic"
    // combined with a different denormal-fp-math-f32 setting, but that likely
    // has no real-world use.
    DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
    if (Mode.Input == DenormalMode::Dynamic ||
        Mode.Output == DenormalMode::Dynamic)
      getOrCreateAAFor<AADenormalFPMath>(FPos);

    // Return attributes are only appropriate if the return type is non-void.
    Type *ReturnType = F.getReturnType();
    if (!ReturnType->isVoidTy()) {
      IRPosition RetPos = IRPosition::returned(F);
      AttributeSet RetAttrs = Attrs.getRetAttrs();

      // Every returned value might be dead.
      getOrCreateAAFor<AAIsDead>(RetPos);

      // Every returned value might be simplified.
      bool UsedAssumedInformation = false;
      getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      // Every returned value might be marked noundef.
      checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);

      if (ReturnType->isPointerTy()) {

        // Every function with pointer return type might be marked align.
        getOrCreateAAFor<AAAlign>(RetPos);

        // Every function with pointer return type might be marked nonnull.
        checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);

        // Every function with pointer return type might be marked noalias.
        checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);

        // Every function with pointer return type might be marked
        // dereferenceable.
        getOrCreateAAFor<AADereferenceable>(RetPos);
      } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
        getOrCreateAAFor<AANoFPClass>(RetPos);
      }
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);
    auto ArgNo = Arg.getArgNo();
    AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);

    if (!IsIPOAmendable) {
      if (Arg.getType()->isPointerTy())
        // Every argument with pointer type might be marked nofree.
        checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
      continue;
    }

    // Every argument might be simplified. We have to go through the
    // Attributor interface though as outside AAs can register custom
    // simplification callbacks.
    bool UsedAssumedInformation = false;
    getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
                         AA::Intraprocedural);

    // Every argument might be dead.
    getOrCreateAAFor<AAIsDead>(ArgPos);

    // Every argument might be marked noundef.
    checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be marked noalias.
      checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
          ArgPos, ArgAttrs, /*SkipHasAttrCheck=*/true);

      // Every argument with pointer type might be marked
      // "readnone/readonly/writeonly/...".
      getOrCreateAAFor<AAMemoryBehavior>(ArgPos);

      // Every argument with pointer type might be marked nofree.
      checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be privatizable (or
      // promotable).
      getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
    } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
      getOrCreateAAFor<AANoFPClass>(ArgPos);
    }
  }

  auto CallSitePred = [&](Instruction &I) -> bool {
    auto &CB = cast<CallBase>(I);
    IRPosition CBInstPos = IRPosition::inst(CB);
    IRPosition CBFnPos = IRPosition::callsite_function(CB);

    // Call sites might be dead if they have no side effects and no live
    // users. The return value might be dead if there are no live users.
    getOrCreateAAFor<AAIsDead>(CBInstPos);

    Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
    // TODO: Even if the callee is not known now we might be able to simplify
    // the call/callee.
    if (!Callee) {
      getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
      return true;
    }

    // Every call site can track active assumptions.
    getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);

    // Skip declarations unless annotations on their call sites were
    // explicitly requested.
    if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
        !Callee->hasMetadata(LLVMContext::MD_callback))
      return true;

    if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
      IRPosition CBRetPos = IRPosition::callsite_returned(CB);
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
        getOrCreateAAFor<AANoFPClass>(CBInstPos);
    }

    const AttributeList &CBAttrs = CBFnPos.getAttrList();
    for (int I = 0, E = CB.arg_size(); I < E; ++I) {

      IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
      AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);

      // Every call site argument might be dead.
      getOrCreateAAFor<AAIsDead>(CBArgPos);

      // Every call site argument might be simplified. We have to go through
      // the Attributor interface though as outside AAs can register custom
      // simplification callbacks.
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      // Every call site argument might be marked "noundef".
      checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);

      Type *ArgTy = CB.getArgOperand(I)->getType();

      if (!ArgTy->isPointerTy()) {
        if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
          getOrCreateAAFor<AANoFPClass>(CBArgPos);

        continue;
      }

      // Call site argument attribute "non-null".
      checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);

      // Call site argument attribute "captures(none)".
      checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
          CBArgPos, CBArgAttrs, /*SkipHasAttrCheck=*/true);

      // Call site argument attribute "no-alias".
      checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);

      // Call site argument attribute "dereferenceable".
      getOrCreateAAFor<AADereferenceable>(CBArgPos);

      // Call site argument attribute "align".
      getOrCreateAAFor<AAAlign>(CBArgPos);

      // Call site argument attribute
      // "readnone/readonly/writeonly/...".
      if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
        getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);

      // Call site argument attribute "nofree".
      checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
    }
    return true;
  };

  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  [[maybe_unused]] bool Success;
  bool UsedAssumedInformation = false;
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
      {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
       (unsigned)Instruction::Call},
      UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");

  auto LoadStorePred = [&](Instruction &I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(&I)) {
      getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
      if (SimplifyAllLoads)
        getAssumedSimplified(IRPosition::value(I), nullptr,
                             UsedAssumedInformation, AA::Intraprocedural);
      getOrCreateAAFor<AAInvariantLoadPointer>(
          IRPosition::value(*LI->getPointerOperand()));
      getOrCreateAAFor<AAAddressSpace>(
          IRPosition::value(*LI->getPointerOperand()));
    } else {
      auto &SI = cast<StoreInst>(I);
      getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
      getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
                           UsedAssumedInformation, AA::Intraprocedural);
      getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
      getOrCreateAAFor<AAAddressSpace>(
          IRPosition::value(*SI.getPointerOperand()));
    }
    return true;
  };
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
      {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
      UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");

  // Seed AAAllocationInfo for every alloca.
  auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
    getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
    return true;
  };

  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
      {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");
}

bool Attributor::isClosedWorldModule() const {
  if (CloseWorldAssumption.getNumOccurrences())
    return CloseWorldAssumption;
  return isModulePass() && Configuration.IsClosedWorldModule;
}

/// Helpers to ease debugging through output streams and print calls.
///
///{
raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
  switch (AP) {
  case IRPosition::IRP_INVALID:
    return OS << "inv";
  case IRPosition::IRP_FLOAT:
    return OS << "flt";
  case IRPosition::IRP_RETURNED:
    return OS << "fn_ret";
  case IRPosition::IRP_CALL_SITE_RETURNED:
    return OS << "cs_ret";
  case IRPosition::IRP_FUNCTION:
    return OS << "fn";
  case IRPosition::IRP_CALL_SITE:
    return OS << "cs";
  case IRPosition::IRP_ARGUMENT:
    return OS << "arg";
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    return OS << "cs_arg";
  }
  llvm_unreachable("Unknown attribute position!");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
  const Value &AV = Pos.getAssociatedValue();
  OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
     << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";

  if (Pos.hasCallBaseContext())
    OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
  return OS << "}";
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
  OS << "range-state(" << S.getBitWidth() << ")<";
  S.getKnown().print(OS);
  OS << " / ";
  S.getAssumed().print(OS);
  OS << ">";

  return OS << static_cast<const AbstractState &>(S);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialConstantIntValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet())
      OS << It << ", ";
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialLLVMValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet()) {
      if (auto *F = dyn_cast<Function>(It.first.getValue()))
        OS << "@" << F->getName() << "[" << int(It.second) << "], ";
      else
        OS << *It.first.getValue() << "[" << int(It.second) << "], ";
    }
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}

void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
  OS << "[";
  OS << getName();
  OS << "] for CtxI ";

  if (auto *I = getCtxI()) {
    OS << "'";
    I->print(OS);
    OS << "'";
  } else
    OS << "<<null inst>>";

  OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
     << '\n';
}

void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
  print(OS);

  for (const auto &DepAA : Deps) {
    auto *AA = DepAA.getPointer();
    OS << " updates ";
    AA->print(OS);
  }

  OS << '\n';
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const AAPointerInfo::Access &Acc) {
  OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
  if (Acc.getLocalInst() != Acc.getRemoteInst())
    OS << " via " << *Acc.getLocalInst();
  if (Acc.getContent()) {
    if (*Acc.getContent())
      OS << " [" << **Acc.getContent() << "]";
    else
      OS << " [ <unknown> ]";
  }
  return OS;
}
///}

/// ----------------------------------------------------------------------------
/// Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

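// Run the full Attributor on \p Functions: seed the default abstract
// attributes, iterate to a fixpoint, and manifest the results. Returns true
// if the IR was changed.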
static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater,
                                     bool DeleteFns, bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[Attributor] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << " - " << Fn->getName() << "\n";
  });

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = DeleteFns;

  /// Tracking callback for specialization of indirect calls.
  DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
      IndirectCalleeTrackingMap;
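  // Bound the number of distinct callees we specialize for at each indirect
  // call site; a value of 0 disables specialization entirely.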
  if (MaxSpecializationPerCB.getNumOccurrences()) {
    AC.IndirectCalleeSpecializationCallback =
        [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
            Function &Callee, unsigned) {
          if (MaxSpecializationPerCB == 0)
            return false;
          auto &Set = IndirectCalleeTrackingMap[&CB];
          if (!Set)
            Set = std::make_unique<SmallPtrSet<Function *, 8>>();
          if (Set->size() >= MaxSpecializationPerCB)
            return Set->contains(&Callee);
          Set->insert(&Callee);
          return true;
        };
  }

  Attributor A(Functions, InfoCache, AC);

  // Create shallow wrappers for all functions that are not IPO amendable.
  if (AllowShallowWrappers)
    for (Function *F : Functions)
      if (!A.isFunctionIPOAmendable(*F))
        Attributor::createShallowWrapper(*F);

  // Internalize non-exact functions.
  // TODO: For now we eagerly internalize functions without calculating the
  // cost; we need a cost interface to determine whether internalizing a
  // function is "beneficial".
  if (AllowDeepWrapper) {
    unsigned FunSize = Functions.size();
    for (unsigned u = 0; u < FunSize; u++) {
      Function *F = Functions[u];
      if (!F->isDeclaration() && !F->isDefinitionExact() && !F->use_empty() &&
          !GlobalValue::isInterposableLinkage(F->getLinkage())) {
        Function *NewF = Attributor::internalizeFunction(*F);
        assert(NewF && "Could not internalize function.");
        Functions.insert(NewF);

        // Update the call graph.
        CGUpdater.replaceFunctionWith(*F, *NewF);
        for (const Use &U : NewF->uses())
          if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
            auto *CallerF = CB->getCaller();
            CGUpdater.reanalyzeFunction(*CallerF);
          }
      }
    }
  }

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or lies outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

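// Run the lightweight Attributor variant on \p Functions: only a restricted
// set of abstract attributes is seeded, liveness is not used, and no
// functions are deleted, which allows the pass to preserve more analyses.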
static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
                                          SetVector<Function *> &Functions,
                                          AnalysisGetter &AG,
                                          CallGraphUpdater &CGUpdater,
                                          FunctionAnalysisManager &FAM,
                                          bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[AttributorLight] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << " - " << Fn->getName() << "\n";
  });

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = false;
  DenseSet<const char *> Allowed(
      {&AAWillReturn::ID, &AANoUnwind::ID, &AANoRecurse::ID, &AANoSync::ID,
       &AANoFree::ID, &AANoReturn::ID, &AAMemoryLocation::ID,
       &AAMemoryBehavior::ID, &AAUnderlyingObjects::ID, &AANoCapture::ID,
       &AAInterFnReachability::ID, &AAIntraFnReachability::ID, &AACallEdges::ID,
       &AANoFPClass::ID, &AAMustProgress::ID, &AANonNull::ID,
       &AADenormalFPMath::ID});
  AC.Allowed = &Allowed;
  AC.UseLiveness = false;

  Attributor A(Functions, InfoCache, AC);

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or lies outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (AC.UseLiveness && F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  if (Changed == ChangeStatus::CHANGED) {
    // Invalidate analyses for modified functions so that we don't have to
    // invalidate all analyses for all functions in this SCC.
    PreservedAnalyses FuncPA;
    // We haven't changed the CFG for modified functions.
    FuncPA.preserveSet<CFGAnalyses>();
    for (Function *Changed : A.getModifiedFunctions()) {
      FAM.invalidate(*Changed, FuncPA);
      // Also invalidate any direct callers of changed functions since
      // analyses may care about attributes of direct callees. For example,
      // MemorySSA cares about whether or not a call's callee modifies memory
      // and queries that through function attributes.
      for (auto *U : Changed->users()) {
        if (auto *Call = dyn_cast<CallBase>(U)) {
          if (Call->getCalledFunction() == Changed)
            FAM.invalidate(*Call->getFunction(), FuncPA);
        }
      }
    }
  }
  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }

void AADepGraph::dumpGraph() {
  static std::atomic<int> CallTimes;
  std::string Prefix;

  if (!DepGraphDotFileNamePrefix.empty())
    Prefix = DepGraphDotFileNamePrefix;
  else
    Prefix = "dep_graph";
  std::string Filename =
      Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";

  outs() << "Dependency graph dump to " << Filename << ".\n";

  std::error_code EC;

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  if (!EC)
    llvm::WriteGraph(File, this);

  CallTimes++;
}

void AADepGraph::print() {
  for (auto DepAA : SyntheticRoot.Deps)
    cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ true, /* IsModulePass */ true)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
                                           CGSCCAnalysisManager &AM,
                                           LazyCallGraph &CG,
                                           CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ false,
                               /* IsModulePass */ false)) {
    // FIXME: Think about passes we will preserve and add them here.
    PreservedAnalyses PA;
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM, /* CachedOnly */ true);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ true)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
                                                CGSCCAnalysisManager &AM,
                                                LazyCallGraph &CG,
                                                CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ false)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

namespace llvm {

template <> struct GraphTraits<AADepGraphNode *> {
  using NodeRef = AADepGraphNode *;
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;

  static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
  static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }

  using ChildIteratorType =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
  using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;

  static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }

  static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
};

template <>
struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }

  using nodes_iterator =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;

  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }

  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
};

template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const AADepGraphNode *Node,
                                  const AADepGraph *DG) {
    std::string AAString;
    raw_string_ostream O(AAString);
    Node->print(O);
    return AAString;
  }
};

} // end namespace llvm