//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstdint>
#include <memory>

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <optional>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"
#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"

DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// TODO: Determine a good default value.
//
// In the LLVM test suite and SPEC2006, 32 does not seem to induce
// compile-time overhead (when run with the first 5 abstract attributes). The
// results also indicate that we never reach 32 iterations but always find a
// fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

static cl::opt<unsigned>
    MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
                           cl::Hidden,
                           cl::desc("Maximal number of callees specialized for "
                                    "a call base"),
                           cl::init(UINT32_MAX));

static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only be used in debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::CommaSeparated);
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the CallGraph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));

static cl::opt<bool> CloseWorldAssumption(
    "attributor-assume-closed-world", cl::Hidden,
    cl::desc("Should a closed world be assumed, or not. Default if not set."));

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}
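
// For illustration: `|` yields CHANGED if either operand is CHANGED, while
// `&` yields CHANGED only if both are. This makes them suitable for
// accumulating the status of multiple update/manifest steps:
//   ChangeStatus S = ChangeStatus::UNCHANGED;
//   S |= ChangeStatus::CHANGED;   // S == CHANGED
//   S &= ChangeStatus::UNCHANGED; // S == UNCHANGED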

bool AA::isGPU(const Module &M) {
  Triple T(M.getTargetTriple());
  return T.isGPU();
}

bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;

    bool IsKnownNoSync;
    return AA::hasAssumedIRAttr<Attribute::NoSync>(
        A, &QueryingAA, IRPosition::callsite_function(*CB),
        DepClassTy::OPTIONAL, IsKnownNoSync);
  }

  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}
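
// For illustration, a plain or relaxed-atomic load is nosync while volatile
// and stronger atomic accesses are not:
//   %a = load i32, ptr %p                           ; nosync
//   %b = load volatile i32, ptr %p                  ; not nosync
//   %c = load atomic i32, ptr %p seq_cst, align 4   ; not nosync
//   %d = load atomic i32, ptr %p monotonic, align 4 ; relaxed -> nosync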

bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
}

Constant *
AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
                          Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
                          const DataLayout &DL, AA::RangeTy *RangePtr) {
  if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
    return Init;
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV)
    return nullptr;

  bool UsedAssumedInformation = false;
  Constant *Initializer = nullptr;
  if (A.hasGlobalVariableSimplificationCallback(*GV)) {
    auto AssumedGV = A.getAssumedInitializerFromCallBack(
        *GV, &QueryingAA, UsedAssumedInformation);
    Initializer = *AssumedGV;
    if (!Initializer)
      return nullptr;
  } else {
    if (!GV->hasLocalLinkage()) {
      // Externally visible global that's either non-constant,
      // or a constant with an uncertain initializer.
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return nullptr;
    }

    // Globals with local linkage are always initialized.
    assert(!GV->hasLocalLinkage() || GV->hasInitializer());

    if (!Initializer)
      Initializer = GV->getInitializer();
  }

  if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
    APInt Offset = APInt(64, RangePtr->Offset);
    return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
  }

  return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
}

bool AA::isValidInScope(const Value &V, const Function *Scope) {
  if (isa<Constant>(V))
    return true;
  if (auto *I = dyn_cast<Instruction>(&V))
    return I->getFunction() == Scope;
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  return false;
}

bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
                           InformationCache &InfoCache) {
  if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
    return true;
  const Function *Scope = nullptr;
  const Instruction *CtxI = VAC.getCtxI();
  if (CtxI)
    Scope = CtxI->getFunction();
  if (auto *A = dyn_cast<Argument>(VAC.getValue()))
    return A->getParent() == Scope;
  if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
    if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
        return DT->dominates(I, CtxI);
      // Local dominance check mostly for the old PM passes.
      if (CtxI && I->getParent() == CtxI->getParent())
        return llvm::any_of(
            make_range(I->getIterator(), I->getParent()->end()),
            [&](const Instruction &AfterI) { return &AfterI == CtxI; });
    }
  }
  return false;
}

Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue() && !Ty.isPtrOrPtrVectorTy())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
    }
  }
  return nullptr;
}
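
// For illustration: adapting an i64 constant to an i32 use yields a
// truncated constant, while widening is not attempted:
//   getWithType(i64 42, i32) -> i32 42 (trunc)
//   getWithType(i32 42, i64) -> nullptr
//   getWithType(undef,  T)   -> undef of type T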

std::optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
                                         const std::optional<Value *> &B,
                                         Type *Ty) {
  if (A == B)
    return A;
  if (!B)
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A)
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  if (isa_and_nonnull<UndefValue>(*A))
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}
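
// For illustration, this combination acts as a meet over the simplification
// lattice: std::nullopt means "no value known yet" and nullptr means "not
// simplifiable". Roughly (ignoring type adaptation failures):
//   combine(nullopt, X) == X        combine(undef, X) == X
//   combine(X, X)       == X        combine(X, Y)     == nullptr for X != Y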

template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort, we want to avoid spurious
  // dependences and potential copies in the provided containers.
  SmallVector<const AAPointerInfo *> PIs;
  SmallSetVector<Value *, 8> NewCopies;
  SmallSetVector<Instruction *, 8> NewCopyOrigins;

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());

  auto Pred = [&](Value &Obj) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
    if (isa<UndefValue>(&Obj))
      return true;
    if (isa<ConstantPointerNull>(&Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
                                 AA::Interprocedural) == &Obj)
        return true;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
        !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
      if (!GV->hasLocalLinkage() &&
          !(GV->isConstant() && GV->hasInitializer())) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << Obj << "\n";);
        return false;
      }

    bool NullOnly = true;
    bool NullRequired = false;
    auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
                                        bool IsExact) {
      if (!V || *V == nullptr)
        NullOnly = false;
      else if (isa<UndefValue>(*V))
        /* No op */;
      else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
        NullRequired = !IsExact;
      else
        NullOnly = false;
    };

    auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
                                      Value &V) {
      Value *AdjV = AA::getWithType(V, *I.getType());
      if (!AdjV) {
        LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
                             "cannot be converted to read type: "
                          << *Acc.getRemoteInst() << " : " << *I.getType()
                          << "\n";);
      }
      return AdjV;
    };

    auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad) {
        if (Acc.isWrittenValueYetUndetermined())
          return true;
        if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
          return false;
        if (!Acc.isWrittenValueUnknown())
          if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
        if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
          if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
      }
      return false;
    };

    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
      if (OnlyExact && !IsExact && !NullOnly &&
          !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Required all accesses to be `null` due to a non "
                             "exact one, however found a non-null one: "
                          << *Acc.getRemoteInst() << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
          if (!V)
            return false;
          NewCopies.insert(V);
          if (PotentialValueOrigins)
            NewCopyOrigins.insert(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
        if (!V)
          return false;
        NewCopies.insert(V);
        if (PotentialValueOrigins)
          NewCopyOrigins.insert(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.insert(Acc.getRemoteInst());
      }
      return true;
    };

    // If the value has been written to, we don't need the initial value of
    // the object.
    bool HasBeenWrittenTo = false;

    AA::RangeTy Range;
    auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
                                         DepClassTy::NONE);
    if (!PI || !PI->forallInterferingAccesses(
                   A, QueryingAA, I,
                   /* FindInterferingWrites */ IsLoad,
                   /* FindInterferingReads */ !IsLoad, CheckAccess,
                   HasBeenWrittenTo, Range, SkipCB)) {
      LLVM_DEBUG(
          dbgs()
          << "Failed to verify all interfering accesses for underlying object: "
          << Obj << "\n");
      return false;
    }

    if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
      const DataLayout &DL = A.getDataLayout();
      Value *InitialValue = AA::getInitialValueForObj(
          A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
      if (!InitialValue) {
        LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
                             "underlying object, abort!\n");
        return false;
      }
      CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
                             "null or undef, abort!\n");
        return false;
      }

      NewCopies.insert(InitialValue);
      if (PotentialValueOrigins)
        NewCopyOrigins.insert(nullptr);
    }

    PIs.push_back(PI);

    return true;
  };

  const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
      QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
  if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Only if we successfully collected all potential copies do we record
  // dependences (on non-fixed AAPointerInfo AAs), and only then do we modify
  // the given PotentialCopies container.
  for (const auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert_range(NewCopies);
  if (PotentialValueOrigins)
    PotentialValueOrigins->insert_range(NewCopyOrigins);

  return true;
}

bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
      OnlyExact);
}
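
// For illustration, given IR along these lines, getPotentiallyLoadedValues
// for %v would collect {i32 1, i32 2} with the two stores as origins:
//   %a = alloca i32
//   store i32 1, ptr %a
//   br i1 %c, label %then, label %merge
// then:
//   store i32 2, ptr %a
//   br label %merge
// merge:
//   %v = load i32, ptr %a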

static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {
  if (RequireReadNone) {
    if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
            A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
            /* IgnoreSubsumingPositions */ true))
      return true;
  } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
                 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
                 /* IgnoreSubsumingPositions */ true))
    return true;

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto *MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA && MemLocAA->isAssumedReadNone()) {
      IsKnown = MemLocAA->isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto *MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA &&
      (MemBehaviorAA->isAssumedReadNone() ||
       (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
    IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
                              : MemBehaviorAA->isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}

bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}
bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}

static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       const AA::InstExclusionSetTy *ExclusionSet,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
  DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
    dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
           << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
           << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
           << "]\n";
    if (ExclusionSet)
      for (auto *ES : *ExclusionSet)
        dbgs() << *ES << "\n";
  });

  // We know kernels (generally) cannot be called from within the module. Thus,
  // for reachability we would need to step back from a kernel, which would
  // allow us to reach anything anyway. Even if a kernel is invoked from
  // another kernel, values like allocas and shared memory are not accessible.
  // We implicitly check for this situation to avoid costly lookups.
  if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
      !GoBackwardsCB(*FromI.getFunction()) && A.getInfoCache().isKernel(ToFn) &&
      A.getInfoCache().isKernel(*FromI.getFunction())) {
    LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
                         "module; success\n";);
    return false;
  }

  // If we can go arbitrarily backwards we will eventually reach an entry point
  // that can reach ToI. Only if a set of blocks through which we cannot go is
  // provided, or once we track internal functions not accessible from the
  // outside, it makes sense to perform backwards analysis in the absence of a
  // GoBackwardsCB.
  if (!GoBackwardsCB && !ExclusionSet) {
    LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                      << " is not checked backwards and does not have an "
                         "exclusion set, abort\n");
    return true;
  }

  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);

  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
    }

    bool Result = true;
    if (!ToFn.isDeclaration() && ToI) {
      const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      const Instruction &EntryI = ToFn.getEntryBlock().front();
      Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
                                        A, EntryI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << *ToI << " [ToFn]\n");
    }

    if (Result) {
      // The entry of ToFn can reach the instruction ToI; check if the current
      // instruction is known to reach ToFn in the first place.
      const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
          QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
      Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
                                        A, *CurFromI, ToFn, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << ToFn.getName() << " [FromFn]\n");
      if (Result)
        return true;
    }

    // TODO: Check assumed nounwind.
    const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    auto ReturnInstCB = [&](Instruction &Ret) {
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, Ret, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << Ret << " [Intra]\n");
      return !Result;
    };

    // Check if we can reach returns.
    bool UsedAssumedInformation = false;
    if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
                                  {Instruction::Ret}, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
      continue;
    }

    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                        << " is not checked backwards, abort\n");
      return true;
    }

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      if (isa<InvokeInst>(CB))
        return false;

      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
      return true;
    };

    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }
  return false;
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                    const AbstractAttribute &QueryingAA) {
  if (isa<UndefValue>(Obj))
    return true;
  if (isa<AllocaInst>(Obj)) {
    InformationCache &InfoCache = A.getInfoCache();
    if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      LLVM_DEBUG(
          dbgs() << "[AA] Object '" << Obj
                 << "' is thread local; stack objects are thread local.\n");
      return true;
    }
    bool IsKnownNoCapture;
    bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>(
        A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
        IsKnownNoCapture);
    LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
                      << (IsAssumedNoCapture ? "" : "not") << " thread local; "
                      << (IsAssumedNoCapture ? "non-" : "")
                      << "captured stack object.\n");
    return IsAssumedNoCapture;
  }
  if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
    if (GV->isConstant()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; constant global\n");
      return true;
    }
    if (GV->isThreadLocal()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; thread local global\n");
      return true;
    }
  }

  if (A.getInfoCache().targetIsGPU()) {
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Local) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU local memory\n");
      return true;
    }
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Constant) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU constant memory\n");
      return true;
    }
  }

  LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
  return false;
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                        const AbstractAttribute &QueryingAA) {
  if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;

  SmallSetVector<const Value *, 8> Ptrs;

  auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
    if (!Loc || !Loc->Ptr) {
      LLVM_DEBUG(
          dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
      return false;
    }
    Ptrs.insert(Loc->Ptr);
    return true;
  };

  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
    if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
      return true;
    if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
      if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
        return true;
  } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
    return true;

  return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
                                        ArrayRef<const Value *> Ptrs,
                                        const AbstractAttribute &QueryingAA,
                                        const Instruction *CtxI) {
  for (const Value *Ptr : Ptrs) {
    if (!Ptr) {
      LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
      return true;
    }

    auto Pred = [&](Value &Obj) {
      if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
        return true;
      LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
                        << "'; -> requires barrier\n");
      return false;
    };

    const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
        QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
    if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
      return true;
  }
  return false;
}

/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
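
// For illustration: for integer attributes a smaller value is weaker, so a
// deduced dereferenceable(8) is "equal or worse" than an existing
// dereferenceable(16) and would not replace it.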

/// Return true if the information provided by \p Attr was added to the
/// attribute set \p AttrSet. This is only the case if it was not already
/// present in \p AttrSet.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeSet AttrSet, bool ForceReplace,
                             AttrBuilder &AB) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (AttrSet.hasAttribute(Kind))
      return false;
    AB.addAttribute(Kind);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace)
        return false;
    }
    AB.addAttribute(Kind, Attr.getValueAsString());
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (!ForceReplace && Kind == Attribute::Memory) {
      MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
      if (ME == AttrSet.getMemoryEffects())
        return false;
      AB.addMemoryAttr(ME);
      return true;
    }
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
        return false;
    }
    AB.addAttribute(Attr);
    return true;
  }

  llvm_unreachable("Expected enum, string, or int attribute!");
}
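
// For illustration, the Attribute::Memory case intersects effects, so
// manifesting memory(read) on a function already carrying
// memory(argmem: readwrite) yields memory(argmem: read), while manifesting
// an effect set no stricter than the existing one is a no-op.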

Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  std::optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg && *CBCandidateArg)
    return *CBCandidateArg;

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}
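
// For illustration, consider a broker call with a callback callee, roughly:
//   declare !callback !0 void @broker(ptr %cb, ptr %payload)
//   call void @broker(ptr @worker, ptr %p)
// For the call site argument position of %p, the associated argument is the
// corresponding parameter of @worker (the callback callee) rather than the
// second parameter of @broker.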

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

Attributor::Attributor(SetVector<Function *> &Functions,
                       InformationCache &InfoCache,
                       AttributorConfig Configuration)
    : Allocator(InfoCache.Allocator), Functions(Functions),
      InfoCache(InfoCache), Configuration(Configuration) {
  if (!isClosedWorldModule())
    return;
  for (Function *Fn : Functions)
    if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
                            /*IgnoreCallbackUses=*/false,
                            /*IgnoreAssumeLikeCalls=*/true,
                            /*IgnoreLLVMUsed=*/true,
                            /*IgnoreARCAttachedCall=*/false,
                            /*IgnoreCastedDirectCall=*/true))
      InfoCache.IndirectlyCallableFunctions.push_back(Fn);
}

bool Attributor::getAttrsFromAssumes(const IRPosition &IRP,
                                     Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs) {
  assert(IRP.getPositionKind() != IRPosition::IRP_INVALID &&
         "Expected a valid position!");
  MustBeExecutedContextExplorer *Explorer =
      getInfoCache().getMustBeExecutedContextExplorer();
  if (!Explorer)
    return false;

  Value &AssociatedValue = IRP.getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use; if not, we don't need to
  // create explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  auto EIt = Explorer->begin(IRP.getCtxI()),
       EEnd = Explorer->end(IRP.getCtxI());
  for (const auto &It : A2K)
    if (Explorer->findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}

template <typename DescTy>
ChangeStatus
Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
                          function_ref<bool(const DescTy &, AttributeSet,
                                            AttributeMask &, AttrBuilder &)>
                              CB) {
  if (AttrDescs.empty())
    return ChangeStatus::UNCHANGED;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_INVALID:
    return ChangeStatus::UNCHANGED;
  default:
    break;
  }

  AttributeList AL;
  Value *AttrListAnchor = IRP.getAttrListAnchor();
  auto It = AttrsMap.find(AttrListAnchor);
  if (It == AttrsMap.end())
    AL = IRP.getAttrList();
  else
    AL = It->getSecond();

  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AttrIdx = IRP.getAttrIdx();
  AttributeSet AS = AL.getAttributes(AttrIdx);
  AttributeMask AM;
  AttrBuilder AB(Ctx);

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  for (const DescTy &AttrDesc : AttrDescs)
    if (CB(AttrDesc, AS, AM, AB))
      HasChanged = ChangeStatus::CHANGED;

  if (HasChanged == ChangeStatus::UNCHANGED)
    return ChangeStatus::UNCHANGED;

  AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
  AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
  AttrsMap[AttrListAnchor] = AL;
  return ChangeStatus::CHANGED;
}

bool Attributor::hasAttr(const IRPosition &IRP,
                         ArrayRef<Attribute::AttrKind> AttrKinds,
                         bool IgnoreSubsumingPositions,
                         Attribute::AttrKind ImpliedAttributeKind) {
  bool Implied = false;
  bool HasAttr = false;
  auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind)) {
      Implied |= Kind != ImpliedAttributeKind;
      HasAttr = true;
    }
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
    if (HasAttr)
      break;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
    Implied = true;
  }
  if (!HasAttr) {
    Implied = true;
    SmallVector<Attribute> Attrs;
    for (Attribute::AttrKind AK : AttrKinds)
      if (getAttrsFromAssumes(IRP, AK, Attrs)) {
        HasAttr = true;
        break;
      }
  }

  // Check if we should manifest the implied attribute kind at the IRP.
  if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
    manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
                                       ImpliedAttributeKind)});
  return HasAttr;
}

void Attributor::getAttrs(const IRPosition &IRP,
                          ArrayRef<Attribute::AttrKind> AttrKinds,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions) {
  auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
                           AttributeSet AttrSet, AttributeMask &,
                           AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind))
      Attrs.push_back(AttrSet.getAttribute(Kind));
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  for (Attribute::AttrKind AK : AttrKinds)
    getAttrsFromAssumes(IRP, AK, Attrs);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<Attribute::AttrKind> AttrKinds) {
  auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) {
    if (!AttrSet.hasAttribute(Kind))
      return false;
    AM.addAttribute(Kind);
    return true;
  };
  return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<StringRef> Attrs) {
  auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) -> bool {
    if (!AttrSet.hasAttribute(Attr))
      return false;
    AM.addAttribute(Attr);
    return true;
  };

  return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
}

ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
                                       ArrayRef<Attribute> Attrs,
                                       bool ForceReplace) {
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &AB) {
    return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
  };
  return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
}

const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (auto *Callee =
              dyn_cast_if_present<Function>(CB->getCalledOperand())) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
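
// For illustration, for a call site argument position the iterator visits,
// in order: the call site argument itself, the callee argument (if the
// callee is known), the callee function, and finally the position of the
// passed value.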

void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
           "Expected argument for an 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}

std::optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
                               bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
    std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
    if (!SimplifiedV)
      return std::nullopt;
    if (isa_and_nonnull<Constant>(*SimplifiedV))
      return cast<Constant>(*SimplifiedV);
    return nullptr;
  }
  if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
    return C;
  SmallVector<AA::ValueAndContext> Values;
  if (getAssumedSimplifiedValues(IRP, &AA, Values,
                                 AA::ValueScope::Interprocedural,
                                 UsedAssumedInformation)) {
    if (Values.empty())
      return std::nullopt;
    if (auto *C = dyn_cast_or_null<Constant>(
            AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
      return C;
  }
  return nullptr;
}

std::optional<Value *> Attributor::getAssumedSimplified(
    const IRPosition &IRP, const AbstractAttribute *AA,
    bool &UsedAssumedInformation, AA::ValueScope S) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP))
    return CB(IRP, AA, UsedAssumedInformation);

  SmallVector<AA::ValueAndContext> Values;
  if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
    return &IRP.getAssociatedValue();
  if (Values.empty())
    return std::nullopt;
  if (AA)
    if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
      return V;
  if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
      IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
    return nullptr;
  return &IRP.getAssociatedValue();
}

bool Attributor::getAssumedSimplifiedValues(
    const IRPosition &InitialIRP, const AbstractAttribute *AA,
    SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
    bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
  SmallPtrSet<Value *, 8> Seen;
  SmallVector<IRPosition, 8> Worklist;
  Worklist.push_back(InitialIRP);
  while (!Worklist.empty()) {
    const IRPosition &IRP = Worklist.pop_back_val();

    // First check all callbacks provided by outside AAs. If any of them
    // returns a non-null value that is different from the associated value,
    // or std::nullopt, we assume it's simplified.
    int NV = Values.size();
    const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
    for (const auto &CB : SimplificationCBs) {
      std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
      if (!CBResult.has_value())
        continue;
      Value *V = *CBResult;
      if (!V)
        return false;
      if ((S & AA::ValueScope::Interprocedural) ||
          AA::isValidInScope(*V, IRP.getAnchorScope()))
        Values.push_back(AA::ValueAndContext{*V, nullptr});
      else
        return false;
    }
    if (SimplificationCBs.empty()) {
      // If no high-level/outside simplification occurred, use
      // AAPotentialValues.
      const auto *PotentialValuesAA =
          getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
      if (PotentialValuesAA &&
          PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
        UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
      } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
        Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
      } else {
        // TODO: We could visit all returns and add the operands.
        return false;
      }
    }

    if (!RecurseForSelectAndPHI)
      break;

    for (int I = NV, E = Values.size(); I < E; ++I) {
      Value *V = Values[I].getValue();
      if (!isa<PHINode>(V) && !isa<SelectInst>(V))
        continue;
      if (!Seen.insert(V).second)
        continue;
      // Move the last element to this slot.
      Values[I] = Values[E - 1];
      // Eliminate the last slot, adjust the indices.
      Values.pop_back();
      --E;
      --I;
      // Add a new value (select or phi) to the worklist.
      Worklist.push_back(IRPosition::value(*V));
    }
  }
  return true;
}
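
// For illustration, with RecurseForSelectAndPHI a select result is expanded
// into the simplified values of its operands, e.g. for
//   %s = select i1 %c, i32 1, i32 2
// the collected values for %s are {1, 2} rather than the select itself.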
1515
1516std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1517 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1518 bool &UsedAssumedInformation) {
1519 if (!V)
1520 return V;
1521 if (*V == nullptr || isa<Constant>(Val: *V))
1522 return V;
1523 if (auto *Arg = dyn_cast<Argument>(Val: *V))
1524 if (CB.getCalledOperand() == Arg->getParent() &&
1525 CB.arg_size() > Arg->getArgNo())
1526 if (!Arg->hasPointeeInMemoryValueAttr())
1527 return getAssumedSimplified(
1528 IRP: IRPosition::callsite_argument(CB, ArgNo: Arg->getArgNo()), AA,
1529 UsedAssumedInformation, S: AA::Intraprocedural);
1530 return nullptr;
1531}
1532
1533Attributor::~Attributor() {
  // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
  // so we cannot delete them; we do, however, need to run their destructors.
1536 for (auto &It : AAMap) {
1537 AbstractAttribute *AA = It.getSecond();
1538 AA->~AbstractAttribute();
1539 }
1540}
1541
1542bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1543 const AAIsDead *FnLivenessAA,
1544 bool &UsedAssumedInformation,
1545 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1546 if (!Configuration.UseLiveness)
1547 return false;
1548 const IRPosition &IRP = AA.getIRPosition();
1549 if (!Functions.count(key: IRP.getAnchorScope()))
1550 return false;
1551 return isAssumedDead(IRP, QueryingAA: &AA, FnLivenessAA, UsedAssumedInformation,
1552 CheckBBLivenessOnly, DepClass);
1553}
1554
1555bool Attributor::isAssumedDead(const Use &U,
1556 const AbstractAttribute *QueryingAA,
1557 const AAIsDead *FnLivenessAA,
1558 bool &UsedAssumedInformation,
1559 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1560 if (!Configuration.UseLiveness)
1561 return false;
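  // Non-instruction users are queried via the value position of the used
  // value. For instruction users we dispatch below: call site argument uses,
  // returns, PHI incoming values, and stored values each have a more precise
  // position or instruction to query than the generic user instruction
  // handled last.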
1562 Instruction *UserI = dyn_cast<Instruction>(Val: U.getUser());
1563 if (!UserI)
1564 return isAssumedDead(IRP: IRPosition::value(V: *U.get()), QueryingAA, FnLivenessAA,
1565 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1566
1567 if (auto *CB = dyn_cast<CallBase>(Val: UserI)) {
1568 // For call site argument uses we can check if the argument is
1569 // unused/dead.
1570 if (CB->isArgOperand(U: &U)) {
1571 const IRPosition &CSArgPos =
1572 IRPosition::callsite_argument(CB: *CB, ArgNo: CB->getArgOperandNo(U: &U));
1573 return isAssumedDead(IRP: CSArgPos, QueryingAA, FnLivenessAA,
1574 UsedAssumedInformation, CheckBBLivenessOnly,
1575 DepClass);
1576 }
1577 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: UserI)) {
1578 const IRPosition &RetPos = IRPosition::returned(F: *RI->getFunction());
1579 return isAssumedDead(IRP: RetPos, QueryingAA, FnLivenessAA,
1580 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1581 } else if (PHINode *PHI = dyn_cast<PHINode>(Val: UserI)) {
1582 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1583 return isAssumedDead(I: *IncomingBB->getTerminator(), QueryingAA, LivenessAA: FnLivenessAA,
1584 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1585 } else if (StoreInst *SI = dyn_cast<StoreInst>(Val: UserI)) {
1586 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1587 const IRPosition IRP = IRPosition::inst(I: *SI);
1588 const AAIsDead *IsDeadAA =
1589 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClass: DepClassTy::NONE);
1590 if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1591 if (QueryingAA)
1592 recordDependence(FromAA: *IsDeadAA, ToAA: *QueryingAA, DepClass);
1593 if (!IsDeadAA->isKnown(BitsEncoding: AAIsDead::IS_REMOVABLE))
1594 UsedAssumedInformation = true;
1595 return true;
1596 }
1597 }
1598 }
1599
1600 return isAssumedDead(IRP: IRPosition::inst(I: *UserI), QueryingAA, FnLivenessAA,
1601 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1602}
1603
1604bool Attributor::isAssumedDead(const Instruction &I,
1605 const AbstractAttribute *QueryingAA,
1606 const AAIsDead *FnLivenessAA,
1607 bool &UsedAssumedInformation,
1608 bool CheckBBLivenessOnly, DepClassTy DepClass,
1609 bool CheckForDeadStore) {
1610 if (!Configuration.UseLiveness)
1611 return false;
1612 const IRPosition::CallBaseContext *CBCtx =
1613 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1614
1615 if (ManifestAddedBlocks.contains(Ptr: I.getParent()))
1616 return false;
1617
1618 const Function &F = *I.getFunction();
1619 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1620 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRP: IRPosition::function(F, CBContext: CBCtx),
1621 QueryingAA, DepClass: DepClassTy::NONE);
1622
1623 // Don't use recursive reasoning.
1624 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1625 return false;
1626
  // If we have a context instruction and a liveness AA, we use it.
1628 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(BB: I.getParent())
1629 : FnLivenessAA->isAssumedDead(I: &I)) {
1630 if (QueryingAA)
1631 recordDependence(FromAA: *FnLivenessAA, ToAA: *QueryingAA, DepClass);
1632 if (!FnLivenessAA->isKnownDead(I: &I))
1633 UsedAssumedInformation = true;
1634 return true;
1635 }
1636
1637 if (CheckBBLivenessOnly)
1638 return false;
1639
1640 const IRPosition IRP = IRPosition::inst(I, CBContext: CBCtx);
1641 const AAIsDead *IsDeadAA =
1642 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClass: DepClassTy::NONE);
1643
1644 // Don't use recursive reasoning.
1645 if (!IsDeadAA || QueryingAA == IsDeadAA)
1646 return false;
1647
1648 if (IsDeadAA->isAssumedDead()) {
1649 if (QueryingAA)
1650 recordDependence(FromAA: *IsDeadAA, ToAA: *QueryingAA, DepClass);
1651 if (!IsDeadAA->isKnownDead())
1652 UsedAssumedInformation = true;
1653 return true;
1654 }
1655
1656 if (CheckForDeadStore && isa<StoreInst>(Val: I) && IsDeadAA->isRemovableStore()) {
1657 if (QueryingAA)
1658 recordDependence(FromAA: *IsDeadAA, ToAA: *QueryingAA, DepClass);
1659 if (!IsDeadAA->isKnownDead())
1660 UsedAssumedInformation = true;
1661 return true;
1662 }
1663
1664 return false;
1665}
1666
1667bool Attributor::isAssumedDead(const IRPosition &IRP,
1668 const AbstractAttribute *QueryingAA,
1669 const AAIsDead *FnLivenessAA,
1670 bool &UsedAssumedInformation,
1671 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1672 if (!Configuration.UseLiveness)
1673 return false;
  // Don't check liveness for constants, e.g. functions, used as (floating)
  // values, since the context instruction and the like are meaningless here.
1676 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1677 isa<Constant>(Val: IRP.getAssociatedValue())) {
1678 return false;
1679 }
1680
1681 Instruction *CtxI = IRP.getCtxI();
1682 if (CtxI &&
1683 isAssumedDead(I: *CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1684 /* CheckBBLivenessOnly */ true,
1685 DepClass: CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1686 return true;
1687
1688 if (CheckBBLivenessOnly)
1689 return false;
1690
  // If we haven't succeeded, we query the specific liveness info for the IRP.
1692 const AAIsDead *IsDeadAA;
1693 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1694 IsDeadAA = getOrCreateAAFor<AAIsDead>(
1695 IRP: IRPosition::callsite_returned(CB: cast<CallBase>(Val&: IRP.getAssociatedValue())),
1696 QueryingAA, DepClass: DepClassTy::NONE);
1697 else
1698 IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClass: DepClassTy::NONE);
1699
1700 // Don't use recursive reasoning.
1701 if (!IsDeadAA || QueryingAA == IsDeadAA)
1702 return false;
1703
1704 if (IsDeadAA->isAssumedDead()) {
1705 if (QueryingAA)
1706 recordDependence(FromAA: *IsDeadAA, ToAA: *QueryingAA, DepClass);
1707 if (!IsDeadAA->isKnownDead())
1708 UsedAssumedInformation = true;
1709 return true;
1710 }
1711
1712 return false;
1713}
1714
1715bool Attributor::isAssumedDead(const BasicBlock &BB,
1716 const AbstractAttribute *QueryingAA,
1717 const AAIsDead *FnLivenessAA,
1718 DepClassTy DepClass) {
1719 if (!Configuration.UseLiveness)
1720 return false;
1721 const Function &F = *BB.getParent();
1722 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1723 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRP: IRPosition::function(F),
1724 QueryingAA, DepClass: DepClassTy::NONE);
1725
1726 // Don't use recursive reasoning.
1727 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1728 return false;
1729
1730 if (FnLivenessAA->isAssumedDead(BB: &BB)) {
1731 if (QueryingAA)
1732 recordDependence(FromAA: *FnLivenessAA, ToAA: *QueryingAA, DepClass);
1733 return true;
1734 }
1735
1736 return false;
1737}
1738
1739bool Attributor::checkForAllCallees(
1740 function_ref<bool(ArrayRef<const Function *>)> Pred,
1741 const AbstractAttribute &QueryingAA, const CallBase &CB) {
1742 if (const Function *Callee = dyn_cast<Function>(Val: CB.getCalledOperand()))
1743 return Pred(Callee);
1744
1745 const auto *CallEdgesAA = getAAFor<AACallEdges>(
1746 QueryingAA, IRP: IRPosition::callsite_function(CB), DepClass: DepClassTy::OPTIONAL);
1747 if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1748 return false;
1749
1750 const auto &Callees = CallEdgesAA->getOptimisticEdges();
1751 return Pred(Callees.getArrayRef());
1752}
1753
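// Uses are deduplicated in the traversal below only if the user is a PHI node
// or not an instruction at all. Other users may legitimately be visited more
// than once, e.g., when a use is re-added as a potential copy of a stored
// value.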
1754bool canMarkAsVisited(const User *Usr) {
1755 return isa<PHINode>(Val: Usr) || !isa<Instruction>(Val: Usr);
1756}
1757
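// A minimal, hypothetical predicate for the traversal below: transitively
// follow users of GEPs and accept loads, rejecting everything else.
//
//   auto UsePred = [](const Use &U, bool &Follow) {
//     Follow = isa<GetElementPtrInst>(U.getUser());
//     return Follow || isa<LoadInst>(U.getUser());
//   };
//   bool AllUsesOk = A.checkForAllUses(UsePred, QueryingAA, V);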
1758bool Attributor::checkForAllUses(
1759 function_ref<bool(const Use &, bool &)> Pred,
1760 const AbstractAttribute &QueryingAA, const Value &V,
1761 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1762 bool IgnoreDroppableUses,
1763 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1764
1765 // Check virtual uses first.
1766 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(Val: &V))
1767 if (!CB(*this, &QueryingAA))
1768 return false;
1769
1770 if (isa<ConstantData>(Val: V))
1771 return false;
1772
1773 // Check the trivial case first as it catches void values.
1774 if (V.use_empty())
1775 return true;
1776
1777 const IRPosition &IRP = QueryingAA.getIRPosition();
1778 SmallVector<const Use *, 16> Worklist;
1779 SmallPtrSet<const Use *, 16> Visited;
1780
1781 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1782 for (const Use &UU : V.uses()) {
1783 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
        LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
                             "rejected by the equivalence callback: "
                          << *UU << "!\n");
1787 return false;
1788 }
1789
1790 Worklist.push_back(Elt: &UU);
1791 }
1792 return true;
1793 };
1794
1795 AddUsers(V, /* OldUse */ nullptr);
1796
1797 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1798 << " initial uses to check\n");
1799
1800 const Function *ScopeFn = IRP.getAnchorScope();
1801 const auto *LivenessAA =
1802 ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRP: IRPosition::function(F: *ScopeFn),
1803 DepClass: DepClassTy::NONE)
1804 : nullptr;
1805
1806 while (!Worklist.empty()) {
1807 const Use *U = Worklist.pop_back_val();
1808 if (canMarkAsVisited(Usr: U->getUser()) && !Visited.insert(Ptr: U).second)
1809 continue;
1810 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1811 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1812 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1813 << "\n";
1814 else
1815 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1816 << "\n";
1817 });
1818 bool UsedAssumedInformation = false;
1819 if (isAssumedDead(U: *U, QueryingAA: &QueryingAA, FnLivenessAA: LivenessAA, UsedAssumedInformation,
1820 CheckBBLivenessOnly, DepClass: LivenessDepClass)) {
1821 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1822 dbgs() << "[Attributor] Dead use, skip!\n");
1823 continue;
1824 }
1825 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1826 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1827 dbgs() << "[Attributor] Droppable user, skip!\n");
1828 continue;
1829 }
1830
1831 if (auto *SI = dyn_cast<StoreInst>(Val: U->getUser())) {
1832 if (&SI->getOperandUse(i: 0) == U) {
1833 if (!Visited.insert(Ptr: U).second)
1834 continue;
1835 SmallSetVector<Value *, 4> PotentialCopies;
1836 if (AA::getPotentialCopiesOfStoredValue(
1837 A&: *this, SI&: *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1838 /* OnlyExact */ true)) {
1839 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1840 dbgs()
1841 << "[Attributor] Value is stored, continue with "
1842 << PotentialCopies.size()
1843 << " potential copies instead!\n");
1844 for (Value *PotentialCopy : PotentialCopies)
1845 if (!AddUsers(*PotentialCopy, U))
1846 return false;
1847 continue;
1848 }
1849 }
1850 }
1851
1852 bool Follow = false;
1853 if (!Pred(*U, Follow))
1854 return false;
1855 if (!Follow)
1856 continue;
1857
1858 User &Usr = *U->getUser();
1859 AddUsers(Usr, /* OldUse */ nullptr);
1860 }
1861
1862 return true;
1863}
1864
1865bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1866 const AbstractAttribute &QueryingAA,
1867 bool RequireAllCallSites,
1868 bool &UsedAssumedInformation) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function must have
  // internal linkage.
1872 const IRPosition &IRP = QueryingAA.getIRPosition();
1873 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1874 if (!AssociatedFunction) {
1875 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1876 << "\n");
1877 return false;
1878 }
1879
1880 return checkForAllCallSites(Pred, Fn: *AssociatedFunction, RequireAllCallSites,
1881 QueryingAA: &QueryingAA, UsedAssumedInformation);
1882}
1883
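// The actual call site walk: iterate over all uses of \p Fn, looking through
// constant cast expressions, and interpret each use as an AbstractCallSite,
// which also models callback calls. Uses that are not (the callee operand of)
// a call site either abort the walk or are skipped, depending on
// \p RequireAllCallSites.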
1884bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1885 const Function &Fn,
1886 bool RequireAllCallSites,
1887 const AbstractAttribute *QueryingAA,
1888 bool &UsedAssumedInformation,
1889 bool CheckPotentiallyDead) {
1890 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1891 LLVM_DEBUG(
1892 dbgs()
1893 << "[Attributor] Function " << Fn.getName()
1894 << " has no internal linkage, hence not all call sites are known\n");
1895 return false;
1896 }
1897 // Check virtual uses first.
1898 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(Val: &Fn))
1899 if (!CB(*this, QueryingAA))
1900 return false;
1901
1902 SmallVector<const Use *, 8> Uses(make_pointer_range(Range: Fn.uses()));
1903 for (unsigned u = 0; u < Uses.size(); ++u) {
1904 const Use &U = *Uses[u];
1905 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1906 if (auto *Fn = dyn_cast<Function>(U))
1907 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1908 << *U.getUser() << "\n";
1909 else
1910 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1911 << "\n";
1912 });
1913 if (!CheckPotentiallyDead &&
1914 isAssumedDead(U, QueryingAA, FnLivenessAA: nullptr, UsedAssumedInformation,
1915 /* CheckBBLivenessOnly */ true)) {
1916 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1917 dbgs() << "[Attributor] Dead use, skip!\n");
1918 continue;
1919 }
1920 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: U.getUser())) {
1921 if (CE->isCast() && CE->getType()->isPointerTy()) {
1922 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1923 dbgs() << "[Attributor] Use, is constant cast expression, add "
1924 << CE->getNumUses() << " uses of that expression instead!\n";
1925 });
1926 for (const Use &CEU : CE->uses())
1927 Uses.push_back(Elt: &CEU);
1928 continue;
1929 }
1930 }
1931
1932 AbstractCallSite ACS(&U);
1933 if (!ACS) {
1934 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1935 << " has non call site use " << *U.get() << " in "
1936 << *U.getUser() << "\n");
1937 return false;
1938 }
1939
1940 const Use *EffectiveUse =
1941 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1942 if (!ACS.isCallee(U: EffectiveUse)) {
1943 if (!RequireAllCallSites) {
1944 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1945 << " is not a call of " << Fn.getName()
1946 << ", skip use\n");
1947 continue;
1948 }
1949 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1950 << " is an invalid use of " << Fn.getName() << "\n");
1951 return false;
1952 }
1953
    // Make sure the arguments that can be matched between the call site and
    // the callee agree on their type. It is unlikely they do not, and it
    // doesn't make sense for all attributes to know/care about this.
1957 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1958 unsigned MinArgsParams =
1959 std::min(a: size_t(ACS.getNumArgOperands()), b: Fn.arg_size());
1960 for (unsigned u = 0; u < MinArgsParams; ++u) {
1961 Value *CSArgOp = ACS.getCallArgOperand(ArgNo: u);
1962 if (CSArgOp && Fn.getArg(i: u)->getType() != CSArgOp->getType()) {
        LLVM_DEBUG(
            dbgs() << "[Attributor] Call site / callee argument type mismatch ["
                   << u << "@" << Fn.getName() << ": "
                   << *Fn.getArg(u)->getType() << " vs. "
                   << *ACS.getCallArgOperand(u)->getType() << "]\n");
1968 return false;
1969 }
1970 }
1971
1972 if (Pred(ACS))
1973 continue;
1974
1975 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1976 << *ACS.getInstruction() << "\n");
1977 return false;
1978 }
1979
1980 return true;
1981}
1982
1983bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
  // TODO: Maintain a cache of Values that are on the pathway from an Argument
  // to an Instruction that would affect the liveness/return state, etc.
1987 return EnableCallSiteSpecific;
1988}
1989
1990bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
1991 const AbstractAttribute &QueryingAA,
1992 AA::ValueScope S,
1993 bool RecurseForSelectAndPHI) {
1994
1995 const IRPosition &IRP = QueryingAA.getIRPosition();
1996 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1997 if (!AssociatedFunction)
1998 return false;
1999
2000 bool UsedAssumedInformation = false;
2001 SmallVector<AA::ValueAndContext> Values;
2002 if (!getAssumedSimplifiedValues(
2003 InitialIRP: IRPosition::returned(F: *AssociatedFunction), AA: &QueryingAA, Values, S,
2004 UsedAssumedInformation, RecurseForSelectAndPHI))
2005 return false;
2006
2007 return llvm::all_of(Range&: Values, P: [&](const AA::ValueAndContext &VAC) {
2008 return Pred(*VAC.getValue());
2009 });
2010}
2011
2012static bool checkForAllInstructionsImpl(
2013 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
2014 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2015 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2016 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2017 bool CheckPotentiallyDead = false) {
2018 for (unsigned Opcode : Opcodes) {
2019 // Check if we have instructions with this opcode at all first.
2020 auto *Insts = OpcodeInstMap.lookup(Val: Opcode);
2021 if (!Insts)
2022 continue;
2023
2024 for (Instruction *I : *Insts) {
2025 // Skip dead instructions.
2026 if (A && !CheckPotentiallyDead &&
2027 A->isAssumedDead(IRP: IRPosition::inst(I: *I), QueryingAA, FnLivenessAA: LivenessAA,
2028 UsedAssumedInformation, CheckBBLivenessOnly)) {
2029 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2030 dbgs() << "[Attributor] Instruction " << *I
2031 << " is potentially dead, skip!\n";);
2032 continue;
2033 }
2034
2035 if (!Pred(*I))
2036 return false;
2037 }
2038 }
2039 return true;
2040}
2041
2042bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2043 const Function *Fn,
2044 const AbstractAttribute *QueryingAA,
2045 ArrayRef<unsigned> Opcodes,
2046 bool &UsedAssumedInformation,
2047 bool CheckBBLivenessOnly,
2048 bool CheckPotentiallyDead) {
  // Since we need to provide instructions, we require an exact definition.
2050 if (!Fn || Fn->isDeclaration())
2051 return false;
2052
2053 const IRPosition &QueryIRP = IRPosition::function(F: *Fn);
2054 const auto *LivenessAA =
2055 CheckPotentiallyDead && QueryingAA
2056 ? (getAAFor<AAIsDead>(QueryingAA: *QueryingAA, IRP: QueryIRP, DepClass: DepClassTy::NONE))
2057 : nullptr;
2058
  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
  return checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
                                     LivenessAA, Opcodes,
                                     UsedAssumedInformation,
                                     CheckBBLivenessOnly, CheckPotentiallyDead);
2066}
2067
2068bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2069 const AbstractAttribute &QueryingAA,
2070 ArrayRef<unsigned> Opcodes,
2071 bool &UsedAssumedInformation,
2072 bool CheckBBLivenessOnly,
2073 bool CheckPotentiallyDead) {
2074 const IRPosition &IRP = QueryingAA.getIRPosition();
2075 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2076 return checkForAllInstructions(Pred, Fn: AssociatedFunction, QueryingAA: &QueryingAA, Opcodes,
2077 UsedAssumedInformation, CheckBBLivenessOnly,
2078 CheckPotentiallyDead);
2079}
2080
2081bool Attributor::checkForAllReadWriteInstructions(
2082 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2083 bool &UsedAssumedInformation) {
2084 TimeTraceScope TS("checkForAllReadWriteInstructions");
2085
2086 const Function *AssociatedFunction =
2087 QueryingAA.getIRPosition().getAssociatedFunction();
2088 if (!AssociatedFunction)
2089 return false;
2090
2091 const IRPosition &QueryIRP = IRPosition::function(F: *AssociatedFunction);
2092 const auto *LivenessAA =
2093 getAAFor<AAIsDead>(QueryingAA, IRP: QueryIRP, DepClass: DepClassTy::NONE);
2094
2095 for (Instruction *I :
2096 InfoCache.getReadOrWriteInstsForFunction(F: *AssociatedFunction)) {
2097 // Skip dead instructions.
2098 if (isAssumedDead(IRP: IRPosition::inst(I: *I), QueryingAA: &QueryingAA, FnLivenessAA: LivenessAA,
2099 UsedAssumedInformation))
2100 continue;
2101
2102 if (!Pred(*I))
2103 return false;
2104 }
2105
2106 return true;
2107}
2108
2109void Attributor::runTillFixpoint() {
2110 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2111 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2112 << DG.SyntheticRoot.Deps.size()
2113 << " abstract attributes.\n");
2114
2115 // Now that all abstract attributes are collected and initialized we start
2116 // the abstract analysis.
2117
2118 unsigned IterationCounter = 1;
2119 unsigned MaxIterations =
2120 Configuration.MaxFixpointIterations.value_or(u&: SetFixpointIterations);
2121
2122 SmallVector<AbstractAttribute *, 32> ChangedAAs;
2123 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2124 Worklist.insert_range(R&: DG.SyntheticRoot);
2125
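  // The main fixpoint loop: update every attribute on the worklist, fast
  // forward pessimistic fixpoints through required dependences of invalid
  // AAs, and re-queue everything that depends on an attribute that changed,
  // until the worklist is empty or the iteration budget is exhausted.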
2126 do {
2127 // Remember the size to determine new attributes.
2128 size_t NumAAs = DG.SyntheticRoot.Deps.size();
2129 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2130 << ", Worklist size: " << Worklist.size() << "\n");
2131
2132 // For invalid AAs we can fix dependent AAs that have a required dependence,
2133 // thereby folding long dependence chains in a single step without the need
2134 // to run updates.
2135 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2136 AbstractAttribute *InvalidAA = InvalidAAs[u];
2137
2138 // Check the dependences to fast track invalidation.
2139 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2140 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2141 << " has " << InvalidAA->Deps.size()
2142 << " required & optional dependences\n");
2143 for (auto &DepIt : InvalidAA->Deps) {
2144 AbstractAttribute *DepAA = cast<AbstractAttribute>(Val: DepIt.getPointer());
2145 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2146 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2147 dbgs() << " - recompute: " << *DepAA);
2148 Worklist.insert(X: DepAA);
2149 continue;
2150 }
2151 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
2152 << " - invalidate: " << *DepAA);
2153 DepAA->getState().indicatePessimisticFixpoint();
2154 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2155 if (!DepAA->getState().isValidState())
2156 InvalidAAs.insert(X: DepAA);
2157 else
2158 ChangedAAs.push_back(Elt: DepAA);
2159 }
2160 InvalidAA->Deps.clear();
2161 }
2162
2163 // Add all abstract attributes that are potentially dependent on one that
2164 // changed to the work list.
2165 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2166 for (auto &DepIt : ChangedAA->Deps)
2167 Worklist.insert(X: cast<AbstractAttribute>(Val: DepIt.getPointer()));
2168 ChangedAA->Deps.clear();
2169 }
2170
2171 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2172 << ", Worklist+Dependent size: " << Worklist.size()
2173 << "\n");
2174
2175 // Reset the changed and invalid set.
2176 ChangedAAs.clear();
2177 InvalidAAs.clear();
2178
    // Update all abstract attributes in the work list and record the ones
    // that changed.
2181 for (AbstractAttribute *AA : Worklist) {
2182 const auto &AAState = AA->getState();
2183 if (!AAState.isAtFixpoint())
2184 if (updateAA(AA&: *AA) == ChangeStatus::CHANGED)
2185 ChangedAAs.push_back(Elt: AA);
2186
      // Use the InvalidAAs vector to quickly propagate invalid states
      // transitively without requiring updates.
2189 if (!AAState.isValidState())
2190 InvalidAAs.insert(X: AA);
2191 }
2192
2193 // Add attributes to the changed set if they have been created in the last
2194 // iteration.
2195 ChangedAAs.append(in_start: DG.SyntheticRoot.begin() + NumAAs,
2196 in_end: DG.SyntheticRoot.end());
2197
2198 // Reset the work list and repopulate with the changed abstract attributes.
2199 // Note that dependent ones are added above.
2200 Worklist.clear();
2201 Worklist.insert_range(R&: ChangedAAs);
2202 Worklist.insert_range(R&: QueryAAsAwaitingUpdate);
2203 QueryAAsAwaitingUpdate.clear();
2204
2205 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2206
2207 if (IterationCounter > MaxIterations && !Functions.empty()) {
2208 auto Remark = [&](OptimizationRemarkMissed ORM) {
2209 return ORM << "Attributor did not reach a fixpoint after "
2210 << ore::NV("Iterations", MaxIterations) << " iterations.";
2211 };
2212 Function *F = Functions.front();
2213 emitRemark<OptimizationRemarkMissed>(F, RemarkName: "FixedPoint", RemarkCB&: Remark);
2214 }
2215
2216 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2217 << IterationCounter << "/" << MaxIterations
2218 << " iterations\n");
2219
  // Reset abstract attributes not settled in a sound fixpoint by now. This
  // happens when we stopped the fixpoint iteration early. Note that only the
  // ones marked as "changed" *and* the ones transitively depending on them
  // need to be reverted to a pessimistic state. Others might not be in a
  // fixpoint state but we can use the optimistic results for them anyway.
2225 SmallPtrSet<AbstractAttribute *, 32> Visited;
2226 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2227 AbstractAttribute *ChangedAA = ChangedAAs[u];
2228 if (!Visited.insert(Ptr: ChangedAA).second)
2229 continue;
2230
2231 AbstractState &State = ChangedAA->getState();
2232 if (!State.isAtFixpoint()) {
2233 State.indicatePessimisticFixpoint();
2234
2235 NumAttributesTimedOut++;
2236 }
2237
2238 for (auto &DepIt : ChangedAA->Deps)
2239 ChangedAAs.push_back(Elt: cast<AbstractAttribute>(Val: DepIt.getPointer()));
2240 ChangedAA->Deps.clear();
2241 }
2242
2243 LLVM_DEBUG({
2244 if (!Visited.empty())
2245 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2246 << " abstract attributes.\n";
2247 });
2248}
2249
2250void Attributor::registerForUpdate(AbstractAttribute &AA) {
2251 assert(AA.isQueryAA() &&
2252 "Non-query AAs should not be required to register for updates!");
2253 QueryAAsAwaitingUpdate.insert(X: &AA);
2254}
2255
2256ChangeStatus Attributor::manifestAttributes() {
2257 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2258 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2259
2260 unsigned NumManifested = 0;
2261 unsigned NumAtFixpoint = 0;
2262 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2263 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2264 AbstractAttribute *AA = cast<AbstractAttribute>(Val: DepAA.getPointer());
2265 AbstractState &State = AA->getState();
2266
    // If a fixpoint was not reached already, we can now take the optimistic
    // state. This is correct because we enforced a pessimistic one on
    // abstract attributes that were transitively dependent on a changed one
    // already above.
2271 if (!State.isAtFixpoint())
2272 State.indicateOptimisticFixpoint();
2273
    // We must not manifest Attributes that use CallBase info.
2275 if (AA->hasCallBaseContext())
2276 continue;
2277 // If the state is invalid, we do not try to manifest it.
2278 if (!State.isValidState())
2279 continue;
2280
2281 if (AA->getCtxI() && !isRunOn(Fn&: *AA->getAnchorScope()))
2282 continue;
2283
2284 // Skip dead code.
2285 bool UsedAssumedInformation = false;
2286 if (isAssumedDead(AA: *AA, FnLivenessAA: nullptr, UsedAssumedInformation,
2287 /* CheckBBLivenessOnly */ true))
2288 continue;
    // Consult the manifest debug counter, which allows skipping the
    // manifestation of AAs.
2291 if (!DebugCounter::shouldExecute(CounterName: ManifestDBGCounter))
2292 continue;
2293 // Manifest the state and record if we changed the IR.
2294 ChangeStatus LocalChange = AA->manifest(A&: *this);
2295 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2296 AA->trackStatistics();
2297 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2298 << "\n");
2299
2300 ManifestChange = ManifestChange | LocalChange;
2301
2302 NumAtFixpoint++;
2303 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2304 }
2305
2306 (void)NumManifested;
2307 (void)NumAtFixpoint;
  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
                    << " abstract attributes while " << NumAtFixpoint
                    << " were in a valid fixpoint state\n");
2311
2312 NumAttributesManifested += NumManifested;
2313 NumAttributesValidFixpoint += NumAtFixpoint;
2314
2315 (void)NumFinalAAs;
2316 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
    auto DepIt = DG.SyntheticRoot.Deps.begin();
    std::advance(DepIt, NumFinalAAs);
2320 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2321 ++u, ++DepIt) {
      errs() << "Unexpected abstract attribute: "
             << *cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
             << cast<AbstractAttribute>(DepIt->getPointer())
                    ->getIRPosition()
                    .getAssociatedValue()
             << "\n";
2328 }
2329 llvm_unreachable("Expected the final number of abstract attributes to "
2330 "remain unchanged!");
2331 }
2332
2333 for (auto &It : AttrsMap) {
2334 AttributeList &AL = It.getSecond();
2335 const IRPosition &IRP =
2336 isa<Function>(Val: It.getFirst())
2337 ? IRPosition::function(F: *cast<Function>(Val: It.getFirst()))
2338 : IRPosition::callsite_function(CB: *cast<CallBase>(Val: It.getFirst()));
2339 IRP.setAttrList(AL);
2340 }
2341
2342 return ManifestChange;
2343}
2344
2345void Attributor::identifyDeadInternalFunctions() {
2346 // Early exit if we don't intend to delete functions.
2347 if (!Configuration.DeleteFns)
2348 return;
2349
2350 // To avoid triggering an assertion in the lazy call graph we will not delete
2351 // any internal library functions. We should modify the assertion though and
2352 // allow internals to be deleted.
2353 const auto *TLI =
2354 isModulePass()
2355 ? nullptr
2356 : getInfoCache().getTargetLibraryInfoForFunction(F: *Functions.back());
2357 LibFunc LF;
2358
2359 // Identify dead internal functions and delete them. This happens outside
2360 // the other fixpoint analysis as we might treat potentially dead functions
2361 // as live to lower the number of iterations. If they happen to be dead, the
2362 // below fixpoint loop will identify and eliminate them.
2363
2364 SmallVector<Function *, 8> InternalFns;
2365 for (Function *F : Functions)
2366 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(FDecl: *F, F&: LF)))
2367 InternalFns.push_back(Elt: F);
2368
2369 SmallPtrSet<Function *, 8> LiveInternalFns;
2370 bool FoundLiveInternal = true;
2371 while (FoundLiveInternal) {
2372 FoundLiveInternal = false;
2373 for (Function *&F : InternalFns) {
2374 if (!F)
2375 continue;
2376
2377 bool UsedAssumedInformation = false;
2378 if (checkForAllCallSites(
              [&](AbstractCallSite ACS) {
                // Note: this is the function that contains the call site,
                // i.e., the caller of \p F.
                Function *Caller = ACS.getInstruction()->getFunction();
                return ToBeDeletedFunctions.count(Caller) ||
                       (Functions.count(Caller) && Caller->hasLocalLinkage() &&
                        !LiveInternalFns.count(Caller));
2384 },
2385 Fn: *F, RequireAllCallSites: true, QueryingAA: nullptr, UsedAssumedInformation)) {
2386 continue;
2387 }
2388
2389 LiveInternalFns.insert(Ptr: F);
2390 F = nullptr;
2391 FoundLiveInternal = true;
2392 }
2393 }
2394
2395 for (Function *F : InternalFns)
2396 if (F)
2397 ToBeDeletedFunctions.insert(X: F);
2398}
2399
2400ChangeStatus Attributor::cleanupIR() {
2401 TimeTraceScope TimeScope("Attributor::cleanupIR");
  // Delete things at the end to avoid invalid references and to impose a
  // nice order.
2403 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2404 << ToBeDeletedFunctions.size() << " functions and "
2405 << ToBeDeletedBlocks.size() << " blocks and "
2406 << ToBeDeletedInsts.size() << " instructions and "
2407 << ToBeChangedValues.size() << " values and "
2408 << ToBeChangedUses.size() << " uses. To insert "
2409 << ToBeChangedToUnreachableInsts.size()
2410 << " unreachables.\n"
2411 << "Preserve manifest added " << ManifestAddedBlocks.size()
2412 << " blocks\n");
2413
2414 SmallVector<WeakTrackingVH, 32> DeadInsts;
2415 SmallVector<Instruction *, 32> TerminatorsToFold;
2416
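  // Replace a single use \p U with \p NewV: resolve chained replacements
  // first, then patch up attributes that the replacement invalidates (e.g.,
  // `returned` on arguments, `noundef` on parameters fed an undef) and record
  // terminators that become foldable or unreachable.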
2417 auto ReplaceUse = [&](Use *U, Value *NewV) {
2418 Value *OldV = U->get();
2419
2420 // If we plan to replace NewV we need to update it at this point.
2421 do {
2422 const auto &Entry = ToBeChangedValues.lookup(Key: NewV);
2423 if (!get<0>(Pair: Entry))
2424 break;
2425 NewV = get<0>(Pair: Entry);
2426 } while (true);
2427
2428 Instruction *I = dyn_cast<Instruction>(Val: U->getUser());
2429 assert((!I || isRunOn(*I->getFunction())) &&
2430 "Cannot replace an instruction outside the current SCC!");
2431
2432 // Do not replace uses in returns if the value is a must-tail call we will
2433 // not delete.
2434 if (auto *RI = dyn_cast_or_null<ReturnInst>(Val: I)) {
2435 if (auto *CI = dyn_cast<CallInst>(Val: OldV->stripPointerCasts()))
2436 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(key: CI))
2437 return;
2438 // If we rewrite a return and the new value is not an argument, strip the
2439 // `returned` attribute as it is wrong now.
2440 if (!isa<Argument>(Val: NewV))
2441 for (auto &Arg : RI->getFunction()->args())
2442 Arg.removeAttr(Kind: Attribute::Returned);
2443 }
2444
2445 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2446 << " instead of " << *OldV << "\n");
2447 U->set(NewV);
2448
2449 if (Instruction *I = dyn_cast<Instruction>(Val: OldV)) {
2450 CGModifiedFunctions.insert(X: I->getFunction());
2451 if (!isa<PHINode>(Val: I) && !ToBeDeletedInsts.count(key: I) &&
2452 isInstructionTriviallyDead(I))
2453 DeadInsts.push_back(Elt: I);
2454 }
2455 if (isa<UndefValue>(Val: NewV) && isa<CallBase>(Val: U->getUser())) {
2456 auto *CB = cast<CallBase>(Val: U->getUser());
2457 if (CB->isArgOperand(U)) {
2458 unsigned Idx = CB->getArgOperandNo(U);
2459 CB->removeParamAttr(ArgNo: Idx, Kind: Attribute::NoUndef);
2460 auto *Callee = dyn_cast_if_present<Function>(Val: CB->getCalledOperand());
2461 if (Callee && Callee->arg_size() > Idx)
2462 Callee->removeParamAttr(ArgNo: Idx, Kind: Attribute::NoUndef);
2463 }
2464 }
2465 if (isa<Constant>(Val: NewV) && isa<BranchInst>(Val: U->getUser())) {
2466 Instruction *UserI = cast<Instruction>(Val: U->getUser());
2467 if (isa<UndefValue>(Val: NewV)) {
2468 ToBeChangedToUnreachableInsts.insert(X: UserI);
2469 } else {
2470 TerminatorsToFold.push_back(Elt: UserI);
2471 }
2472 }
2473 };
2474
2475 for (auto &It : ToBeChangedUses) {
2476 Use *U = It.first;
2477 Value *NewV = It.second;
2478 ReplaceUse(U, NewV);
2479 }
2480
2481 SmallVector<Use *, 4> Uses;
2482 for (auto &It : ToBeChangedValues) {
2483 Value *OldV = It.first;
2484 auto [NewV, Done] = It.second;
2485 Uses.clear();
2486 for (auto &U : OldV->uses())
2487 if (Done || !U.getUser()->isDroppable())
2488 Uses.push_back(Elt: &U);
2489 for (Use *U : Uses) {
2490 if (auto *I = dyn_cast<Instruction>(Val: U->getUser()))
2491 if (!isRunOn(Fn&: *I->getFunction()))
2492 continue;
2493 ReplaceUse(U, NewV);
2494 }
2495 }
2496
2497 for (const auto &V : InvokeWithDeadSuccessor)
2498 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(Val: V)) {
2499 assert(isRunOn(*II->getFunction()) &&
2500 "Cannot replace an invoke outside the current SCC!");
2501 bool UnwindBBIsDead = II->hasFnAttr(Kind: Attribute::NoUnwind);
2502 bool NormalBBIsDead = II->hasFnAttr(Kind: Attribute::NoReturn);
2503 bool Invoke2CallAllowed =
2504 !AAIsDead::mayCatchAsynchronousExceptions(F: *II->getFunction());
2505 assert((UnwindBBIsDead || NormalBBIsDead) &&
2506 "Invoke does not have dead successors!");
2507 BasicBlock *BB = II->getParent();
2508 BasicBlock *NormalDestBB = II->getNormalDest();
2509 if (UnwindBBIsDead) {
2510 Instruction *NormalNextIP = &NormalDestBB->front();
2511 if (Invoke2CallAllowed) {
2512 changeToCall(II);
2513 NormalNextIP = BB->getTerminator();
2514 }
2515 if (NormalBBIsDead)
2516 ToBeChangedToUnreachableInsts.insert(X: NormalNextIP);
2517 } else {
2518 assert(NormalBBIsDead && "Broken invariant!");
2519 if (!NormalDestBB->getUniquePredecessor())
2520 NormalDestBB = SplitBlockPredecessors(BB: NormalDestBB, Preds: {BB}, Suffix: ".dead");
2521 ToBeChangedToUnreachableInsts.insert(X: &NormalDestBB->front());
2522 }
2523 }
2524 for (Instruction *I : TerminatorsToFold) {
2525 assert(isRunOn(*I->getFunction()) &&
2526 "Cannot replace a terminator outside the current SCC!");
2527 CGModifiedFunctions.insert(X: I->getFunction());
2528 ConstantFoldTerminator(BB: I->getParent());
2529 }
2530 for (const auto &V : ToBeChangedToUnreachableInsts)
2531 if (Instruction *I = dyn_cast_or_null<Instruction>(Val: V)) {
2532 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2533 << "\n");
2534 assert(isRunOn(*I->getFunction()) &&
2535 "Cannot replace an instruction outside the current SCC!");
2536 CGModifiedFunctions.insert(X: I->getFunction());
2537 changeToUnreachable(I);
2538 }
2539
2540 for (const auto &V : ToBeDeletedInsts) {
2541 if (Instruction *I = dyn_cast_or_null<Instruction>(Val: V)) {
2542 assert((!isa<CallBase>(I) || isa<IntrinsicInst>(I) ||
2543 isRunOn(*I->getFunction())) &&
2544 "Cannot delete an instruction outside the current SCC!");
2545 I->dropDroppableUses();
2546 CGModifiedFunctions.insert(X: I->getFunction());
2547 if (!I->getType()->isVoidTy())
2548 I->replaceAllUsesWith(V: UndefValue::get(T: I->getType()));
2549 if (!isa<PHINode>(Val: I) && isInstructionTriviallyDead(I))
2550 DeadInsts.push_back(Elt: I);
2551 else
2552 I->eraseFromParent();
2553 }
2554 }
2555
2556 llvm::erase_if(C&: DeadInsts, P: [&](WeakTrackingVH I) { return !I; });
2557
2558 LLVM_DEBUG({
2559 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2560 for (auto &I : DeadInsts)
2561 if (I)
2562 dbgs() << " - " << *I << "\n";
2563 });
2564
2565 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2566
2567 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2568 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2569 ToBeDeletedBBs.reserve(N: NumDeadBlocks);
2570 for (BasicBlock *BB : ToBeDeletedBlocks) {
2571 assert(isRunOn(*BB->getParent()) &&
2572 "Cannot delete a block outside the current SCC!");
2573 CGModifiedFunctions.insert(X: BB->getParent());
2574 // Do not delete BBs added during manifests of AAs.
2575 if (ManifestAddedBlocks.contains(Ptr: BB))
2576 continue;
2577 ToBeDeletedBBs.push_back(Elt: BB);
2578 }
    // We do not actually delete the blocks but squash them into a single
    // unreachable instruction; untangling branches that jump here is
    // something we need to do in a more generic way.
2582 detachDeadBlocks(BBs: ToBeDeletedBBs, Updates: nullptr);
2583 }
2584
2585 identifyDeadInternalFunctions();
2586
2587 // Rewrite the functions as requested during manifest.
2588 ChangeStatus ManifestChange = rewriteFunctionSignatures(ModifiedFns&: CGModifiedFunctions);
2589
2590 for (Function *Fn : CGModifiedFunctions)
2591 if (!ToBeDeletedFunctions.count(key: Fn) && Functions.count(key: Fn))
2592 Configuration.CGUpdater.reanalyzeFunction(Fn&: *Fn);
2593
2594 for (Function *Fn : ToBeDeletedFunctions) {
2595 if (!Functions.count(key: Fn))
2596 continue;
2597 Configuration.CGUpdater.removeFunction(Fn&: *Fn);
2598 }
2599
  if (!ToBeChangedUses.empty() || !ToBeChangedToUnreachableInsts.empty() ||
      !ToBeDeletedFunctions.empty() || !ToBeDeletedBlocks.empty() ||
      !ToBeDeletedInsts.empty() || !InvokeWithDeadSuccessor.empty() ||
      !DeadInsts.empty())
    ManifestChange = ChangeStatus::CHANGED;
2620
2621 NumFnDeleted += ToBeDeletedFunctions.size();
2622
2623 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2624 << " functions after manifest.\n");
2625
2626#ifdef EXPENSIVE_CHECKS
2627 for (Function *F : Functions) {
2628 if (ToBeDeletedFunctions.count(F))
2629 continue;
2630 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2631 }
2632#endif
2633
2634 return ManifestChange;
2635}
2636
2637ChangeStatus Attributor::run() {
2638 TimeTraceScope TimeScope("Attributor::run");
2639 AttributorCallGraph ACallGraph(*this);
2640
2641 if (PrintCallGraph)
2642 ACallGraph.populateAll();
2643
2644 Phase = AttributorPhase::UPDATE;
2645 runTillFixpoint();
2646
  // Dump the dependence graphs on demand.
2648 if (DumpDepGraph)
2649 DG.dumpGraph();
2650
2651 if (ViewDepGraph)
2652 DG.viewGraph();
2653
2654 if (PrintDependencies)
2655 DG.print();
2656
2657 Phase = AttributorPhase::MANIFEST;
2658 ChangeStatus ManifestChange = manifestAttributes();
2659
2660 Phase = AttributorPhase::CLEANUP;
2661 ChangeStatus CleanupChange = cleanupIR();
2662
2663 if (PrintCallGraph)
2664 ACallGraph.print();
2665
2666 return ManifestChange | CleanupChange;
2667}
2668
2669ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2670 TimeTraceScope TimeScope("updateAA", [&]() {
2671 return AA.getName().str() +
2672 std::to_string(val: AA.getIRPosition().getPositionKind());
2673 });
2674 assert(Phase == AttributorPhase::UPDATE &&
2675 "We can update AA only in the update stage!");
2676
2677 // Use a new dependence vector for this update.
2678 DependenceVector DV;
2679 DependenceStack.push_back(Elt: &DV);
2680
2681 auto &AAState = AA.getState();
2682 ChangeStatus CS = ChangeStatus::UNCHANGED;
2683 bool UsedAssumedInformation = false;
2684 if (!isAssumedDead(AA, FnLivenessAA: nullptr, UsedAssumedInformation,
2685 /* CheckBBLivenessOnly */ true))
2686 CS = AA.update(A&: *this);
2687
2688 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2689 // If the AA did not rely on outside information but changed, we run it
2690 // again to see if it found a fixpoint. Most AAs do but we don't require
2691 // them to. Hence, it might take the AA multiple iterations to get to a
2692 // fixpoint even if it does not rely on outside information, which is fine.
2693 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2694 if (CS == ChangeStatus::CHANGED)
2695 RerunCS = AA.update(A&: *this);
2696
2697 // If the attribute did not change during the run or rerun, and it still did
2698 // not query any non-fix information, the state will not change and we can
2699 // indicate that right at this point.
2700 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2701 AAState.indicateOptimisticFixpoint();
2702 }
2703
2704 if (!AAState.isAtFixpoint())
2705 rememberDependences();
2706
  // Verify the stack was used properly, that is, we pop the dependence
  // vector we put there earlier.
2709 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2710 (void)PoppedDV;
2711 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2712
2713 return CS;
2714}
2715
2716void Attributor::createShallowWrapper(Function &F) {
2717 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2718
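  // Conceptually, for a function `int foo(int)` this produces (a sketch; the
  // name `foo.orig` is illustrative, the original actually becomes anonymous
  // and internal below):
  //
  //   internal int foo.orig(int x) { ... original body ... }
  //   int foo(int x) { return foo.orig(x); } // wrapper, original linkage
  //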
2719 Module &M = *F.getParent();
2720 LLVMContext &Ctx = M.getContext();
2721 FunctionType *FnTy = F.getFunctionType();
2722
2723 Function *Wrapper =
2724 Function::Create(Ty: FnTy, Linkage: F.getLinkage(), AddrSpace: F.getAddressSpace(), N: F.getName());
  F.setName(""); // Make the inner function anonymous.
2726 M.getFunctionList().insert(where: F.getIterator(), New: Wrapper);
2727
2728 F.setLinkage(GlobalValue::InternalLinkage);
2729
2730 F.replaceAllUsesWith(V: Wrapper);
2731 assert(F.use_empty() && "Uses remained after wrapper was created!");
2732
2733 // Move the COMDAT section to the wrapper.
2734 // TODO: Check if we need to keep it for F as well.
2735 Wrapper->setComdat(F.getComdat());
2736 F.setComdat(nullptr);
2737
2738 // Copy all metadata and attributes but keep them on F as well.
2739 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2740 F.getAllMetadata(MDs);
2741 for (auto MDIt : MDs)
2742 Wrapper->addMetadata(KindID: MDIt.first, MD&: *MDIt.second);
2743 Wrapper->setAttributes(F.getAttributes());
2744
2745 // Create the call in the wrapper.
2746 BasicBlock *EntryBB = BasicBlock::Create(Context&: Ctx, Name: "entry", Parent: Wrapper);
2747
2748 SmallVector<Value *, 8> Args;
2749 Argument *FArgIt = F.arg_begin();
2750 for (Argument &Arg : Wrapper->args()) {
2751 Args.push_back(Elt: &Arg);
2752 Arg.setName((FArgIt++)->getName());
2753 }
2754
2755 CallInst *CI = CallInst::Create(Func: &F, Args, NameStr: "", InsertBefore: EntryBB);
2756 CI->setTailCall(true);
2757 CI->addFnAttr(Kind: Attribute::NoInline);
2758 ReturnInst::Create(C&: Ctx, retVal: CI->getType()->isVoidTy() ? nullptr : CI, InsertBefore: EntryBB);
2759
2760 NumFnShallowWrappersCreated++;
2761}
2762
2763bool Attributor::isInternalizable(Function &F) {
2764 if (F.isDeclaration() || F.hasLocalLinkage() ||
2765 GlobalValue::isInterposableLinkage(Linkage: F.getLinkage()))
2766 return false;
2767 return true;
2768}
2769
2770Function *Attributor::internalizeFunction(Function &F, bool Force) {
2771 if (!AllowDeepWrapper && !Force)
2772 return nullptr;
2773 if (!isInternalizable(F))
2774 return nullptr;
2775
2776 SmallPtrSet<Function *, 2> FnSet = {&F};
2777 DenseMap<Function *, Function *> InternalizedFns;
2778 internalizeFunctions(FnSet, FnMap&: InternalizedFns);
2779
2780 return InternalizedFns[&F];
2781}
2782
2783bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2784 DenseMap<Function *, Function *> &FnMap) {
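  // Internalization proceeds in two steps: first clone every function in
  // \p FnSet into a private ".internalized" copy, then redirect all uses of
  // the originals to the copies, except for uses inside functions that were
  // themselves just internalized.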
2785 for (Function *F : FnSet)
2786 if (!Attributor::isInternalizable(F&: *F))
2787 return false;
2788
2789 FnMap.clear();
2790 // Generate the internalized version of each function.
2791 for (Function *F : FnSet) {
2792 Module &M = *F->getParent();
2793 FunctionType *FnTy = F->getFunctionType();
2794
2795 // Create a copy of the current function
2796 Function *Copied =
2797 Function::Create(Ty: FnTy, Linkage: F->getLinkage(), AddrSpace: F->getAddressSpace(),
2798 N: F->getName() + ".internalized");
2799 ValueToValueMapTy VMap;
2800 auto *NewFArgIt = Copied->arg_begin();
2801 for (auto &Arg : F->args()) {
2802 auto ArgName = Arg.getName();
2803 NewFArgIt->setName(ArgName);
2804 VMap[&Arg] = &(*NewFArgIt++);
2805 }
2806 SmallVector<ReturnInst *, 8> Returns;
2807
2808 // Copy the body of the original function to the new one
2809 CloneFunctionInto(NewFunc: Copied, OldFunc: F, VMap,
2810 Changes: CloneFunctionChangeType::LocalChangesOnly, Returns);
2811
    // Set the linkage and visibility late as CloneFunctionInto has some
    // implicit requirements.
2814 Copied->setVisibility(GlobalValue::DefaultVisibility);
2815 Copied->setLinkage(GlobalValue::PrivateLinkage);
2816
2817 // Copy metadata
2818 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2819 F->getAllMetadata(MDs);
2820 for (auto MDIt : MDs)
2821 if (!Copied->hasMetadata())
2822 Copied->addMetadata(KindID: MDIt.first, MD&: *MDIt.second);
2823
2824 M.getFunctionList().insert(where: F->getIterator(), New: Copied);
2825 Copied->setDSOLocal(true);
2826 FnMap[F] = Copied;
2827 }
2828
2829 // Replace all uses of the old function with the new internalized function
2830 // unless the caller is a function that was just internalized.
2831 for (Function *F : FnSet) {
2832 auto &InternalizedFn = FnMap[F];
2833 auto IsNotInternalized = [&](Use &U) -> bool {
2834 if (auto *CB = dyn_cast<CallBase>(Val: U.getUser()))
2835 return !FnMap.lookup(Val: CB->getCaller());
2836 return false;
2837 };
2838 F->replaceUsesWithIf(New: InternalizedFn, ShouldReplace: IsNotInternalized);
2839 }
2840
2841 return true;
2842}
2843
2844bool Attributor::isValidFunctionSignatureRewrite(
2845 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2846
2847 if (!Configuration.RewriteSignatures)
2848 return false;
2849
2850 Function *Fn = Arg.getParent();
2851 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
    // Forbid call sites that cast the function return type. If we needed to
    // rewrite such functions we would have to re-create a cast for the new
    // call site (if the old one had uses).
2855 if (!ACS.getCalledFunction() ||
2856 ACS.getInstruction()->getType() !=
2857 ACS.getCalledFunction()->getReturnType())
2858 return false;
2859 if (cast<CallBase>(Val: ACS.getInstruction())->getCalledOperand()->getType() !=
2860 Fn->getType())
2861 return false;
2862 if (ACS.getNumArgOperands() != Fn->arg_size())
2863 return false;
2864 // Forbid must-tail calls for now.
2865 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2866 };
2867
2868 // Avoid var-arg functions for now.
2869 if (Fn->isVarArg()) {
2870 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2871 return false;
2872 }
2873
2874 // Avoid functions with complicated argument passing semantics.
2875 AttributeList FnAttributeList = Fn->getAttributes();
2876 if (FnAttributeList.hasAttrSomewhere(Kind: Attribute::Nest) ||
2877 FnAttributeList.hasAttrSomewhere(Kind: Attribute::StructRet) ||
2878 FnAttributeList.hasAttrSomewhere(Kind: Attribute::InAlloca) ||
2879 FnAttributeList.hasAttrSomewhere(Kind: Attribute::Preallocated)) {
2880 LLVM_DEBUG(
2881 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2882 return false;
2883 }
2884
2885 // Avoid callbacks for now.
2886 bool UsedAssumedInformation = false;
2887 if (!checkForAllCallSites(Pred: CallSiteCanBeChanged, Fn: *Fn, RequireAllCallSites: true, QueryingAA: nullptr,
2888 UsedAssumedInformation,
2889 /* CheckPotentiallyDead */ true)) {
2890 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2891 return false;
2892 }
2893
2894 auto InstPred = [](Instruction &I) {
2895 if (auto *CI = dyn_cast<CallInst>(Val: &I))
2896 return !CI->isMustTailCall();
2897 return true;
2898 };
2899
  // Forbid must-tail calls for now.
  // TODO: Lift this restriction.
2902 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F: *Fn);
2903 if (!checkForAllInstructionsImpl(A: nullptr, OpcodeInstMap, Pred: InstPred, QueryingAA: nullptr,
2904 LivenessAA: nullptr, Opcodes: {Instruction::Call},
2905 UsedAssumedInformation)) {
2906 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2907 return false;
2908 }
2909
2910 return true;
2911}
2912
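// A hypothetical registration that splits a pointer argument into two i32
// pieces; the repair callbacks are assumed to be defined elsewhere:
//
//   Type *I32 = Type::getInt32Ty(Arg.getContext());
//   A.registerFunctionSignatureRewrite(Arg, {I32, I32},
//                                      std::move(CalleeRepairCB),
//                                      std::move(ACSRepairCB));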
2913bool Attributor::registerFunctionSignatureRewrite(
2914 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2915 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2916 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2917 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2918 << Arg.getParent()->getName() << " with "
2919 << ReplacementTypes.size() << " replacements\n");
2920 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2921 "Cannot register an invalid rewrite");
2922
2923 Function *Fn = Arg.getParent();
2924 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2925 ArgumentReplacementMap[Fn];
2926 if (ARIs.empty())
2927 ARIs.resize(N: Fn->arg_size());
2928
  // If we already have a replacement with fewer than or equally many new
  // arguments, ignore this request.
2931 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2932 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2933 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2934 return false;
2935 }
2936
2937 // If we have a replacement already but we like the new one better, delete
2938 // the old.
2939 ARI.reset();
2940
2945 // Remember the replacement.
2946 ARI.reset(p: new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2947 std::move(CalleeRepairCB),
2948 std::move(ACSRepairCB)));
2949
2950 return true;
2951}
2952
2953bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2954 bool Result = true;
2955#ifndef NDEBUG
2956 if (SeedAllowList.size() != 0)
2957 Result = llvm::is_contained(SeedAllowList, AA.getName());
2958 Function *Fn = AA.getAnchorScope();
2959 if (FunctionSeedAllowList.size() != 0 && Fn)
2960 Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2961#endif
2962 return Result;
2963}
2964
2965ChangeStatus Attributor::rewriteFunctionSignatures(
2966 SmallSetVector<Function *, 8> &ModifiedFns) {
2967 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2968
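  // For each function with registered argument replacements we build the new
  // argument type and attribute lists, create an empty replacement function,
  // splice the old body into it, and recreate every call site with operands
  // provided by the ACS repair callbacks.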
2969 for (auto &It : ArgumentReplacementMap) {
2970 Function *OldFn = It.getFirst();
2971
2972 // Deleted functions do not require rewrites.
2973 if (!Functions.count(key: OldFn) || ToBeDeletedFunctions.count(key: OldFn))
2974 continue;
2975
2976 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2977 It.getSecond();
2978 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2979
2980 SmallVector<Type *, 16> NewArgumentTypes;
2981 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2982
2983 // Collect replacement argument types and copy over existing attributes.
2984 AttributeList OldFnAttributeList = OldFn->getAttributes();
2985 for (Argument &Arg : OldFn->args()) {
2986 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2987 ARIs[Arg.getArgNo()]) {
2988 NewArgumentTypes.append(in_start: ARI->ReplacementTypes.begin(),
2989 in_end: ARI->ReplacementTypes.end());
2990 NewArgumentAttributes.append(NumInputs: ARI->getNumReplacementArgs(),
2991 Elt: AttributeSet());
2992 } else {
2993 NewArgumentTypes.push_back(Elt: Arg.getType());
2994 NewArgumentAttributes.push_back(
2995 Elt: OldFnAttributeList.getParamAttrs(ArgNo: Arg.getArgNo()));
2996 }
2997 }
2998
2999 uint64_t LargestVectorWidth = 0;
3000 for (auto *I : NewArgumentTypes)
3001 if (auto *VT = dyn_cast<llvm::VectorType>(Val: I))
3002 LargestVectorWidth =
3003 std::max(a: LargestVectorWidth,
3004 b: VT->getPrimitiveSizeInBits().getKnownMinValue());
3005
3006 FunctionType *OldFnTy = OldFn->getFunctionType();
3007 Type *RetTy = OldFnTy->getReturnType();
3008
3009 // Construct the new function type using the new arguments types.
3010 FunctionType *NewFnTy =
3011 FunctionType::get(Result: RetTy, Params: NewArgumentTypes, isVarArg: OldFnTy->isVarArg());

    LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
                      << "' from " << *OldFn->getFunctionType() << " to "
                      << *NewFnTy << "\n");

    // Create the new function body and insert it into the module.
    Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
                                       OldFn->getAddressSpace(), "");
    Functions.insert(NewFn);
    OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
    NewFn->takeName(OldFn);
    NewFn->copyAttributesFrom(OldFn);

    // Patch the pointer to LLVM function in debug info descriptor.
    NewFn->setSubprogram(OldFn->getSubprogram());
    OldFn->setSubprogram(nullptr);

    // Recompute the parameter attributes list based on the new arguments for
    // the function.
    LLVMContext &Ctx = OldFn->getContext();
    NewFn->setAttributes(AttributeList::get(
        Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
        NewArgumentAttributes));
    AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);

    // Remove argmem from the memory effects if we have no more pointer
    // arguments, or they are readnone.
    MemoryEffects ME = NewFn->getMemoryEffects();
    int ArgNo = -1;
    if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
          ++ArgNo;
          return !T->isPtrOrPtrVectorTy() ||
                 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
        })) {
      NewFn->setMemoryEffects(ME - MemoryEffects::argMemOnly());
    }

    // Since we have now created the new function, splice the body of the old
    // function right into the new function, leaving the old rotting hulk of
    // the function empty.
    NewFn->splice(NewFn->begin(), OldFn);

    // Set of all "call-like" instructions that invoke the old function mapped
    // to their new replacements.
    SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;

    // Callback to create a new "call-like" instruction for a given one.
    auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
      CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
      const AttributeList &OldCallAttributeList = OldCB->getAttributes();

      // Collect the new argument operands for the replacement call site.
      SmallVector<Value *, 16> NewArgOperands;
      SmallVector<AttributeSet, 16> NewArgOperandAttributes;
      for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
        unsigned NewFirstArgNum = NewArgOperands.size();
        (void)NewFirstArgNum; // only used inside assert.
        if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
                ARIs[OldArgNum]) {
          if (ARI->ACSRepairCB)
            ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
          assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
                     NewArgOperands.size() &&
                 "ACS repair callback did not provide as many operands as "
                 "new types were registered!");
          // TODO: Expose the attribute set to the ACS repair callback.
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttrs(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
                                   II->getUnwindDest(), NewArgOperands,
                                   OperandBundleDefs, "", OldCB->getIterator());
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
                                       "", OldCB->getIterator());
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttrs(),
          OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));

      AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
                                                    LargestVectorWidth);

      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    bool UsedAssumedInformation = false;
    bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
                                        true, nullptr, UsedAssumedInformation,
                                        /* CheckPotentiallyDead */ true);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments.
    Argument *OldFnArgIt = OldFn->arg_begin();
    Argument *NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
              ARIs[OldArgNum]) {
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        if (ARI->ReplacementTypes.empty())
          OldFnArgIt->replaceAllUsesWith(
              PoisonValue::get(OldFnArgIt->getType()));
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      assert(OldCB.getType() == NewCB.getType() &&
             "Cannot handle call sites with different types!");
      ModifiedFns.insert(OldCB.getFunction());
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // Replace the function in the call graph (if any).
    Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);

    // If the old function was modified and needed to be reanalyzed, the new
    // one does now.
    if (ModifiedFns.remove(OldFn))
      ModifiedFns.insert(NewFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}
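
// Illustrative sketch (assumed API shape, see Attributor.h): a rewrite is
// registered per argument together with repair callbacks for the callee body
// and each call site, roughly:
//
//   SmallVector<Type *, 2> ReplacementTypes = {/* new argument types */};
//   A.registerFunctionSignatureRewrite(Arg, ReplacementTypes,
//                                      std::move(CalleeRepairCB),
//                                      std::move(ACSRepairCB));
//
// rewriteFunctionSignatures (above) then materializes all registered rewrites
// at the end of the Attributor run.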

void InformationCache::initializeInformationCache(const Function &CF,
                                                  FunctionInfo &FI) {
  // As we do not modify the function here we can remove the const
  // without breaking implicit assumptions. At the end of the day, we could
  // initialize the cache eagerly which would look the same to the users.
  Function &F = const_cast<Function &>(CF);

  FI.IsKernel = F.hasFnAttribute("kernel");

  // Walk all instructions to find interesting instructions that might be
  // queried by abstract attributes during their initialization or update.
  // This has to happen before we create attributes.

  DenseMap<const Value *, std::optional<short>> AssumeUsesMap;

  // Add \p V to the assume uses map which tracks the number of uses outside
  // of "visited" assumes. If no outside uses are left the value is added to
  // the set of assume-only values.
  auto AddToAssumeUsesMap = [&](const Value &V) -> void {
    SmallVector<const Instruction *> Worklist;
    if (auto *I = dyn_cast<Instruction>(&V))
      Worklist.push_back(I);
    while (!Worklist.empty()) {
      const Instruction *I = Worklist.pop_back_val();
      std::optional<short> &NumUses = AssumeUsesMap[I];
      if (!NumUses)
        NumUses = I->getNumUses();
      NumUses = *NumUses - /* this assume */ 1;
      if (*NumUses != 0)
        continue;
      AssumeOnlyValues.insert(I);
      for (const Value *Op : I->operands())
        if (auto *OpI = dyn_cast<Instruction>(Op))
          Worklist.push_back(OpI);
    }
  };
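
  // For example (illustrative IR, not taken from a test): given
  //
  //   %c = icmp eq i32 %x, 0
  //   call void @llvm.assume(i1 %c)
  //
  // the icmp's only user is the assume, so once the assume is visited the
  // remaining outside-use count of %c drops to zero, %c is recorded as an
  // assume-only value, and its operands are processed transitively.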

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are interesting
    // to concrete attributes we only cache the ones that are as identified in
    // the following switch.
    // Note: There are no concrete attributes now so this is initially empty.
    switch (I.getOpcode()) {
    default:
      assert(!isa<CallBase>(&I) &&
             "New call base instruction type needs to be known in the "
             "Attributor.");
      break;
    case Instruction::Call:
      // Calls are interesting on their own, additionally:
      // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
      // For `must-tail` calls we remember the caller and callee.
      if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
        AssumeOnlyValues.insert(Assume);
        fillMapFromAssume(*Assume, KnowledgeMap);
        AddToAssumeUsesMap(*Assume->getArgOperand(0));
      } else if (cast<CallInst>(I).isMustTailCall()) {
        FI.ContainsMustTailCall = true;
        if (auto *Callee = dyn_cast_if_present<Function>(
                cast<CallInst>(I).getCalledOperand()))
          getFunctionInfo(*Callee).CalledViaMustTail = true;
      }
      [[fallthrough]];
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::AtomicRMW:
    case Instruction::AtomicCmpXchg:
    case Instruction::Br:
    case Instruction::Resume:
    case Instruction::Ret:
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
    case Instruction::Alloca:
    case Instruction::AddrSpaceCast:
      IsInterestingOpcode = true;
    }
    if (IsInterestingOpcode) {
      auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
      if (!Insts)
        Insts = new (Allocator) InstructionVectorTy();
      Insts->push_back(&I);
    }
    if (I.mayReadOrWriteMemory())
      FI.RWInsts.push_back(&I);
  }

  if (F.hasFnAttribute(Attribute::AlwaysInline) &&
      isInlineViable(F).isSuccess())
    InlineableFunctions.insert(&F);
}
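
// Illustrative sketch (assumed API shape, see Attributor.h): abstract
// attributes consume the opcode lists cached above through the Attributor,
// e.g. roughly:
//
//   bool UsedAssumedInformation = false;
//   A.checkForAllInstructions(
//       [](Instruction &I) { /* visit I */ return true; }, *this,
//       {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
//       UsedAssumedInformation);
//
// which only iterates the instructions recorded in FI.OpcodeInstMap.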

InformationCache::FunctionInfo::~FunctionInfo() {
  // The instruction vectors are allocated using a BumpPtrAllocator; we need
  // to destroy them manually.
  for (auto &It : OpcodeInstMap)
    It.getSecond()->~InstructionVectorTy();
}

ArrayRef<Function *>
InformationCache::getIndirectlyCallableFunctions(Attributor &A) const {
  assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
  return IndirectlyCallableFunctions;
}

std::optional<unsigned> InformationCache::getFlatAddressSpace() const {
  if (TargetTriple.isGPU())
    return 0;
  return std::nullopt;
}

void Attributor::recordDependence(const AbstractAttribute &FromAA,
                                  const AbstractAttribute &ToAA,
                                  DepClassTy DepClass) {
  if (DepClass == DepClassTy::NONE)
    return;
  // If we are outside of an update, thus before the actual fixpoint iteration
  // started (= when we create AAs), we do not track dependences because we
  // will put all AAs into the initial worklist anyway.
  if (DependenceStack.empty())
    return;
  if (FromAA.getState().isAtFixpoint())
    return;
  DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
}
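
// Example: an AA that, during its update, queries another AA with
// DepClassTy::REQUIRED will be re-run once the queried AA changes; the
// dependence recorded here is what triggers that re-run (see
// rememberDependences below).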

void Attributor::rememberDependences() {
  assert(!DependenceStack.empty() && "No dependences to remember!");

  for (DepInfo &DI : *DependenceStack.back()) {
    assert((DI.DepClass == DepClassTy::REQUIRED ||
            DI.DepClass == DepClassTy::OPTIONAL) &&
           "Expected required or optional dependence (1 bit)!");
    auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
    DepAAs.insert(AbstractAttribute::DepTy(
        const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
  }
}

template <Attribute::AttrKind AK, typename AAType>
void Attributor::checkAndQueryIRAttr(const IRPosition &IRP, AttributeSet Attrs,
                                     bool SkipHasAttrCheck) {
  bool IsKnown;
  if (SkipHasAttrCheck || !Attrs.hasAttribute(AK))
    if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
      if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
                                    IsKnown))
        getOrCreateAAFor<AAType>(IRP);
}
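
// E.g., identifyDefaultAbstractAttributes (below) seeds AANoFree for a
// function position via
//
//   checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
//
// which is a no-op if `nofree` is already present or assumed, or if AANoFree
// is not in the allowed set of the configuration.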

void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;
  if (F.isDeclaration())
    return;

  // In non-module runs we need to look at the call sites of a function to
  // determine if it is part of a must-tail call edge. This will influence what
  // attributes we can derive.
  InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
  if (!isModulePass() && !FI.CalledViaMustTail) {
    for (const Use &U : F.uses())
      if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
        if (CB->isCallee(&U) && CB->isMustTailCall())
          FI.CalledViaMustTail = true;
  }

  IRPosition FPos = IRPosition::function(F);
  bool IsIPOAmendable = isFunctionIPOAmendable(F);
  auto Attrs = F.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might contain instructions that cause "undefined
  // behavior".
  getOrCreateAAFor<AAUndefinedBehavior>(FPos);

  // Every function might be applicable for Heap-To-Stack conversion.
  if (EnableHeapToStack)
    getOrCreateAAFor<AAHeapToStack>(FPos);

  // Every function might be "must-progress".
  checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);

  // Every function might be "no-free".
  checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);

  // Every function might be "will-return".
  checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);

  // Every function might be marked "nosync".
  checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);

  // Everything that is visible from the outside (= function, argument, and
  // return positions) cannot be changed if the function is not IPO amendable.
  // We can, however, analyze the code inside.
  if (IsIPOAmendable) {

    // Every function can be nounwind.
    checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);

    // Every function might be "no-return".
    checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);

    // Every function might be "no-recurse".
    checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);

    // Every function can be "non-convergent".
    if (Attrs.hasFnAttr(Attribute::Convergent))
      getOrCreateAAFor<AANonConvergent>(FPos);

    // Every function might be "readnone/readonly/writeonly/...".
    getOrCreateAAFor<AAMemoryBehavior>(FPos);

    // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
    getOrCreateAAFor<AAMemoryLocation>(FPos);

    // Every function can track active assumptions.
    getOrCreateAAFor<AAAssumptionInfo>(FPos);

    // If we're not using a dynamic mode for float, there's nothing worthwhile
    // to infer. This misses the edge case denormal-fp-math="dynamic" and
    // denormal-fp-math-f32=something, but that likely has no real world use.
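    // E.g., a function carrying "denormal-fp-math"="dynamic,dynamic" (or any
    // mode with a dynamic input or output component) is a candidate for the
    // refinement seeded below.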
    DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
    if (Mode.Input == DenormalMode::Dynamic ||
        Mode.Output == DenormalMode::Dynamic)
      getOrCreateAAFor<AADenormalFPMath>(FPos);

    // Return attributes are only appropriate if the return type is non-void.
    Type *ReturnType = F.getReturnType();
    if (!ReturnType->isVoidTy()) {
      IRPosition RetPos = IRPosition::returned(F);
      AttributeSet RetAttrs = Attrs.getRetAttrs();

      // Every returned value might be dead.
      getOrCreateAAFor<AAIsDead>(RetPos);

      // Every returned value might be simplified.
      bool UsedAssumedInformation = false;
      getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      // Every returned value might be marked noundef.
      checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);

      if (ReturnType->isPointerTy()) {

        // Every function with pointer return type might be marked align.
        getOrCreateAAFor<AAAlign>(RetPos);

        // Every function with pointer return type might be marked nonnull.
        checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);

        // Every function with pointer return type might be marked noalias.
        checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);

        // Every function with pointer return type might be marked
        // dereferenceable.
        getOrCreateAAFor<AADereferenceable>(RetPos);
      } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
        getOrCreateAAFor<AANoFPClass>(RetPos);
      }
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);
    auto ArgNo = Arg.getArgNo();
    AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);

    if (!IsIPOAmendable) {
      if (Arg.getType()->isPointerTy())
        // Every argument with pointer type might be marked nofree.
        checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
      continue;
    }

    // Every argument might be simplified. We have to go through the
    // Attributor interface though as outside AAs can register custom
    // simplification callbacks.
    bool UsedAssumedInformation = false;
    getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
                         AA::Intraprocedural);

    // Every argument might be dead.
    getOrCreateAAFor<AAIsDead>(ArgPos);

    // Every argument might be marked noundef.
    checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be marked noalias.
      checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
          ArgPos, ArgAttrs, /*SkipHasAttrCheck=*/true);

      // Every argument with pointer type might be marked
      // "readnone/readonly/writeonly/...".
      getOrCreateAAFor<AAMemoryBehavior>(ArgPos);

      // Every argument with pointer type might be marked nofree.
      checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be privatizable (or
      // promotable).
      getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
    } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
      getOrCreateAAFor<AANoFPClass>(ArgPos);
    }
  }

  auto CallSitePred = [&](Instruction &I) -> bool {
    auto &CB = cast<CallBase>(I);
    IRPosition CBInstPos = IRPosition::inst(CB);
    IRPosition CBFnPos = IRPosition::callsite_function(CB);

    // Call sites might be dead if they do not have side effects and no live
    // users. The return value might be dead if there are no live users.
    getOrCreateAAFor<AAIsDead>(CBInstPos);

    Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
    // TODO: Even if the callee is not known now we might be able to simplify
    // the call/callee.
    if (!Callee) {
      getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
      return true;
    }

    // Every call site can track active assumptions.
    getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);

    // Skip declarations except if annotations on their call sites were
    // explicitly requested.
    if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
        !Callee->hasMetadata(LLVMContext::MD_callback))
      return true;

    if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
      IRPosition CBRetPos = IRPosition::callsite_returned(CB);
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
        getOrCreateAAFor<AANoFPClass>(CBInstPos);
    }

    const AttributeList &CBAttrs = CBFnPos.getAttrList();
    for (int I = 0, E = CB.arg_size(); I < E; ++I) {

      IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
      AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);

      // Every call site argument might be dead.
      getOrCreateAAFor<AAIsDead>(CBArgPos);

      // Call site argument might be simplified. We have to go through the
      // Attributor interface though as outside AAs can register custom
      // simplification callbacks.
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      // Every call site argument might be marked "noundef".
      checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);

      Type *ArgTy = CB.getArgOperand(I)->getType();

      if (!ArgTy->isPointerTy()) {
        if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
          getOrCreateAAFor<AANoFPClass>(CBArgPos);

        continue;
      }

      // Call site argument attribute "non-null".
      checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);

      // Call site argument attribute "captures(none)".
      checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
          CBArgPos, CBArgAttrs, /*SkipHasAttrCheck=*/true);

      // Call site argument attribute "no-alias".
      checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);

      // Call site argument attribute "dereferenceable".
      getOrCreateAAFor<AADereferenceable>(CBArgPos);

      // Call site argument attribute "align".
      getOrCreateAAFor<AAAlign>(CBArgPos);

      // Call site argument attribute "readnone/readonly/writeonly/...".
      if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
        getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);

      // Call site argument attribute "nofree".
      checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
    }
    return true;
  };

  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  [[maybe_unused]] bool Success;
  bool UsedAssumedInformation = false;
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
      {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
       (unsigned)Instruction::Call},
      UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");

  auto LoadStorePred = [&](Instruction &I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(&I)) {
      getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
      if (SimplifyAllLoads)
        getAssumedSimplified(IRPosition::value(I), nullptr,
                             UsedAssumedInformation, AA::Intraprocedural);
      getOrCreateAAFor<AAInvariantLoadPointer>(
          IRPosition::value(*LI->getPointerOperand()));
      getOrCreateAAFor<AAAddressSpace>(
          IRPosition::value(*LI->getPointerOperand()));
    } else {
      auto &SI = cast<StoreInst>(I);
      getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
      getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
                           UsedAssumedInformation, AA::Intraprocedural);
      getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
      getOrCreateAAFor<AAAddressSpace>(
          IRPosition::value(*SI.getPointerOperand()));
    }
    return true;
  };
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
      {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
      UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");

  // Predicate to seed AAAllocationInfo for every alloca instruction.
  auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
    getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
    return true;
  };

  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
      {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");
}

bool Attributor::isClosedWorldModule() const {
  if (CloseWorldAssumption.getNumOccurrences())
    return CloseWorldAssumption;
  return isModulePass() && Configuration.IsClosedWorldModule;
}
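
// Example consequence: under the closed-world assumption the module contains
// every possible callee, so getIndirectlyCallableFunctions (above) may be
// used to enumerate all targets of an indirect call site.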

/// Helpers to ease debugging through output streams and print calls.
///
///{
raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
  switch (AP) {
  case IRPosition::IRP_INVALID:
    return OS << "inv";
  case IRPosition::IRP_FLOAT:
    return OS << "flt";
  case IRPosition::IRP_RETURNED:
    return OS << "fn_ret";
  case IRPosition::IRP_CALL_SITE_RETURNED:
    return OS << "cs_ret";
  case IRPosition::IRP_FUNCTION:
    return OS << "fn";
  case IRPosition::IRP_CALL_SITE:
    return OS << "cs";
  case IRPosition::IRP_ARGUMENT:
    return OS << "arg";
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    return OS << "cs_arg";
  }
  llvm_unreachable("Unknown attribute position!");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
  const Value &AV = Pos.getAssociatedValue();
  OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
     << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";

  if (Pos.hasCallBaseContext())
    OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
  return OS << "}";
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
  OS << "range-state(" << S.getBitWidth() << ")<";
  S.getKnown().print(OS);
  OS << " / ";
  S.getAssumed().print(OS);
  OS << ">";

  return OS << static_cast<const AbstractState &>(S);
}
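
// Example output (illustrative): "range-state(32)<[0,1) / [0,16)>" for a
// 32-bit state with known range [0,1) and assumed range [0,16), followed by
// the AbstractState suffix printed below ("", "fix", or "top").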

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialConstantIntValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet())
      OS << It << ", ";
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialLLVMValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet()) {
      if (auto *F = dyn_cast<Function>(It.first.getValue()))
        OS << "@" << F->getName() << "[" << int(It.second) << "], ";
      else
        OS << *It.first.getValue() << "[" << int(It.second) << "], ";
    }
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}

void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
  OS << "[";
  OS << getName();
  OS << "] for CtxI ";

  if (auto *I = getCtxI()) {
    OS << "'";
    I->print(OS);
    OS << "'";
  } else
    OS << "<<null inst>>";

  OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
     << '\n';
}

void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
  print(OS);

  for (const auto &DepAA : Deps) {
    auto *AA = DepAA.getPointer();
    OS << " updates ";
    AA->print(OS);
  }

  OS << '\n';
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const AAPointerInfo::Access &Acc) {
  OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
  if (Acc.getLocalInst() != Acc.getRemoteInst())
    OS << " via " << *Acc.getLocalInst();
  if (Acc.getContent()) {
    if (*Acc.getContent())
      OS << " [" << **Acc.getContent() << "]";
    else
      OS << " [ <unknown> ]";
  }
  return OS;
}
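
// Example output (illustrative): " [read] %v = load i32, ptr %p", with a
// " via <inst>" suffix when the access happens through a different local
// instruction, and the known content, if any, appended in brackets.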
///}

/// ----------------------------------------------------------------------------
/// Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater,
                                     bool DeleteFns, bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[Attributor] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << "  - " << Fn->getName() << "\n";
  });

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = DeleteFns;

  /// Tracking callback for specialization of indirect calls.
  DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
      IndirectCalleeTrackingMap;
  if (MaxSpecializationPerCB.getNumOccurrences()) {
    AC.IndirectCalleeSpecializationCallback =
        [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
            Function &Callee, unsigned) {
          if (MaxSpecializationPerCB == 0)
            return false;
          auto &Set = IndirectCalleeTrackingMap[&CB];
          if (!Set)
            Set = std::make_unique<SmallPtrSet<Function *, 8>>();
          if (Set->size() >= MaxSpecializationPerCB)
            return Set->contains(&Callee);
          Set->insert(&Callee);
          return true;
        };
  }

  Attributor A(Functions, InfoCache, AC);

  // Create shallow wrappers for all functions that are not IPO amendable.
  if (AllowShallowWrappers)
    for (Function *F : Functions)
      if (!A.isFunctionIPOAmendable(*F))
        Attributor::createShallowWrapper(*F);

  // Internalize non-exact functions.
  // TODO: For now we eagerly internalize functions without calculating the
  // cost; we need a cost interface to determine whether internalizing a
  // function is "beneficial".
  if (AllowDeepWrapper) {
    unsigned FunSize = Functions.size();
    for (unsigned u = 0; u < FunSize; u++) {
      Function *F = Functions[u];
      if (!F->isDeclaration() && !F->isDefinitionExact() && !F->use_empty() &&
          !GlobalValue::isInterposableLinkage(F->getLinkage())) {
        Function *NewF = Attributor::internalizeFunction(*F);
        assert(NewF && "Could not internalize function.");
        Functions.insert(NewF);

        // Update call graph
        CGUpdater.replaceFunctionWith(*F, *NewF);
        for (const Use &U : NewF->uses())
          if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
            auto *CallerF = CB->getCaller();
            CGUpdater.reanalyzeFunction(*CallerF);
          }
      }
    }
  }

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or is outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}
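
// Both pass wrappers below funnel into this driver; e.g., the module variant
// is typically exercised via "opt -passes=attributor" (pass name assumed from
// the usual registration in PassRegistry.def).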

static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
                                          SetVector<Function *> &Functions,
                                          AnalysisGetter &AG,
                                          CallGraphUpdater &CGUpdater,
                                          FunctionAnalysisManager &FAM,
                                          bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[AttributorLight] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << "  - " << Fn->getName() << "\n";
  });

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = false;
  DenseSet<const char *> Allowed(
      {&AAWillReturn::ID, &AANoUnwind::ID, &AANoRecurse::ID, &AANoSync::ID,
       &AANoFree::ID, &AANoReturn::ID, &AAMemoryLocation::ID,
       &AAMemoryBehavior::ID, &AAUnderlyingObjects::ID, &AANoCapture::ID,
       &AAInterFnReachability::ID, &AAIntraFnReachability::ID, &AACallEdges::ID,
       &AANoFPClass::ID, &AAMustProgress::ID, &AANonNull::ID});
  AC.Allowed = &Allowed;
  AC.UseLiveness = false;

  Attributor A(Functions, InfoCache, AC);

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or is outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (AC.UseLiveness && F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  if (Changed == ChangeStatus::CHANGED) {
    // Invalidate analyses for modified functions so that we don't have to
    // invalidate all analyses for all functions in this SCC.
    PreservedAnalyses FuncPA;
    // We haven't changed the CFG for modified functions.
    FuncPA.preserveSet<CFGAnalyses>();
    for (Function *Changed : A.getModifiedFunctions()) {
      FAM.invalidate(*Changed, FuncPA);
      // Also invalidate any direct callers of changed functions since analyses
      // may care about attributes of direct callees. For example, MemorySSA
      // cares about whether or not a call's callee modifies memory and queries
      // that through function attributes.
      for (auto *U : Changed->users()) {
        if (auto *Call = dyn_cast<CallBase>(U)) {
          if (Call->getCalledFunction() == Changed)
            FAM.invalidate(*Call->getFunction(), FuncPA);
        }
      }
    }
  }
  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }

void AADepGraph::dumpGraph() {
  static std::atomic<int> CallTimes;
  std::string Prefix;

  if (!DepGraphDotFileNamePrefix.empty())
    Prefix = DepGraphDotFileNamePrefix;
  else
    Prefix = "dep_graph";
  std::string Filename =
      Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";

  outs() << "Dependency graph dump to " << Filename << ".\n";

  std::error_code EC;

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  if (!EC)
    llvm::WriteGraph(File, this);

  CallTimes++;
}
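
// Successive calls thus emit dep_graph_0.dot, dep_graph_1.dot, ..., or
// <Prefix>_<N>.dot when DepGraphDotFileNamePrefix is set.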

void AADepGraph::print() {
  for (auto DepAA : SyntheticRoot.Deps)
    cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ true, /* IsModulePass */ true)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
                                           CGSCCAnalysisManager &AM,
                                           LazyCallGraph &CG,
                                           CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ false,
                               /* IsModulePass */ false)) {
    // FIXME: Think about passes we will preserve and add them here.
    PreservedAnalyses PA;
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM, /* CachedOnly */ true);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ true)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
                                                CGSCCAnalysisManager &AM,
                                                LazyCallGraph &CG,
                                                CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ false)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

namespace llvm {

template <> struct GraphTraits<AADepGraphNode *> {
  using NodeRef = AADepGraphNode *;
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;

  static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
  static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }

  using ChildIteratorType =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
  using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;

  static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }

  static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
};

template <>
struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }

  using nodes_iterator =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;

  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }

  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
};

template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const AADepGraphNode *Node,
                                  const AADepGraph *DG) {
    std::string AAString;
    raw_string_ostream O(AAString);
    Node->print(O);
    return AAString;
  }
};

} // end namespace llvm