//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CycleAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InterleavedRange.h"
#include "llvm/Support/KnownFPClass.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <numeric>
#include <optional>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<int> MaxPotentialValuesIterations(
    "attributor-max-potential-values-iterations", cl::Hidden,
    cl::desc(
        "Maximum number of iterations we keep dismantling potential values."),
    cl::init(64));

STATISTIC(NumAAs, "Number of abstract attributes created");
STATISTIC(NumIndirectCallsPromoted, "Number of indirect calls promoted");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {STATS_DECL(NAME, TYPE, MSG) STATS_TRACK(NAME, TYPE)}
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AANonConvergent)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AAMustProgress)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AAInstanceInfo)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAIntraFnReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialConstantValues)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AANoFPClass)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAInterFnReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)
PIPE_OPERATOR(AAUnderlyingObjects)
PIPE_OPERATOR(AAInvariantLoadPointer)
PIPE_OPERATOR(AAAddressSpace)
PIPE_OPERATOR(AAAllocationInfo)
PIPE_OPERATOR(AAIndirectCallInfo)
PIPE_OPERATOR(AAGlobalValueInfo)
PIPE_OPERATOR(AADenormalFPMath)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

static bool mayBeInCycle(const CycleInfo *CI, const Instruction *I,
                         bool HeaderOnly, Cycle **CPtr = nullptr) {
  if (!CI)
    return true;
  auto *BB = I->getParent();
  auto *C = CI->getCycle(BB);
  if (!C)
    return false;
  if (CPtr)
    *CPtr = C;
  return !HeaderOnly || BB == C->getHeader();
}

/// Checks if a type could have padding bytes.
static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!Ty->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
  if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  if (!isa<StructType>(Ty))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(Ty);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
    Type *ElTy = StructTy->getElementType(I);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(I))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}
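
// For illustration, under a typical 64-bit data layout, a hypothetical
// struct { i8, i32 } is not densely packed: the i32 element sits at bit
// offset 32 while the i8 only occupies the first 8 bits, leaving 3 padding
// bytes between the members. A struct { i32, i32 } would be densely packed.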

/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer based on \p Ptr, advanced by \p
/// Offset bytes.
static Value *constructPointer(Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB) {
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes\n");

  if (Offset)
    Ptr = IRB.CreatePtrAdd(Ptr, IRB.getInt64(Offset),
                           Ptr->getName() + ".b" + Twine(Offset));
  return Ptr;
}
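
// For illustration, given a hypothetical pointer %p and Offset == 4, the
// helper above emits (unfolded, as NoFolder is used):
//   %p.b4 = getelementptr i8, ptr %p, i64 4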

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange *ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    if (!ValueConstantRangeAA)
      return false;
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA->getAssumed()
                                     : ValueConstantRangeAA->getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
          bool RecurseForSelectAndPHI = true>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that there are.
  std::optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    // If possible, use the hasAssumedIRAttr interface.
    if (Attribute::isEnumAttrKind(IRAttributeKind)) {
      bool IsKnown;
      return AA::hasAssumedIRAttr<IRAttributeKind>(
          A, &QueryingAA, RVPos, DepClassTy::REQUIRED, IsKnown);
    }

    const AAType *AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    if (!AA)
      return false;
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV
                      << " AA: " << AA->getAsStr(&A) << " @ " << RVPos << "\n");
    const StateType &AAS = AA->getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA,
                                   AA::ValueScope::Intraprocedural,
                                   RecurseForSelectAndPHI))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind,
          bool RecurseForSelectAndPHI = true>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType, IRAttributeKind,
                             RecurseForSelectAndPHI>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that there are.
  std::optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    // If possible, use the hasAssumedIRAttr interface.
    if (Attribute::isEnumAttrKind(IRAttributeKind)) {
      bool IsKnown;
      return AA::hasAssumedIRAttr<IRAttributeKind>(
          A, &QueryingAA, ACSArgPos, DepClassTy::REQUIRED, IsKnown);
    }

    const AAType *AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    if (!AA)
      return false;
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA->getAsStr(&A) << " @" << ACSArgPos
                      << "\n");
    const StateType &AAS = AA->getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");
  const IRPosition CBArgPos = IRPosition::callsite_argument(*CBContext, ArgNo);

  // If possible, use the hasAssumedIRAttr interface.
  if (Attribute::isEnumAttrKind(IRAttributeKind)) {
    bool IsKnown;
    return AA::hasAssumedIRAttr<IRAttributeKind>(
        A, &QueryingAttribute, CBArgPos, DepClassTy::REQUIRED, IsKnown);
  }

  const auto *AA =
      A.getAAFor<AAType>(QueryingAttribute, CBArgPos, DepClassTy::REQUIRED);
  if (!AA)
    return false;
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA->getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument "
                    << "position: " << Pos
                    << ", CB arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType,
                                              IRAttributeKind>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType, IRAttributeKind>(A, *this,
                                                                    S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false,
          Attribute::AttrKind IRAttributeKind = AAType::IRAttributeKind>
struct AACalleeToCallSite : public BaseType {
  AACalleeToCallSite(const IRPosition &IRP, Attributor &A) : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto IRPKind = this->getIRPosition().getPositionKind();
    assert((IRPKind == IRPosition::IRP_CALL_SITE_RETURNED ||
            IRPKind == IRPosition::IRP_CALL_SITE) &&
           "Can only wrap function returned positions for call site "
           "returned positions!");
    auto &S = this->getState();

    CallBase &CB = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:" << CB
                        << "\n");

    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    auto CalleePred = [&](ArrayRef<const Function *> Callees) {
      for (const Function *Callee : Callees) {
        IRPosition FnPos =
            IRPKind == llvm::IRPosition::IRP_CALL_SITE_RETURNED
                ? IRPosition::returned(*Callee,
                                       IntroduceCallBaseContext ? &CB : nullptr)
                : IRPosition::function(
                      *Callee, IntroduceCallBaseContext ? &CB : nullptr);
        // If possible, use the hasAssumedIRAttr interface.
        if (Attribute::isEnumAttrKind(IRAttributeKind)) {
          bool IsKnown;
          if (!AA::hasAssumedIRAttr<IRAttributeKind>(
                  A, this, FnPos, DepClassTy::REQUIRED, IsKnown))
            return false;
          continue;
        }

        const AAType *AA =
            A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
        if (!AA)
          return false;
        Changed |= clampStateAndIndicateChange(S, AA->getState());
        if (S.isAtFixpoint())
          return S.isValidState();
      }
      return true;
    };
    if (!A.checkForAllCallees(CalleePred, *this, CB))
      return S.indicatePessimisticFixpoint();
    return Changed;
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        Uses.insert_range(llvm::make_pointer_range(UserI->uses()));
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {
  const Value &Val = AA.getIRPosition().getAssociatedValue();
  if (isa<ConstantData>(Val))
    return;

  MustBeExecutedContextExplorer *Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();
  if (!Explorer)
    return;

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : Val.uses())
    Uses.insert(&U);

  followUsesInContext<AAType>(AA, A, *Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer->checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, *Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows RangeTy as a key in a DenseMap.
template <> struct DenseMapInfo<AA::RangeTy> {
  static inline AA::RangeTy getEmptyKey() {
    auto EmptyKey = DenseMapInfo<int64_t>::getEmptyKey();
    return AA::RangeTy{EmptyKey, EmptyKey};
  }

  static inline AA::RangeTy getTombstoneKey() {
    auto TombstoneKey = DenseMapInfo<int64_t>::getTombstoneKey();
    return AA::RangeTy{TombstoneKey, TombstoneKey};
  }

  static unsigned getHashValue(const AA::RangeTy &Range) {
    return detail::combineHashValue(
        DenseMapInfo<int64_t>::getHashValue(Range.Offset),
        DenseMapInfo<int64_t>::getHashValue(Range.Size));
  }

  static bool isEqual(const AA::RangeTy &A, const AA::RangeTy B) {
    return A == B;
  }
};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {
  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) = default;

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessList = R.AccessList;
    OffsetBins = R.OffsetBins;
    RemoteIMap = R.RemoteIMap;
    ReturnedOffsets = R.ReturnedOffsets;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessList, R.AccessList);
    std::swap(OffsetBins, R.OffsetBins);
    std::swap(RemoteIMap, R.RemoteIMap);
    std::swap(ReturnedOffsets, R.ReturnedOffsets);
    return *this;
  }

  /// Add a new Access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind. If an Access already exists for the same \p I and
  /// same \p RemoteI, the two are combined, potentially losing information
  /// about offset and size. The resulting access must now be moved from its
  /// original OffsetBin to the bin for its new offset.
  ///
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges,
                         Instruction &I, std::optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr);

  AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
  AAPointerInfo::const_bin_iterator end() const { return OffsetBins.end(); }
  int64_t numOffsetBins() const { return OffsetBins.size(); }

  const AAPointerInfo::Access &getAccess(unsigned Index) const {
    return AccessList[Index];
  }

protected:
  // Every memory instruction results in an Access object. We maintain a list
  // of all Access objects that we own, along with the following maps:
  //
  // - OffsetBins: RangeTy -> { Access }
  // - RemoteIMap: RemoteI x LocalI -> Access
  //
  // A RemoteI is any instruction that accesses memory. RemoteI is different
  // from LocalI if and only if LocalI is a call; then RemoteI is some
  // instruction in the callgraph starting from LocalI. Multiple paths in the
  // callgraph from LocalI to RemoteI may produce multiple accesses, but these
  // are all combined into a single Access object. This may result in loss of
  // information in RangeTy in the Access object.
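  //
  // For example, if LocalI is a call such as `call void @foo(ptr %p)` and a
  // store reached through @foo writes via the passed pointer, that store is
  // the RemoteI recorded for this call site access.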
881 | SmallVector<AAPointerInfo::Access> AccessList; |
882 | AAPointerInfo::OffsetBinsTy OffsetBins; |
883 | DenseMap<const Instruction *, SmallVector<unsigned>> RemoteIMap; |
884 | |
885 | /// Flag to determine if the underlying pointer is reaching a return statement |
886 | /// in the associated function or not. Returns in other functions cause |
887 | /// invalidation. |
888 | AAPointerInfo::OffsetInfo ReturnedOffsets; |
889 | |
890 | /// See AAPointerInfo::forallInterferingAccesses. |
891 | template <typename F> |
892 | bool forallInterferingAccesses(AA::RangeTy Range, F CB) const { |
893 | if (!isValidState() || !ReturnedOffsets.isUnassigned()) |
894 | return false; |
895 | |
896 | for (const auto &It : OffsetBins) { |
897 | AA::RangeTy ItRange = It.getFirst(); |
898 | if (!Range.mayOverlap(Range: ItRange)) |
899 | continue; |
900 | bool IsExact = Range == ItRange && !Range.offsetOrSizeAreUnknown(); |
901 | for (auto Index : It.getSecond()) { |
902 | auto &Access = AccessList[Index]; |
903 | if (!CB(Access, IsExact)) |
904 | return false; |
905 | } |
906 | } |
907 | return true; |
908 | } |
909 | |
910 | /// See AAPointerInfo::forallInterferingAccesses. |
911 | template <typename F> |
912 | bool forallInterferingAccesses(Instruction &I, F CB, |
913 | AA::RangeTy &Range) const { |
914 | if (!isValidState() || !ReturnedOffsets.isUnassigned()) |
915 | return false; |
916 | |
917 | auto LocalList = RemoteIMap.find(Val: &I); |
918 | if (LocalList == RemoteIMap.end()) { |
919 | return true; |
920 | } |
921 | |
922 | for (unsigned Index : LocalList->getSecond()) { |
923 | for (auto &R : AccessList[Index]) { |
924 | Range &= R; |
925 | if (Range.offsetAndSizeAreUnknown()) |
926 | break; |
927 | } |
928 | } |
929 | return forallInterferingAccesses(Range, CB); |
930 | } |
931 | |
932 | private: |
933 | /// State to track fixpoint and validity. |
934 | BooleanState BS; |
935 | }; |
936 | |
937 | ChangeStatus AA::PointerInfo::State::addAccess( |
938 | Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I, |
939 | std::optional<Value *> Content, AAPointerInfo::AccessKind Kind, Type *Ty, |
940 | Instruction *RemoteI) { |
941 | RemoteI = RemoteI ? RemoteI : &I; |
942 | |
943 | // Check if we have an access for this instruction, if not, simply add it. |
944 | auto &LocalList = RemoteIMap[RemoteI]; |
945 | bool AccExists = false; |
946 | unsigned AccIndex = AccessList.size(); |
947 | for (auto Index : LocalList) { |
948 | auto &A = AccessList[Index]; |
949 | if (A.getLocalInst() == &I) { |
950 | AccExists = true; |
951 | AccIndex = Index; |
952 | break; |
953 | } |
954 | } |
955 | |
956 | auto AddToBins = [&](const AAPointerInfo::RangeList &ToAdd) { |
957 | LLVM_DEBUG(if (ToAdd.size()) dbgs() |
958 | << "[AAPointerInfo] Inserting access in new offset bins\n" ;); |
959 | |
960 | for (auto Key : ToAdd) { |
961 | LLVM_DEBUG(dbgs() << " key " << Key << "\n" ); |
962 | OffsetBins[Key].insert(V: AccIndex); |
963 | } |
964 | }; |
965 | |
966 | if (!AccExists) { |
967 | AccessList.emplace_back(Args: &I, Args&: RemoteI, Args: Ranges, Args&: Content, Args&: Kind, Args&: Ty); |
968 | assert((AccessList.size() == AccIndex + 1) && |
969 | "New Access should have been at AccIndex" ); |
970 | LocalList.push_back(Elt: AccIndex); |
971 | AddToBins(AccessList[AccIndex].getRanges()); |
972 | return ChangeStatus::CHANGED; |
973 | } |
974 | |
975 | // Combine the new Access with the existing Access, and then update the |
976 | // mapping in the offset bins. |
977 | AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty); |
978 | auto &Current = AccessList[AccIndex]; |
979 | auto Before = Current; |
980 | Current &= Acc; |
981 | if (Current == Before) |
982 | return ChangeStatus::UNCHANGED; |
983 | |
984 | auto &ExistingRanges = Before.getRanges(); |
985 | auto &NewRanges = Current.getRanges(); |
986 | |
987 | // Ranges that are in the old access but not the new access need to be removed |
988 | // from the offset bins. |
989 | AAPointerInfo::RangeList ToRemove; |
990 | AAPointerInfo::RangeList::set_difference(L: ExistingRanges, R: NewRanges, D&: ToRemove); |
991 | LLVM_DEBUG(if (ToRemove.size()) dbgs() |
992 | << "[AAPointerInfo] Removing access from old offset bins\n" ;); |
993 | |
994 | for (auto Key : ToRemove) { |
995 | LLVM_DEBUG(dbgs() << " key " << Key << "\n" ); |
996 | assert(OffsetBins.count(Key) && "Existing Access must be in some bin." ); |
997 | auto &Bin = OffsetBins[Key]; |
998 | assert(Bin.count(AccIndex) && |
999 | "Expected bin to actually contain the Access." ); |
1000 | Bin.erase(V: AccIndex); |
1001 | } |
1002 | |
1003 | // Ranges that are in the new access but not the old access need to be added |
1004 | // to the offset bins. |
1005 | AAPointerInfo::RangeList ToAdd; |
1006 | AAPointerInfo::RangeList::set_difference(L: NewRanges, R: ExistingRanges, D&: ToAdd); |
1007 | AddToBins(ToAdd); |
1008 | return ChangeStatus::CHANGED; |
1009 | } |
1010 | |
1011 | namespace { |
1012 | |
1013 | #ifndef NDEBUG |
1014 | static raw_ostream &operator<<(raw_ostream &OS, |
1015 | const AAPointerInfo::OffsetInfo &OI) { |
1016 | OS << llvm::interleaved_array(OI); |
1017 | return OS; |
1018 | } |
1019 | #endif // NDEBUG |
1020 | |
1021 | struct AAPointerInfoImpl |
1022 | : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> { |
1023 | using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>; |
1024 | AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {} |
1025 | |
1026 | /// See AbstractAttribute::getAsStr(). |
1027 | const std::string getAsStr(Attributor *A) const override { |
1028 | return std::string("PointerInfo " ) + |
1029 | (isValidState() ? (std::string("#" ) + |
1030 | std::to_string(val: OffsetBins.size()) + " bins" ) |
1031 | : "<invalid>" ) + |
1032 | (reachesReturn() |
1033 | ? (" (returned:" + |
1034 | join(R: map_range(C: ReturnedOffsets, |
1035 | F: [](int64_t O) { return std::to_string(val: O); }), |
1036 | Separator: ", " ) + |
1037 | ")" ) |
1038 | : "" ); |
1039 | } |
1040 | |
1041 | /// See AbstractAttribute::manifest(...). |
1042 | ChangeStatus manifest(Attributor &A) override { |
1043 | return AAPointerInfo::manifest(A); |
1044 | } |
1045 | |
1046 | virtual const_bin_iterator begin() const override { return State::begin(); } |
1047 | virtual const_bin_iterator end() const override { return State::end(); } |
1048 | virtual int64_t numOffsetBins() const override { |
1049 | return State::numOffsetBins(); |
1050 | } |
1051 | virtual bool reachesReturn() const override { |
1052 | return !ReturnedOffsets.isUnassigned(); |
1053 | } |
1054 | virtual void addReturnedOffsetsTo(OffsetInfo &OI) const override { |
1055 | if (ReturnedOffsets.isUnknown()) { |
1056 | OI.setUnknown(); |
1057 | return; |
1058 | } |
1059 | |
1060 | OffsetInfo MergedOI; |
1061 | for (auto Offset : ReturnedOffsets) { |
1062 | OffsetInfo TmpOI = OI; |
1063 | TmpOI.addToAll(Inc: Offset); |
1064 | MergedOI.merge(R: TmpOI); |
1065 | } |
1066 | OI = std::move(MergedOI); |
1067 | } |
1068 | |
1069 | ChangeStatus setReachesReturn(const OffsetInfo &ReachedReturnedOffsets) { |
1070 | if (ReturnedOffsets.isUnknown()) |
1071 | return ChangeStatus::UNCHANGED; |
1072 | if (ReachedReturnedOffsets.isUnknown()) { |
1073 | ReturnedOffsets.setUnknown(); |
1074 | return ChangeStatus::CHANGED; |
1075 | } |
1076 | if (ReturnedOffsets.merge(R: ReachedReturnedOffsets)) |
1077 | return ChangeStatus::CHANGED; |
1078 | return ChangeStatus::UNCHANGED; |
1079 | } |
1080 | |
1081 | bool forallInterferingAccesses( |
1082 | AA::RangeTy Range, |
1083 | function_ref<bool(const AAPointerInfo::Access &, bool)> CB) |
1084 | const override { |
1085 | return State::forallInterferingAccesses(Range, CB); |
1086 | } |
1087 | |
1088 | bool forallInterferingAccesses( |
1089 | Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I, |
1090 | bool FindInterferingWrites, bool FindInterferingReads, |
1091 | function_ref<bool(const Access &, bool)> UserCB, bool &HasBeenWrittenTo, |
1092 | AA::RangeTy &Range, |
1093 | function_ref<bool(const Access &)> SkipCB) const override { |
1094 | HasBeenWrittenTo = false; |
1095 | |
1096 | SmallPtrSet<const Access *, 8> DominatingWrites; |
1097 | SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses; |
1098 | |
1099 | Function &Scope = *I.getFunction(); |
1100 | bool IsKnownNoSync; |
1101 | bool IsAssumedNoSync = AA::hasAssumedIRAttr<Attribute::NoSync>( |
1102 | A, QueryingAA: &QueryingAA, IRP: IRPosition::function(F: Scope), DepClass: DepClassTy::OPTIONAL, |
1103 | IsKnown&: IsKnownNoSync); |
1104 | const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>( |
1105 | IRP: IRPosition::function(F: Scope), QueryingAA: &QueryingAA, DepClass: DepClassTy::NONE); |
1106 | bool AllInSameNoSyncFn = IsAssumedNoSync; |
1107 | bool InstIsExecutedByInitialThreadOnly = |
1108 | ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I); |
1109 | |
1110 | // If the function is not ending in aligned barriers, we need the stores to |
1111 | // be in aligned barriers. The load being in one is not sufficient since the |
1112 | // store might be executed by a thread that disappears after, causing the |
1113 | // aligned barrier guarding the load to unblock and the load to read a value |
1114 | // that has no CFG path to the load. |
1115 | bool InstIsExecutedInAlignedRegion = |
1116 | FindInterferingReads && ExecDomainAA && |
1117 | ExecDomainAA->isExecutedInAlignedRegion(A, I); |
1118 | |
1119 | if (InstIsExecutedInAlignedRegion || InstIsExecutedByInitialThreadOnly) |
1120 | A.recordDependence(FromAA: *ExecDomainAA, ToAA: QueryingAA, DepClass: DepClassTy::OPTIONAL); |
1121 | |
1122 | InformationCache &InfoCache = A.getInfoCache(); |
1123 | bool IsThreadLocalObj = |
1124 | AA::isAssumedThreadLocalObject(A, Obj&: getAssociatedValue(), QueryingAA: *this); |
1125 | |
1126 | // Helper to determine if we need to consider threading, which we cannot |
1127 | // right now. However, if the function is (assumed) nosync or the thread |
1128 | // executing all instructions is the main thread only we can ignore |
1129 | // threading. Also, thread-local objects do not require threading reasoning. |
1130 | // Finally, we can ignore threading if either access is executed in an |
1131 | // aligned region. |
1132 | auto CanIgnoreThreadingForInst = [&](const Instruction &I) -> bool { |
1133 | if (IsThreadLocalObj || AllInSameNoSyncFn) |
1134 | return true; |
1135 | const auto *FnExecDomainAA = |
1136 | I.getFunction() == &Scope |
1137 | ? ExecDomainAA |
1138 | : A.lookupAAFor<AAExecutionDomain>( |
1139 | IRP: IRPosition::function(F: *I.getFunction()), QueryingAA: &QueryingAA, |
1140 | DepClass: DepClassTy::NONE); |
1141 | if (!FnExecDomainAA) |
1142 | return false; |
1143 | if (InstIsExecutedInAlignedRegion || |
1144 | (FindInterferingWrites && |
1145 | FnExecDomainAA->isExecutedInAlignedRegion(A, I))) { |
1146 | A.recordDependence(FromAA: *FnExecDomainAA, ToAA: QueryingAA, DepClass: DepClassTy::OPTIONAL); |
1147 | return true; |
1148 | } |
1149 | if (InstIsExecutedByInitialThreadOnly && |
1150 | FnExecDomainAA->isExecutedByInitialThreadOnly(I)) { |
1151 | A.recordDependence(FromAA: *FnExecDomainAA, ToAA: QueryingAA, DepClass: DepClassTy::OPTIONAL); |
1152 | return true; |
1153 | } |
1154 | return false; |
1155 | }; |
1156 | |
1157 | // Helper to determine if the access is executed by the same thread as the |
1158 | // given instruction, for now it is sufficient to avoid any potential |
1159 | // threading effects as we cannot deal with them anyway. |
1160 | auto CanIgnoreThreading = [&](const Access &Acc) -> bool { |
1161 | return CanIgnoreThreadingForInst(*Acc.getRemoteInst()) || |
1162 | (Acc.getRemoteInst() != Acc.getLocalInst() && |
1163 | CanIgnoreThreadingForInst(*Acc.getLocalInst())); |
1164 | }; |
1165 | |
1166 | // TODO: Use inter-procedural reachability and dominance. |
1167 | bool IsKnownNoRecurse; |
1168 | AA::hasAssumedIRAttr<Attribute::NoRecurse>( |
1169 | A, QueryingAA: this, IRP: IRPosition::function(F: Scope), DepClass: DepClassTy::OPTIONAL, |
1170 | IsKnown&: IsKnownNoRecurse); |
1171 | |
1172 | // TODO: Use reaching kernels from AAKernelInfo (or move it to |
1173 | // AAExecutionDomain) such that we allow scopes other than kernels as long |
1174 | // as the reaching kernels are disjoint. |
1175 | bool InstInKernel = A.getInfoCache().isKernel(F: Scope); |
1176 | bool ObjHasKernelLifetime = false; |
1177 | const bool UseDominanceReasoning = |
1178 | FindInterferingWrites && IsKnownNoRecurse; |
1179 | const DominatorTree *DT = |
1180 | InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(F: Scope); |
1181 | |
1182 | // Helper to check if a value has "kernel lifetime", that is it will not |
1183 | // outlive a GPU kernel. This is true for shared, constant, and local |
1184 | // globals on AMD and NVIDIA GPUs. |
1185 | auto HasKernelLifetime = [&](Value *V, Module &M) { |
1186 | if (!AA::isGPU(M)) |
1187 | return false; |
1188 | switch (AA::GPUAddressSpace(V->getType()->getPointerAddressSpace())) { |
1189 | case AA::GPUAddressSpace::Shared: |
1190 | case AA::GPUAddressSpace::Constant: |
1191 | case AA::GPUAddressSpace::Local: |
1192 | return true; |
1193 | default: |
1194 | return false; |
1195 | }; |
1196 | }; |
1197 | |
1198 | // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query |
1199 | // to determine if we should look at reachability from the callee. For |
1200 | // certain pointers we know the lifetime and we do not have to step into the |
1201 | // callee to determine reachability as the pointer would be dead in the |
1202 | // callee. See the conditional initialization below. |
1203 | std::function<bool(const Function &)> IsLiveInCalleeCB; |
1204 | |
1205 | if (auto *AI = dyn_cast<AllocaInst>(Val: &getAssociatedValue())) { |
1206 | // If the alloca containing function is not recursive the alloca |
1207 | // must be dead in the callee. |
1208 | const Function *AIFn = AI->getFunction(); |
1209 | ObjHasKernelLifetime = A.getInfoCache().isKernel(F: *AIFn); |
1210 | bool IsKnownNoRecurse; |
1211 | if (AA::hasAssumedIRAttr<Attribute::NoRecurse>( |
1212 | A, QueryingAA: this, IRP: IRPosition::function(F: *AIFn), DepClass: DepClassTy::OPTIONAL, |
1213 | IsKnown&: IsKnownNoRecurse)) { |
1214 | IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; }; |
1215 | } |
1216 | } else if (auto *GV = dyn_cast<GlobalValue>(Val: &getAssociatedValue())) { |
1217 | // If the global has kernel lifetime we can stop if we reach a kernel |
1218 | // as it is "dead" in the (unknown) callees. |
1219 | ObjHasKernelLifetime = HasKernelLifetime(GV, *GV->getParent()); |
1220 | if (ObjHasKernelLifetime) |
1221 | IsLiveInCalleeCB = [&A](const Function &Fn) { |
1222 | return !A.getInfoCache().isKernel(F: Fn); |
1223 | }; |
1224 | } |
1225 | |
1226 | // Set of accesses/instructions that will overwrite the result and are |
1227 | // therefore blockers in the reachability traversal. |
1228 | AA::InstExclusionSetTy ExclusionSet; |
1229 | |
1230 | auto AccessCB = [&](const Access &Acc, bool Exact) { |
1231 | Function *AccScope = Acc.getRemoteInst()->getFunction(); |
1232 | bool AccInSameScope = AccScope == &Scope; |
1233 | |
1234 | // If the object has kernel lifetime we can ignore accesses only reachable |
1235 | // by other kernels. For now we only skip accesses *in* other kernels. |
1236 | if (InstInKernel && ObjHasKernelLifetime && !AccInSameScope && |
1237 | A.getInfoCache().isKernel(F: *AccScope)) |
1238 | return true; |
1239 | |
1240 | if (Exact && Acc.isMustAccess() && Acc.getRemoteInst() != &I) { |
1241 | if (Acc.isWrite() || (isa<LoadInst>(Val: I) && Acc.isWriteOrAssumption())) |
1242 | ExclusionSet.insert(Ptr: Acc.getRemoteInst()); |
1243 | } |
1244 | |
1245 | if ((!FindInterferingWrites || !Acc.isWriteOrAssumption()) && |
1246 | (!FindInterferingReads || !Acc.isRead())) |
1247 | return true; |
1248 | |
1249 | bool Dominates = FindInterferingWrites && DT && Exact && |
1250 | Acc.isMustAccess() && AccInSameScope && |
1251 | DT->dominates(Def: Acc.getRemoteInst(), User: &I); |
1252 | if (Dominates) |
1253 | DominatingWrites.insert(Ptr: &Acc); |
1254 | |
1255 | // Track if all interesting accesses are in the same `nosync` function as |
1256 | // the given instruction. |
1257 | AllInSameNoSyncFn &= Acc.getRemoteInst()->getFunction() == &Scope; |
1258 | |
1259 | InterferingAccesses.push_back(Elt: {&Acc, Exact}); |
1260 | return true; |
1261 | }; |
1262 | if (!State::forallInterferingAccesses(I, CB: AccessCB, Range)) |
1263 | return false; |
1264 | |
1265 | HasBeenWrittenTo = !DominatingWrites.empty(); |
1266 | |
1267 | // Dominating writes form a chain, find the least/lowest member. |
1268 | Instruction *LeastDominatingWriteInst = nullptr; |
1269 | for (const Access *Acc : DominatingWrites) { |
1270 | if (!LeastDominatingWriteInst) { |
1271 | LeastDominatingWriteInst = Acc->getRemoteInst(); |
1272 | } else if (DT->dominates(Def: LeastDominatingWriteInst, |
1273 | User: Acc->getRemoteInst())) { |
1274 | LeastDominatingWriteInst = Acc->getRemoteInst(); |
1275 | } |
1276 | } |
1277 | |
1278 | // Helper to determine if we can skip a specific write access. |
1279 | auto CanSkipAccess = [&](const Access &Acc, bool Exact) { |
1280 | if (SkipCB && SkipCB(Acc)) |
1281 | return true; |
1282 | if (!CanIgnoreThreading(Acc)) |
1283 | return false; |
1284 | |
1285 | // Check read (RAW) dependences and write (WAR) dependences as necessary. |
1286 | // If we successfully excluded all effects we are interested in, the |
1287 | // access can be skipped. |
1288 | bool ReadChecked = !FindInterferingReads; |
1289 | bool WriteChecked = !FindInterferingWrites; |
1290 | |
1291 | // If the instruction cannot reach the access, the former does not |
1292 | // interfere with what the access reads. |
1293 | if (!ReadChecked) { |
1294 | if (!AA::isPotentiallyReachable(A, FromI: I, ToI: *Acc.getRemoteInst(), QueryingAA, |
1295 | ExclusionSet: &ExclusionSet, GoBackwardsCB: IsLiveInCalleeCB)) |
1296 | ReadChecked = true; |
1297 | } |
1298 | // If the instruction cannot be reach from the access, the latter does not |
1299 | // interfere with what the instruction reads. |
1300 | if (!WriteChecked) { |
1301 | if (!AA::isPotentiallyReachable(A, FromI: *Acc.getRemoteInst(), ToI: I, QueryingAA, |
1302 | ExclusionSet: &ExclusionSet, GoBackwardsCB: IsLiveInCalleeCB)) |
1303 | WriteChecked = true; |
1304 | } |
1305 | |
1306 | // If we still might be affected by the write of the access but there are |
1307 | // dominating writes in the function of the instruction |
1308 | // (HasBeenWrittenTo), we can try to reason that the access is overwritten |
1309 | // by them. This would have happend above if they are all in the same |
1310 | // function, so we only check the inter-procedural case. Effectively, we |
1311 | // want to show that there is no call after the dominting write that might |
1312 | // reach the access, and when it returns reach the instruction with the |
1313 | // updated value. To this end, we iterate all call sites, check if they |
1314 | // might reach the instruction without going through another access |
1315 | // (ExclusionSet) and at the same time might reach the access. However, |
1316 | // that is all part of AAInterFnReachability. |
1317 | if (!WriteChecked && HasBeenWrittenTo && |
1318 | Acc.getRemoteInst()->getFunction() != &Scope) { |
1319 | |
1320 | const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>( |
1321 | QueryingAA, IRP: IRPosition::function(F: Scope), DepClass: DepClassTy::OPTIONAL); |
1322 | if (FnReachabilityAA) { |
1323 | // Without going backwards in the call tree, can we reach the access |
1324 | // from the least dominating write. Do not allow to pass the |
1325 | // instruction itself either. |
1326 | bool Inserted = ExclusionSet.insert(Ptr: &I).second; |
1327 | |
1328 | if (!FnReachabilityAA->instructionCanReach( |
1329 | A, Inst: *LeastDominatingWriteInst, |
1330 | Fn: *Acc.getRemoteInst()->getFunction(), ExclusionSet: &ExclusionSet)) |
1331 | WriteChecked = true; |
1332 | |
1333 | if (Inserted) |
1334 | ExclusionSet.erase(Ptr: &I); |
1335 | } |
1336 | } |
1337 | |
1338 | if (ReadChecked && WriteChecked) |
1339 | return true; |
1340 | |
1341 | if (!DT || !UseDominanceReasoning) |
1342 | return false; |
1343 | if (!DominatingWrites.count(Ptr: &Acc)) |
1344 | return false; |
1345 | return LeastDominatingWriteInst != Acc.getRemoteInst(); |
1346 | }; |
1347 | |
1348 | // Run the user callback on all accesses we cannot skip and return
1349 | // whether that succeeded for all of them.
1350 | for (auto &It : InterferingAccesses) { |
1351 | if ((!AllInSameNoSyncFn && !IsThreadLocalObj && !ExecDomainAA) || |
1352 | !CanSkipAccess(*It.first, It.second)) { |
1353 | if (!UserCB(*It.first, It.second)) |
1354 | return false; |
1355 | } |
1356 | } |
1357 | return true; |
1358 | } |
1359 | |
1360 | ChangeStatus translateAndAddStateFromCallee(Attributor &A, |
1361 | const AAPointerInfo &OtherAA, |
1362 | CallBase &CB) { |
1363 | using namespace AA::PointerInfo; |
1364 | if (!OtherAA.getState().isValidState() || !isValidState()) |
1365 | return indicatePessimisticFixpoint(); |
1366 | |
1367 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
1368 | const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA); |
1369 | bool IsByval = OtherAAImpl.getAssociatedArgument()->hasByValAttr(); |
1370 | Changed |= setReachesReturn(OtherAAImpl.ReturnedOffsets); |
1371 | |
1372 | // Combine the accesses bin by bin. |
1373 | const auto &State = OtherAAImpl.getState(); |
1374 | for (const auto &It : State) { |
1375 | for (auto Index : It.getSecond()) { |
1376 | const auto &RAcc = State.getAccess(Index); |
1377 | if (IsByval && !RAcc.isRead()) |
1378 | continue; |
1379 | bool UsedAssumedInformation = false; |
1380 | AccessKind AK = RAcc.getKind(); |
1381 | auto Content = A.translateArgumentToCallSiteContent( |
1382 | V: RAcc.getContent(), CB, AA: *this, UsedAssumedInformation); |
1383 | AK = AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW)); |
1384 | AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST)); |
1385 | |
1386 | Changed |= addAccess(A, Ranges: RAcc.getRanges(), I&: CB, Content, Kind: AK, |
1387 | Ty: RAcc.getType(), RemoteI: RAcc.getRemoteInst()); |
1388 | } |
1389 | } |
1390 | return Changed; |
1391 | } |
1392 | |
1393 | ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA, |
1394 | const OffsetInfo &Offsets, CallBase &CB, |
1395 | bool IsMustAcc) { |
1396 | using namespace AA::PointerInfo; |
1397 | if (!OtherAA.getState().isValidState() || !isValidState()) |
1398 | return indicatePessimisticFixpoint(); |
1399 | |
1400 | const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA); |
1401 | |
1402 | // Combine the accesses bin by bin. |
1403 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
1404 | const auto &State = OtherAAImpl.getState(); |
1405 | for (const auto &It : State) { |
1406 | for (auto Index : It.getSecond()) { |
1407 | const auto &RAcc = State.getAccess(Index); |
1408 | if (!IsMustAcc && RAcc.isAssumption()) |
1409 | continue; |
1410 | for (auto Offset : Offsets) { |
1411 | auto NewRanges = Offset == AA::RangeTy::Unknown |
1412 | ? AA::RangeTy::getUnknown() |
1413 | : RAcc.getRanges(); |
1414 | if (!NewRanges.isUnknown()) { |
1415 | NewRanges.addToAllOffsets(Inc: Offset); |
1416 | } |
1417 | AccessKind AK = RAcc.getKind(); |
1418 | if (!IsMustAcc) |
1419 | AK = AccessKind((AK & ~AK_MUST) | AK_MAY); |
1420 | Changed |= addAccess(A, Ranges: NewRanges, I&: CB, Content: RAcc.getContent(), Kind: AK, |
1421 | Ty: RAcc.getType(), RemoteI: RAcc.getRemoteInst()); |
1422 | } |
1423 | } |
1424 | } |
1425 | return Changed; |
1426 | } |
1427 | |
1428 | /// Statistic tracking for all AAPointerInfo implementations. |
1429 | /// See AbstractAttribute::trackStatistics(). |
1430 | void trackPointerInfoStatistics(const IRPosition &IRP) const {} |
1431 | |
1432 | /// Dump the state into \p O. |
1433 | void dumpState(raw_ostream &O) { |
1434 | for (auto &It : OffsetBins) { |
1435 | O << "[" << It.first.Offset << "-" << It.first.Offset + It.first.Size |
1436 | << "] : " << It.getSecond().size() << "\n" ; |
1437 | for (auto AccIndex : It.getSecond()) { |
1438 | auto &Acc = AccessList[AccIndex]; |
1439 | O << " - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n" ; |
1440 | if (Acc.getLocalInst() != Acc.getRemoteInst()) |
1441 | O << " --> " << *Acc.getRemoteInst() |
1442 | << "\n" ; |
1443 | if (!Acc.isWrittenValueYetUndetermined()) { |
1444 | if (isa_and_nonnull<Function>(Val: Acc.getWrittenValue())) |
1445 | O << " - c: func " << Acc.getWrittenValue()->getName() |
1446 | << "\n" ; |
1447 | else if (Acc.getWrittenValue()) |
1448 | O << " - c: " << *Acc.getWrittenValue() << "\n" ; |
1449 | else |
1450 | O << " - c: <unknown>\n" ; |
1451 | } |
1452 | } |
1453 | } |
1454 | } |
1455 | }; |
1456 | |
1457 | struct AAPointerInfoFloating : public AAPointerInfoImpl { |
1458 | using AccessKind = AAPointerInfo::AccessKind; |
1459 | AAPointerInfoFloating(const IRPosition &IRP, Attributor &A) |
1460 | : AAPointerInfoImpl(IRP, A) {} |
1461 | |
1462 | /// Deal with an access and signal if it was handled successfully. |
1463 | bool handleAccess(Attributor &A, Instruction &I, |
1464 | std::optional<Value *> Content, AccessKind Kind, |
1465 | OffsetInfo::VecTy &Offsets, ChangeStatus &Changed, |
1466 | Type &Ty) { |
1467 | using namespace AA::PointerInfo; |
1468 | auto Size = AA::RangeTy::Unknown; |
1469 | const DataLayout &DL = A.getDataLayout(); |
1470 | TypeSize AccessSize = DL.getTypeStoreSize(Ty: &Ty); |
1471 | if (!AccessSize.isScalable()) |
1472 | Size = AccessSize.getFixedValue(); |
1473 | |
1474 | // Make a strictly ascending list of offsets as required by addAccess() |
1475 | SmallVector<int64_t> OffsetsSorted(Offsets.begin(), Offsets.end()); |
1476 | llvm::sort(C&: OffsetsSorted); |
1477 | |
1478 | VectorType *VT = dyn_cast<VectorType>(Val: &Ty); |
1479 | if (!VT || VT->getElementCount().isScalable() || |
1480 | !Content.value_or(u: nullptr) || !isa<Constant>(Val: *Content) || |
1481 | (*Content)->getType() != VT || |
1482 | DL.getTypeStoreSize(Ty: VT->getElementType()).isScalable()) { |
1483 | Changed = |
1484 | Changed | addAccess(A, Ranges: {OffsetsSorted, Size}, I, Content, Kind, Ty: &Ty); |
1485 | } else { |
1486 | // Handle vector stores with constant content element-wise. |
1487 | // TODO: We could look for the elements or create instructions |
1488 | // representing them. |
1489 | // TODO: We need to push the Content into the range abstraction
1490 | // (AA::RangeTy) to allow different content values for different
1491 | // ranges, and hence support vectors storing different values.
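     | //
     | // For example (hypothetical IR), `store <2 x i32> <i32 0, i32 1>, ptr %p`
     | // at offset 8 becomes two 4-byte accesses: offset 8 storing 0 and
     | // offset 12 storing 1.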
1492 | Type *ElementType = VT->getElementType(); |
1493 | int64_t ElementSize = DL.getTypeStoreSize(Ty: ElementType).getFixedValue(); |
1494 | auto *ConstContent = cast<Constant>(Val: *Content); |
1495 | Type *Int32Ty = Type::getInt32Ty(C&: ElementType->getContext()); |
1496 | SmallVector<int64_t> ElementOffsets(Offsets.begin(), Offsets.end()); |
1497 | |
1498 | for (int i = 0, e = VT->getElementCount().getFixedValue(); i != e; ++i) { |
1499 | Value *ElementContent = ConstantExpr::getExtractElement( |
1500 | Vec: ConstContent, Idx: ConstantInt::get(Ty: Int32Ty, V: i)); |
1501 | |
1502 | // Add the element access. |
1503 | Changed = Changed | addAccess(A, Ranges: {ElementOffsets, ElementSize}, I, |
1504 | Content: ElementContent, Kind, Ty: ElementType); |
1505 | |
1506 | // Advance the offsets for the next element. |
1507 | for (auto &ElementOffset : ElementOffsets) |
1508 | ElementOffset += ElementSize; |
1509 | } |
1510 | } |
1511 | return true; |
1512 | }; |
1513 | |
1514 | /// See AbstractAttribute::updateImpl(...). |
1515 | ChangeStatus updateImpl(Attributor &A) override; |
1516 | |
1517 | /// If the indices to \p GEP can be traced to constants, incorporate all |
1518 | /// of these into \p UsrOI. |
1519 | /// |
1520 | /// \return true iff \p UsrOI is updated. |
1521 | bool collectConstantsForGEP(Attributor &A, const DataLayout &DL, |
1522 | OffsetInfo &UsrOI, const OffsetInfo &PtrOI, |
1523 | const GEPOperator *GEP); |
1524 | |
1525 | /// See AbstractAttribute::trackStatistics() |
1526 | void trackStatistics() const override { |
1527 | AAPointerInfoImpl::trackPointerInfoStatistics(IRP: getIRPosition()); |
1528 | } |
1529 | }; |
1530 | |
1531 | bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A, |
1532 | const DataLayout &DL, |
1533 | OffsetInfo &UsrOI, |
1534 | const OffsetInfo &PtrOI, |
1535 | const GEPOperator *GEP) { |
1536 | unsigned BitWidth = DL.getIndexTypeSizeInBits(Ty: GEP->getType()); |
1537 | SmallMapVector<Value *, APInt, 4> VariableOffsets; |
1538 | APInt ConstantOffset(BitWidth, 0); |
1539 | |
1540 | assert(!UsrOI.isUnknown() && !PtrOI.isUnknown() && |
1541 | "Don't look for constant values if the offset has already been " |
1542 | "determined to be unknown." ); |
1543 | |
1544 | if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) { |
1545 | UsrOI.setUnknown(); |
1546 | return true; |
1547 | } |
1548 | |
1549 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] GEP offset is " |
1550 | << (VariableOffsets.empty() ? "" : "not " ) << "constant "
1551 | << *GEP << "\n" ); |
1552 | |
1553 | auto Union = PtrOI; |
1554 | Union.addToAll(Inc: ConstantOffset.getSExtValue()); |
1555 | |
1556 | // Each VI in VariableOffsets has a set of potential constant values. Every |
1557 | // combination of elements, picked one each from these sets, is separately |
1558 | // added to the original set of offsets, thus resulting in more offsets. |
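     | //
     | // For example, with base offsets {0, 16} and a variable index whose
     | // potential constants are {0, 1}, scaled by 4 bytes, the resulting
     | // offset set is {0, 4, 16, 20}.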
1559 | for (const auto &VI : VariableOffsets) { |
1560 | auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>( |
1561 | QueryingAA: *this, IRP: IRPosition::value(V: *VI.first), DepClass: DepClassTy::OPTIONAL); |
1562 | if (!PotentialConstantsAA || !PotentialConstantsAA->isValidState()) { |
1563 | UsrOI.setUnknown(); |
1564 | return true; |
1565 | } |
1566 | |
1567 | // UndefValue is treated as a zero, which leaves Union as is. |
1568 | if (PotentialConstantsAA->undefIsContained()) |
1569 | continue; |
1570 | |
1571 | // We need at least one constant in every set to compute an actual offset. |
1572 | // Otherwise, we end up pessimizing AAPointerInfo by respecting offsets that |
1573 | // don't actually exist. In other words, the absence of constant values |
1574 | // implies that the operation can be assumed dead for now. |
1575 | auto &AssumedSet = PotentialConstantsAA->getAssumedSet(); |
1576 | if (AssumedSet.empty()) |
1577 | return false; |
1578 | |
1579 | OffsetInfo Product; |
1580 | for (const auto &ConstOffset : AssumedSet) { |
1581 | auto CopyPerOffset = Union; |
1582 | CopyPerOffset.addToAll(Inc: ConstOffset.getSExtValue() * |
1583 | VI.second.getZExtValue()); |
1584 | Product.merge(R: CopyPerOffset); |
1585 | } |
1586 | Union = Product; |
1587 | } |
1588 | |
1589 | UsrOI = std::move(Union); |
1590 | return true; |
1591 | } |
1592 | |
1593 | ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) { |
1594 | using namespace AA::PointerInfo; |
1595 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
1596 | const DataLayout &DL = A.getDataLayout(); |
1597 | Value &AssociatedValue = getAssociatedValue(); |
1598 | |
1599 | DenseMap<Value *, OffsetInfo> OffsetInfoMap; |
1600 | OffsetInfoMap[&AssociatedValue].insert(Offset: 0); |
1601 | |
1602 | auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) { |
1603 | // One does not simply walk into a map and assign a reference to a possibly |
1604 | // new location. That can cause an invalidation before the assignment |
1605 | // happens, like so: |
1606 | // |
1607 | // OffsetInfoMap[Usr] = OffsetInfoMap[CurPtr]; /* bad idea! */ |
1608 | // |
1609 | // The RHS is a reference that may be invalidated by an insertion caused by |
1610 | // the LHS. So we ensure that the side-effect of the LHS happens first. |
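     | //
     | // The code below therefore materializes both map slots before reading:
     | //
     | //   auto &UsrOI = OffsetInfoMap[Usr];    // may insert and rehash
     | //   auto &PtrOI = OffsetInfoMap[CurPtr]; // looked up afterwards
     | //   UsrOI.merge(PtrOI);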
1611 | |
1612 | assert(OffsetInfoMap.contains(CurPtr) && |
1613 | "CurPtr does not exist in the map!" ); |
1614 | |
1615 | auto &UsrOI = OffsetInfoMap[Usr]; |
1616 | auto &PtrOI = OffsetInfoMap[CurPtr]; |
1617 | assert(!PtrOI.isUnassigned() && |
1618 | "Cannot pass through if the input Ptr was not visited!" ); |
1619 | UsrOI.merge(R: PtrOI); |
1620 | Follow = true; |
1621 | return true; |
1622 | }; |
1623 | |
1624 | auto UsePred = [&](const Use &U, bool &Follow) -> bool { |
1625 | Value *CurPtr = U.get(); |
1626 | User *Usr = U.getUser(); |
1627 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in " << *Usr |
1628 | << "\n" ); |
1629 | assert(OffsetInfoMap.count(CurPtr) && |
1630 | "The current pointer offset should have been seeded!" ); |
1631 | assert(!OffsetInfoMap[CurPtr].isUnassigned() && |
1632 | "Current pointer should be assigned" ); |
1633 | |
1634 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: Usr)) { |
1635 | if (CE->isCast()) |
1636 | return HandlePassthroughUser(Usr, CurPtr, Follow); |
1637 | if (!isa<GEPOperator>(Val: CE)) { |
1638 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE |
1639 | << "\n" ); |
1640 | return false; |
1641 | } |
1642 | } |
1643 | if (auto *GEP = dyn_cast<GEPOperator>(Val: Usr)) { |
1644 | // Note the order here: the Usr access might change the map, but CurPtr
1645 | // is already in it.
1646 | auto &UsrOI = OffsetInfoMap[Usr]; |
1647 | auto &PtrOI = OffsetInfoMap[CurPtr]; |
1648 | |
1649 | if (UsrOI.isUnknown()) |
1650 | return true; |
1651 | |
1652 | if (PtrOI.isUnknown()) { |
1653 | Follow = true; |
1654 | UsrOI.setUnknown(); |
1655 | return true; |
1656 | } |
1657 | |
1658 | Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP); |
1659 | return true; |
1660 | } |
1661 | if (isa<PtrToIntInst>(Val: Usr)) |
1662 | return false; |
1663 | if (isa<CastInst>(Val: Usr) || isa<SelectInst>(Val: Usr)) |
1664 | return HandlePassthroughUser(Usr, CurPtr, Follow); |
1665 | // Returns are allowed if they are in the associated function. Users can
1666 | // then check the call site return. Returns from other functions can't be
1667 | // tracked and are cause for invalidation.
1668 | if (auto *RI = dyn_cast<ReturnInst>(Val: Usr)) { |
1669 | if (RI->getFunction() == getAssociatedFunction()) { |
1670 | auto &PtrOI = OffsetInfoMap[CurPtr]; |
1671 | Changed |= setReachesReturn(PtrOI); |
1672 | return true; |
1673 | } |
1674 | return false; |
1675 | } |
1676 | |
1677 | // For PHIs we need to take care of the recurrence explicitly as the value |
1678 | // might change while we iterate through a loop. For now, we give up if |
1679 | // the PHI is not invariant. |
1680 | if (auto *PHI = dyn_cast<PHINode>(Val: Usr)) { |
1681 | // Note the order here: the Usr access might change the map, but CurPtr
1682 | // is already in it.
1683 | auto [PhiIt, IsFirstPHIUser] = OffsetInfoMap.try_emplace(Key: PHI); |
1684 | auto &UsrOI = PhiIt->second; |
1685 | auto &PtrOI = OffsetInfoMap[CurPtr]; |
1686 | |
1687 | // Check if the PHI operand has already an unknown offset as we can't |
1688 | // improve on that anymore. |
1689 | if (PtrOI.isUnknown()) { |
1690 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand offset unknown " |
1691 | << *CurPtr << " in " << *PHI << "\n" ); |
1692 | Follow = !UsrOI.isUnknown(); |
1693 | UsrOI.setUnknown(); |
1694 | return true; |
1695 | } |
1696 | |
1697 | // Check if the PHI is invariant (so far). |
1698 | if (UsrOI == PtrOI) { |
1699 | assert(!PtrOI.isUnassigned() && |
1700 | "Cannot assign if the current Ptr was not visited!" ); |
1701 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant (so far)\n" );
1702 | return true; |
1703 | } |
1704 | |
1705 | // Check if the PHI operand can be traced back to AssociatedValue. |
1706 | APInt Offset( |
1707 | DL.getIndexSizeInBits(AS: CurPtr->getType()->getPointerAddressSpace()), |
1708 | 0); |
1709 | Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets( |
1710 | DL, Offset, /* AllowNonInbounds */ true); |
1711 | auto It = OffsetInfoMap.find(Val: CurPtrBase); |
1712 | if (It == OffsetInfoMap.end()) { |
1713 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex " |
1714 | << *CurPtr << " in " << *PHI |
1715 | << " (base: " << *CurPtrBase << ")\n" ); |
1716 | UsrOI.setUnknown(); |
1717 | Follow = true; |
1718 | return true; |
1719 | } |
1720 | |
1721 | // Check if the PHI operand is not dependent on the PHI itself. Every |
1722 | // recurrence is a cyclic net of PHIs in the data flow, and has an |
1723 | // equivalent Cycle in the control flow. One of those PHIs must be in the |
1724 | // header of that control flow Cycle. This is independent of the choice of |
1725 | // Cycles reported by CycleInfo. It is sufficient to check the PHIs in |
1726 | // every Cycle header; if such a node is marked unknown, this will |
1727 | // eventually propagate through the whole net of PHIs in the recurrence. |
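     | //
     | // A typical recurrence this catches (hypothetical IR):
     | //
     | //   loop:
     | //     %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
     | //     %p.next = getelementptr i8, ptr %p, i64 4
     | //
     | // The offset of %p changes between iterations, so once the mismatch is
     | // seen in the cycle header, %p is marked unknown.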
1728 | const auto *CI = |
1729 | A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>( |
1730 | F: *PHI->getFunction()); |
1731 | if (mayBeInCycle(CI, I: cast<Instruction>(Val: Usr), /* HeaderOnly */ true)) { |
1732 | auto BaseOI = It->getSecond(); |
1733 | BaseOI.addToAll(Inc: Offset.getZExtValue()); |
1734 | if (IsFirstPHIUser || BaseOI == UsrOI) { |
1735 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant " << *CurPtr |
1736 | << " in " << *Usr << "\n" ); |
1737 | return HandlePassthroughUser(Usr, CurPtr, Follow); |
1738 | } |
1739 | |
1740 | LLVM_DEBUG( |
1741 | dbgs() << "[AAPointerInfo] PHI operand pointer offset mismatch " |
1742 | << *CurPtr << " in " << *PHI << "\n" ); |
1743 | UsrOI.setUnknown(); |
1744 | Follow = true; |
1745 | return true; |
1746 | } |
1747 | |
1748 | UsrOI.merge(R: PtrOI); |
1749 | Follow = true; |
1750 | return true; |
1751 | } |
1752 | |
1753 | if (auto *LoadI = dyn_cast<LoadInst>(Val: Usr)) { |
1754 | // If the access is to a pointer that may or may not be the associated |
1755 | // value, e.g. due to a PHI, we cannot assume it will be read. |
1756 | AccessKind AK = AccessKind::AK_R; |
1757 | if (getUnderlyingObject(V: CurPtr) == &AssociatedValue) |
1758 | AK = AccessKind(AK | AccessKind::AK_MUST); |
1759 | else |
1760 | AK = AccessKind(AK | AccessKind::AK_MAY); |
1761 | if (!handleAccess(A, I&: *LoadI, /* Content */ nullptr, Kind: AK, |
1762 | Offsets&: OffsetInfoMap[CurPtr].Offsets, Changed, |
1763 | Ty&: *LoadI->getType())) |
1764 | return false; |
1765 | |
1766 | auto IsAssumption = [](Instruction &I) { |
1767 | if (auto *II = dyn_cast<IntrinsicInst>(Val: &I)) |
1768 | return II->isAssumeLikeIntrinsic(); |
1769 | return false; |
1770 | }; |
1771 | |
1772 | auto IsImpactedInRange = [&](Instruction *FromI, Instruction *ToI) { |
1773 | // Check if the assumption and the load are executed together without |
1774 | // memory modification. |
1775 | do { |
1776 | if (FromI->mayWriteToMemory() && !IsAssumption(*FromI)) |
1777 | return true; |
1778 | FromI = FromI->getNextNonDebugInstruction(); |
1779 | } while (FromI && FromI != ToI); |
1780 | return false; |
1781 | }; |
1782 | |
1783 | BasicBlock *BB = LoadI->getParent(); |
1784 | auto IsValidAssume = [&](IntrinsicInst &IntrI) { |
1785 | if (IntrI.getIntrinsicID() != Intrinsic::assume) |
1786 | return false; |
1787 | BasicBlock *IntrBB = IntrI.getParent(); |
1788 | if (IntrI.getParent() == BB) { |
1789 | if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(), &IntrI)) |
1790 | return false; |
1791 | } else { |
1792 | auto PredIt = pred_begin(BB: IntrBB); |
1793 | if (PredIt == pred_end(BB: IntrBB)) |
1794 | return false; |
1795 | if ((*PredIt) != BB) |
1796 | return false; |
1797 | if (++PredIt != pred_end(BB: IntrBB)) |
1798 | return false; |
1799 | for (auto *SuccBB : successors(BB)) { |
1800 | if (SuccBB == IntrBB) |
1801 | continue; |
1802 | if (isa<UnreachableInst>(Val: SuccBB->getTerminator())) |
1803 | continue; |
1804 | return false; |
1805 | } |
1806 | if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(), |
1807 | BB->getTerminator())) |
1808 | return false; |
1809 | if (IsImpactedInRange(&IntrBB->front(), &IntrI)) |
1810 | return false; |
1811 | } |
1812 | return true; |
1813 | }; |
1814 | |
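     | // The pattern we are looking for below (hypothetical IR):
     | //
     | //   %v = load i32, ptr %p
     | //   %c = icmp eq i32 %v, %x
     | //   call void @llvm.assume(i1 %c)
     | //
     | // If no memory write can happen between the load and the assume, the
     | // assumption effectively describes the content of %p at this point.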
1815 | std::pair<Value *, IntrinsicInst *> Assumption; |
1816 | for (const Use &LoadU : LoadI->uses()) { |
1817 | if (auto *CmpI = dyn_cast<CmpInst>(Val: LoadU.getUser())) { |
1818 | if (!CmpI->isEquality() || !CmpI->isTrueWhenEqual()) |
1819 | continue; |
1820 | for (const Use &CmpU : CmpI->uses()) { |
1821 | if (auto *IntrI = dyn_cast<IntrinsicInst>(Val: CmpU.getUser())) { |
1822 | if (!IsValidAssume(*IntrI)) |
1823 | continue; |
1824 | int Idx = CmpI->getOperandUse(i: 0) == LoadU; |
1825 | Assumption = {CmpI->getOperand(i_nocapture: Idx), IntrI}; |
1826 | break; |
1827 | } |
1828 | } |
1829 | } |
1830 | if (Assumption.first) |
1831 | break; |
1832 | } |
1833 | |
1834 | // Check if we found an assumption associated with this load. |
1835 | if (!Assumption.first || !Assumption.second) |
1836 | return true; |
1837 | |
1838 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] Assumption found " |
1839 | << *Assumption.second << ": " << *LoadI |
1840 | << " == " << *Assumption.first << "\n" ); |
1841 | bool UsedAssumedInformation = false; |
1842 | std::optional<Value *> Content = nullptr; |
1843 | if (Assumption.first) |
1844 | Content = |
1845 | A.getAssumedSimplified(V: *Assumption.first, AA: *this, |
1846 | UsedAssumedInformation, S: AA::Interprocedural); |
1847 | return handleAccess( |
1848 | A, I&: *Assumption.second, Content, Kind: AccessKind::AK_ASSUMPTION, |
1849 | Offsets&: OffsetInfoMap[CurPtr].Offsets, Changed, Ty&: *LoadI->getType()); |
1850 | } |
1851 | |
1852 | auto HandleStoreLike = [&](Instruction &I, Value *ValueOp, Type &ValueTy, |
1853 | ArrayRef<Value *> OtherOps, AccessKind AK) { |
1854 | for (auto *OtherOp : OtherOps) { |
1855 | if (OtherOp == CurPtr) { |
1856 | LLVM_DEBUG( |
1857 | dbgs() |
1858 | << "[AAPointerInfo] Escaping use in store like instruction " << I |
1859 | << "\n" ); |
1860 | return false; |
1861 | } |
1862 | } |
1863 | |
1864 | // If the access is to a pointer that may or may not be the associated |
1865 | // value, e.g. due to a PHI, we cannot assume it will be written. |
1866 | if (getUnderlyingObject(V: CurPtr) == &AssociatedValue) |
1867 | AK = AccessKind(AK | AccessKind::AK_MUST); |
1868 | else |
1869 | AK = AccessKind(AK | AccessKind::AK_MAY); |
1870 | bool UsedAssumedInformation = false; |
1871 | std::optional<Value *> Content = nullptr; |
1872 | if (ValueOp) |
1873 | Content = A.getAssumedSimplified( |
1874 | V: *ValueOp, AA: *this, UsedAssumedInformation, S: AA::Interprocedural); |
1875 | return handleAccess(A, I, Content, Kind: AK, Offsets&: OffsetInfoMap[CurPtr].Offsets, |
1876 | Changed, Ty&: ValueTy); |
1877 | }; |
1878 | |
1879 | if (auto *StoreI = dyn_cast<StoreInst>(Val: Usr)) |
1880 | return HandleStoreLike(*StoreI, StoreI->getValueOperand(), |
1881 | *StoreI->getValueOperand()->getType(), |
1882 | {StoreI->getValueOperand()}, AccessKind::AK_W); |
1883 | if (auto *RMWI = dyn_cast<AtomicRMWInst>(Val: Usr)) |
1884 | return HandleStoreLike(*RMWI, nullptr, *RMWI->getValOperand()->getType(), |
1885 | {RMWI->getValOperand()}, AccessKind::AK_RW); |
1886 | if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(Val: Usr)) |
1887 | return HandleStoreLike( |
1888 | *CXI, nullptr, *CXI->getNewValOperand()->getType(), |
1889 | {CXI->getCompareOperand(), CXI->getNewValOperand()}, |
1890 | AccessKind::AK_RW); |
1891 | |
1892 | if (auto *CB = dyn_cast<CallBase>(Val: Usr)) { |
1893 | if (CB->isLifetimeStartOrEnd()) |
1894 | return true; |
1895 | const auto *TLI = |
1896 | A.getInfoCache().getTargetLibraryInfoForFunction(F: *CB->getFunction()); |
1897 | if (getFreedOperand(CB, TLI) == U) |
1898 | return true; |
1899 | if (CB->isArgOperand(U: &U)) { |
1900 | unsigned ArgNo = CB->getArgOperandNo(U: &U); |
1901 | const auto *CSArgPI = A.getAAFor<AAPointerInfo>( |
1902 | QueryingAA: *this, IRP: IRPosition::callsite_argument(CB: *CB, ArgNo), |
1903 | DepClass: DepClassTy::REQUIRED); |
1904 | if (!CSArgPI) |
1905 | return false; |
1906 | bool IsArgMustAcc = (getUnderlyingObject(V: CurPtr) == &AssociatedValue); |
1907 | Changed = translateAndAddState(A, OtherAA: *CSArgPI, Offsets: OffsetInfoMap[CurPtr], CB&: *CB, |
1908 | IsMustAcc: IsArgMustAcc) | |
1909 | Changed; |
1910 | if (!CSArgPI->reachesReturn()) |
1911 | return isValidState(); |
1912 | |
1913 | Function *Callee = CB->getCalledFunction(); |
1914 | if (!Callee || Callee->arg_size() <= ArgNo) |
1915 | return false; |
1916 | bool UsedAssumedInformation = false; |
1917 | auto ReturnedValue = A.getAssumedSimplified( |
1918 | IRP: IRPosition::returned(F: *Callee), AA: *this, UsedAssumedInformation, |
1919 | S: AA::ValueScope::Intraprocedural); |
1920 | auto *ReturnedArg = |
1921 | dyn_cast_or_null<Argument>(Val: ReturnedValue.value_or(u: nullptr)); |
1922 | auto *Arg = Callee->getArg(i: ArgNo); |
1923 | if (ReturnedArg && Arg != ReturnedArg) |
1924 | return true; |
1925 | bool IsRetMustAcc = IsArgMustAcc && (ReturnedArg == Arg); |
1926 | const auto *CSRetPI = A.getAAFor<AAPointerInfo>( |
1927 | QueryingAA: *this, IRP: IRPosition::callsite_returned(CB: *CB), DepClass: DepClassTy::REQUIRED); |
1928 | if (!CSRetPI) |
1929 | return false; |
1930 | OffsetInfo OI = OffsetInfoMap[CurPtr]; |
1931 | CSArgPI->addReturnedOffsetsTo(OI); |
1932 | Changed = |
1933 | translateAndAddState(A, OtherAA: *CSRetPI, Offsets: OI, CB&: *CB, IsMustAcc: IsRetMustAcc) | Changed; |
1934 | return isValidState(); |
1935 | } |
1936 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB |
1937 | << "\n" ); |
1938 | return false; |
1939 | } |
1940 | |
1941 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n" ); |
1942 | return false; |
1943 | }; |
1944 | auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) { |
1945 | assert(OffsetInfoMap.count(OldU) && "Old use should be known already!" ); |
1946 | assert(!OffsetInfoMap[OldU].isUnassigned() && "Old use should be assigned" );
1947 | if (OffsetInfoMap.count(Val: NewU)) { |
1948 | LLVM_DEBUG({ |
1949 | if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) { |
1950 | dbgs() << "[AAPointerInfo] Equivalent use callback failed: " |
1951 | << OffsetInfoMap[NewU] << " vs " << OffsetInfoMap[OldU] |
1952 | << "\n" ; |
1953 | } |
1954 | }); |
1955 | return OffsetInfoMap[NewU] == OffsetInfoMap[OldU]; |
1956 | } |
1957 | bool Unused; |
1958 | return HandlePassthroughUser(NewU.get(), OldU.get(), Unused); |
1959 | }; |
1960 | if (!A.checkForAllUses(Pred: UsePred, QueryingAA: *this, V: AssociatedValue, |
1961 | /* CheckBBLivenessOnly */ true, LivenessDepClass: DepClassTy::OPTIONAL, |
1962 | /* IgnoreDroppableUses */ true, EquivalentUseCB)) { |
1963 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n" ); |
1964 | return indicatePessimisticFixpoint(); |
1965 | } |
1966 | |
1967 | LLVM_DEBUG({ |
1968 | dbgs() << "Accesses by bin after update:\n" ; |
1969 | dumpState(dbgs()); |
1970 | }); |
1971 | |
1972 | return Changed; |
1973 | } |
1974 | |
1975 | struct AAPointerInfoReturned final : AAPointerInfoImpl { |
1976 | AAPointerInfoReturned(const IRPosition &IRP, Attributor &A) |
1977 | : AAPointerInfoImpl(IRP, A) {} |
1978 | |
1979 | /// See AbstractAttribute::updateImpl(...). |
1980 | ChangeStatus updateImpl(Attributor &A) override { |
1981 | return indicatePessimisticFixpoint(); |
1982 | } |
1983 | |
1984 | /// See AbstractAttribute::trackStatistics() |
1985 | void trackStatistics() const override { |
1986 | AAPointerInfoImpl::trackPointerInfoStatistics(IRP: getIRPosition()); |
1987 | } |
1988 | }; |
1989 | |
1990 | struct AAPointerInfoArgument final : AAPointerInfoFloating { |
1991 | AAPointerInfoArgument(const IRPosition &IRP, Attributor &A) |
1992 | : AAPointerInfoFloating(IRP, A) {} |
1993 | |
1994 | /// See AbstractAttribute::trackStatistics() |
1995 | void trackStatistics() const override { |
1996 | AAPointerInfoImpl::trackPointerInfoStatistics(IRP: getIRPosition()); |
1997 | } |
1998 | }; |
1999 | |
2000 | struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating { |
2001 | AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A) |
2002 | : AAPointerInfoFloating(IRP, A) {} |
2003 | |
2004 | /// See AbstractAttribute::updateImpl(...). |
2005 | ChangeStatus updateImpl(Attributor &A) override { |
2006 | using namespace AA::PointerInfo; |
2007 | // We handle memory intrinsics explicitly, at least the first (=
2008 | // destination) and second (= source) arguments, as we know how they are
2009 | // accessed.
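     | //
     | // For example, `llvm.memcpy(ptr %dst, ptr %src, i64 64, ...)` is
     | // recorded as a must-write access [0-64) for %dst (ArgNo == 0) and a
     | // must-read access [0-64) for %src (ArgNo == 1).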
2010 | if (auto *MI = dyn_cast_or_null<MemIntrinsic>(Val: getCtxI())) { |
2011 | ConstantInt *Length = dyn_cast<ConstantInt>(Val: MI->getLength()); |
2012 | int64_t LengthVal = AA::RangeTy::Unknown; |
2013 | if (Length) |
2014 | LengthVal = Length->getSExtValue(); |
2015 | unsigned ArgNo = getIRPosition().getCallSiteArgNo(); |
2016 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
2017 | if (ArgNo > 1) { |
2018 | LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic " |
2019 | << *MI << "\n" ); |
2020 | return indicatePessimisticFixpoint(); |
2021 | } else { |
2022 | auto Kind = |
2023 | ArgNo == 0 ? AccessKind::AK_MUST_WRITE : AccessKind::AK_MUST_READ; |
2024 | Changed = |
2025 | Changed | addAccess(A, Ranges: {0, LengthVal}, I&: *MI, Content: nullptr, Kind, Ty: nullptr); |
2026 | } |
2027 | LLVM_DEBUG({ |
2028 | dbgs() << "Accesses by bin after update:\n" ; |
2029 | dumpState(dbgs()); |
2030 | }); |
2031 | |
2032 | return Changed; |
2033 | } |
2034 | |
2035 | // TODO: Once we have call site specific value information we can provide
2036 | // call site specific liveness information and then it makes
2037 | // sense to specialize attributes for call site arguments instead of
2038 | // redirecting requests to the callee argument.
2039 | Argument *Arg = getAssociatedArgument(); |
2040 | if (Arg) { |
2041 | const IRPosition &ArgPos = IRPosition::argument(Arg: *Arg); |
2042 | auto *ArgAA = |
2043 | A.getAAFor<AAPointerInfo>(QueryingAA: *this, IRP: ArgPos, DepClass: DepClassTy::REQUIRED); |
2044 | if (ArgAA && ArgAA->getState().isValidState()) |
2045 | return translateAndAddStateFromCallee(A, OtherAA: *ArgAA, |
2046 | CB&: *cast<CallBase>(Val: getCtxI())); |
2047 | if (!Arg->getParent()->isDeclaration()) |
2048 | return indicatePessimisticFixpoint(); |
2049 | } |
2050 | |
2051 | bool IsKnownNoCapture; |
2052 | if (!AA::hasAssumedIRAttr<Attribute::Captures>( |
2053 | A, QueryingAA: this, IRP: getIRPosition(), DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoCapture)) |
2054 | return indicatePessimisticFixpoint(); |
2055 | |
2056 | bool IsKnown = false; |
2057 | if (AA::isAssumedReadNone(A, IRP: getIRPosition(), QueryingAA: *this, IsKnown)) |
2058 | return ChangeStatus::UNCHANGED; |
2059 | bool ReadOnly = AA::isAssumedReadOnly(A, IRP: getIRPosition(), QueryingAA: *this, IsKnown); |
2060 | auto Kind = |
2061 | ReadOnly ? AccessKind::AK_MAY_READ : AccessKind::AK_MAY_READ_WRITE; |
2062 | return addAccess(A, Ranges: AA::RangeTy::getUnknown(), I&: *getCtxI(), Content: nullptr, Kind, |
2063 | Ty: nullptr); |
2064 | } |
2065 | |
2066 | /// See AbstractAttribute::trackStatistics() |
2067 | void trackStatistics() const override { |
2068 | AAPointerInfoImpl::trackPointerInfoStatistics(IRP: getIRPosition()); |
2069 | } |
2070 | }; |
2071 | |
2072 | struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating { |
2073 | AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A) |
2074 | : AAPointerInfoFloating(IRP, A) {} |
2075 | |
2076 | /// See AbstractAttribute::trackStatistics() |
2077 | void trackStatistics() const override { |
2078 | AAPointerInfoImpl::trackPointerInfoStatistics(IRP: getIRPosition()); |
2079 | } |
2080 | }; |
2081 | } // namespace |
2082 | |
2083 | /// -----------------------NoUnwind Function Attribute-------------------------- |
2084 | |
2085 | namespace { |
2086 | struct AANoUnwindImpl : AANoUnwind { |
2087 | AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {} |
2088 | |
2089 | /// See AbstractAttribute::initialize(...). |
2090 | void initialize(Attributor &A) override { |
2091 | bool IsKnown; |
2092 | assert(!AA::hasAssumedIRAttr<Attribute::NoUnwind>( |
2093 | A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); |
2094 | (void)IsKnown; |
2095 | } |
2096 | |
2097 | const std::string getAsStr(Attributor *A) const override { |
2098 | return getAssumed() ? "nounwind" : "may-unwind" ; |
2099 | } |
2100 | |
2101 | /// See AbstractAttribute::updateImpl(...). |
2102 | ChangeStatus updateImpl(Attributor &A) override { |
2103 | auto Opcodes = { |
2104 | (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, |
2105 | (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, |
2106 | (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; |
2107 | |
2108 | auto CheckForNoUnwind = [&](Instruction &I) { |
2109 | if (!I.mayThrow(/* IncludePhaseOneUnwind */ true)) |
2110 | return true; |
2111 | |
2112 | if (const auto *CB = dyn_cast<CallBase>(Val: &I)) { |
2113 | bool IsKnownNoUnwind; |
2114 | return AA::hasAssumedIRAttr<Attribute::NoUnwind>( |
2115 | A, QueryingAA: this, IRP: IRPosition::callsite_function(CB: *CB), DepClass: DepClassTy::REQUIRED, |
2116 | IsKnown&: IsKnownNoUnwind); |
2117 | } |
2118 | return false; |
2119 | }; |
2120 | |
2121 | bool UsedAssumedInformation = false; |
2122 | if (!A.checkForAllInstructions(Pred: CheckForNoUnwind, QueryingAA: *this, Opcodes, |
2123 | UsedAssumedInformation)) |
2124 | return indicatePessimisticFixpoint(); |
2125 | |
2126 | return ChangeStatus::UNCHANGED; |
2127 | } |
2128 | }; |
2129 | |
2130 | struct AANoUnwindFunction final : public AANoUnwindImpl { |
2131 | AANoUnwindFunction(const IRPosition &IRP, Attributor &A) |
2132 | : AANoUnwindImpl(IRP, A) {} |
2133 | |
2134 | /// See AbstractAttribute::trackStatistics() |
2135 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) } |
2136 | }; |
2137 | |
2138 | /// NoUnwind attribute deduction for a call site.
2139 | struct AANoUnwindCallSite final |
2140 | : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl> { |
2141 | AANoUnwindCallSite(const IRPosition &IRP, Attributor &A) |
2142 | : AACalleeToCallSite<AANoUnwind, AANoUnwindImpl>(IRP, A) {} |
2143 | |
2144 | /// See AbstractAttribute::trackStatistics() |
2145 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); } |
2146 | }; |
2147 | } // namespace |
2148 | |
2149 | /// ------------------------ NoSync Function Attribute ------------------------- |
2150 | |
2151 | bool AANoSync::isAlignedBarrier(const CallBase &CB, bool ExecutedAligned) { |
2152 | switch (CB.getIntrinsicID()) { |
2153 | case Intrinsic::nvvm_barrier_cta_sync_aligned_all: |
2154 | case Intrinsic::nvvm_barrier_cta_sync_aligned_count: |
2155 | case Intrinsic::nvvm_barrier0_and: |
2156 | case Intrinsic::nvvm_barrier0_or: |
2157 | case Intrinsic::nvvm_barrier0_popc: |
2158 | return true; |
2159 | case Intrinsic::amdgcn_s_barrier: |
2160 | if (ExecutedAligned) |
2161 | return true; |
2162 | break; |
2163 | default: |
2164 | break; |
2165 | } |
2166 | return hasAssumption(CB, AssumptionStr: KnownAssumptionString("ompx_aligned_barrier" )); |
2167 | } |
2168 | |
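     | /// Return true if \p I is an atomic operation with an ordering stronger
     | /// than monotonic (relaxed), e.g., a `seq_cst` store. Relaxed and
     | /// unordered atomics, as well as single-thread fences, are not considered
     | /// synchronizing.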
2169 | bool AANoSync::isNonRelaxedAtomic(const Instruction *I) { |
2170 | if (!I->isAtomic()) |
2171 | return false; |
2172 | |
2173 | if (auto *FI = dyn_cast<FenceInst>(Val: I)) |
2174 | // All legal orderings for fence are stronger than monotonic. |
2175 | return FI->getSyncScopeID() != SyncScope::SingleThread; |
2176 | if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Val: I)) { |
2177 | // Unordered is not a legal ordering for cmpxchg, so monotonic is the
2178 | // weakest legal ordering; anything stronger is non-relaxed.
2178 | return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
2178 | return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic || |
2179 | AI->getFailureOrdering() != AtomicOrdering::Monotonic); |
2180 | } |
2181 | |
2182 | AtomicOrdering Ordering; |
2183 | switch (I->getOpcode()) { |
2184 | case Instruction::AtomicRMW: |
2185 | Ordering = cast<AtomicRMWInst>(Val: I)->getOrdering(); |
2186 | break; |
2187 | case Instruction::Store: |
2188 | Ordering = cast<StoreInst>(Val: I)->getOrdering(); |
2189 | break; |
2190 | case Instruction::Load: |
2191 | Ordering = cast<LoadInst>(Val: I)->getOrdering(); |
2192 | break; |
2193 | default: |
2194 | llvm_unreachable( |
2195 | "New atomic operations need to be known in the attributor." ); |
2196 | } |
2197 | |
2198 | return (Ordering != AtomicOrdering::Unordered && |
2199 | Ordering != AtomicOrdering::Monotonic); |
2200 | } |
2201 | |
2202 | /// Return true if this intrinsic is nosync. This is only used for intrinsics |
2203 | /// which would be nosync except that they have a volatile flag. All other |
2204 | /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td. |
2205 | bool AANoSync::isNoSyncIntrinsic(const Instruction *I) { |
2206 | if (auto *MI = dyn_cast<MemIntrinsic>(Val: I)) |
2207 | return !MI->isVolatile(); |
2208 | return false; |
2209 | } |
2210 | |
2211 | namespace { |
2212 | struct AANoSyncImpl : AANoSync { |
2213 | AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {} |
2214 | |
2215 | /// See AbstractAttribute::initialize(...). |
2216 | void initialize(Attributor &A) override { |
2217 | bool IsKnown; |
2218 | assert(!AA::hasAssumedIRAttr<Attribute::NoSync>(A, nullptr, getIRPosition(), |
2219 | DepClassTy::NONE, IsKnown)); |
2220 | (void)IsKnown; |
2221 | } |
2222 | |
2223 | const std::string getAsStr(Attributor *A) const override { |
2224 | return getAssumed() ? "nosync" : "may-sync" ; |
2225 | } |
2226 | |
2227 | /// See AbstractAttribute::updateImpl(...). |
2228 | ChangeStatus updateImpl(Attributor &A) override; |
2229 | }; |
2230 | |
2231 | ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) { |
2232 | |
2233 | auto CheckRWInstForNoSync = [&](Instruction &I) { |
2234 | return AA::isNoSyncInst(A, I, QueryingAA: *this); |
2235 | }; |
2236 | |
2237 | auto CheckForNoSync = [&](Instruction &I) { |
2238 | // At this point we handled all read/write effects and they are all |
2239 | // nosync, so they can be skipped. |
2240 | if (I.mayReadOrWriteMemory()) |
2241 | return true; |
2242 | |
2243 | bool IsKnown; |
2244 | CallBase &CB = cast<CallBase>(Val&: I); |
2245 | if (AA::hasAssumedIRAttr<Attribute::NoSync>( |
2246 | A, QueryingAA: this, IRP: IRPosition::callsite_function(CB), DepClass: DepClassTy::OPTIONAL, |
2247 | IsKnown)) |
2248 | return true; |
2249 | |
2250 | // non-convergent and readnone imply nosync. |
2251 | return !CB.isConvergent(); |
2252 | }; |
2253 | |
2254 | bool UsedAssumedInformation = false; |
2255 | if (!A.checkForAllReadWriteInstructions(Pred: CheckRWInstForNoSync, QueryingAA&: *this, |
2256 | UsedAssumedInformation) || |
2257 | !A.checkForAllCallLikeInstructions(Pred: CheckForNoSync, QueryingAA: *this, |
2258 | UsedAssumedInformation)) |
2259 | return indicatePessimisticFixpoint(); |
2260 | |
2261 | return ChangeStatus::UNCHANGED; |
2262 | } |
2263 | |
2264 | struct AANoSyncFunction final : public AANoSyncImpl { |
2265 | AANoSyncFunction(const IRPosition &IRP, Attributor &A) |
2266 | : AANoSyncImpl(IRP, A) {} |
2267 | |
2268 | /// See AbstractAttribute::trackStatistics() |
2269 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) } |
2270 | }; |
2271 | |
2272 | /// NoSync attribute deduction for a call site.
2273 | struct AANoSyncCallSite final : AACalleeToCallSite<AANoSync, AANoSyncImpl> { |
2274 | AANoSyncCallSite(const IRPosition &IRP, Attributor &A) |
2275 | : AACalleeToCallSite<AANoSync, AANoSyncImpl>(IRP, A) {} |
2276 | |
2277 | /// See AbstractAttribute::trackStatistics() |
2278 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); } |
2279 | }; |
2280 | } // namespace |
2281 | |
2282 | /// ------------------------ No-Free Attributes ---------------------------- |
2283 | |
2284 | namespace { |
2285 | struct AANoFreeImpl : public AANoFree { |
2286 | AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {} |
2287 | |
2288 | /// See AbstractAttribute::initialize(...). |
2289 | void initialize(Attributor &A) override { |
2290 | bool IsKnown; |
2291 | assert(!AA::hasAssumedIRAttr<Attribute::NoFree>(A, nullptr, getIRPosition(), |
2292 | DepClassTy::NONE, IsKnown)); |
2293 | (void)IsKnown; |
2294 | } |
2295 | |
2296 | /// See AbstractAttribute::updateImpl(...). |
2297 | ChangeStatus updateImpl(Attributor &A) override { |
2298 | auto CheckForNoFree = [&](Instruction &I) { |
2299 | bool IsKnown; |
2300 | return AA::hasAssumedIRAttr<Attribute::NoFree>( |
2301 | A, QueryingAA: this, IRP: IRPosition::callsite_function(CB: cast<CallBase>(Val&: I)), |
2302 | DepClass: DepClassTy::REQUIRED, IsKnown); |
2303 | }; |
2304 | |
2305 | bool UsedAssumedInformation = false; |
2306 | if (!A.checkForAllCallLikeInstructions(Pred: CheckForNoFree, QueryingAA: *this, |
2307 | UsedAssumedInformation)) |
2308 | return indicatePessimisticFixpoint(); |
2309 | return ChangeStatus::UNCHANGED; |
2310 | } |
2311 | |
2312 | /// See AbstractAttribute::getAsStr(). |
2313 | const std::string getAsStr(Attributor *A) const override { |
2314 | return getAssumed() ? "nofree" : "may-free" ; |
2315 | } |
2316 | }; |
2317 | |
2318 | struct AANoFreeFunction final : public AANoFreeImpl { |
2319 | AANoFreeFunction(const IRPosition &IRP, Attributor &A) |
2320 | : AANoFreeImpl(IRP, A) {} |
2321 | |
2322 | /// See AbstractAttribute::trackStatistics() |
2323 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) } |
2324 | }; |
2325 | |
2326 | /// NoFree attribute deduction for a call site.
2327 | struct AANoFreeCallSite final : AACalleeToCallSite<AANoFree, AANoFreeImpl> { |
2328 | AANoFreeCallSite(const IRPosition &IRP, Attributor &A) |
2329 | : AACalleeToCallSite<AANoFree, AANoFreeImpl>(IRP, A) {} |
2330 | |
2331 | /// See AbstractAttribute::trackStatistics() |
2332 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); } |
2333 | }; |
2334 | |
2335 | /// NoFree attribute for floating values. |
2336 | struct AANoFreeFloating : AANoFreeImpl { |
2337 | AANoFreeFloating(const IRPosition &IRP, Attributor &A) |
2338 | : AANoFreeImpl(IRP, A) {} |
2339 | |
2340 | /// See AbstractAttribute::trackStatistics() |
2341 | void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)} |
2342 | |
2343 | /// See AbstractAttribute::updateImpl(...).
2344 | ChangeStatus updateImpl(Attributor &A) override { |
2345 | const IRPosition &IRP = getIRPosition(); |
2346 | |
2347 | bool IsKnown; |
2348 | if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, QueryingAA: this, |
2349 | IRP: IRPosition::function_scope(IRP), |
2350 | DepClass: DepClassTy::OPTIONAL, IsKnown)) |
2351 | return ChangeStatus::UNCHANGED; |
2352 | |
2353 | Value &AssociatedValue = getIRPosition().getAssociatedValue(); |
2354 | auto Pred = [&](const Use &U, bool &Follow) -> bool { |
2355 | Instruction *UserI = cast<Instruction>(Val: U.getUser()); |
2356 | if (auto *CB = dyn_cast<CallBase>(Val: UserI)) { |
2357 | if (CB->isBundleOperand(U: &U)) |
2358 | return false; |
2359 | if (!CB->isArgOperand(U: &U)) |
2360 | return true; |
2361 | unsigned ArgNo = CB->getArgOperandNo(U: &U); |
2362 | |
2363 | bool IsKnown; |
2364 | return AA::hasAssumedIRAttr<Attribute::NoFree>( |
2365 | A, QueryingAA: this, IRP: IRPosition::callsite_argument(CB: *CB, ArgNo), |
2366 | DepClass: DepClassTy::REQUIRED, IsKnown); |
2367 | } |
2368 | |
2369 | if (isa<GetElementPtrInst>(Val: UserI) || isa<PHINode>(Val: UserI) || |
2370 | isa<SelectInst>(Val: UserI)) { |
2371 | Follow = true; |
2372 | return true; |
2373 | } |
2374 | if (isa<StoreInst>(Val: UserI) || isa<LoadInst>(Val: UserI)) |
2375 | return true; |
2376 | |
2377 | if (isa<ReturnInst>(Val: UserI) && getIRPosition().isArgumentPosition()) |
2378 | return true; |
2379 | |
2380 | // Unknown user. |
2381 | return false; |
2382 | }; |
2383 | if (!A.checkForAllUses(Pred, QueryingAA: *this, V: AssociatedValue)) |
2384 | return indicatePessimisticFixpoint(); |
2385 | |
2386 | return ChangeStatus::UNCHANGED; |
2387 | } |
2388 | }; |
2389 | |
2390 | /// NoFree attribute for a function argument.
2391 | struct AANoFreeArgument final : AANoFreeFloating { |
2392 | AANoFreeArgument(const IRPosition &IRP, Attributor &A) |
2393 | : AANoFreeFloating(IRP, A) {} |
2394 | |
2395 | /// See AbstractAttribute::trackStatistics() |
2396 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) } |
2397 | }; |
2398 | |
2399 | /// NoFree attribute for a call site argument.
2400 | struct AANoFreeCallSiteArgument final : AANoFreeFloating { |
2401 | AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A) |
2402 | : AANoFreeFloating(IRP, A) {} |
2403 | |
2404 | /// See AbstractAttribute::updateImpl(...). |
2405 | ChangeStatus updateImpl(Attributor &A) override { |
2406 | // TODO: Once we have call site specific value information we can provide
2407 | // call site specific liveness information and then it makes
2408 | // sense to specialize attributes for call site arguments instead of
2409 | // redirecting requests to the callee argument.
2410 | Argument *Arg = getAssociatedArgument(); |
2411 | if (!Arg) |
2412 | return indicatePessimisticFixpoint(); |
2413 | const IRPosition &ArgPos = IRPosition::argument(Arg: *Arg); |
2414 | bool IsKnown; |
2415 | if (AA::hasAssumedIRAttr<Attribute::NoFree>(A, QueryingAA: this, IRP: ArgPos, |
2416 | DepClass: DepClassTy::REQUIRED, IsKnown)) |
2417 | return ChangeStatus::UNCHANGED; |
2418 | return indicatePessimisticFixpoint(); |
2419 | } |
2420 | |
2421 | /// See AbstractAttribute::trackStatistics() |
2422 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }; |
2423 | }; |
2424 | |
2425 | /// NoFree attribute for function return value. |
2426 | struct AANoFreeReturned final : AANoFreeFloating { |
2427 | AANoFreeReturned(const IRPosition &IRP, Attributor &A) |
2428 | : AANoFreeFloating(IRP, A) { |
2429 | llvm_unreachable("NoFree is not applicable to function returns!" ); |
2430 | } |
2431 | |
2432 | /// See AbstractAttribute::initialize(...). |
2433 | void initialize(Attributor &A) override { |
2434 | llvm_unreachable("NoFree is not applicable to function returns!" ); |
2435 | } |
2436 | |
2437 | /// See AbstractAttribute::updateImpl(...). |
2438 | ChangeStatus updateImpl(Attributor &A) override { |
2439 | llvm_unreachable("NoFree is not applicable to function returns!" ); |
2440 | } |
2441 | |
2442 | /// See AbstractAttribute::trackStatistics() |
2443 | void trackStatistics() const override {} |
2444 | }; |
2445 | |
2446 | /// NoFree attribute deduction for a call site return value. |
2447 | struct AANoFreeCallSiteReturned final : AANoFreeFloating { |
2448 | AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) |
2449 | : AANoFreeFloating(IRP, A) {} |
2450 | |
2451 | ChangeStatus manifest(Attributor &A) override { |
2452 | return ChangeStatus::UNCHANGED; |
2453 | } |
2454 | /// See AbstractAttribute::trackStatistics() |
2455 | void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } |
2456 | }; |
2457 | } // namespace |
2458 | |
2459 | /// ------------------------ NonNull Argument Attribute ------------------------ |
2460 | |
2461 | bool AANonNull::isImpliedByIR(Attributor &A, const IRPosition &IRP, |
2462 | Attribute::AttrKind ImpliedAttributeKind, |
2463 | bool IgnoreSubsumingPositions) { |
2464 | SmallVector<Attribute::AttrKind, 2> AttrKinds; |
2465 | AttrKinds.push_back(Elt: Attribute::NonNull); |
2466 | if (!NullPointerIsDefined(F: IRP.getAnchorScope(), |
2467 | AS: IRP.getAssociatedType()->getPointerAddressSpace())) |
2468 | AttrKinds.push_back(Elt: Attribute::Dereferenceable); |
2469 | if (A.hasAttr(IRP, AKs: AttrKinds, IgnoreSubsumingPositions, ImpliedAttributeKind: Attribute::NonNull)) |
2470 | return true; |
2471 | |
2472 | DominatorTree *DT = nullptr; |
2473 | AssumptionCache *AC = nullptr; |
2474 | InformationCache &InfoCache = A.getInfoCache(); |
2475 | if (const Function *Fn = IRP.getAnchorScope()) { |
2476 | if (!Fn->isDeclaration()) { |
2477 | DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(F: *Fn); |
2478 | AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(F: *Fn); |
2479 | } |
2480 | } |
2481 | |
2482 | SmallVector<AA::ValueAndContext> Worklist; |
2483 | if (IRP.getPositionKind() != IRP_RETURNED) { |
2484 | Worklist.push_back(Elt: {IRP.getAssociatedValue(), IRP.getCtxI()}); |
2485 | } else { |
2486 | bool UsedAssumedInformation = false; |
2487 | if (!A.checkForAllInstructions( |
2488 | Pred: [&](Instruction &I) { |
2489 | Worklist.push_back(Elt: {*cast<ReturnInst>(Val&: I).getReturnValue(), &I}); |
2490 | return true; |
2491 | }, |
2492 | Fn: IRP.getAssociatedFunction(), QueryingAA: nullptr, Opcodes: {Instruction::Ret}, |
2493 | UsedAssumedInformation, CheckBBLivenessOnly: false, /*CheckPotentiallyDead=*/true)) |
2494 | return false; |
2495 | } |
2496 | |
2497 | if (llvm::any_of(Range&: Worklist, P: [&](AA::ValueAndContext VAC) { |
2498 | return !isKnownNonZero( |
2499 | V: VAC.getValue(), |
2500 | Q: SimplifyQuery(A.getDataLayout(), DT, AC, VAC.getCtxI())); |
2501 | })) |
2502 | return false; |
2503 | |
2504 | A.manifestAttrs(IRP, DeducedAttrs: {Attribute::get(Context&: IRP.getAnchorValue().getContext(), |
2505 | Kind: Attribute::NonNull)}); |
2506 | return true; |
2507 | } |
2508 | |
2509 | namespace { |
2510 | static int64_t getKnownNonNullAndDerefBytesForUse( |
2511 | Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, |
2512 | const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { |
2513 | TrackUse = false; |
2514 | |
2515 | const Value *UseV = U->get(); |
2516 | if (!UseV->getType()->isPointerTy()) |
2517 | return 0; |
2518 | |
2519 | // We need to follow common pointer manipulation uses to the accesses they
2520 | // feed into. We could try to be smarter and avoid looking through things
2521 | // we do not like, e.g., non-inbounds GEPs, but for now we track them all.
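     | //
     | // For example (hypothetical IR), given
     | //
     | //   %g = getelementptr inbounds i32, ptr %p, i64 1
     | //   %v = load i32, ptr %g
     | //
     | // following the use through the GEP lets us deduce 8 dereferenceable
     | // bytes for %p (and nonnull, where null is not a defined pointer).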
2522 | if (isa<CastInst>(Val: I)) { |
2523 | TrackUse = true; |
2524 | return 0; |
2525 | } |
2526 | |
2527 | if (isa<GetElementPtrInst>(Val: I)) { |
2528 | TrackUse = true; |
2529 | return 0; |
2530 | } |
2531 | |
2532 | Type *PtrTy = UseV->getType(); |
2533 | const Function *F = I->getFunction(); |
2534 | bool NullPointerIsDefined = |
2535 | F ? llvm::NullPointerIsDefined(F, AS: PtrTy->getPointerAddressSpace()) : true; |
2536 | const DataLayout &DL = A.getInfoCache().getDL(); |
2537 | if (const auto *CB = dyn_cast<CallBase>(Val: I)) { |
2538 | if (CB->isBundleOperand(U)) { |
2539 | if (RetainedKnowledge RK = getKnowledgeFromUse( |
2540 | U, AttrKinds: {Attribute::NonNull, Attribute::Dereferenceable})) { |
2541 | IsNonNull |= |
2542 | (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); |
2543 | return RK.ArgValue; |
2544 | } |
2545 | return 0; |
2546 | } |
2547 | |
2548 | if (CB->isCallee(U)) { |
2549 | IsNonNull |= !NullPointerIsDefined; |
2550 | return 0; |
2551 | } |
2552 | |
2553 | unsigned ArgNo = CB->getArgOperandNo(U); |
2554 | IRPosition IRP = IRPosition::callsite_argument(CB: *CB, ArgNo); |
2555 | // As long as we only use known information there is no need to track |
2556 | // dependences here. |
2557 | bool IsKnownNonNull; |
2558 | AA::hasAssumedIRAttr<Attribute::NonNull>(A, QueryingAA: &QueryingAA, IRP, |
2559 | DepClass: DepClassTy::NONE, IsKnown&: IsKnownNonNull); |
2560 | IsNonNull |= IsKnownNonNull; |
2561 | auto *DerefAA = |
2562 | A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClass: DepClassTy::NONE); |
2563 | return DerefAA ? DerefAA->getKnownDereferenceableBytes() : 0; |
2564 | } |
2565 | |
2566 | std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(Inst: I); |
2567 | if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || |
2568 | Loc->Size.isScalable() || I->isVolatile()) |
2569 | return 0; |
2570 | |
2571 | int64_t Offset; |
2572 | const Value *Base = |
2573 | getMinimalBaseOfPointer(A, QueryingAA, Ptr: Loc->Ptr, BytesOffset&: Offset, DL); |
2574 | if (Base && Base == &AssociatedValue) { |
2575 | int64_t DerefBytes = Loc->Size.getValue() + Offset; |
2576 | IsNonNull |= !NullPointerIsDefined; |
2577 | return std::max(a: int64_t(0), b: DerefBytes); |
2578 | } |
2579 | |
2580 | // Corner case when an offset is 0.
2581 | Base = GetPointerBaseWithConstantOffset(Ptr: Loc->Ptr, Offset, DL, |
2582 | /*AllowNonInbounds*/ true); |
2583 | if (Base && Base == &AssociatedValue && Offset == 0) { |
2584 | int64_t DerefBytes = Loc->Size.getValue(); |
2585 | IsNonNull |= !NullPointerIsDefined; |
2586 | return std::max(a: int64_t(0), b: DerefBytes); |
2587 | } |
2588 | |
2589 | return 0; |
2590 | } |
2591 | |
2592 | struct AANonNullImpl : AANonNull { |
2593 | AANonNullImpl(const IRPosition &IRP, Attributor &A) : AANonNull(IRP, A) {} |
2594 | |
2595 | /// See AbstractAttribute::initialize(...). |
2596 | void initialize(Attributor &A) override { |
2597 | Value &V = *getAssociatedValue().stripPointerCasts(); |
2598 | if (isa<ConstantPointerNull>(Val: V)) { |
2599 | indicatePessimisticFixpoint(); |
2600 | return; |
2601 | } |
2602 | |
2603 | if (Instruction *CtxI = getCtxI()) |
2604 | followUsesInMBEC(AA&: *this, A, S&: getState(), CtxI&: *CtxI); |
2605 | } |
2606 | |
2607 | /// See followUsesInMBEC |
2608 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
2609 | AANonNull::StateType &State) { |
2610 | bool IsNonNull = false; |
2611 | bool TrackUse = false; |
2612 | getKnownNonNullAndDerefBytesForUse(A, QueryingAA: *this, AssociatedValue&: getAssociatedValue(), U, I, |
2613 | IsNonNull, TrackUse); |
2614 | State.setKnown(IsNonNull); |
2615 | return TrackUse; |
2616 | } |
2617 | |
2618 | /// See AbstractAttribute::getAsStr(). |
2619 | const std::string getAsStr(Attributor *A) const override { |
2620 | return getAssumed() ? "nonnull" : "may-null" ; |
2621 | } |
2622 | }; |
2623 | |
2624 | /// NonNull attribute for a floating value. |
2625 | struct AANonNullFloating : public AANonNullImpl { |
2626 | AANonNullFloating(const IRPosition &IRP, Attributor &A) |
2627 | : AANonNullImpl(IRP, A) {} |
2628 | |
2629 | /// See AbstractAttribute::updateImpl(...). |
2630 | ChangeStatus updateImpl(Attributor &A) override { |
2631 | auto CheckIRP = [&](const IRPosition &IRP) { |
2632 | bool IsKnownNonNull; |
2633 | return AA::hasAssumedIRAttr<Attribute::NonNull>( |
2634 | A, QueryingAA: *this, IRP, DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNonNull); |
2635 | }; |
2636 | |
2637 | bool Stripped; |
2638 | bool UsedAssumedInformation = false; |
2639 | Value *AssociatedValue = &getAssociatedValue(); |
2640 | SmallVector<AA::ValueAndContext> Values; |
2641 | if (!A.getAssumedSimplifiedValues(IRP: getIRPosition(), AA: *this, Values, |
2642 | S: AA::AnyScope, UsedAssumedInformation)) |
2643 | Stripped = false; |
2644 | else |
2645 | Stripped = |
2646 | Values.size() != 1 || Values.front().getValue() != AssociatedValue; |
2647 | |
2648 | if (!Stripped) { |
2649 | bool IsKnown; |
2650 | if (auto *PHI = dyn_cast<PHINode>(Val: AssociatedValue)) |
2651 | if (llvm::all_of(Range: PHI->incoming_values(), P: [&](Value *Op) { |
2652 | return AA::hasAssumedIRAttr<Attribute::NonNull>( |
2653 | A, QueryingAA: this, IRP: IRPosition::value(V: *Op), DepClass: DepClassTy::OPTIONAL, |
2654 | IsKnown); |
2655 | })) |
2656 | return ChangeStatus::UNCHANGED; |
2657 | if (auto *Select = dyn_cast<SelectInst>(Val: AssociatedValue)) |
2658 | if (AA::hasAssumedIRAttr<Attribute::NonNull>( |
2659 | A, QueryingAA: this, IRP: IRPosition::value(V: *Select->getFalseValue()), |
2660 | DepClass: DepClassTy::OPTIONAL, IsKnown) && |
2661 | AA::hasAssumedIRAttr<Attribute::NonNull>( |
2662 | A, QueryingAA: this, IRP: IRPosition::value(V: *Select->getTrueValue()), |
2663 | DepClass: DepClassTy::OPTIONAL, IsKnown)) |
2664 | return ChangeStatus::UNCHANGED; |
2665 | |
2666 | // If we haven't stripped anything we might still be able to use a
2667 | // different AA, but only if the IRP changes. Effectively, we then
2668 | // interpret this not as a call site value but as a floating/argument
2669 | // value.
2670 | const IRPosition AVIRP = IRPosition::value(V: *AssociatedValue); |
2671 | if (AVIRP == getIRPosition() || !CheckIRP(AVIRP)) |
2672 | return indicatePessimisticFixpoint(); |
2673 | return ChangeStatus::UNCHANGED; |
2674 | } |
2675 | |
2676 | for (const auto &VAC : Values) |
2677 | if (!CheckIRP(IRPosition::value(V: *VAC.getValue()))) |
2678 | return indicatePessimisticFixpoint(); |
2679 | |
2680 | return ChangeStatus::UNCHANGED; |
2681 | } |
2682 | |
2683 | /// See AbstractAttribute::trackStatistics() |
2684 | void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nonnull) }
2685 | }; |

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNull, AANonNull::StateType,
                                   false, AANonNull::IRAttributeKind, false> {
  AANonNullReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANonNull, AANonNull,
                                     AANonNull::StateType, false,
                                     Attribute::NonNull, false>(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr(Attributor *A) const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACalleeToCallSite<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACalleeToCallSite<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
} // namespace
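
// Illustrative example (not part of the pass logic): given IR such as
//
//   define ptr @callee() {
//     %a = alloca i32
//     ret ptr %a
//   }
//   define ptr @caller() {
//     %p = call ptr @callee()
//     ret ptr %p
//   }
//
// AANonNullReturned can deduce `nonnull` for @callee's return value (an
// alloca in the default address space is never null), AANonNullCallSiteReturned
// transfers that fact to the call site in @caller, and the deduction then
// feeds @caller's own returned position.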

/// ------------------------ Must-Progress Attributes --------------------------
namespace {
struct AAMustProgressImpl : public AAMustProgress {
  AAMustProgressImpl(const IRPosition &IRP, Attributor &A)
      : AAMustProgress(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    bool IsKnown;
    assert(!AA::hasAssumedIRAttr<Attribute::MustProgress>(
        A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
    (void)IsKnown;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr(Attributor *A) const override {
    return getAssumed() ? "mustprogress" : "may-not-progress";
  }
};

struct AAMustProgressFunction final : AAMustProgressImpl {
  AAMustProgressFunction(const IRPosition &IRP, Attributor &A)
      : AAMustProgressImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool IsKnown;
    if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
            A, this, getIRPosition(), DepClassTy::OPTIONAL, IsKnown)) {
      if (IsKnown)
        return indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    auto CheckForMustProgress = [&](AbstractCallSite ACS) {
      IRPosition IPos = IRPosition::callsite_function(*ACS.getInstruction());
      bool IsKnownMustProgress;
      return AA::hasAssumedIRAttr<Attribute::MustProgress>(
          A, this, IPos, DepClassTy::REQUIRED, IsKnownMustProgress,
          /* IgnoreSubsumingPositions */ true);
    };

    bool AllCallSitesKnown = true;
    if (!A.checkForAllCallSites(CheckForMustProgress, *this,
                                /* RequireAllCallSites */ true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(mustprogress)
  }
};

/// MustProgress attribute deduction for a call site.
struct AAMustProgressCallSite final : AAMustProgressImpl {
  AAMustProgressCallSite(const IRPosition &IRP, Attributor &A)
      : AAMustProgressImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    bool IsKnownMustProgress;
    if (!AA::hasAssumedIRAttr<Attribute::MustProgress>(
            A, this, FnPos, DepClassTy::REQUIRED, IsKnownMustProgress))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(mustprogress);
  }
};
} // namespace
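
// Illustrative example (not part of the pass logic): in
//
//   define void @caller() mustprogress {
//     call void @callee()
//     ret void
//   }
//
// AAMustProgressCallSite derives `mustprogress` for the call site from its
// surrounding function @caller, and AAMustProgressFunction can in turn keep
// the attribute on @callee once all of its call sites are known and each one
// sits in such a must-progress context.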

/// ------------------------ No-Recurse Attributes ----------------------------

namespace {
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    bool IsKnown;
    assert(!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
        A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
    (void)IsKnown;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr(Attributor *A) const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};

struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
    auto CallSitePred = [&](AbstractCallSite ACS) {
      bool IsKnownNoRecurse;
      if (!AA::hasAssumedIRAttr<Attribute::NoRecurse>(
              A, this,
              IRPosition::function(*ACS.getInstruction()->getFunction()),
              DepClassTy::NONE, IsKnownNoRecurse))
        return false;
      return IsKnownNoRecurse;
    };
    bool UsedAssumedInformation = false;
    if (A.checkForAllCallSites(CallSitePred, *this, true,
                               UsedAssumedInformation)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    const AAInterFnReachability *EdgeReachability =
        A.getAAFor<AAInterFnReachability>(*this, getIRPosition(),
                                          DepClassTy::REQUIRED);
    if (EdgeReachability && EdgeReachability->canReach(A, *getAnchorScope()))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};

/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final
    : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl> {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AACalleeToCallSite<AANoRecurse, AANoRecurseImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
} // namespace
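
// Illustrative example (not part of the pass logic): a function whose every
// caller is `norecurse` cannot take part in recursion, e.g.
//
//   define void @leaf() {            ; deduced norecurse
//     ret void
//   }
//   define void @main() norecurse {
//     call void @leaf()
//     ret void
//   }
//
// If the call-site check fails, AANoRecurseFunction falls back to
// AAInterFnReachability and can still deduce `norecurse` when the function
// cannot reach itself through any call chain.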

/// ------------------------ No-Convergent Attribute --------------------------

namespace {
struct AANonConvergentImpl : public AANonConvergent {
  AANonConvergentImpl(const IRPosition &IRP, Attributor &A)
      : AANonConvergent(IRP, A) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr(Attributor *A) const override {
    return getAssumed() ? "non-convergent" : "may-be-convergent";
  }
};

struct AANonConvergentFunction final : AANonConvergentImpl {
  AANonConvergentFunction(const IRPosition &IRP, Attributor &A)
      : AANonConvergentImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // If all function calls are known to not be convergent, we are not
    // convergent.
    auto CalleeIsNotConvergent = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);
      auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
      if (!Callee || Callee->isIntrinsic()) {
        return false;
      }
      if (Callee->isDeclaration()) {
        return !Callee->hasFnAttribute(Attribute::Convergent);
      }
      const auto *ConvergentAA = A.getAAFor<AANonConvergent>(
          *this, IRPosition::function(*Callee), DepClassTy::REQUIRED);
      return ConvergentAA && ConvergentAA->isAssumedNotConvergent();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CalleeIsNotConvergent, *this,
                                           UsedAssumedInformation)) {
      return indicatePessimisticFixpoint();
    }
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (isKnownNotConvergent() &&
        A.hasAttr(getIRPosition(), Attribute::Convergent)) {
      A.removeAttrs(getIRPosition(), {Attribute::Convergent});
      return ChangeStatus::CHANGED;
    }
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(convergent) }
};
} // namespace

/// -------------------- Undefined-Behavior Attributes ------------------------

namespace {
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // LangRef now states that volatile stores are not UB, so let's skip
      // them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // which getPointerOperand() should give us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      std::optional<Value *> SimplifiedPtrOp =
          stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp || !*SimplifiedPtrOp)
        return true;
      const Value *PtrOpVal = *SimplifiedPtrOp;

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      std::optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond || !*SimplifiedCond)
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (we can replace the value
        //       with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to null pointer where known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        bool IsKnownNoUndef;
        AA::hasAssumedIRAttr<Attribute::NoUndef>(
            A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNoUndef);
        if (!IsKnownNoUndef)
          continue;
        bool UsedAssumedInformation = false;
        std::optional<Value *> SimplifiedVal =
            A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
                                   UsedAssumedInformation, AA::Interprocedural);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal && !*SimplifiedVal)
          return true;
        if (!SimplifiedVal || isa<UndefValue>(**SimplifiedVal)) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(**SimplifiedVal))
          continue;
        bool IsKnownNonNull;
        AA::hasAssumedIRAttr<Attribute::NonNull>(
            A, this, CalleeArgumentIRP, DepClassTy::NONE, IsKnownNonNull);
        if (IsKnownNonNull)
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified return value to continue.
      std::optional<Value *> SimplifiedRetValue =
          stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
      if (!SimplifiedRetValue || !*SimplifiedRetValue)
        return true;

      // Check if a return instruction always causes UB.
      // Note: It is guaranteed that the returned position of the anchor
      //       scope has the noundef attribute when this is called.
      //       We also ensure the return position is not "assumed dead"
      //       because the returned value was then potentially simplified to
      //       `undef` in AAReturnedValues without removing the `noundef`
      //       attribute yet.

      // When the returned position has the noundef attribute, UB occurs in
      // the following cases.
      //   (1) Returned value is known to be undef.
      //   (2) The value is known to be a null pointer and the returned
      //       position has the nonnull attribute (because the returned value
      //       is poison).
      if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
        bool IsKnownNonNull;
        AA::hasAssumedIRAttr<Attribute::NonNull>(
            A, this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE,
            IsKnownNonNull);
        if (IsKnownNonNull)
          KnownUBInsts.insert(&I);
      }

      return true;
    };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        bool IsKnownNoUndef;
        AA::hasAssumedIRAttr<Attribute::NoUndef>(
            A, this, ReturnIRP, DepClassTy::NONE, IsKnownNoUndef);
        if (IsKnownNoUndef)
          A.checkForAllInstructions(InspectReturnInstForUB, *this,
                                    {Instruction::Ret}, UsedAssumedInformation,
                                    /* CheckBBLivenessOnly */ true);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest of the boilerplate
    // is to ensure that it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto *BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    } break;
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr(Attributor *A) const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called on updates in which, if we're processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return std::nullopt in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  std::optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                              Instruction *I) {
    bool UsedAssumedInformation = false;
    std::optional<Value *> SimplifiedV =
        A.getAssumedSimplified(IRPosition::value(*V), *this,
                               UsedAssumedInformation, AA::Interprocedural);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return std::nullopt;
      }
      if (!*SimplifiedV)
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return std::nullopt;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
} // namespace
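
// Illustrative example (not part of the pass logic): in
//
//   define void @f() {
//     store i32 0, ptr null
//     ret void
//   }
//
// the store through a constant null pointer is known UB on targets where
// null is not a defined address (see llvm::NullPointerIsDefined), so the
// store ends up in KnownUBInsts and manifest() turns that program point into
// `unreachable` via changeToUnreachableAfterManifest().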

/// ------------------------ Will-Return Attributes ----------------------------

namespace {
// Helper function that checks whether a function has any cycle which we don't
// know if it is bounded or not.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we
  // assume any cycle to be unbounded.
  // We use scc_iterator which uses Tarjan's algorithm to find all the maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd();
         ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}
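
// Illustrative example (not part of the pass logic): for
//
//   define void @bounded() {
//   entry:
//     br label %loop
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %inc, %loop ]
//     %inc = add i32 %i, 1
//     %cmp = icmp ult i32 %inc, 16
//     br i1 %cmp, label %loop, label %exit
//   exit:
//     ret void
//   }
//
// ScalarEvolution reports a constant maximum trip count for %loop, so
// mayContainUnboundedCycle returns false. A loop bounded only by a value
// loaded from memory would have no known maximum trip count and would make
// the `willreturn` deduction below give up.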

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    bool IsKnown;
    assert(!AA::hasAssumedIRAttr<Attribute::WillReturn>(
        A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown));
    (void)IsKnown;
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
    if (!A.hasAttr(getIRPosition(), {Attribute::MustProgress}))
      return false;

    bool IsKnown;
    if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
      return IsKnown || !KnownOnly;
    return false;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      bool IsKnown;
      if (AA::hasAssumedIRAttr<Attribute::WillReturn>(
              A, this, IPos, DepClassTy::REQUIRED, IsKnown)) {
        if (IsKnown)
          return true;
      } else {
        return false;
      }
      bool IsKnownNoRecurse;
      return AA::hasAssumedIRAttr<Attribute::NoRecurse>(
          A, this, IPos, DepClassTy::REQUIRED, IsKnownNoRecurse);
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr(Attributor *A) const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);

    Function *F = getAnchorScope();
    assert(F && "Expected an anchor function");
    if (F->isDeclaration() || mayContainUnboundedCycle(*F, A))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final
    : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl> {
  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AACalleeToCallSite<AAWillReturn, AAWillReturnImpl>(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    return AACalleeToCallSite::updateImpl(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};
} // namespace
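
// Illustrative example (not part of the pass logic): a function such as
//
//   define i32 @get(ptr %p) mustprogress memory(read) {
//     %v = load i32, ptr %p
//     ret i32 %v
//   }
//
// is `willreturn` by isImpliedByMustprogressAndReadonly: a must-progress
// function that only reads memory has no way to make forward progress other
// than to eventually return.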

/// -------------------AAIntraFnReachability Attribute--------------------------

/// All information associated with a reachability query. This boilerplate code
/// is used by both AAIntraFnReachability and AAInterFnReachability, with
/// different \p ToTy values.
template <typename ToTy> struct ReachabilityQueryInfo {
  enum class Reachable {
    No,
    Yes,
  };

  /// Start here,
  const Instruction *From = nullptr;
  /// reach this place,
  const ToTy *To = nullptr;
  /// without going through any of these instructions,
  const AA::InstExclusionSetTy *ExclusionSet = nullptr;
  /// and remember if it worked:
  Reachable Result = Reachable::No;

  /// Precomputed hash for this RQI.
  unsigned Hash = 0;

  unsigned computeHashValue() const {
    assert(Hash == 0 && "Computed hash twice!");
    using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
    using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;
    return const_cast<ReachabilityQueryInfo<ToTy> *>(this)->Hash =
               detail::combineHashValue(PairDMI::getHashValue({From, To}),
                                        InstSetDMI::getHashValue(ExclusionSet));
  }

  ReachabilityQueryInfo(const Instruction *From, const ToTy *To)
      : From(From), To(To) {}

  /// Constructor replacement to ensure unique and stable sets are used for the
  /// cache.
  ReachabilityQueryInfo(Attributor &A, const Instruction &From, const ToTy &To,
                        const AA::InstExclusionSetTy *ES, bool MakeUnique)
      : From(&From), To(&To), ExclusionSet(ES) {

    if (!ES || ES->empty()) {
      ExclusionSet = nullptr;
    } else if (MakeUnique) {
      ExclusionSet = A.getInfoCache().getOrCreateUniqueBlockExecutionSet(ES);
    }
  }

  ReachabilityQueryInfo(const ReachabilityQueryInfo &RQI)
      : From(RQI.From), To(RQI.To), ExclusionSet(RQI.ExclusionSet) {}
};

namespace llvm {
template <typename ToTy> struct DenseMapInfo<ReachabilityQueryInfo<ToTy> *> {
  using InstSetDMI = DenseMapInfo<const AA::InstExclusionSetTy *>;
  using PairDMI = DenseMapInfo<std::pair<const Instruction *, const ToTy *>>;

  static ReachabilityQueryInfo<ToTy> EmptyKey;
  static ReachabilityQueryInfo<ToTy> TombstoneKey;

  static inline ReachabilityQueryInfo<ToTy> *getEmptyKey() { return &EmptyKey; }
  static inline ReachabilityQueryInfo<ToTy> *getTombstoneKey() {
    return &TombstoneKey;
  }
  static unsigned getHashValue(const ReachabilityQueryInfo<ToTy> *RQI) {
    return RQI->Hash ? RQI->Hash : RQI->computeHashValue();
  }
  static bool isEqual(const ReachabilityQueryInfo<ToTy> *LHS,
                      const ReachabilityQueryInfo<ToTy> *RHS) {
    if (!PairDMI::isEqual({LHS->From, LHS->To}, {RHS->From, RHS->To}))
      return false;
    return InstSetDMI::isEqual(LHS->ExclusionSet, RHS->ExclusionSet);
  }
};

#define DefineKeys(ToTy)                                                       \
  template <>                                                                  \
  ReachabilityQueryInfo<ToTy>                                                  \
      DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::EmptyKey =                  \
          ReachabilityQueryInfo<ToTy>(                                         \
              DenseMapInfo<const Instruction *>::getEmptyKey(),                \
              DenseMapInfo<const ToTy *>::getEmptyKey());                      \
  template <>                                                                  \
  ReachabilityQueryInfo<ToTy>                                                  \
      DenseMapInfo<ReachabilityQueryInfo<ToTy> *>::TombstoneKey =              \
          ReachabilityQueryInfo<ToTy>(                                         \
              DenseMapInfo<const Instruction *>::getTombstoneKey(),            \
              DenseMapInfo<const ToTy *>::getTombstoneKey());

DefineKeys(Instruction) DefineKeys(Function)
#undef DefineKeys

} // namespace llvm
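
// A minimal sketch of the cache-key semantics above (FromI/ToI are
// hypothetical instruction pointers, for illustration only): two RQIs with
// identical From/To and DenseMapInfo-equal exclusion sets hash and compare
// equal, so one cached entry can answer both queries:
//
//   ReachabilityQueryInfo<Instruction> RQIA(FromI, ToI); // no exclusion set
//   ReachabilityQueryInfo<Instruction> RQIB(FromI, ToI);
//   using DMI = DenseMapInfo<ReachabilityQueryInfo<Instruction> *>;
//   assert(DMI::getHashValue(&RQIA) == DMI::getHashValue(&RQIB));
//   assert(DMI::isEqual(&RQIA, &RQIB));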

namespace {

template <typename BaseTy, typename ToTy>
struct CachedReachabilityAA : public BaseTy {
  using RQITy = ReachabilityQueryInfo<ToTy>;

  CachedReachabilityAA(const IRPosition &IRP, Attributor &A) : BaseTy(IRP, A) {}

  /// See AbstractAttribute::isQueryAA.
  bool isQueryAA() const override { return true; }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (unsigned u = 0, e = QueryVector.size(); u < e; ++u) {
      RQITy *RQI = QueryVector[u];
      if (RQI->Result == RQITy::Reachable::No &&
          isReachableImpl(A, *RQI, /*IsTemporaryRQI=*/false))
        Changed = ChangeStatus::CHANGED;
    }
    return Changed;
  }

  virtual bool isReachableImpl(Attributor &A, RQITy &RQI,
                               bool IsTemporaryRQI) = 0;

  bool rememberResult(Attributor &A, typename RQITy::Reachable Result,
                      RQITy &RQI, bool UsedExclusionSet, bool IsTemporaryRQI) {
    RQI.Result = Result;

    // Remove the temporary RQI from the cache.
    if (IsTemporaryRQI)
      QueryCache.erase(&RQI);

    // Insert a plain RQI (w/o exclusion set) if that makes sense. Two options:
    // 1) If it is reachable, it doesn't matter if we have an exclusion set for
    //    this query. 2) We did not use the exclusion set, potentially because
    //    there is none.
    if (Result == RQITy::Reachable::Yes || !UsedExclusionSet) {
      RQITy PlainRQI(RQI.From, RQI.To);
      if (!QueryCache.count(&PlainRQI)) {
        RQITy *RQIPtr = new (A.Allocator) RQITy(RQI.From, RQI.To);
        RQIPtr->Result = Result;
        QueryVector.push_back(RQIPtr);
        QueryCache.insert(RQIPtr);
      }
    }

    // Check if we need to insert a new permanent RQI with the exclusion set.
    if (IsTemporaryRQI && Result != RQITy::Reachable::Yes && UsedExclusionSet) {
      assert((!RQI.ExclusionSet || !RQI.ExclusionSet->empty()) &&
             "Did not expect empty set!");
      RQITy *RQIPtr = new (A.Allocator)
          RQITy(A, *RQI.From, *RQI.To, RQI.ExclusionSet, true);
      assert(RQIPtr->Result == RQITy::Reachable::No && "Already reachable?");
      RQIPtr->Result = Result;
      assert(!QueryCache.count(RQIPtr));
      QueryVector.push_back(RQIPtr);
      QueryCache.insert(RQIPtr);
    }

    if (Result == RQITy::Reachable::No && IsTemporaryRQI)
      A.registerForUpdate(*this);
    return Result == RQITy::Reachable::Yes;
  }

  const std::string getAsStr(Attributor *A) const override {
    // TODO: Return the number of reachable queries.
    return "#queries(" + std::to_string(QueryVector.size()) + ")";
  }

  bool checkQueryCache(Attributor &A, RQITy &StackRQI,
                       typename RQITy::Reachable &Result) {
    if (!this->getState().isValidState()) {
      Result = RQITy::Reachable::Yes;
      return true;
    }

    // If we have an exclusion set we might be able to find our answer by
    // ignoring it first.
    if (StackRQI.ExclusionSet) {
      RQITy PlainRQI(StackRQI.From, StackRQI.To);
      auto It = QueryCache.find(&PlainRQI);
      if (It != QueryCache.end() && (*It)->Result == RQITy::Reachable::No) {
        Result = RQITy::Reachable::No;
        return true;
      }
    }

    auto It = QueryCache.find(&StackRQI);
    if (It != QueryCache.end()) {
      Result = (*It)->Result;
      return true;
    }

    // Insert a temporary for recursive queries. We will replace it with a
    // permanent entry later.
    QueryCache.insert(&StackRQI);
    return false;
  }

private:
  SmallVector<RQITy *> QueryVector;
  DenseSet<RQITy *> QueryCache;
};

struct AAIntraFnReachabilityFunction final
    : public CachedReachabilityAA<AAIntraFnReachability, Instruction> {
  using Base = CachedReachabilityAA<AAIntraFnReachability, Instruction>;
  AAIntraFnReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {
    DT = A.getInfoCache().getAnalysisResultForFunction<DominatorTreeAnalysis>(
        *IRP.getAssociatedFunction());
  }

  bool isAssumedReachable(
      Attributor &A, const Instruction &From, const Instruction &To,
      const AA::InstExclusionSetTy *ExclusionSet) const override {
    auto *NonConstThis = const_cast<AAIntraFnReachabilityFunction *>(this);
    if (&From == &To)
      return true;

    RQITy StackRQI(A, From, To, ExclusionSet, false);
    typename RQITy::Reachable Result;
    if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
      return NonConstThis->isReachableImpl(A, StackRQI,
                                           /*IsTemporaryRQI=*/true);
    return Result == RQITy::Reachable::Yes;
  }

  ChangeStatus updateImpl(Attributor &A) override {
    // We only depend on liveness. DeadEdges is all we care about, check if any
    // of them changed.
    auto *LivenessAA =
        A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
    if (LivenessAA &&
        llvm::all_of(DeadEdges,
                     [&](const auto &DeadEdge) {
                       return LivenessAA->isEdgeDead(DeadEdge.first,
                                                     DeadEdge.second);
                     }) &&
        llvm::all_of(DeadBlocks, [&](const BasicBlock *BB) {
          return LivenessAA->isAssumedDead(BB);
        })) {
      return ChangeStatus::UNCHANGED;
    }
    DeadEdges.clear();
    DeadBlocks.clear();
    return Base::updateImpl(A);
  }

  bool isReachableImpl(Attributor &A, RQITy &RQI,
                       bool IsTemporaryRQI) override {
    const Instruction *Origin = RQI.From;
    bool UsedExclusionSet = false;

    auto WillReachInBlock = [&](const Instruction &From, const Instruction &To,
                                const AA::InstExclusionSetTy *ExclusionSet) {
      const Instruction *IP = &From;
      while (IP && IP != &To) {
        if (ExclusionSet && IP != Origin && ExclusionSet->count(IP)) {
          UsedExclusionSet = true;
          break;
        }
        IP = IP->getNextNode();
      }
      return IP == &To;
    };

    const BasicBlock *FromBB = RQI.From->getParent();
    const BasicBlock *ToBB = RQI.To->getParent();
    assert(FromBB->getParent() == ToBB->getParent() &&
           "Not an intra-procedural query!");

    // Check intra-block reachability, however, other reaching paths are still
    // possible.
    if (FromBB == ToBB &&
        WillReachInBlock(*RQI.From, *RQI.To, RQI.ExclusionSet))
      return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
                            IsTemporaryRQI);

    // Check if reaching the ToBB block is sufficient or if even that would not
    // ensure reaching the target. In the latter case we are done.
    if (!WillReachInBlock(ToBB->front(), *RQI.To, RQI.ExclusionSet))
      return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
                            IsTemporaryRQI);

    const Function *Fn = FromBB->getParent();
    SmallPtrSet<const BasicBlock *, 16> ExclusionBlocks;
    if (RQI.ExclusionSet)
      for (auto *I : *RQI.ExclusionSet)
        if (I->getFunction() == Fn)
          ExclusionBlocks.insert(I->getParent());

    // Check if we make it out of the FromBB block at all.
    if (ExclusionBlocks.count(FromBB) &&
        !WillReachInBlock(*RQI.From, *FromBB->getTerminator(),
                          RQI.ExclusionSet))
      return rememberResult(A, RQITy::Reachable::No, RQI, true, IsTemporaryRQI);

    auto *LivenessAA =
        A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL);
    if (LivenessAA && LivenessAA->isAssumedDead(ToBB)) {
      DeadBlocks.insert(ToBB);
      return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
                            IsTemporaryRQI);
    }

    SmallPtrSet<const BasicBlock *, 16> Visited;
    SmallVector<const BasicBlock *, 16> Worklist;
    Worklist.push_back(FromBB);

    DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> LocalDeadEdges;
    while (!Worklist.empty()) {
      const BasicBlock *BB = Worklist.pop_back_val();
      if (!Visited.insert(BB).second)
        continue;
      for (const BasicBlock *SuccBB : successors(BB)) {
        if (LivenessAA && LivenessAA->isEdgeDead(BB, SuccBB)) {
          LocalDeadEdges.insert({BB, SuccBB});
          continue;
        }
        // We checked before if we just need to reach the ToBB block.
        if (SuccBB == ToBB)
          return rememberResult(A, RQITy::Reachable::Yes, RQI,
                                UsedExclusionSet, IsTemporaryRQI);
        if (DT && ExclusionBlocks.empty() && DT->dominates(BB, ToBB))
          return rememberResult(A, RQITy::Reachable::Yes, RQI,
                                UsedExclusionSet, IsTemporaryRQI);

        if (ExclusionBlocks.count(SuccBB)) {
          UsedExclusionSet = true;
          continue;
        }
        Worklist.push_back(SuccBB);
      }
    }

    DeadEdges.insert_range(LocalDeadEdges);
    return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
                          IsTemporaryRQI);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  // Set of assumed dead blocks we used in the last query. If any changes we
  // update the state.
  DenseSet<const BasicBlock *> DeadBlocks;

  // Set of assumed dead edges we used in the last query. If any changes we
  // update the state.
  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> DeadEdges;

  /// The dominator tree of the function to short-circuit reasoning.
  const DominatorTree *DT = nullptr;
};
} // namespace
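
// Illustrative example (not part of the pass logic): in a diamond CFG
//
//   entry -> then -> merge
//   entry -> else -> merge
//
// an instruction in `merge` is reachable from one in `entry`. With an
// exclusion set that covers instructions in both `then` and `else`, the
// worklist walk in isReachableImpl never reaches `merge`, and the
// exclusion-set-specific "No" result is cached via rememberResult().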

/// ------------------------ NoAlias Argument Attribute ------------------------

bool AANoAlias::isImpliedByIR(Attributor &A, const IRPosition &IRP,
                              Attribute::AttrKind ImpliedAttributeKind,
                              bool IgnoreSubsumingPositions) {
  assert(ImpliedAttributeKind == Attribute::NoAlias &&
         "Unexpected attribute kind");
  Value *Val = &IRP.getAssociatedValue();
  if (IRP.getPositionKind() != IRP_CALL_SITE_ARGUMENT) {
    if (isa<AllocaInst>(Val))
      return true;
  } else {
    IgnoreSubsumingPositions = true;
  }

  if (isa<UndefValue>(Val))
    return true;

  if (isa<ConstantPointerNull>(Val) &&
      !NullPointerIsDefined(IRP.getAnchorScope(),
                            Val->getType()->getPointerAddressSpace()))
    return true;

  if (A.hasAttr(IRP, {Attribute::ByVal, Attribute::NoAlias},
                IgnoreSubsumingPositions, Attribute::NoAlias))
    return true;

  return false;
}
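
// Illustrative example (not part of the pass logic): for
//
//   define void @f() {
//     %buf = alloca [16 x i8]
//     ret void
//   }
//
// isImpliedByIR answers `noalias` for %buf without any fixpoint iteration: a
// fresh alloca cannot alias another object. Likewise `undef`, constant null
// (on targets where null is not a defined address), and `byval` arguments
// (which are private copies) imply `noalias` directly.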
3791 | |
3792 | namespace { |
3793 | struct AANoAliasImpl : AANoAlias { |
3794 | AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { |
3795 | assert(getAssociatedType()->isPointerTy() && |
3796 | "Noalias is a pointer attribute" ); |
3797 | } |
3798 | |
3799 | const std::string getAsStr(Attributor *A) const override { |
3800 | return getAssumed() ? "noalias" : "may-alias" ; |
3801 | } |
3802 | }; |
3803 | |
3804 | /// NoAlias attribute for a floating value. |
3805 | struct AANoAliasFloating final : AANoAliasImpl { |
3806 | AANoAliasFloating(const IRPosition &IRP, Attributor &A) |
3807 | : AANoAliasImpl(IRP, A) {} |
3808 | |
3809 | /// See AbstractAttribute::updateImpl(...). |
3810 | ChangeStatus updateImpl(Attributor &A) override { |
3811 | // TODO: Implement this. |
3812 | return indicatePessimisticFixpoint(); |
3813 | } |
3814 | |
3815 | /// See AbstractAttribute::trackStatistics() |
3816 | void trackStatistics() const override { |
3817 | STATS_DECLTRACK_FLOATING_ATTR(noalias) |
3818 | } |
3819 | }; |
3820 | |
3821 | /// NoAlias attribute for an argument. |
3822 | struct AANoAliasArgument final |
3823 | : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { |
3824 | using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; |
3825 | AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} |
3826 | |
3827 | /// See AbstractAttribute::update(...). |
3828 | ChangeStatus updateImpl(Attributor &A) override { |
3829 | // We have to make sure no-alias on the argument does not break |
3830 | // synchronization when this is a callback argument, see also [1] below. |
3831 | // If synchronization cannot be affected, we delegate to the base updateImpl |
3832 | // function, otherwise we give up for now. |
3833 | |
3834 | // If the function is no-sync, no-alias cannot break synchronization. |
3835 | bool IsKnownNoSycn; |
3836 | if (AA::hasAssumedIRAttr<Attribute::NoSync>( |
3837 | A, QueryingAA: this, IRP: IRPosition::function_scope(IRP: getIRPosition()), |
3838 | DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoSycn)) |
3839 | return Base::updateImpl(A); |
3840 | |
3841 | // If the argument is read-only, no-alias cannot break synchronization. |
3842 | bool IsKnown; |
3843 | if (AA::isAssumedReadOnly(A, IRP: getIRPosition(), QueryingAA: *this, IsKnown)) |
3844 | return Base::updateImpl(A); |
3845 | |
3846 | // If the argument is never passed through callbacks, no-alias cannot break |
3847 | // synchronization. |
3848 | bool UsedAssumedInformation = false; |
3849 | if (A.checkForAllCallSites( |
3850 | Pred: [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, QueryingAA: *this, |
3851 | RequireAllCallSites: true, UsedAssumedInformation)) |
3852 | return Base::updateImpl(A); |
3853 | |
3854 | // TODO: add no-alias but make sure it doesn't break synchronization by |
3855 | // introducing fake uses. See: |
3856 | // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, |
3857 | // International Workshop on OpenMP 2018, |
3858 | // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf |
3859 | |
3860 | return indicatePessimisticFixpoint(); |
3861 | } |
3862 | |
3863 | /// See AbstractAttribute::trackStatistics() |
3864 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } |
3865 | }; |
3866 | |
3867 | struct AANoAliasCallSiteArgument final : AANoAliasImpl { |
3868 | AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) |
3869 | : AANoAliasImpl(IRP, A) {} |
3870 | |
3871 | /// Determine if the underlying value may alias with the call site argument |
3872 | /// \p OtherArgNo of \p ICS (= the underlying call site). |
3873 | bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, |
3874 | const AAMemoryBehavior &MemBehaviorAA, |
3875 | const CallBase &CB, unsigned OtherArgNo) { |
3876 | // We do not need to worry about aliasing with the underlying IRP. |
3877 | if (this->getCalleeArgNo() == (int)OtherArgNo) |
3878 | return false; |
3879 | |
3880 | // If it is not a pointer or pointer vector we do not alias. |
3881 | const Value *ArgOp = CB.getArgOperand(i: OtherArgNo); |
3882 | if (!ArgOp->getType()->isPtrOrPtrVectorTy()) |
3883 | return false; |
3884 | |
3885 | auto *CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( |
3886 | QueryingAA: *this, IRP: IRPosition::callsite_argument(CB, ArgNo: OtherArgNo), DepClass: DepClassTy::NONE); |
3887 | |
3888 | // If the argument is readnone, there is no read-write aliasing. |
3889 | if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadNone()) { |
3890 | A.recordDependence(FromAA: *CBArgMemBehaviorAA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
3891 | return false; |
3892 | } |
3893 | |
3894 | // If the argument is readonly and the underlying value is readonly, there |
3895 | // is no read-write aliasing. |
3896 | bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); |
3897 | if (CBArgMemBehaviorAA && CBArgMemBehaviorAA->isAssumedReadOnly() && |
3898 | IsReadOnly) { |
3899 | A.recordDependence(FromAA: MemBehaviorAA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
3900 | A.recordDependence(FromAA: *CBArgMemBehaviorAA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
3901 | return false; |
3902 | } |
3903 | |
3904 | // We have to utilize actual alias analysis queries so we need the object. |
3905 | if (!AAR) |
3906 | AAR = A.getInfoCache().getAnalysisResultForFunction<AAManager>( |
3907 | F: *getAnchorScope()); |
3908 | |
3909 | // Try to rule it out at the call site. |
3910 | bool IsAliasing = !AAR || !AAR->isNoAlias(V1: &getAssociatedValue(), V2: ArgOp); |
3911 | LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " |
3912 | "callsite arguments: " |
3913 | << getAssociatedValue() << " " << *ArgOp << " => " |
3914 | << (IsAliasing ? "" : "no-" ) << "alias \n" ); |
3915 | |
3916 | return IsAliasing; |
3917 | } |
3918 | |
3919 | bool isKnownNoAliasDueToNoAliasPreservation( |
3920 | Attributor &A, AAResults *&AAR, const AAMemoryBehavior &MemBehaviorAA) { |
3921 | // We can deduce "noalias" if the following conditions hold. |
3922 | // (i) Associated value is assumed to be noalias in the definition. |
3923 | // (ii) Associated value is assumed to be no-capture in all the uses |
3924 | // possibly executed before this callsite. |
3925 | // (iii) There is no other pointer argument which could alias with the |
3926 | // value. |
3927 | |
3928 | const IRPosition &VIRP = IRPosition::value(V: getAssociatedValue()); |
3929 | const Function *ScopeFn = VIRP.getAnchorScope(); |
3930 | // Check whether the value is captured in the scope using AANoCapture. |
3931 | // Look at CFG and check only uses possibly executed before this |
3932 | // callsite. |
3933 | auto UsePred = [&](const Use &U, bool &Follow) -> bool { |
3934 | Instruction *UserI = cast<Instruction>(Val: U.getUser()); |
3935 | |
3936 | // If UserI is the curr instruction and there is a single potential use of |
3937 | // the value in UserI we allow the use. |
3938 | // TODO: We should inspect the operands and allow those that cannot alias |
3939 | // with the value. |
3940 | if (UserI == getCtxI() && UserI->getNumOperands() == 1) |
3941 | return true; |
3942 | |
3943 | if (ScopeFn) { |
3944 | if (auto *CB = dyn_cast<CallBase>(Val: UserI)) { |
3945 | if (CB->isArgOperand(U: &U)) { |
3946 | |
3947 | unsigned ArgNo = CB->getArgOperandNo(U: &U); |
3948 | |
3949 | bool IsKnownNoCapture; |
3950 | if (AA::hasAssumedIRAttr<Attribute::Captures>( |
3951 | A, QueryingAA: this, IRP: IRPosition::callsite_argument(CB: *CB, ArgNo), |
3952 | DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoCapture)) |
3953 | return true; |
3954 | } |
3955 | } |
3956 | |
3957 | if (!AA::isPotentiallyReachable( |
3958 | A, FromI: *UserI, ToI: *getCtxI(), QueryingAA: *this, /* ExclusionSet */ nullptr, |
3959 | GoBackwardsCB: [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; })) |
3960 | return true; |
3961 | } |
3962 | |
3963 | // TODO: We should track the capturing uses in AANoCapture but the problem |
3964 | // is CGSCC runs. For those we would need to "allow" AANoCapture for |
3965 | // a value in the module slice. |
3966 | // TODO(captures): Make this more precise. |
3967 | UseCaptureInfo CI = DetermineUseCaptureKind(U, /*Base=*/nullptr); |
3968 | if (capturesNothing(CC: CI)) |
3969 | return true; |
3970 | if (CI.isPassthrough()) { |
3971 | Follow = true; |
3972 | return true; |
3973 | } |
3974 | LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI << "\n" ); |
3975 | return false; |
3976 | }; |
3977 | |
3978 | bool IsKnownNoCapture; |
3979 | const AANoCapture *NoCaptureAA = nullptr; |
3980 | bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>( |
3981 | A, QueryingAA: this, IRP: VIRP, DepClass: DepClassTy::NONE, IsKnown&: IsKnownNoCapture, IgnoreSubsumingPositions: false, AAPtr: &NoCaptureAA); |
3982 | if (!IsAssumedNoCapture && |
3983 | (!NoCaptureAA || !NoCaptureAA->isAssumedNoCaptureMaybeReturned())) { |
3984 | if (!A.checkForAllUses(Pred: UsePred, QueryingAA: *this, V: getAssociatedValue())) { |
3985 | LLVM_DEBUG( |
3986 | dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() |
3987 | << " cannot be noalias as it is potentially captured\n" ); |
3988 | return false; |
3989 | } |
3990 | } |
3991 | if (NoCaptureAA) |
3992 | A.recordDependence(FromAA: *NoCaptureAA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
3993 | |
3994 | // Check there is no other pointer argument which could alias with the |
3995 | // value passed at this call site. |
3996 | // TODO: AbstractCallSite |
3997 | const auto &CB = cast<CallBase>(Val&: getAnchorValue()); |
3998 | for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++) |
3999 | if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) |
4000 | return false; |
4001 | |
4002 | return true; |
4003 | } |
4004 | |
4005 | /// See AbstractAttribute::updateImpl(...). |
4006 | ChangeStatus updateImpl(Attributor &A) override { |
4007 | // If the argument is readnone we are done as there are no accesses via the |
4008 | // argument. |
4009 | auto *MemBehaviorAA = |
4010 | A.getAAFor<AAMemoryBehavior>(QueryingAA: *this, IRP: getIRPosition(), DepClass: DepClassTy::NONE); |
4011 | if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) { |
4012 | A.recordDependence(FromAA: *MemBehaviorAA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
4013 | return ChangeStatus::UNCHANGED; |
4014 | } |
4015 | |
4016 | bool IsKnownNoAlias; |
4017 | const IRPosition &VIRP = IRPosition::value(V: getAssociatedValue()); |
4018 | if (!AA::hasAssumedIRAttr<Attribute::NoAlias>( |
4019 | A, QueryingAA: this, IRP: VIRP, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoAlias)) { |
4020 | LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() |
4021 | << " is not no-alias at the definition\n" ); |
4022 | return indicatePessimisticFixpoint(); |
4023 | } |
4024 | |
4025 | AAResults *AAR = nullptr; |
4026 | if (MemBehaviorAA && |
4027 | isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA: *MemBehaviorAA)) { |
4028 | LLVM_DEBUG( |
4029 | dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n" ); |
4030 | return ChangeStatus::UNCHANGED; |
4031 | } |
4032 | |
4033 | return indicatePessimisticFixpoint(); |
4034 | } |
4035 | |
4036 | /// See AbstractAttribute::trackStatistics() |
4037 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } |
4038 | }; |
4039 | |
4040 | /// NoAlias attribute for function return value. |
4041 | struct AANoAliasReturned final : AANoAliasImpl { |
4042 | AANoAliasReturned(const IRPosition &IRP, Attributor &A) |
4043 | : AANoAliasImpl(IRP, A) {} |
4044 | |
4045 | /// See AbstractAttribute::updateImpl(...). |
4046 | ChangeStatus updateImpl(Attributor &A) override { |
4047 | |
4048 | auto CheckReturnValue = [&](Value &RV) -> bool { |
4049 | if (Constant *C = dyn_cast<Constant>(Val: &RV)) |
4050 | if (C->isNullValue() || isa<UndefValue>(Val: C)) |
4051 | return true; |
4052 | |
4053 | /// For now, we can only deduce noalias if we have call sites. |
4054 | /// FIXME: add more support. |
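      /// For example, noalias can be deduced for `ret ptr %r` when
      /// `%r = call noalias ptr @malloc(i64 8)` and %r is assumed not to be
      /// captured (other than by being returned).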
4055 | if (!isa<CallBase>(Val: &RV)) |
4056 | return false; |
4057 | |
4058 | const IRPosition &RVPos = IRPosition::value(V: RV); |
4059 | bool IsKnownNoAlias; |
4060 | if (!AA::hasAssumedIRAttr<Attribute::NoAlias>( |
4061 | A, QueryingAA: this, IRP: RVPos, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoAlias)) |
4062 | return false; |
4063 | |
4064 | bool IsKnownNoCapture; |
4065 | const AANoCapture *NoCaptureAA = nullptr; |
4066 | bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>( |
4067 | A, QueryingAA: this, IRP: RVPos, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoCapture, IgnoreSubsumingPositions: false, |
4068 | AAPtr: &NoCaptureAA); |
4069 | return IsAssumedNoCapture || |
4070 | (NoCaptureAA && NoCaptureAA->isAssumedNoCaptureMaybeReturned()); |
4071 | }; |
4072 | |
4073 | if (!A.checkForAllReturnedValues(Pred: CheckReturnValue, QueryingAA: *this)) |
4074 | return indicatePessimisticFixpoint(); |
4075 | |
4076 | return ChangeStatus::UNCHANGED; |
4077 | } |
4078 | |
4079 | /// See AbstractAttribute::trackStatistics() |
4080 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } |
4081 | }; |
4082 | |
4083 | /// NoAlias attribute deduction for a call site return value. |
4084 | struct AANoAliasCallSiteReturned final |
4085 | : AACalleeToCallSite<AANoAlias, AANoAliasImpl> { |
4086 | AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) |
4087 | : AACalleeToCallSite<AANoAlias, AANoAliasImpl>(IRP, A) {} |
4088 | |
4089 | /// See AbstractAttribute::trackStatistics() |
4090 | void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } |
4091 | }; |
4092 | } // namespace |
4093 | |
4094 | /// -------------------AAIsDead Function Attribute----------------------- |
4095 | |
4096 | namespace { |
4097 | struct AAIsDeadValueImpl : public AAIsDead { |
4098 | AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} |
4099 | |
4100 | /// See AAIsDead::isAssumedDead(). |
4101 | bool isAssumedDead() const override { return isAssumed(BitsEncoding: IS_DEAD); } |
4102 | |
4103 | /// See AAIsDead::isKnownDead(). |
4104 | bool isKnownDead() const override { return isKnown(BitsEncoding: IS_DEAD); } |
4105 | |
4106 | /// See AAIsDead::isAssumedDead(BasicBlock *). |
4107 | bool isAssumedDead(const BasicBlock *BB) const override { return false; } |
4108 | |
4109 | /// See AAIsDead::isKnownDead(BasicBlock *). |
4110 | bool isKnownDead(const BasicBlock *BB) const override { return false; } |
4111 | |
4112 | /// See AAIsDead::isAssumedDead(Instruction *I). |
4113 | bool isAssumedDead(const Instruction *I) const override { |
4114 | return I == getCtxI() && isAssumedDead(); |
4115 | } |
4116 | |
4117 | /// See AAIsDead::isKnownDead(Instruction *I). |
4118 | bool isKnownDead(const Instruction *I) const override { |
4119 | return isAssumedDead(I) && isKnownDead(); |
4120 | } |
4121 | |
4122 | /// See AbstractAttribute::getAsStr(). |
4123 | const std::string getAsStr(Attributor *A) const override { |
4124 | return isAssumedDead() ? "assumed-dead" : "assumed-live" ; |
4125 | } |
4126 | |
4127 | /// Check if all uses are assumed dead. |
4128 | bool areAllUsesAssumedDead(Attributor &A, Value &V) { |
    // Callers might not check the type; a void value has no uses.
4130 | if (V.getType()->isVoidTy() || V.use_empty()) |
4131 | return true; |
4132 | |
4133 | // If we replace a value with a constant there are no uses left afterwards. |
4134 | if (!isa<Constant>(Val: V)) { |
4135 | if (auto *I = dyn_cast<Instruction>(Val: &V)) |
4136 | if (!A.isRunOn(Fn&: *I->getFunction())) |
4137 | return false; |
4138 | bool UsedAssumedInformation = false; |
4139 | std::optional<Constant *> C = |
4140 | A.getAssumedConstant(V, AA: *this, UsedAssumedInformation); |
4141 | if (!C || *C) |
4142 | return true; |
4143 | } |
4144 | |
4145 | auto UsePred = [&](const Use &U, bool &Follow) { return false; }; |
    // Explicitly set the dependence class to required because we want a long
    // chain of N dependent instructions to be considered live as soon as one
    // of them is, without going through N update cycles. This is not required
    // for correctness.
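    // Roughly speaking: for a chain v1 -> v2 -> ... -> vN in which one value
    // turns out to be live, a required dependence invalidates the whole chain
    // in one go instead of one value per update cycle.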
4150 | return A.checkForAllUses(Pred: UsePred, QueryingAA: *this, V, /* CheckBBLivenessOnly */ false, |
4151 | LivenessDepClass: DepClassTy::REQUIRED, |
4152 | /* IgnoreDroppableUses */ false); |
4153 | } |
4154 | |
4155 | /// Determine if \p I is assumed to be side-effect free. |
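  /// For example, a call to a `readonly` + `nounwind` function whose result
  /// is unused is considered side-effect free; stores and fences are handled
  /// separately.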
4156 | bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { |
4157 | if (!I || wouldInstructionBeTriviallyDead(I)) |
4158 | return true; |
4159 | |
4160 | auto *CB = dyn_cast<CallBase>(Val: I); |
4161 | if (!CB || isa<IntrinsicInst>(Val: CB)) |
4162 | return false; |
4163 | |
4164 | const IRPosition &CallIRP = IRPosition::callsite_function(CB: *CB); |
4165 | |
4166 | bool IsKnownNoUnwind; |
4167 | if (!AA::hasAssumedIRAttr<Attribute::NoUnwind>( |
4168 | A, QueryingAA: this, IRP: CallIRP, DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoUnwind)) |
4169 | return false; |
4170 | |
4171 | bool IsKnown; |
4172 | return AA::isAssumedReadOnly(A, IRP: CallIRP, QueryingAA: *this, IsKnown); |
4173 | } |
4174 | }; |
4175 | |
4176 | struct AAIsDeadFloating : public AAIsDeadValueImpl { |
4177 | AAIsDeadFloating(const IRPosition &IRP, Attributor &A) |
4178 | : AAIsDeadValueImpl(IRP, A) {} |
4179 | |
4180 | /// See AbstractAttribute::initialize(...). |
4181 | void initialize(Attributor &A) override { |
4182 | AAIsDeadValueImpl::initialize(A); |
4183 | |
4184 | if (isa<UndefValue>(Val: getAssociatedValue())) { |
4185 | indicatePessimisticFixpoint(); |
4186 | return; |
4187 | } |
4188 | |
4189 | Instruction *I = dyn_cast<Instruction>(Val: &getAssociatedValue()); |
4190 | if (!isAssumedSideEffectFree(A, I)) { |
4191 | if (!isa_and_nonnull<StoreInst>(Val: I) && !isa_and_nonnull<FenceInst>(Val: I)) |
4192 | indicatePessimisticFixpoint(); |
4193 | else |
4194 | removeAssumedBits(BitsEncoding: HAS_NO_EFFECT); |
4195 | } |
4196 | } |
4197 | |
4198 | bool isDeadFence(Attributor &A, FenceInst &FI) { |
4199 | const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>( |
4200 | IRP: IRPosition::function(F: *FI.getFunction()), QueryingAA: *this, DepClass: DepClassTy::NONE); |
4201 | if (!ExecDomainAA || !ExecDomainAA->isNoOpFence(FI)) |
4202 | return false; |
4203 | A.recordDependence(FromAA: *ExecDomainAA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
4204 | return true; |
4205 | } |
4206 | |
4207 | bool isDeadStore(Attributor &A, StoreInst &SI, |
4208 | SmallSetVector<Instruction *, 8> *AssumeOnlyInst = nullptr) { |
    // The LangRef now states that volatile stores are neither UB nor dead, so
    // we skip them here.
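    // E.g., `store volatile i32 0, ptr %p` must be kept even if all other
    // accesses to %p are dead.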
4210 | if (SI.isVolatile()) |
4211 | return false; |
4212 | |
4213 | // If we are collecting assumes to be deleted we are in the manifest stage. |
4214 | // It's problematic to collect the potential copies again now so we use the |
4215 | // cached ones. |
4216 | bool UsedAssumedInformation = false; |
4217 | if (!AssumeOnlyInst) { |
4218 | PotentialCopies.clear(); |
4219 | if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, QueryingAA: *this, |
4220 | UsedAssumedInformation)) { |
4221 | LLVM_DEBUG( |
4222 | dbgs() |
4223 | << "[AAIsDead] Could not determine potential copies of store!\n" ); |
4224 | return false; |
4225 | } |
4226 | } |
4227 | LLVM_DEBUG(dbgs() << "[AAIsDead] Store has " << PotentialCopies.size() |
4228 | << " potential copies.\n" ); |
4229 | |
4230 | InformationCache &InfoCache = A.getInfoCache(); |
4231 | return llvm::all_of(Range&: PotentialCopies, P: [&](Value *V) { |
4232 | if (A.isAssumedDead(IRP: IRPosition::value(V: *V), QueryingAA: this, FnLivenessAA: nullptr, |
4233 | UsedAssumedInformation)) |
4234 | return true; |
4235 | if (auto *LI = dyn_cast<LoadInst>(Val: V)) { |
4236 | if (llvm::all_of(Range: LI->uses(), P: [&](const Use &U) { |
4237 | auto &UserI = cast<Instruction>(Val&: *U.getUser()); |
4238 | if (InfoCache.isOnlyUsedByAssume(I: UserI)) { |
4239 | if (AssumeOnlyInst) |
4240 | AssumeOnlyInst->insert(X: &UserI); |
4241 | return true; |
4242 | } |
4243 | return A.isAssumedDead(U, QueryingAA: this, FnLivenessAA: nullptr, UsedAssumedInformation); |
4244 | })) { |
4245 | return true; |
4246 | } |
4247 | } |
4248 | LLVM_DEBUG(dbgs() << "[AAIsDead] Potential copy " << *V |
4249 | << " is assumed live!\n" ); |
4250 | return false; |
4251 | }); |
4252 | } |
4253 | |
4254 | /// See AbstractAttribute::getAsStr(). |
4255 | const std::string getAsStr(Attributor *A) const override { |
4256 | Instruction *I = dyn_cast<Instruction>(Val: &getAssociatedValue()); |
4257 | if (isa_and_nonnull<StoreInst>(Val: I)) |
4258 | if (isValidState()) |
4259 | return "assumed-dead-store" ; |
4260 | if (isa_and_nonnull<FenceInst>(Val: I)) |
4261 | if (isValidState()) |
4262 | return "assumed-dead-fence" ; |
4263 | return AAIsDeadValueImpl::getAsStr(A); |
4264 | } |
4265 | |
4266 | /// See AbstractAttribute::updateImpl(...). |
4267 | ChangeStatus updateImpl(Attributor &A) override { |
4268 | Instruction *I = dyn_cast<Instruction>(Val: &getAssociatedValue()); |
4269 | if (auto *SI = dyn_cast_or_null<StoreInst>(Val: I)) { |
4270 | if (!isDeadStore(A, SI&: *SI)) |
4271 | return indicatePessimisticFixpoint(); |
4272 | } else if (auto *FI = dyn_cast_or_null<FenceInst>(Val: I)) { |
4273 | if (!isDeadFence(A, FI&: *FI)) |
4274 | return indicatePessimisticFixpoint(); |
4275 | } else { |
4276 | if (!isAssumedSideEffectFree(A, I)) |
4277 | return indicatePessimisticFixpoint(); |
4278 | if (!areAllUsesAssumedDead(A, V&: getAssociatedValue())) |
4279 | return indicatePessimisticFixpoint(); |
4280 | } |
4281 | return ChangeStatus::UNCHANGED; |
4282 | } |
4283 | |
4284 | bool isRemovableStore() const override { |
4285 | return isAssumed(BitsEncoding: IS_REMOVABLE) && isa<StoreInst>(Val: &getAssociatedValue()); |
4286 | } |
4287 | |
4288 | /// See AbstractAttribute::manifest(...). |
4289 | ChangeStatus manifest(Attributor &A) override { |
4290 | Value &V = getAssociatedValue(); |
4291 | if (auto *I = dyn_cast<Instruction>(Val: &V)) { |
      // If we get here we basically know the users are all dead. We check
      // isAssumedSideEffectFree again because that might not hold: possibly
      // only the users are dead while the instruction (e.g., a call) is still
      // needed.
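      // E.g., an unused `%r = call i32 @may_write()` has only dead users, but
      // the call itself has to stay if it may write memory.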
4296 | if (auto *SI = dyn_cast<StoreInst>(Val: I)) { |
4297 | SmallSetVector<Instruction *, 8> AssumeOnlyInst; |
4298 | bool IsDead = isDeadStore(A, SI&: *SI, AssumeOnlyInst: &AssumeOnlyInst); |
4299 | (void)IsDead; |
4300 | assert(IsDead && "Store was assumed to be dead!" ); |
4301 | A.deleteAfterManifest(I&: *I); |
4302 | for (size_t i = 0; i < AssumeOnlyInst.size(); ++i) { |
4303 | Instruction *AOI = AssumeOnlyInst[i]; |
4304 | for (auto *Usr : AOI->users()) |
4305 | AssumeOnlyInst.insert(X: cast<Instruction>(Val: Usr)); |
4306 | A.deleteAfterManifest(I&: *AOI); |
4307 | } |
4308 | return ChangeStatus::CHANGED; |
4309 | } |
4310 | if (auto *FI = dyn_cast<FenceInst>(Val: I)) { |
4311 | assert(isDeadFence(A, *FI)); |
4312 | A.deleteAfterManifest(I&: *FI); |
4313 | return ChangeStatus::CHANGED; |
4314 | } |
4315 | if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(Val: I)) { |
4316 | A.deleteAfterManifest(I&: *I); |
4317 | return ChangeStatus::CHANGED; |
4318 | } |
4319 | } |
4320 | return ChangeStatus::UNCHANGED; |
4321 | } |
4322 | |
4323 | /// See AbstractAttribute::trackStatistics() |
4324 | void trackStatistics() const override { |
4325 | STATS_DECLTRACK_FLOATING_ATTR(IsDead) |
4326 | } |
4327 | |
4328 | private: |
4329 | // The potential copies of a dead store, used for deletion during manifest. |
4330 | SmallSetVector<Value *, 4> PotentialCopies; |
4331 | }; |
4332 | |
4333 | struct AAIsDeadArgument : public AAIsDeadFloating { |
4334 | AAIsDeadArgument(const IRPosition &IRP, Attributor &A) |
4335 | : AAIsDeadFloating(IRP, A) {} |
4336 | |
4337 | /// See AbstractAttribute::manifest(...). |
4338 | ChangeStatus manifest(Attributor &A) override { |
4339 | Argument &Arg = *getAssociatedArgument(); |
4340 | if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) |
4341 | if (A.registerFunctionSignatureRewrite( |
4342 | Arg, /* ReplacementTypes */ {}, |
4343 | CalleeRepairCB: Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, |
4344 | ACSRepairCB: Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { |
4345 | return ChangeStatus::CHANGED; |
4346 | } |
4347 | return ChangeStatus::UNCHANGED; |
4348 | } |
4349 | |
4350 | /// See AbstractAttribute::trackStatistics() |
4351 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } |
4352 | }; |
4353 | |
4354 | struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { |
4355 | AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) |
4356 | : AAIsDeadValueImpl(IRP, A) {} |
4357 | |
4358 | /// See AbstractAttribute::initialize(...). |
4359 | void initialize(Attributor &A) override { |
4360 | AAIsDeadValueImpl::initialize(A); |
4361 | if (isa<UndefValue>(Val: getAssociatedValue())) |
4362 | indicatePessimisticFixpoint(); |
4363 | } |
4364 | |
4365 | /// See AbstractAttribute::updateImpl(...). |
4366 | ChangeStatus updateImpl(Attributor &A) override { |
4367 | // TODO: Once we have call site specific value information we can provide |
4368 | // call site specific liveness information and then it makes |
4369 | // sense to specialize attributes for call sites arguments instead of |
4370 | // redirecting requests to the callee argument. |
4371 | Argument *Arg = getAssociatedArgument(); |
4372 | if (!Arg) |
4373 | return indicatePessimisticFixpoint(); |
4374 | const IRPosition &ArgPos = IRPosition::argument(Arg: *Arg); |
4375 | auto *ArgAA = A.getAAFor<AAIsDead>(QueryingAA: *this, IRP: ArgPos, DepClass: DepClassTy::REQUIRED); |
4376 | if (!ArgAA) |
4377 | return indicatePessimisticFixpoint(); |
4378 | return clampStateAndIndicateChange(S&: getState(), R: ArgAA->getState()); |
4379 | } |
4380 | |
4381 | /// See AbstractAttribute::manifest(...). |
4382 | ChangeStatus manifest(Attributor &A) override { |
4383 | CallBase &CB = cast<CallBase>(Val&: getAnchorValue()); |
4384 | Use &U = CB.getArgOperandUse(i: getCallSiteArgNo()); |
4385 | assert(!isa<UndefValue>(U.get()) && |
4386 | "Expected undef values to be filtered out!" ); |
4387 | UndefValue &UV = *UndefValue::get(T: U->getType()); |
4388 | if (A.changeUseAfterManifest(U, NV&: UV)) |
4389 | return ChangeStatus::CHANGED; |
4390 | return ChangeStatus::UNCHANGED; |
4391 | } |
4392 | |
4393 | /// See AbstractAttribute::trackStatistics() |
4394 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } |
4395 | }; |
4396 | |
4397 | struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { |
4398 | AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) |
4399 | : AAIsDeadFloating(IRP, A) {} |
4400 | |
4401 | /// See AAIsDead::isAssumedDead(). |
4402 | bool isAssumedDead() const override { |
4403 | return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; |
4404 | } |
4405 | |
4406 | /// See AbstractAttribute::initialize(...). |
4407 | void initialize(Attributor &A) override { |
4408 | AAIsDeadFloating::initialize(A); |
4409 | if (isa<UndefValue>(Val: getAssociatedValue())) { |
4410 | indicatePessimisticFixpoint(); |
4411 | return; |
4412 | } |
4413 | |
4414 | // We track this separately as a secondary state. |
4415 | IsAssumedSideEffectFree = isAssumedSideEffectFree(A, I: getCtxI()); |
4416 | } |
4417 | |
4418 | /// See AbstractAttribute::updateImpl(...). |
4419 | ChangeStatus updateImpl(Attributor &A) override { |
4420 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
4421 | if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, I: getCtxI())) { |
4422 | IsAssumedSideEffectFree = false; |
4423 | Changed = ChangeStatus::CHANGED; |
4424 | } |
4425 | if (!areAllUsesAssumedDead(A, V&: getAssociatedValue())) |
4426 | return indicatePessimisticFixpoint(); |
4427 | return Changed; |
4428 | } |
4429 | |
4430 | /// See AbstractAttribute::trackStatistics() |
4431 | void trackStatistics() const override { |
4432 | if (IsAssumedSideEffectFree) |
4433 | STATS_DECLTRACK_CSRET_ATTR(IsDead) |
4434 | else |
4435 | STATS_DECLTRACK_CSRET_ATTR(UnusedResult) |
4436 | } |
4437 | |
4438 | /// See AbstractAttribute::getAsStr(). |
4439 | const std::string getAsStr(Attributor *A) const override { |
4440 | return isAssumedDead() |
4441 | ? "assumed-dead" |
4442 | : (getAssumed() ? "assumed-dead-users" : "assumed-live" ); |
4443 | } |
4444 | |
4445 | private: |
4446 | bool IsAssumedSideEffectFree = true; |
4447 | }; |
4448 | |
4449 | struct AAIsDeadReturned : public AAIsDeadValueImpl { |
4450 | AAIsDeadReturned(const IRPosition &IRP, Attributor &A) |
4451 | : AAIsDeadValueImpl(IRP, A) {} |
4452 | |
4453 | /// See AbstractAttribute::updateImpl(...). |
4454 | ChangeStatus updateImpl(Attributor &A) override { |
4455 | |
4456 | bool UsedAssumedInformation = false; |
4457 | A.checkForAllInstructions(Pred: [](Instruction &) { return true; }, QueryingAA: *this, |
4458 | Opcodes: {Instruction::Ret}, UsedAssumedInformation); |
4459 | |
4460 | auto PredForCallSite = [&](AbstractCallSite ACS) { |
4461 | if (ACS.isCallbackCall() || !ACS.getInstruction()) |
4462 | return false; |
4463 | return areAllUsesAssumedDead(A, V&: *ACS.getInstruction()); |
4464 | }; |
4465 | |
4466 | if (!A.checkForAllCallSites(Pred: PredForCallSite, QueryingAA: *this, RequireAllCallSites: true, |
4467 | UsedAssumedInformation)) |
4468 | return indicatePessimisticFixpoint(); |
4469 | |
4470 | return ChangeStatus::UNCHANGED; |
4471 | } |
4472 | |
4473 | /// See AbstractAttribute::manifest(...). |
4474 | ChangeStatus manifest(Attributor &A) override { |
4475 | // TODO: Rewrite the signature to return void? |
4476 | bool AnyChange = false; |
4477 | UndefValue &UV = *UndefValue::get(T: getAssociatedFunction()->getReturnType()); |
4478 | auto RetInstPred = [&](Instruction &I) { |
4479 | ReturnInst &RI = cast<ReturnInst>(Val&: I); |
4480 | if (!isa<UndefValue>(Val: RI.getReturnValue())) |
4481 | AnyChange |= A.changeUseAfterManifest(U&: RI.getOperandUse(i: 0), NV&: UV); |
4482 | return true; |
4483 | }; |
4484 | bool UsedAssumedInformation = false; |
4485 | A.checkForAllInstructions(Pred: RetInstPred, QueryingAA: *this, Opcodes: {Instruction::Ret}, |
4486 | UsedAssumedInformation); |
4487 | return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
4488 | } |
4489 | |
4490 | /// See AbstractAttribute::trackStatistics() |
4491 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } |
4492 | }; |
4493 | |
4494 | struct AAIsDeadFunction : public AAIsDead { |
4495 | AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} |
4496 | |
4497 | /// See AbstractAttribute::initialize(...). |
4498 | void initialize(Attributor &A) override { |
4499 | Function *F = getAnchorScope(); |
4500 | assert(F && "Did expect an anchor function" ); |
4501 | if (!isAssumedDeadInternalFunction(A)) { |
4502 | ToBeExploredFrom.insert(X: &F->getEntryBlock().front()); |
4503 | assumeLive(A, BB: F->getEntryBlock()); |
4504 | } |
4505 | } |
4506 | |
4507 | bool isAssumedDeadInternalFunction(Attributor &A) { |
4508 | if (!getAnchorScope()->hasLocalLinkage()) |
4509 | return false; |
4510 | bool UsedAssumedInformation = false; |
4511 | return A.checkForAllCallSites(Pred: [](AbstractCallSite) { return false; }, QueryingAA: *this, |
4512 | RequireAllCallSites: true, UsedAssumedInformation); |
4513 | } |
4514 | |
4515 | /// See AbstractAttribute::getAsStr(). |
4516 | const std::string getAsStr(Attributor *A) const override { |
4517 | return "Live[#BB " + std::to_string(val: AssumedLiveBlocks.size()) + "/" + |
4518 | std::to_string(val: getAnchorScope()->size()) + "][#TBEP " + |
4519 | std::to_string(val: ToBeExploredFrom.size()) + "][#KDE " + |
4520 | std::to_string(val: KnownDeadEnds.size()) + "]" ; |
4521 | } |
4522 | |
4523 | /// See AbstractAttribute::manifest(...). |
4524 | ChangeStatus manifest(Attributor &A) override { |
4525 | assert(getState().isValidState() && |
4526 | "Attempted to manifest an invalid state!" ); |
4527 | |
4528 | ChangeStatus HasChanged = ChangeStatus::UNCHANGED; |
4529 | Function &F = *getAnchorScope(); |
4530 | |
4531 | if (AssumedLiveBlocks.empty()) { |
4532 | A.deleteAfterManifest(F); |
4533 | return ChangeStatus::CHANGED; |
4534 | } |
4535 | |
    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
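    // E.g., an `invoke` of a known-nounwind callee can be demoted to a
    // `call`, which makes its unwind destination dead.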
4539 | bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); |
4540 | |
4541 | KnownDeadEnds.set_union(ToBeExploredFrom); |
4542 | for (const Instruction *DeadEndI : KnownDeadEnds) { |
4543 | auto *CB = dyn_cast<CallBase>(Val: DeadEndI); |
4544 | if (!CB) |
4545 | continue; |
4546 | bool IsKnownNoReturn; |
4547 | bool MayReturn = !AA::hasAssumedIRAttr<Attribute::NoReturn>( |
4548 | A, QueryingAA: this, IRP: IRPosition::callsite_function(CB: *CB), DepClass: DepClassTy::OPTIONAL, |
4549 | IsKnown&: IsKnownNoReturn); |
4550 | if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(Val: CB))) |
4551 | continue; |
4552 | |
4553 | if (auto *II = dyn_cast<InvokeInst>(Val: DeadEndI)) |
4554 | A.registerInvokeWithDeadSuccessor(II&: const_cast<InvokeInst &>(*II)); |
4555 | else |
4556 | A.changeToUnreachableAfterManifest( |
4557 | I: const_cast<Instruction *>(DeadEndI->getNextNode())); |
4558 | HasChanged = ChangeStatus::CHANGED; |
4559 | } |
4560 | |
4561 | STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted." ); |
4562 | for (BasicBlock &BB : F) |
4563 | if (!AssumedLiveBlocks.count(V: &BB)) { |
4564 | A.deleteAfterManifest(BB); |
4565 | ++BUILD_STAT_NAME(AAIsDead, BasicBlock); |
4566 | HasChanged = ChangeStatus::CHANGED; |
4567 | } |
4568 | |
4569 | return HasChanged; |
4570 | } |
4571 | |
4572 | /// See AbstractAttribute::updateImpl(...). |
4573 | ChangeStatus updateImpl(Attributor &A) override; |
4574 | |
4575 | bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { |
4576 | assert(From->getParent() == getAnchorScope() && |
4577 | To->getParent() == getAnchorScope() && |
4578 | "Used AAIsDead of the wrong function" ); |
4579 | return isValidState() && !AssumedLiveEdges.count(V: std::make_pair(x&: From, y&: To)); |
4580 | } |
4581 | |
4582 | /// See AbstractAttribute::trackStatistics() |
4583 | void trackStatistics() const override {} |
4584 | |
4585 | /// Returns true if the function is assumed dead. |
4586 | bool isAssumedDead() const override { return false; } |
4587 | |
4588 | /// See AAIsDead::isKnownDead(). |
4589 | bool isKnownDead() const override { return false; } |
4590 | |
4591 | /// See AAIsDead::isAssumedDead(BasicBlock *). |
4592 | bool isAssumedDead(const BasicBlock *BB) const override { |
4593 | assert(BB->getParent() == getAnchorScope() && |
4594 | "BB must be in the same anchor scope function." ); |
4595 | |
4596 | if (!getAssumed()) |
4597 | return false; |
4598 | return !AssumedLiveBlocks.count(V: BB); |
4599 | } |
4600 | |
4601 | /// See AAIsDead::isKnownDead(BasicBlock *). |
4602 | bool isKnownDead(const BasicBlock *BB) const override { |
4603 | return getKnown() && isAssumedDead(BB); |
4604 | } |
4605 | |
4606 | /// See AAIsDead::isAssumed(Instruction *I). |
4607 | bool isAssumedDead(const Instruction *I) const override { |
4608 | assert(I->getParent()->getParent() == getAnchorScope() && |
4609 | "Instruction must be in the same anchor scope function." ); |
4610 | |
4611 | if (!getAssumed()) |
4612 | return false; |
4613 | |
    // If it is not in AssumedLiveBlocks then it is for sure dead. Otherwise,
    // it can still be dead if it comes after a noreturn call in a live block.
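    // E.g., in
    //   call void @noreturn()
    //   %x = add i32 %a, %b
    // the `add` is dead even though its block is live.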
4616 | if (!AssumedLiveBlocks.count(V: I->getParent())) |
4617 | return true; |
4618 | |
4619 | // If it is not after a liveness barrier it is live. |
4620 | const Instruction *PrevI = I->getPrevNode(); |
4621 | while (PrevI) { |
4622 | if (KnownDeadEnds.count(key: PrevI) || ToBeExploredFrom.count(key: PrevI)) |
4623 | return true; |
4624 | PrevI = PrevI->getPrevNode(); |
4625 | } |
4626 | return false; |
4627 | } |
4628 | |
4629 | /// See AAIsDead::isKnownDead(Instruction *I). |
4630 | bool isKnownDead(const Instruction *I) const override { |
4631 | return getKnown() && isAssumedDead(I); |
4632 | } |
4633 | |
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
4636 | bool assumeLive(Attributor &A, const BasicBlock &BB) { |
4637 | if (!AssumedLiveBlocks.insert(V: &BB).second) |
4638 | return false; |
4639 | |
4640 | // We assume that all of BB is (probably) live now and if there are calls to |
4641 | // internal functions we will assume that those are now live as well. This |
4642 | // is a performance optimization for blocks with calls to a lot of internal |
4643 | // functions. It can however cause dead functions to be treated as live. |
4644 | for (const Instruction &I : BB) |
4645 | if (const auto *CB = dyn_cast<CallBase>(Val: &I)) |
4646 | if (auto *F = dyn_cast_if_present<Function>(Val: CB->getCalledOperand())) |
4647 | if (F->hasLocalLinkage()) |
4648 | A.markLiveInternalFunction(F: *F); |
4649 | return true; |
4650 | } |
4651 | |
4652 | /// Collection of instructions that need to be explored again, e.g., we |
4653 | /// did assume they do not transfer control to (one of their) successors. |
4654 | SmallSetVector<const Instruction *, 8> ToBeExploredFrom; |
4655 | |
4656 | /// Collection of instructions that are known to not transfer control. |
4657 | SmallSetVector<const Instruction *, 8> KnownDeadEnds; |
4658 | |
4659 | /// Collection of all assumed live edges |
4660 | DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges; |
4661 | |
4662 | /// Collection of all assumed live BasicBlocks. |
4663 | DenseSet<const BasicBlock *> AssumedLiveBlocks; |
4664 | }; |
4665 | |
4666 | static bool |
4667 | identifyAliveSuccessors(Attributor &A, const CallBase &CB, |
4668 | AbstractAttribute &AA, |
4669 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
4670 | const IRPosition &IPos = IRPosition::callsite_function(CB); |
4671 | |
4672 | bool IsKnownNoReturn; |
4673 | if (AA::hasAssumedIRAttr<Attribute::NoReturn>( |
4674 | A, QueryingAA: &AA, IRP: IPos, DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoReturn)) |
4675 | return !IsKnownNoReturn; |
4676 | if (CB.isTerminator()) |
4677 | AliveSuccessors.push_back(Elt: &CB.getSuccessor(Idx: 0)->front()); |
4678 | else |
4679 | AliveSuccessors.push_back(Elt: CB.getNextNode()); |
4680 | return false; |
4681 | } |
4682 | |
4683 | static bool |
4684 | identifyAliveSuccessors(Attributor &A, const InvokeInst &II, |
4685 | AbstractAttribute &AA, |
4686 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
4687 | bool UsedAssumedInformation = |
4688 | identifyAliveSuccessors(A, CB: cast<CallBase>(Val: II), AA, AliveSuccessors); |
4689 | |
  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
4693 | if (AAIsDeadFunction::mayCatchAsynchronousExceptions(F: *II.getFunction())) { |
4694 | AliveSuccessors.push_back(Elt: &II.getUnwindDest()->front()); |
4695 | } else { |
4696 | const IRPosition &IPos = IRPosition::callsite_function(CB: II); |
4697 | |
4698 | bool IsKnownNoUnwind; |
4699 | if (AA::hasAssumedIRAttr<Attribute::NoUnwind>( |
4700 | A, QueryingAA: &AA, IRP: IPos, DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoUnwind)) { |
4701 | UsedAssumedInformation |= !IsKnownNoUnwind; |
4702 | } else { |
4703 | AliveSuccessors.push_back(Elt: &II.getUnwindDest()->front()); |
4704 | } |
4705 | } |
4706 | return UsedAssumedInformation; |
4707 | } |
4708 | |
4709 | static bool |
4710 | identifyAliveSuccessors(Attributor &A, const BranchInst &BI, |
4711 | AbstractAttribute &AA, |
4712 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
4713 | bool UsedAssumedInformation = false; |
4714 | if (BI.getNumSuccessors() == 1) { |
4715 | AliveSuccessors.push_back(Elt: &BI.getSuccessor(i: 0)->front()); |
4716 | } else { |
4717 | std::optional<Constant *> C = |
4718 | A.getAssumedConstant(V: *BI.getCondition(), AA, UsedAssumedInformation); |
4719 | if (!C || isa_and_nonnull<UndefValue>(Val: *C)) { |
4720 | // No value yet, assume both edges are dead. |
4721 | } else if (isa_and_nonnull<ConstantInt>(Val: *C)) { |
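      // E.g., for `br i1 true, label %T, label %F` the condition folds to 1,
      // so only successor 0 (%T) stays alive.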
4722 | const BasicBlock *SuccBB = |
4723 | BI.getSuccessor(i: 1 - cast<ConstantInt>(Val: *C)->getValue().getZExtValue()); |
4724 | AliveSuccessors.push_back(Elt: &SuccBB->front()); |
4725 | } else { |
4726 | AliveSuccessors.push_back(Elt: &BI.getSuccessor(i: 0)->front()); |
4727 | AliveSuccessors.push_back(Elt: &BI.getSuccessor(i: 1)->front()); |
4728 | UsedAssumedInformation = false; |
4729 | } |
4730 | } |
4731 | return UsedAssumedInformation; |
4732 | } |
4733 | |
4734 | static bool |
4735 | identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, |
4736 | AbstractAttribute &AA, |
4737 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
4738 | bool UsedAssumedInformation = false; |
4739 | SmallVector<AA::ValueAndContext> Values; |
4740 | if (!A.getAssumedSimplifiedValues(IRP: IRPosition::value(V: *SI.getCondition()), AA: &AA, |
4741 | Values, S: AA::AnyScope, |
4742 | UsedAssumedInformation)) { |
4743 | // Something went wrong, assume all successors are live. |
4744 | for (const BasicBlock *SuccBB : successors(BB: SI.getParent())) |
4745 | AliveSuccessors.push_back(Elt: &SuccBB->front()); |
4746 | return false; |
4747 | } |
4748 | |
4749 | if (Values.empty() || |
4750 | (Values.size() == 1 && |
4751 | isa_and_nonnull<UndefValue>(Val: Values.front().getValue()))) { |
4752 | // No valid value yet, assume all edges are dead. |
4753 | return UsedAssumedInformation; |
4754 | } |
4755 | |
4756 | Type &Ty = *SI.getCondition()->getType(); |
4757 | SmallPtrSet<ConstantInt *, 8> Constants; |
4758 | auto CheckForConstantInt = [&](Value *V) { |
4759 | if (auto *CI = dyn_cast_if_present<ConstantInt>(Val: AA::getWithType(V&: *V, Ty))) { |
4760 | Constants.insert(Ptr: CI); |
4761 | return true; |
4762 | } |
4763 | return false; |
4764 | }; |
4765 | |
4766 | if (!all_of(Range&: Values, P: [&](AA::ValueAndContext &VAC) { |
4767 | return CheckForConstantInt(VAC.getValue()); |
4768 | })) { |
4769 | for (const BasicBlock *SuccBB : successors(BB: SI.getParent())) |
4770 | AliveSuccessors.push_back(Elt: &SuccBB->front()); |
4771 | return UsedAssumedInformation; |
4772 | } |
4773 | |
4774 | unsigned MatchedCases = 0; |
4775 | for (const auto &CaseIt : SI.cases()) { |
4776 | if (Constants.count(Ptr: CaseIt.getCaseValue())) { |
4777 | ++MatchedCases; |
4778 | AliveSuccessors.push_back(Elt: &CaseIt.getCaseSuccessor()->front()); |
4779 | } |
4780 | } |
4781 | |
4782 | // If all potential values have been matched, we will not visit the default |
4783 | // case. |
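  // E.g., if the condition is known to be 1 or 3 and both values have
  // explicit cases, the default destination cannot be reached from here.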
4784 | if (MatchedCases < Constants.size()) |
4785 | AliveSuccessors.push_back(Elt: &SI.getDefaultDest()->front()); |
4786 | return UsedAssumedInformation; |
4787 | } |
4788 | |
4789 | ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { |
4790 | ChangeStatus Change = ChangeStatus::UNCHANGED; |
4791 | |
4792 | if (AssumedLiveBlocks.empty()) { |
4793 | if (isAssumedDeadInternalFunction(A)) |
4794 | return ChangeStatus::UNCHANGED; |
4795 | |
4796 | Function *F = getAnchorScope(); |
4797 | ToBeExploredFrom.insert(X: &F->getEntryBlock().front()); |
4798 | assumeLive(A, BB: F->getEntryBlock()); |
4799 | Change = ChangeStatus::CHANGED; |
4800 | } |
4801 | |
4802 | LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" |
4803 | << getAnchorScope()->size() << "] BBs and " |
4804 | << ToBeExploredFrom.size() << " exploration points and " |
4805 | << KnownDeadEnds.size() << " known dead ends\n" ); |
4806 | |
4807 | // Copy and clear the list of instructions we need to explore from. It is |
4808 | // refilled with instructions the next update has to look at. |
4809 | SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), |
4810 | ToBeExploredFrom.end()); |
4811 | decltype(ToBeExploredFrom) NewToBeExploredFrom; |
4812 | |
4813 | SmallVector<const Instruction *, 8> AliveSuccessors; |
4814 | while (!Worklist.empty()) { |
4815 | const Instruction *I = Worklist.pop_back_val(); |
4816 | LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n" ); |
4817 | |
4818 | // Fast forward for uninteresting instructions. We could look for UB here |
4819 | // though. |
4820 | while (!I->isTerminator() && !isa<CallBase>(Val: I)) |
4821 | I = I->getNextNode(); |
4822 | |
4823 | AliveSuccessors.clear(); |
4824 | |
4825 | bool UsedAssumedInformation = false; |
4826 | switch (I->getOpcode()) { |
4827 | // TODO: look for (assumed) UB to backwards propagate "deadness". |
4828 | default: |
4829 | assert(I->isTerminator() && |
4830 | "Expected non-terminators to be handled already!" ); |
4831 | for (const BasicBlock *SuccBB : successors(BB: I->getParent())) |
4832 | AliveSuccessors.push_back(Elt: &SuccBB->front()); |
4833 | break; |
4834 | case Instruction::Call: |
4835 | UsedAssumedInformation = identifyAliveSuccessors(A, CB: cast<CallInst>(Val: *I), |
4836 | AA&: *this, AliveSuccessors); |
4837 | break; |
4838 | case Instruction::Invoke: |
4839 | UsedAssumedInformation = identifyAliveSuccessors(A, II: cast<InvokeInst>(Val: *I), |
4840 | AA&: *this, AliveSuccessors); |
4841 | break; |
4842 | case Instruction::Br: |
4843 | UsedAssumedInformation = identifyAliveSuccessors(A, BI: cast<BranchInst>(Val: *I), |
4844 | AA&: *this, AliveSuccessors); |
4845 | break; |
4846 | case Instruction::Switch: |
4847 | UsedAssumedInformation = identifyAliveSuccessors(A, SI: cast<SwitchInst>(Val: *I), |
4848 | AA&: *this, AliveSuccessors); |
4849 | break; |
4850 | } |
4851 | |
4852 | if (UsedAssumedInformation) { |
4853 | NewToBeExploredFrom.insert(X: I); |
4854 | } else if (AliveSuccessors.empty() || |
4855 | (I->isTerminator() && |
4856 | AliveSuccessors.size() < I->getNumSuccessors())) { |
4857 | if (KnownDeadEnds.insert(X: I)) |
4858 | Change = ChangeStatus::CHANGED; |
4859 | } |
4860 | |
4861 | LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: " |
4862 | << AliveSuccessors.size() << " UsedAssumedInformation: " |
4863 | << UsedAssumedInformation << "\n" ); |
4864 | |
4865 | for (const Instruction *AliveSuccessor : AliveSuccessors) { |
4866 | if (!I->isTerminator()) { |
4867 | assert(AliveSuccessors.size() == 1 && |
4868 | "Non-terminator expected to have a single successor!" ); |
4869 | Worklist.push_back(Elt: AliveSuccessor); |
4870 | } else { |
        // Record the assumed live edge.
4872 | auto Edge = std::make_pair(x: I->getParent(), y: AliveSuccessor->getParent()); |
4873 | if (AssumedLiveEdges.insert(V: Edge).second) |
4874 | Change = ChangeStatus::CHANGED; |
4875 | if (assumeLive(A, BB: *AliveSuccessor->getParent())) |
4876 | Worklist.push_back(Elt: AliveSuccessor); |
4877 | } |
4878 | } |
4879 | } |
4880 | |
4881 | // Check if the content of ToBeExploredFrom changed, ignore the order. |
4882 | if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() || |
4883 | llvm::any_of(Range&: NewToBeExploredFrom, P: [&](const Instruction *I) { |
4884 | return !ToBeExploredFrom.count(key: I); |
4885 | })) { |
4886 | Change = ChangeStatus::CHANGED; |
4887 | ToBeExploredFrom = std::move(NewToBeExploredFrom); |
4888 | } |
4889 | |
  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled any unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
4896 | if (ToBeExploredFrom.empty() && |
4897 | getAnchorScope()->size() == AssumedLiveBlocks.size() && |
4898 | llvm::all_of(Range&: KnownDeadEnds, P: [](const Instruction *DeadEndI) { |
4899 | return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0; |
4900 | })) |
4901 | return indicatePessimisticFixpoint(); |
4902 | return Change; |
4903 | } |
4904 | |
/// Liveness information for a call site.
4906 | struct AAIsDeadCallSite final : AAIsDeadFunction { |
4907 | AAIsDeadCallSite(const IRPosition &IRP, Attributor &A) |
4908 | : AAIsDeadFunction(IRP, A) {} |
4909 | |
4910 | /// See AbstractAttribute::initialize(...). |
4911 | void initialize(Attributor &A) override { |
4912 | // TODO: Once we have call site specific value information we can provide |
4913 | // call site specific liveness information and then it makes |
4914 | // sense to specialize attributes for call sites instead of |
4915 | // redirecting requests to the callee. |
4916 | llvm_unreachable("Abstract attributes for liveness are not " |
4917 | "supported for call sites yet!" ); |
4918 | } |
4919 | |
4920 | /// See AbstractAttribute::updateImpl(...). |
4921 | ChangeStatus updateImpl(Attributor &A) override { |
4922 | return indicatePessimisticFixpoint(); |
4923 | } |
4924 | |
4925 | /// See AbstractAttribute::trackStatistics() |
4926 | void trackStatistics() const override {} |
4927 | }; |
4928 | } // namespace |
4929 | |
4930 | /// -------------------- Dereferenceable Argument Attribute -------------------- |
4931 | |
4932 | namespace { |
4933 | struct AADereferenceableImpl : AADereferenceable { |
4934 | AADereferenceableImpl(const IRPosition &IRP, Attributor &A) |
4935 | : AADereferenceable(IRP, A) {} |
4936 | using StateType = DerefState; |
4937 | |
4938 | /// See AbstractAttribute::initialize(...). |
4939 | void initialize(Attributor &A) override { |
4940 | Value &V = *getAssociatedValue().stripPointerCasts(); |
4941 | SmallVector<Attribute, 4> Attrs; |
4942 | A.getAttrs(IRP: getIRPosition(), |
4943 | AKs: {Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, |
4944 | Attrs, /* IgnoreSubsumingPositions */ false); |
4945 | for (const Attribute &Attr : Attrs) |
4946 | takeKnownDerefBytesMaximum(Bytes: Attr.getValueAsInt()); |
4947 | |
4948 | // Ensure we initialize the non-null AA (if necessary). |
4949 | bool IsKnownNonNull; |
4950 | AA::hasAssumedIRAttr<Attribute::NonNull>( |
4951 | A, QueryingAA: this, IRP: getIRPosition(), DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNonNull); |
4952 | |
4953 | bool CanBeNull, CanBeFreed; |
4954 | takeKnownDerefBytesMaximum(Bytes: V.getPointerDereferenceableBytes( |
4955 | DL: A.getDataLayout(), CanBeNull, CanBeFreed)); |
4956 | |
4957 | if (Instruction *CtxI = getCtxI()) |
4958 | followUsesInMBEC(AA&: *this, A, S&: getState(), CtxI&: *CtxI); |
4959 | } |
4960 | |
4961 | /// See AbstractAttribute::getState() |
4962 | /// { |
4963 | StateType &getState() override { return *this; } |
4964 | const StateType &getState() const override { return *this; } |
4965 | /// } |
4966 | |
4967 | /// Helper function for collecting accessed bytes in must-be-executed-context |
4968 | void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, |
4969 | DerefState &State) { |
4970 | const Value *UseV = U->get(); |
4971 | if (!UseV->getType()->isPointerTy()) |
4972 | return; |
4973 | |
4974 | std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(Inst: I); |
4975 | if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile()) |
4976 | return; |
4977 | |
4978 | int64_t Offset; |
4979 | const Value *Base = GetPointerBaseWithConstantOffset( |
4980 | Ptr: Loc->Ptr, Offset, DL: A.getDataLayout(), /*AllowNonInbounds*/ true); |
4981 | if (Base && Base == &getAssociatedValue()) |
4982 | State.addAccessedBytes(Offset, Size: Loc->Size.getValue()); |
4983 | } |
4984 | |
4985 | /// See followUsesInMBEC |
4986 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
4987 | AADereferenceable::StateType &State) { |
4988 | bool IsNonNull = false; |
4989 | bool TrackUse = false; |
4990 | int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( |
4991 | A, QueryingAA: *this, AssociatedValue&: getAssociatedValue(), U, I, IsNonNull, TrackUse); |
4992 | LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes |
4993 | << " for instruction " << *I << "\n" ); |
4994 | |
4995 | addAccessedBytesForUse(A, U, I, State); |
4996 | State.takeKnownDerefBytesMaximum(Bytes: DerefBytes); |
4997 | return TrackUse; |
4998 | } |
4999 | |
5000 | /// See AbstractAttribute::manifest(...). |
5001 | ChangeStatus manifest(Attributor &A) override { |
5002 | ChangeStatus Change = AADereferenceable::manifest(A); |
5003 | bool IsKnownNonNull; |
5004 | bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>( |
5005 | A, QueryingAA: this, IRP: getIRPosition(), DepClass: DepClassTy::NONE, IsKnown&: IsKnownNonNull); |
5006 | if (IsAssumedNonNull && |
5007 | A.hasAttr(IRP: getIRPosition(), AKs: Attribute::DereferenceableOrNull)) { |
5008 | A.removeAttrs(IRP: getIRPosition(), AttrKinds: {Attribute::DereferenceableOrNull}); |
5009 | return ChangeStatus::CHANGED; |
5010 | } |
5011 | return Change; |
5012 | } |
5013 | |
5014 | void getDeducedAttributes(Attributor &A, LLVMContext &Ctx, |
5015 | SmallVectorImpl<Attribute> &Attrs) const override { |
5016 | // TODO: Add *_globally support |
5017 | bool IsKnownNonNull; |
5018 | bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>( |
5019 | A, QueryingAA: this, IRP: getIRPosition(), DepClass: DepClassTy::NONE, IsKnown&: IsKnownNonNull); |
5020 | if (IsAssumedNonNull) |
5021 | Attrs.emplace_back(Args: Attribute::getWithDereferenceableBytes( |
5022 | Context&: Ctx, Bytes: getAssumedDereferenceableBytes())); |
5023 | else |
5024 | Attrs.emplace_back(Args: Attribute::getWithDereferenceableOrNullBytes( |
5025 | Context&: Ctx, Bytes: getAssumedDereferenceableBytes())); |
5026 | } |
5027 | |
5028 | /// See AbstractAttribute::getAsStr(). |
5029 | const std::string getAsStr(Attributor *A) const override { |
5030 | if (!getAssumedDereferenceableBytes()) |
5031 | return "unknown-dereferenceable" ; |
5032 | bool IsKnownNonNull; |
5033 | bool IsAssumedNonNull = false; |
5034 | if (A) |
5035 | IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>( |
5036 | A&: *A, QueryingAA: this, IRP: getIRPosition(), DepClass: DepClassTy::NONE, IsKnown&: IsKnownNonNull); |
5037 | return std::string("dereferenceable" ) + |
5038 | (IsAssumedNonNull ? "" : "_or_null" ) + |
5039 | (isAssumedGlobal() ? "_globally" : "" ) + "<" + |
5040 | std::to_string(val: getKnownDereferenceableBytes()) + "-" + |
5041 | std::to_string(val: getAssumedDereferenceableBytes()) + ">" + |
5042 | (!A ? " [non-null is unknown]" : "" ); |
5043 | } |
5044 | }; |
5045 | |
5046 | /// Dereferenceable attribute for a floating value. |
5047 | struct AADereferenceableFloating : AADereferenceableImpl { |
5048 | AADereferenceableFloating(const IRPosition &IRP, Attributor &A) |
5049 | : AADereferenceableImpl(IRP, A) {} |
5050 | |
5051 | /// See AbstractAttribute::updateImpl(...). |
5052 | ChangeStatus updateImpl(Attributor &A) override { |
5053 | bool Stripped; |
5054 | bool UsedAssumedInformation = false; |
5055 | SmallVector<AA::ValueAndContext> Values; |
5056 | if (!A.getAssumedSimplifiedValues(IRP: getIRPosition(), AA: *this, Values, |
5057 | S: AA::AnyScope, UsedAssumedInformation)) { |
5058 | Values.push_back(Elt: {getAssociatedValue(), getCtxI()}); |
5059 | Stripped = false; |
5060 | } else { |
5061 | Stripped = Values.size() != 1 || |
5062 | Values.front().getValue() != &getAssociatedValue(); |
5063 | } |
5064 | |
5065 | const DataLayout &DL = A.getDataLayout(); |
5066 | DerefState T; |
5067 | |
5068 | auto VisitValueCB = [&](const Value &V) -> bool { |
5069 | unsigned IdxWidth = |
5070 | DL.getIndexSizeInBits(AS: V.getType()->getPointerAddressSpace()); |
5071 | APInt Offset(IdxWidth, 0); |
5072 | const Value *Base = stripAndAccumulateOffsets( |
5073 | A, QueryingAA: *this, Val: &V, DL, Offset, /* GetMinOffset */ false, |
5074 | /* AllowNonInbounds */ true); |
5075 | |
5076 | const auto *AA = A.getAAFor<AADereferenceable>( |
5077 | QueryingAA: *this, IRP: IRPosition::value(V: *Base), DepClass: DepClassTy::REQUIRED); |
5078 | int64_t DerefBytes = 0; |
5079 | if (!AA || (!Stripped && this == AA)) { |
5080 | // Use IR information if we did not strip anything. |
5081 | // TODO: track globally. |
5082 | bool CanBeNull, CanBeFreed; |
5083 | DerefBytes = |
5084 | Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); |
5085 | T.GlobalState.indicatePessimisticFixpoint(); |
5086 | } else { |
5087 | const DerefState &DS = AA->getState(); |
5088 | DerefBytes = DS.DerefBytesState.getAssumed(); |
5089 | T.GlobalState &= DS.GlobalState; |
5090 | } |
5091 | |
      // For now we do not try to "increase" dereferenceability due to
      // negative indices, as we would first need code to deal with loops and
      // with overflows of the dereferenceable bytes.
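      // E.g., for `getelementptr i8, ptr %p, i64 -4` we clamp the offset to 0
      // instead of claiming four extra dereferenceable bytes.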
5095 | int64_t OffsetSExt = Offset.getSExtValue(); |
5096 | if (OffsetSExt < 0) |
5097 | OffsetSExt = 0; |
5098 | |
5099 | T.takeAssumedDerefBytesMinimum( |
5100 | Bytes: std::max(a: int64_t(0), b: DerefBytes - OffsetSExt)); |
5101 | |
5102 | if (this == AA) { |
5103 | if (!Stripped) { |
5104 | // If nothing was stripped IR information is all we got. |
5105 | T.takeKnownDerefBytesMaximum( |
5106 | Bytes: std::max(a: int64_t(0), b: DerefBytes - OffsetSExt)); |
5107 | T.indicatePessimisticFixpoint(); |
5108 | } else if (OffsetSExt > 0) { |
          // If something was stripped but there is circular reasoning, we
          // look at the offset. If it is positive, we would basically
          // decrease the dereferenceable bytes in a circular loop, slowly
          // driving them down to the known value; indicating a fixpoint here
          // accelerates that.
5114 | T.indicatePessimisticFixpoint(); |
5115 | } |
5116 | } |
5117 | |
5118 | return T.isValidState(); |
5119 | }; |
5120 | |
5121 | for (const auto &VAC : Values) |
5122 | if (!VisitValueCB(*VAC.getValue())) |
5123 | return indicatePessimisticFixpoint(); |
5124 | |
5125 | return clampStateAndIndicateChange(S&: getState(), R: T); |
5126 | } |
5127 | |
5128 | /// See AbstractAttribute::trackStatistics() |
5129 | void trackStatistics() const override { |
5130 | STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) |
5131 | } |
5132 | }; |
5133 | |
5134 | /// Dereferenceable attribute for a return value. |
5135 | struct AADereferenceableReturned final |
5136 | : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { |
5137 | using Base = |
5138 | AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>; |
5139 | AADereferenceableReturned(const IRPosition &IRP, Attributor &A) |
5140 | : Base(IRP, A) {} |
5141 | |
5142 | /// See AbstractAttribute::trackStatistics() |
5143 | void trackStatistics() const override { |
5144 | STATS_DECLTRACK_FNRET_ATTR(dereferenceable) |
5145 | } |
5146 | }; |
5147 | |
5148 | /// Dereferenceable attribute for an argument |
5149 | struct AADereferenceableArgument final |
5150 | : AAArgumentFromCallSiteArguments<AADereferenceable, |
5151 | AADereferenceableImpl> { |
5152 | using Base = |
5153 | AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; |
5154 | AADereferenceableArgument(const IRPosition &IRP, Attributor &A) |
5155 | : Base(IRP, A) {} |
5156 | |
5157 | /// See AbstractAttribute::trackStatistics() |
5158 | void trackStatistics() const override { |
5159 | STATS_DECLTRACK_ARG_ATTR(dereferenceable) |
5160 | } |
5161 | }; |
5162 | |
5163 | /// Dereferenceable attribute for a call site argument. |
5164 | struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { |
5165 | AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) |
5166 | : AADereferenceableFloating(IRP, A) {} |
5167 | |
5168 | /// See AbstractAttribute::trackStatistics() |
5169 | void trackStatistics() const override { |
5170 | STATS_DECLTRACK_CSARG_ATTR(dereferenceable) |
5171 | } |
5172 | }; |
5173 | |
5174 | /// Dereferenceable attribute deduction for a call site return value. |
5175 | struct AADereferenceableCallSiteReturned final |
5176 | : AACalleeToCallSite<AADereferenceable, AADereferenceableImpl> { |
5177 | using Base = AACalleeToCallSite<AADereferenceable, AADereferenceableImpl>; |
5178 | AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) |
5179 | : Base(IRP, A) {} |
5180 | |
5181 | /// See AbstractAttribute::trackStatistics() |
5182 | void trackStatistics() const override { |
5183 | STATS_DECLTRACK_CS_ATTR(dereferenceable); |
5184 | } |
5185 | }; |
5186 | } // namespace |
5187 | |
5188 | // ------------------------ Align Argument Attribute ------------------------ |
5189 | |
5190 | namespace { |
5191 | static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, |
5192 | Value &AssociatedValue, const Use *U, |
5193 | const Instruction *I, bool &TrackUse) { |
5194 | // We need to follow common pointer manipulation uses to the accesses they |
5195 | // feed into. |
5196 | if (isa<CastInst>(Val: I)) { |
5197 | // Follow all but ptr2int casts. |
5198 | TrackUse = !isa<PtrToIntInst>(Val: I); |
5199 | return 0; |
5200 | } |
5201 | if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: I)) { |
5202 | if (GEP->hasAllConstantIndices()) |
5203 | TrackUse = true; |
5204 | return 0; |
5205 | } |
5206 | |
5207 | MaybeAlign MA; |
5208 | if (const auto *CB = dyn_cast<CallBase>(Val: I)) { |
5209 | if (CB->isBundleOperand(U) || CB->isCallee(U)) |
5210 | return 0; |
5211 | |
5212 | unsigned ArgNo = CB->getArgOperandNo(U); |
5213 | IRPosition IRP = IRPosition::callsite_argument(CB: *CB, ArgNo); |
5214 | // As long as we only use known information there is no need to track |
5215 | // dependences here. |
5216 | auto *AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClass: DepClassTy::NONE); |
5217 | if (AlignAA) |
5218 | MA = MaybeAlign(AlignAA->getKnownAlign()); |
5219 | } |
5220 | |
5221 | const DataLayout &DL = A.getDataLayout(); |
5222 | const Value *UseV = U->get(); |
5223 | if (auto *SI = dyn_cast<StoreInst>(Val: I)) { |
5224 | if (SI->getPointerOperand() == UseV) |
5225 | MA = SI->getAlign(); |
5226 | } else if (auto *LI = dyn_cast<LoadInst>(Val: I)) { |
5227 | if (LI->getPointerOperand() == UseV) |
5228 | MA = LI->getAlign(); |
5229 | } else if (auto *AI = dyn_cast<AtomicRMWInst>(Val: I)) { |
5230 | if (AI->getPointerOperand() == UseV) |
5231 | MA = AI->getAlign(); |
5232 | } else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Val: I)) { |
5233 | if (AI->getPointerOperand() == UseV) |
5234 | MA = AI->getAlign(); |
5235 | } |
5236 | |
5237 | if (!MA || *MA <= QueryingAA.getKnownAlign()) |
5238 | return 0; |
5239 | |
5240 | unsigned Alignment = MA->value(); |
5241 | int64_t Offset; |
5242 | |
5243 | if (const Value *Base = GetPointerBaseWithConstantOffset(Ptr: UseV, Offset, DL)) { |
5244 | if (Base == &AssociatedValue) { |
5245 | // BasePointerAddr + Offset = Alignment * Q for some integer Q. |
5246 | // So we can say that the maximum power of two which is a divisor of |
5247 | // gcd(Offset, Alignment) is an alignment. |
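      // For example, with Offset = 20 and Alignment = 16 we get
      // gcd(20, 16) = 4, so an alignment of 4 can be assumed.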
5248 | |
5249 | uint32_t gcd = std::gcd(m: uint32_t(abs(x: (int32_t)Offset)), n: Alignment); |
5250 | Alignment = llvm::bit_floor(Value: gcd); |
5251 | } |
5252 | } |
5253 | |
5254 | return Alignment; |
5255 | } |
5256 | |
5257 | struct AAAlignImpl : AAAlign { |
5258 | AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} |
5259 | |
5260 | /// See AbstractAttribute::initialize(...). |
5261 | void initialize(Attributor &A) override { |
5262 | SmallVector<Attribute, 4> Attrs; |
5263 | A.getAttrs(IRP: getIRPosition(), AKs: {Attribute::Alignment}, Attrs); |
5264 | for (const Attribute &Attr : Attrs) |
5265 | takeKnownMaximum(Value: Attr.getValueAsInt()); |
5266 | |
5267 | Value &V = *getAssociatedValue().stripPointerCasts(); |
5268 | takeKnownMaximum(Value: V.getPointerAlignment(DL: A.getDataLayout()).value()); |
5269 | |
5270 | if (Instruction *CtxI = getCtxI()) |
5271 | followUsesInMBEC(AA&: *this, A, S&: getState(), CtxI&: *CtxI); |
5272 | } |
5273 | |
5274 | /// See AbstractAttribute::manifest(...). |
5275 | ChangeStatus manifest(Attributor &A) override { |
5276 | ChangeStatus InstrChanged = ChangeStatus::UNCHANGED; |
5277 | |
5278 | // Check for users that allow alignment annotations. |
5279 | Value &AssociatedValue = getAssociatedValue(); |
5280 | if (isa<ConstantData>(Val: AssociatedValue)) |
5281 | return ChangeStatus::UNCHANGED; |
5282 | |
5283 | for (const Use &U : AssociatedValue.uses()) { |
5284 | if (auto *SI = dyn_cast<StoreInst>(Val: U.getUser())) { |
5285 | if (SI->getPointerOperand() == &AssociatedValue) |
5286 | if (SI->getAlign() < getAssumedAlign()) { |
5287 | STATS_DECLTRACK(AAAlign, Store, |
5288 | "Number of times alignment added to a store" ); |
5289 | SI->setAlignment(getAssumedAlign()); |
5290 | InstrChanged = ChangeStatus::CHANGED; |
5291 | } |
5292 | } else if (auto *LI = dyn_cast<LoadInst>(Val: U.getUser())) { |
5293 | if (LI->getPointerOperand() == &AssociatedValue) |
5294 | if (LI->getAlign() < getAssumedAlign()) { |
5295 | LI->setAlignment(getAssumedAlign()); |
5296 | STATS_DECLTRACK(AAAlign, Load, |
5297 | "Number of times alignment added to a load" ); |
5298 | InstrChanged = ChangeStatus::CHANGED; |
5299 | } |
5300 | } else if (auto *RMW = dyn_cast<AtomicRMWInst>(Val: U.getUser())) { |
5301 | if (RMW->getPointerOperand() == &AssociatedValue) { |
5302 | if (RMW->getAlign() < getAssumedAlign()) { |
5303 | STATS_DECLTRACK(AAAlign, AtomicRMW, |
5304 | "Number of times alignment added to atomicrmw" ); |
5305 | |
5306 | RMW->setAlignment(getAssumedAlign()); |
5307 | InstrChanged = ChangeStatus::CHANGED; |
5308 | } |
5309 | } |
5310 | } else if (auto *CAS = dyn_cast<AtomicCmpXchgInst>(Val: U.getUser())) { |
5311 | if (CAS->getPointerOperand() == &AssociatedValue) { |
5312 | if (CAS->getAlign() < getAssumedAlign()) { |
5313 | STATS_DECLTRACK(AAAlign, AtomicCmpXchg, |
5314 | "Number of times alignment added to cmpxchg" ); |
5315 | CAS->setAlignment(getAssumedAlign()); |
5316 | InstrChanged = ChangeStatus::CHANGED; |
5317 | } |
5318 | } |
5319 | } |
5320 | } |
5321 | |
5322 | ChangeStatus Changed = AAAlign::manifest(A); |
5323 | |
5324 | Align InheritAlign = |
5325 | getAssociatedValue().getPointerAlignment(DL: A.getDataLayout()); |
5326 | if (InheritAlign >= getAssumedAlign()) |
5327 | return InstrChanged; |
5328 | return Changed | InstrChanged; |
5329 | } |
5330 | |
// TODO: Provide a helper to determine the implied ABI alignment and check
// that value in the existing manifest method and a new one for AAAlignImpl,
// to avoid making the alignment explicit if it did not improve.
5334 | |
/// See AbstractAttribute::getDeducedAttributes(...).
5336 | void getDeducedAttributes(Attributor &A, LLVMContext &Ctx, |
5337 | SmallVectorImpl<Attribute> &Attrs) const override { |
5338 | if (getAssumedAlign() > 1) |
5339 | Attrs.emplace_back( |
5340 | Args: Attribute::getWithAlignment(Context&: Ctx, Alignment: Align(getAssumedAlign()))); |
5341 | } |
5342 | |
5343 | /// See followUsesInMBEC |
5344 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
5345 | AAAlign::StateType &State) { |
5346 | bool TrackUse = false; |
5347 | |
5348 | unsigned int KnownAlign = |
5349 | getKnownAlignForUse(A, QueryingAA&: *this, AssociatedValue&: getAssociatedValue(), U, I, TrackUse); |
5350 | State.takeKnownMaximum(Value: KnownAlign); |
5351 | |
5352 | return TrackUse; |
5353 | } |
5354 | |
5355 | /// See AbstractAttribute::getAsStr(). |
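/// E.g., returns "align<1-16>" for a known alignment of 1 and an assumed
/// alignment of 16.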
5356 | const std::string getAsStr(Attributor *A) const override { |
5357 | return "align<" + std::to_string(val: getKnownAlign().value()) + "-" + |
5358 | std::to_string(val: getAssumedAlign().value()) + ">" ; |
5359 | } |
5360 | }; |
5361 | |
5362 | /// Align attribute for a floating value. |
5363 | struct AAAlignFloating : AAAlignImpl { |
5364 | AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} |
5365 | |
5366 | /// See AbstractAttribute::updateImpl(...). |
5367 | ChangeStatus updateImpl(Attributor &A) override { |
5368 | const DataLayout &DL = A.getDataLayout(); |
5369 | |
5370 | bool Stripped; |
5371 | bool UsedAssumedInformation = false; |
5372 | SmallVector<AA::ValueAndContext> Values; |
5373 | if (!A.getAssumedSimplifiedValues(IRP: getIRPosition(), AA: *this, Values, |
5374 | S: AA::AnyScope, UsedAssumedInformation)) { |
5375 | Values.push_back(Elt: {getAssociatedValue(), getCtxI()}); |
5376 | Stripped = false; |
5377 | } else { |
5378 | Stripped = Values.size() != 1 || |
5379 | Values.front().getValue() != &getAssociatedValue(); |
5380 | } |
5381 | |
5382 | StateType T; |
5383 | auto VisitValueCB = [&](Value &V) -> bool { |
5384 | if (isa<UndefValue>(Val: V) || isa<ConstantPointerNull>(Val: V)) |
5385 | return true; |
5386 | const auto *AA = A.getAAFor<AAAlign>(QueryingAA: *this, IRP: IRPosition::value(V), |
5387 | DepClass: DepClassTy::REQUIRED); |
5388 | if (!AA || (!Stripped && this == AA)) { |
5389 | int64_t Offset; |
5390 | unsigned Alignment = 1; |
5391 | if (const Value *Base = |
5392 | GetPointerBaseWithConstantOffset(Ptr: &V, Offset, DL)) { |
5393 | // TODO: Use AAAlign for the base too. |
5394 | Align PA = Base->getPointerAlignment(DL); |
5395 | // BasePointerAddr + Offset = Alignment * Q for some integer Q. |
5396 | // So we can say that the maximum power of two which is a divisor of |
5397 | // gcd(Offset, Alignment) is an alignment. |
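// E.g., a base known to be 8-aligned with Offset = 12 yields
// gcd(12, 8) = 4, so an alignment of 4 holds.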
5398 | |
5399 | uint32_t gcd = |
5400 | std::gcd(m: uint32_t(abs(x: (int32_t)Offset)), n: uint32_t(PA.value())); |
5401 | Alignment = llvm::bit_floor(Value: gcd); |
5402 | } else { |
5403 | Alignment = V.getPointerAlignment(DL).value(); |
5404 | } |
5405 | // Use only IR information if we did not strip anything. |
5406 | T.takeKnownMaximum(Value: Alignment); |
5407 | T.indicatePessimisticFixpoint(); |
5408 | } else { |
5409 | // Use abstract attribute information. |
5410 | const AAAlign::StateType &DS = AA->getState(); |
5411 | T ^= DS; |
5412 | } |
5413 | return T.isValidState(); |
5414 | }; |
5415 | |
5416 | for (const auto &VAC : Values) { |
5417 | if (!VisitValueCB(*VAC.getValue())) |
5418 | return indicatePessimisticFixpoint(); |
5419 | } |
5420 | |
// TODO: If we know we visited all incoming values, thus none are assumed
// dead, we can take the known information from the state T.
5423 | return clampStateAndIndicateChange(S&: getState(), R: T); |
5424 | } |
5425 | |
5426 | /// See AbstractAttribute::trackStatistics() |
5427 | void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } |
5428 | }; |
5429 | |
5430 | /// Align attribute for function return value. |
5431 | struct AAAlignReturned final |
5432 | : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { |
5433 | using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>; |
5434 | AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} |
5435 | |
5436 | /// See AbstractAttribute::trackStatistics() |
5437 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } |
5438 | }; |
5439 | |
5440 | /// Align attribute for function argument. |
5441 | struct AAAlignArgument final |
5442 | : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { |
5443 | using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; |
5444 | AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} |
5445 | |
5446 | /// See AbstractAttribute::manifest(...). |
5447 | ChangeStatus manifest(Attributor &A) override { |
5448 | // If the associated argument is involved in a must-tail call we give up |
5449 | // because we would need to keep the argument alignments of caller and |
// callee in sync. It just does not seem worth the trouble right now.
5451 | if (A.getInfoCache().isInvolvedInMustTailCall(Arg: *getAssociatedArgument())) |
5452 | return ChangeStatus::UNCHANGED; |
5453 | return Base::manifest(A); |
5454 | } |
5455 | |
5456 | /// See AbstractAttribute::trackStatistics() |
5457 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } |
5458 | }; |
5459 | |
5460 | struct AAAlignCallSiteArgument final : AAAlignFloating { |
5461 | AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) |
5462 | : AAAlignFloating(IRP, A) {} |
5463 | |
5464 | /// See AbstractAttribute::manifest(...). |
5465 | ChangeStatus manifest(Attributor &A) override { |
5466 | // If the associated argument is involved in a must-tail call we give up |
5467 | // because we would need to keep the argument alignments of caller and |
// callee in sync. It just does not seem worth the trouble right now.
5469 | if (Argument *Arg = getAssociatedArgument()) |
5470 | if (A.getInfoCache().isInvolvedInMustTailCall(Arg: *Arg)) |
5471 | return ChangeStatus::UNCHANGED; |
5472 | ChangeStatus Changed = AAAlignImpl::manifest(A); |
5473 | Align InheritAlign = |
5474 | getAssociatedValue().getPointerAlignment(DL: A.getDataLayout()); |
5475 | if (InheritAlign >= getAssumedAlign()) |
5476 | Changed = ChangeStatus::UNCHANGED; |
5477 | return Changed; |
5478 | } |
5479 | |
5480 | /// See AbstractAttribute::updateImpl(Attributor &A). |
5481 | ChangeStatus updateImpl(Attributor &A) override { |
5482 | ChangeStatus Changed = AAAlignFloating::updateImpl(A); |
5483 | if (Argument *Arg = getAssociatedArgument()) { |
5484 | // We only take known information from the argument |
5485 | // so we do not need to track a dependence. |
5486 | const auto *ArgAlignAA = A.getAAFor<AAAlign>( |
5487 | QueryingAA: *this, IRP: IRPosition::argument(Arg: *Arg), DepClass: DepClassTy::NONE); |
5488 | if (ArgAlignAA) |
5489 | takeKnownMaximum(Value: ArgAlignAA->getKnownAlign().value()); |
5490 | } |
5491 | return Changed; |
5492 | } |
5493 | |
5494 | /// See AbstractAttribute::trackStatistics() |
5495 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } |
5496 | }; |
5497 | |
5498 | /// Align attribute deduction for a call site return value. |
5499 | struct AAAlignCallSiteReturned final |
5500 | : AACalleeToCallSite<AAAlign, AAAlignImpl> { |
5501 | using Base = AACalleeToCallSite<AAAlign, AAAlignImpl>; |
5502 | AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) |
5503 | : Base(IRP, A) {} |
5504 | |
5505 | /// See AbstractAttribute::trackStatistics() |
5506 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } |
5507 | }; |
5508 | } // namespace |
5509 | |
5510 | /// ------------------ Function No-Return Attribute ---------------------------- |
5511 | namespace { |
5512 | struct AANoReturnImpl : public AANoReturn { |
5513 | AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} |
5514 | |
5515 | /// See AbstractAttribute::initialize(...). |
5516 | void initialize(Attributor &A) override { |
5517 | bool IsKnown; |
5518 | assert(!AA::hasAssumedIRAttr<Attribute::NoReturn>( |
5519 | A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); |
5520 | (void)IsKnown; |
5521 | } |
5522 | |
5523 | /// See AbstractAttribute::getAsStr(). |
5524 | const std::string getAsStr(Attributor *A) const override { |
5525 | return getAssumed() ? "noreturn" : "may-return" ; |
5526 | } |
5527 | |
5528 | /// See AbstractAttribute::updateImpl(Attributor &A). |
5529 | ChangeStatus updateImpl(Attributor &A) override { |
5530 | auto CheckForNoReturn = [](Instruction &) { return false; }; |
5531 | bool UsedAssumedInformation = false; |
5532 | if (!A.checkForAllInstructions(Pred: CheckForNoReturn, QueryingAA: *this, |
5533 | Opcodes: {(unsigned)Instruction::Ret}, |
5534 | UsedAssumedInformation)) |
5535 | return indicatePessimisticFixpoint(); |
5536 | return ChangeStatus::UNCHANGED; |
5537 | } |
5538 | }; |
5539 | |
5540 | struct AANoReturnFunction final : AANoReturnImpl { |
5541 | AANoReturnFunction(const IRPosition &IRP, Attributor &A) |
5542 | : AANoReturnImpl(IRP, A) {} |
5543 | |
5544 | /// See AbstractAttribute::trackStatistics() |
5545 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) } |
5546 | }; |
5547 | |
/// NoReturn attribute deduction for call sites.
5549 | struct AANoReturnCallSite final |
5550 | : AACalleeToCallSite<AANoReturn, AANoReturnImpl> { |
5551 | AANoReturnCallSite(const IRPosition &IRP, Attributor &A) |
5552 | : AACalleeToCallSite<AANoReturn, AANoReturnImpl>(IRP, A) {} |
5553 | |
5554 | /// See AbstractAttribute::trackStatistics() |
5555 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); } |
5556 | }; |
5557 | } // namespace |
5558 | |
5559 | /// ----------------------- Instance Info --------------------------------- |
5560 | |
5561 | namespace { |
/// A class to hold the state of instance info attributes.
5563 | struct AAInstanceInfoImpl : public AAInstanceInfo { |
5564 | AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A) |
5565 | : AAInstanceInfo(IRP, A) {} |
5566 | |
5567 | /// See AbstractAttribute::initialize(...). |
5568 | void initialize(Attributor &A) override { |
5569 | Value &V = getAssociatedValue(); |
5570 | if (auto *C = dyn_cast<Constant>(Val: &V)) { |
5571 | if (C->isThreadDependent()) |
5572 | indicatePessimisticFixpoint(); |
5573 | else |
5574 | indicateOptimisticFixpoint(); |
5575 | return; |
5576 | } |
5577 | if (auto *CB = dyn_cast<CallBase>(Val: &V)) |
5578 | if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() && |
5579 | !CB->mayReadFromMemory()) { |
5580 | indicateOptimisticFixpoint(); |
5581 | return; |
5582 | } |
5583 | if (auto *I = dyn_cast<Instruction>(Val: &V)) { |
5584 | const auto *CI = |
5585 | A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>( |
5586 | F: *I->getFunction()); |
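// An instruction inside a cycle may be executed repeatedly, e.g., a
// malloc call in a loop returns a distinct object per iteration, so its
// result is not a unique instance for the analysis.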
5587 | if (mayBeInCycle(CI, I, /* HeaderOnly */ false)) { |
5588 | indicatePessimisticFixpoint(); |
5589 | return; |
5590 | } |
5591 | } |
5592 | } |
5593 | |
5594 | /// See AbstractAttribute::updateImpl(...). |
5595 | ChangeStatus updateImpl(Attributor &A) override { |
5596 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
5597 | |
5598 | Value &V = getAssociatedValue(); |
5599 | const Function *Scope = nullptr; |
5600 | if (auto *I = dyn_cast<Instruction>(Val: &V)) |
5601 | Scope = I->getFunction(); |
5602 | if (auto *A = dyn_cast<Argument>(Val: &V)) { |
5603 | Scope = A->getParent(); |
5604 | if (!Scope->hasLocalLinkage()) |
5605 | return Changed; |
5606 | } |
5607 | if (!Scope) |
5608 | return indicateOptimisticFixpoint(); |
5609 | |
5610 | bool IsKnownNoRecurse; |
5611 | if (AA::hasAssumedIRAttr<Attribute::NoRecurse>( |
5612 | A, QueryingAA: this, IRP: IRPosition::function(F: *Scope), DepClass: DepClassTy::OPTIONAL, |
5613 | IsKnown&: IsKnownNoRecurse)) |
5614 | return Changed; |
5615 | |
5616 | auto UsePred = [&](const Use &U, bool &Follow) { |
5617 | const Instruction *UserI = dyn_cast<Instruction>(Val: U.getUser()); |
5618 | if (!UserI || isa<GetElementPtrInst>(Val: UserI) || isa<CastInst>(Val: UserI) || |
5619 | isa<PHINode>(Val: UserI) || isa<SelectInst>(Val: UserI)) { |
5620 | Follow = true; |
5621 | return true; |
5622 | } |
5623 | if (isa<LoadInst>(Val: UserI) || isa<CmpInst>(Val: UserI) || |
5624 | (isa<StoreInst>(Val: UserI) && |
5625 | cast<StoreInst>(Val: UserI)->getValueOperand() != U.get())) |
5626 | return true; |
5627 | if (auto *CB = dyn_cast<CallBase>(Val: UserI)) { |
// This check does not guarantee uniqueness, but for now it ensures we
// cannot end up with two versions of \p U mistaken for one.
5630 | auto *Callee = dyn_cast_if_present<Function>(Val: CB->getCalledOperand()); |
5631 | if (!Callee || !Callee->hasLocalLinkage()) |
5632 | return true; |
5633 | if (!CB->isArgOperand(U: &U)) |
5634 | return false; |
5635 | const auto *ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>( |
5636 | QueryingAA: *this, IRP: IRPosition::callsite_argument(CB: *CB, ArgNo: CB->getArgOperandNo(U: &U)), |
5637 | DepClass: DepClassTy::OPTIONAL); |
5638 | if (!ArgInstanceInfoAA || |
5639 | !ArgInstanceInfoAA->isAssumedUniqueForAnalysis()) |
5640 | return false; |
5641 | // If this call base might reach the scope again we might forward the |
5642 | // argument back here. This is very conservative. |
5643 | if (AA::isPotentiallyReachable( |
5644 | A, FromI: *CB, ToFn: *Scope, QueryingAA: *this, /* ExclusionSet */ nullptr, |
5645 | GoBackwardsCB: [Scope](const Function &Fn) { return &Fn != Scope; })) |
5646 | return false; |
5647 | return true; |
5648 | } |
5649 | return false; |
5650 | }; |
5651 | |
5652 | auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) { |
5653 | if (auto *SI = dyn_cast<StoreInst>(Val: OldU.getUser())) { |
5654 | auto *Ptr = SI->getPointerOperand()->stripPointerCasts(); |
5655 | if ((isa<AllocaInst>(Val: Ptr) || isNoAliasCall(V: Ptr)) && |
5656 | AA::isDynamicallyUnique(A, QueryingAA: *this, V: *Ptr)) |
5657 | return true; |
5658 | } |
5659 | return false; |
5660 | }; |
5661 | |
5662 | if (!A.checkForAllUses(Pred: UsePred, QueryingAA: *this, V, /* CheckBBLivenessOnly */ true, |
5663 | LivenessDepClass: DepClassTy::OPTIONAL, |
5664 | /* IgnoreDroppableUses */ true, EquivalentUseCB)) |
5665 | return indicatePessimisticFixpoint(); |
5666 | |
5667 | return Changed; |
5668 | } |
5669 | |
5670 | /// See AbstractState::getAsStr(). |
5671 | const std::string getAsStr(Attributor *A) const override { |
5672 | return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>" ; |
5673 | } |
5674 | |
5675 | /// See AbstractAttribute::trackStatistics() |
5676 | void trackStatistics() const override {} |
5677 | }; |
5678 | |
5679 | /// InstanceInfo attribute for floating values. |
5680 | struct AAInstanceInfoFloating : AAInstanceInfoImpl { |
5681 | AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A) |
5682 | : AAInstanceInfoImpl(IRP, A) {} |
5683 | }; |
5684 | |
/// InstanceInfo attribute for function arguments.
5686 | struct AAInstanceInfoArgument final : AAInstanceInfoFloating { |
5687 | AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A) |
5688 | : AAInstanceInfoFloating(IRP, A) {} |
5689 | }; |
5690 | |
5691 | /// InstanceInfo attribute for call site arguments. |
5692 | struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl { |
5693 | AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A) |
5694 | : AAInstanceInfoImpl(IRP, A) {} |
5695 | |
5696 | /// See AbstractAttribute::updateImpl(...). |
5697 | ChangeStatus updateImpl(Attributor &A) override { |
5698 | // TODO: Once we have call site specific value information we can provide |
5699 | // call site specific liveness information and then it makes |
// sense to specialize attributes for call site arguments instead of
5701 | // redirecting requests to the callee argument. |
5702 | Argument *Arg = getAssociatedArgument(); |
5703 | if (!Arg) |
5704 | return indicatePessimisticFixpoint(); |
5705 | const IRPosition &ArgPos = IRPosition::argument(Arg: *Arg); |
5706 | auto *ArgAA = |
5707 | A.getAAFor<AAInstanceInfo>(QueryingAA: *this, IRP: ArgPos, DepClass: DepClassTy::REQUIRED); |
5708 | if (!ArgAA) |
5709 | return indicatePessimisticFixpoint(); |
5710 | return clampStateAndIndicateChange(S&: getState(), R: ArgAA->getState()); |
5711 | } |
5712 | }; |
5713 | |
5714 | /// InstanceInfo attribute for function return value. |
5715 | struct AAInstanceInfoReturned final : AAInstanceInfoImpl { |
5716 | AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A) |
5717 | : AAInstanceInfoImpl(IRP, A) { |
5718 | llvm_unreachable("InstanceInfo is not applicable to function returns!" ); |
5719 | } |
5720 | |
5721 | /// See AbstractAttribute::initialize(...). |
5722 | void initialize(Attributor &A) override { |
5723 | llvm_unreachable("InstanceInfo is not applicable to function returns!" ); |
5724 | } |
5725 | |
5726 | /// See AbstractAttribute::updateImpl(...). |
5727 | ChangeStatus updateImpl(Attributor &A) override { |
5728 | llvm_unreachable("InstanceInfo is not applicable to function returns!" ); |
5729 | } |
5730 | }; |
5731 | |
5732 | /// InstanceInfo attribute deduction for a call site return value. |
5733 | struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating { |
5734 | AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A) |
5735 | : AAInstanceInfoFloating(IRP, A) {} |
5736 | }; |
5737 | } // namespace |
5738 | |
5739 | /// ----------------------- Variable Capturing --------------------------------- |
5740 | bool AANoCapture::isImpliedByIR(Attributor &A, const IRPosition &IRP, |
5741 | Attribute::AttrKind ImpliedAttributeKind, |
5742 | bool IgnoreSubsumingPositions) { |
5743 | assert(ImpliedAttributeKind == Attribute::Captures && |
5744 | "Unexpected attribute kind" ); |
5745 | Value &V = IRP.getAssociatedValue(); |
5746 | if (!isa<Constant>(Val: V) && !IRP.isArgumentPosition()) |
5747 | return V.use_empty(); |
5748 | |
5749 | // You cannot "capture" null in the default address space. |
5750 | // |
5751 | // FIXME: This should use NullPointerIsDefined to account for the function |
5752 | // attribute. |
5753 | if (isa<UndefValue>(Val: V) || (isa<ConstantPointerNull>(Val: V) && |
5754 | V.getType()->getPointerAddressSpace() == 0)) { |
5755 | return true; |
5756 | } |
5757 | |
5758 | SmallVector<Attribute, 1> Attrs; |
5759 | A.getAttrs(IRP, AKs: {Attribute::Captures}, Attrs, |
5760 | /* IgnoreSubsumingPositions */ true); |
5761 | for (const Attribute &Attr : Attrs) |
5762 | if (capturesNothing(CC: Attr.getCaptureInfo())) |
5763 | return true; |
5764 | |
5765 | if (IRP.getPositionKind() == IRP_CALL_SITE_ARGUMENT) |
5766 | if (Argument *Arg = IRP.getAssociatedArgument()) { |
5767 | SmallVector<Attribute, 1> Attrs; |
5768 | A.getAttrs(IRP: IRPosition::argument(Arg: *Arg), |
5769 | AKs: {Attribute::Captures, Attribute::ByVal}, Attrs, |
5770 | /* IgnoreSubsumingPositions */ true); |
5771 | bool ArgNoCapture = any_of(Range&: Attrs, P: [](Attribute Attr) { |
5772 | return Attr.getKindAsEnum() == Attribute::ByVal || |
5773 | capturesNothing(CC: Attr.getCaptureInfo()); |
5774 | }); |
5775 | if (ArgNoCapture) { |
5776 | A.manifestAttrs(IRP, DeducedAttrs: Attribute::getWithCaptureInfo( |
5777 | Context&: V.getContext(), CI: CaptureInfo::none())); |
5778 | return true; |
5779 | } |
5780 | } |
5781 | |
5782 | if (const Function *F = IRP.getAssociatedFunction()) { |
5783 | // Check what state the associated function can actually capture. |
5784 | AANoCapture::StateType State; |
5785 | determineFunctionCaptureCapabilities(IRP, F: *F, State); |
5786 | if (State.isKnown(BitsEncoding: NO_CAPTURE)) { |
5787 | A.manifestAttrs(IRP, DeducedAttrs: Attribute::getWithCaptureInfo(Context&: V.getContext(), |
5788 | CI: CaptureInfo::none())); |
5789 | return true; |
5790 | } |
5791 | } |
5792 | |
5793 | return false; |
5794 | } |
5795 | |
5796 | /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known |
5797 | /// depending on the ability of the function associated with \p IRP to capture |
5798 | /// state in memory and through "returning/throwing", respectively. |
5799 | void AANoCapture::determineFunctionCaptureCapabilities(const IRPosition &IRP, |
5800 | const Function &F, |
5801 | BitIntegerState &State) { |
5802 | // TODO: Once we have memory behavior attributes we should use them here. |
5803 | |
5804 | // If we know we cannot communicate or write to memory, we do not care about |
5805 | // ptr2int anymore. |
5806 | bool ReadOnly = F.onlyReadsMemory(); |
5807 | bool NoThrow = F.doesNotThrow(); |
5808 | bool IsVoidReturn = F.getReturnType()->isVoidTy(); |
5809 | if (ReadOnly && NoThrow && IsVoidReturn) { |
5810 | State.addKnownBits(Bits: NO_CAPTURE); |
5811 | return; |
5812 | } |
5813 | |
5814 | // A function cannot capture state in memory if it only reads memory, it can |
5815 | // however return/throw state and the state might be influenced by the |
5816 | // pointer value, e.g., loading from a returned pointer might reveal a bit. |
5817 | if (ReadOnly) |
5818 | State.addKnownBits(Bits: NOT_CAPTURED_IN_MEM); |
5819 | |
// A function cannot communicate state back if it does not throw
// exceptions and does not return values.
5822 | if (NoThrow && IsVoidReturn) |
5823 | State.addKnownBits(Bits: NOT_CAPTURED_IN_RET); |
5824 | |
5825 | // Check existing "returned" attributes. |
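// E.g. (illustrative IR), in `define ptr @f(ptr returned %p) nounwind`
// the argument %p escapes only through the return value.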
5826 | int ArgNo = IRP.getCalleeArgNo(); |
5827 | if (!NoThrow || ArgNo < 0 || |
5828 | !F.getAttributes().hasAttrSomewhere(Kind: Attribute::Returned)) |
5829 | return; |
5830 | |
5831 | for (unsigned U = 0, E = F.arg_size(); U < E; ++U) |
5832 | if (F.hasParamAttribute(ArgNo: U, Kind: Attribute::Returned)) { |
5833 | if (U == unsigned(ArgNo)) |
5834 | State.removeAssumedBits(BitsEncoding: NOT_CAPTURED_IN_RET); |
5835 | else if (ReadOnly) |
5836 | State.addKnownBits(Bits: NO_CAPTURE); |
5837 | else |
5838 | State.addKnownBits(Bits: NOT_CAPTURED_IN_RET); |
5839 | break; |
5840 | } |
5841 | } |
5842 | |
5843 | namespace { |
/// A class to hold the state of no-capture attributes.
5845 | struct AANoCaptureImpl : public AANoCapture { |
5846 | AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {} |
5847 | |
5848 | /// See AbstractAttribute::initialize(...). |
5849 | void initialize(Attributor &A) override { |
5850 | bool IsKnown; |
5851 | assert(!AA::hasAssumedIRAttr<Attribute::Captures>( |
5852 | A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); |
5853 | (void)IsKnown; |
5854 | } |
5855 | |
5856 | /// See AbstractAttribute::updateImpl(...). |
5857 | ChangeStatus updateImpl(Attributor &A) override; |
5858 | |
/// See AbstractAttribute::getDeducedAttributes(...).
5860 | void getDeducedAttributes(Attributor &A, LLVMContext &Ctx, |
5861 | SmallVectorImpl<Attribute> &Attrs) const override { |
5862 | if (!isAssumedNoCaptureMaybeReturned()) |
5863 | return; |
5864 | |
5865 | if (isArgumentPosition()) { |
5866 | if (isAssumedNoCapture()) |
5867 | Attrs.emplace_back(Args: Attribute::get(Context&: Ctx, Kind: Attribute::Captures)); |
5868 | else if (ManifestInternal) |
5869 | Attrs.emplace_back(Args: Attribute::get(Context&: Ctx, Kind: "no-capture-maybe-returned" )); |
5870 | } |
5871 | } |
5872 | |
5873 | /// See AbstractState::getAsStr(). |
5874 | const std::string getAsStr(Attributor *A) const override { |
5875 | if (isKnownNoCapture()) |
5876 | return "known not-captured" ; |
5877 | if (isAssumedNoCapture()) |
5878 | return "assumed not-captured" ; |
5879 | if (isKnownNoCaptureMaybeReturned()) |
5880 | return "known not-captured-maybe-returned" ; |
5881 | if (isAssumedNoCaptureMaybeReturned()) |
5882 | return "assumed not-captured-maybe-returned" ; |
5883 | return "assumed-captured" ; |
5884 | } |
5885 | |
5886 | /// Check the use \p U and update \p State accordingly. Return true if we |
5887 | /// should continue to update the state. |
5888 | bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U, |
5889 | bool &Follow) { |
5890 | Instruction *UInst = cast<Instruction>(Val: U.getUser()); |
5891 | LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in " |
5892 | << *UInst << "\n" ); |
5893 | |
5894 | // Deal with ptr2int by following uses. |
5895 | if (isa<PtrToIntInst>(Val: UInst)) { |
5896 | LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n" ); |
5897 | return isCapturedIn(State, /* Memory */ CapturedInMem: true, /* Integer */ CapturedInInt: true, |
5898 | /* Return */ CapturedInRet: true); |
5899 | } |
5900 | |
5901 | // For stores we already checked if we can follow them, if they make it |
5902 | // here we give up. |
5903 | if (isa<StoreInst>(Val: UInst)) |
5904 | return isCapturedIn(State, /* Memory */ CapturedInMem: true, /* Integer */ CapturedInInt: true, |
5905 | /* Return */ CapturedInRet: true); |
5906 | |
5907 | // Explicitly catch return instructions. |
5908 | if (isa<ReturnInst>(Val: UInst)) { |
5909 | if (UInst->getFunction() == getAnchorScope()) |
5910 | return isCapturedIn(State, /* Memory */ CapturedInMem: false, /* Integer */ CapturedInInt: false, |
5911 | /* Return */ CapturedInRet: true); |
5912 | return isCapturedIn(State, /* Memory */ CapturedInMem: true, /* Integer */ CapturedInInt: true, |
5913 | /* Return */ CapturedInRet: true); |
5914 | } |
5915 | |
5916 | // For now we only use special logic for call sites. However, the tracker |
5917 | // itself knows about a lot of other non-capturing cases already. |
5918 | auto *CB = dyn_cast<CallBase>(Val: UInst); |
5919 | if (!CB || !CB->isArgOperand(U: &U)) |
5920 | return isCapturedIn(State, /* Memory */ CapturedInMem: true, /* Integer */ CapturedInInt: true, |
5921 | /* Return */ CapturedInRet: true); |
5922 | |
5923 | unsigned ArgNo = CB->getArgOperandNo(U: &U); |
5924 | const IRPosition &CSArgPos = IRPosition::callsite_argument(CB: *CB, ArgNo); |
// If we have an abstract no-capture attribute for the argument we can use
5926 | // it to justify a non-capture attribute here. This allows recursion! |
5927 | bool IsKnownNoCapture; |
5928 | const AANoCapture *ArgNoCaptureAA = nullptr; |
5929 | bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>( |
5930 | A, QueryingAA: this, IRP: CSArgPos, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoCapture, IgnoreSubsumingPositions: false, |
5931 | AAPtr: &ArgNoCaptureAA); |
5932 | if (IsAssumedNoCapture) |
5933 | return isCapturedIn(State, /* Memory */ CapturedInMem: false, /* Integer */ CapturedInInt: false, |
5934 | /* Return */ CapturedInRet: false); |
5935 | if (ArgNoCaptureAA && ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned()) { |
5936 | Follow = true; |
5937 | return isCapturedIn(State, /* Memory */ CapturedInMem: false, /* Integer */ CapturedInInt: false, |
5938 | /* Return */ CapturedInRet: false); |
5939 | } |
5940 | |
// Lastly, we could not find a reason to assume no-capture, so we give up.
5942 | return isCapturedIn(State, /* Memory */ CapturedInMem: true, /* Integer */ CapturedInInt: true, |
5943 | /* Return */ CapturedInRet: true); |
5944 | } |
5945 | |
5946 | /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and |
5947 | /// \p CapturedInRet, then return true if we should continue updating the |
5948 | /// state. |
5949 | static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem, |
5950 | bool CapturedInInt, bool CapturedInRet) { |
5951 | LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " |
5952 | << CapturedInInt << "|Ret " << CapturedInRet << "]\n" ); |
5953 | if (CapturedInMem) |
5954 | State.removeAssumedBits(BitsEncoding: AANoCapture::NOT_CAPTURED_IN_MEM); |
5955 | if (CapturedInInt) |
5956 | State.removeAssumedBits(BitsEncoding: AANoCapture::NOT_CAPTURED_IN_INT); |
5957 | if (CapturedInRet) |
5958 | State.removeAssumedBits(BitsEncoding: AANoCapture::NOT_CAPTURED_IN_RET); |
5959 | return State.isAssumed(BitsEncoding: AANoCapture::NO_CAPTURE_MAYBE_RETURNED); |
5960 | } |
5961 | }; |
5962 | |
5963 | ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { |
5964 | const IRPosition &IRP = getIRPosition(); |
5965 | Value *V = isArgumentPosition() ? IRP.getAssociatedArgument() |
5966 | : &IRP.getAssociatedValue(); |
5967 | if (!V) |
5968 | return indicatePessimisticFixpoint(); |
5969 | |
5970 | const Function *F = |
5971 | isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); |
5972 | |
5973 | // TODO: Is the checkForAllUses below useful for constants? |
5974 | if (!F) |
5975 | return indicatePessimisticFixpoint(); |
5976 | |
5977 | AANoCapture::StateType T; |
5978 | const IRPosition &FnPos = IRPosition::function(F: *F); |
5979 | |
5980 | // Readonly means we cannot capture through memory. |
5981 | bool IsKnown; |
5982 | if (AA::isAssumedReadOnly(A, IRP: FnPos, QueryingAA: *this, IsKnown)) { |
5983 | T.addKnownBits(Bits: NOT_CAPTURED_IN_MEM); |
5984 | if (IsKnown) |
5985 | addKnownBits(Bits: NOT_CAPTURED_IN_MEM); |
5986 | } |
5987 | |
// Make sure all returned values are different from the underlying value.
5989 | // TODO: we could do this in a more sophisticated way inside |
5990 | // AAReturnedValues, e.g., track all values that escape through returns |
5991 | // directly somehow. |
5992 | auto CheckReturnedArgs = [&](bool &UsedAssumedInformation) { |
5993 | SmallVector<AA::ValueAndContext> Values; |
5994 | if (!A.getAssumedSimplifiedValues(IRP: IRPosition::returned(F: *F), AA: this, Values, |
5995 | S: AA::ValueScope::Intraprocedural, |
5996 | UsedAssumedInformation)) |
5997 | return false; |
5998 | bool SeenConstant = false; |
5999 | for (const AA::ValueAndContext &VAC : Values) { |
6000 | if (isa<Constant>(Val: VAC.getValue())) { |
6001 | if (SeenConstant) |
6002 | return false; |
6003 | SeenConstant = true; |
6004 | } else if (!isa<Argument>(Val: VAC.getValue()) || |
6005 | VAC.getValue() == getAssociatedArgument()) |
6006 | return false; |
6007 | } |
6008 | return true; |
6009 | }; |
6010 | |
6011 | bool IsKnownNoUnwind; |
6012 | if (AA::hasAssumedIRAttr<Attribute::NoUnwind>( |
6013 | A, QueryingAA: this, IRP: FnPos, DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoUnwind)) { |
6014 | bool IsVoidTy = F->getReturnType()->isVoidTy(); |
6015 | bool UsedAssumedInformation = false; |
6016 | if (IsVoidTy || CheckReturnedArgs(UsedAssumedInformation)) { |
6017 | T.addKnownBits(Bits: NOT_CAPTURED_IN_RET); |
6018 | if (T.isKnown(BitsEncoding: NOT_CAPTURED_IN_MEM)) |
6019 | return ChangeStatus::UNCHANGED; |
6020 | if (IsKnownNoUnwind && (IsVoidTy || !UsedAssumedInformation)) { |
6021 | addKnownBits(Bits: NOT_CAPTURED_IN_RET); |
6022 | if (isKnown(BitsEncoding: NOT_CAPTURED_IN_MEM)) |
6023 | return indicateOptimisticFixpoint(); |
6024 | } |
6025 | } |
6026 | } |
6027 | |
6028 | auto UseCheck = [&](const Use &U, bool &Follow) -> bool { |
6029 | // TODO(captures): Make this more precise. |
6030 | UseCaptureInfo CI = DetermineUseCaptureKind(U, /*Base=*/nullptr); |
6031 | if (capturesNothing(CC: CI)) |
6032 | return true; |
6033 | if (CI.isPassthrough()) { |
6034 | Follow = true; |
6035 | return true; |
6036 | } |
6037 | return checkUse(A, State&: T, U, Follow); |
6038 | }; |
6039 | |
6040 | if (!A.checkForAllUses(Pred: UseCheck, QueryingAA: *this, V: *V)) |
6041 | return indicatePessimisticFixpoint(); |
6042 | |
6043 | AANoCapture::StateType &S = getState(); |
6044 | auto Assumed = S.getAssumed(); |
6045 | S.intersectAssumedBits(BitsEncoding: T.getAssumed()); |
6046 | if (!isAssumedNoCaptureMaybeReturned()) |
6047 | return indicatePessimisticFixpoint(); |
6048 | return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED |
6049 | : ChangeStatus::CHANGED; |
6050 | } |
6051 | |
6052 | /// NoCapture attribute for function arguments. |
6053 | struct AANoCaptureArgument final : AANoCaptureImpl { |
6054 | AANoCaptureArgument(const IRPosition &IRP, Attributor &A) |
6055 | : AANoCaptureImpl(IRP, A) {} |
6056 | |
6057 | /// See AbstractAttribute::trackStatistics() |
6058 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } |
6059 | }; |
6060 | |
6061 | /// NoCapture attribute for call site arguments. |
6062 | struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { |
6063 | AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) |
6064 | : AANoCaptureImpl(IRP, A) {} |
6065 | |
6066 | /// See AbstractAttribute::updateImpl(...). |
6067 | ChangeStatus updateImpl(Attributor &A) override { |
6068 | // TODO: Once we have call site specific value information we can provide |
6069 | // call site specific liveness information and then it makes |
// sense to specialize attributes for call site arguments instead of
6071 | // redirecting requests to the callee argument. |
6072 | Argument *Arg = getAssociatedArgument(); |
6073 | if (!Arg) |
6074 | return indicatePessimisticFixpoint(); |
6075 | const IRPosition &ArgPos = IRPosition::argument(Arg: *Arg); |
6076 | bool IsKnownNoCapture; |
6077 | const AANoCapture *ArgAA = nullptr; |
6078 | if (AA::hasAssumedIRAttr<Attribute::Captures>( |
6079 | A, QueryingAA: this, IRP: ArgPos, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoCapture, IgnoreSubsumingPositions: false, |
6080 | AAPtr: &ArgAA)) |
6081 | return ChangeStatus::UNCHANGED; |
6082 | if (!ArgAA || !ArgAA->isAssumedNoCaptureMaybeReturned()) |
6083 | return indicatePessimisticFixpoint(); |
6084 | return clampStateAndIndicateChange(S&: getState(), R: ArgAA->getState()); |
6085 | } |
6086 | |
6087 | /// See AbstractAttribute::trackStatistics() |
6088 | void trackStatistics() const override { |
6089 | STATS_DECLTRACK_CSARG_ATTR(nocapture) |
6090 | }; |
6091 | }; |
6092 | |
6093 | /// NoCapture attribute for floating values. |
6094 | struct AANoCaptureFloating final : AANoCaptureImpl { |
6095 | AANoCaptureFloating(const IRPosition &IRP, Attributor &A) |
6096 | : AANoCaptureImpl(IRP, A) {} |
6097 | |
6098 | /// See AbstractAttribute::trackStatistics() |
6099 | void trackStatistics() const override { |
6100 | STATS_DECLTRACK_FLOATING_ATTR(nocapture) |
6101 | } |
6102 | }; |
6103 | |
6104 | /// NoCapture attribute for function return value. |
6105 | struct AANoCaptureReturned final : AANoCaptureImpl { |
6106 | AANoCaptureReturned(const IRPosition &IRP, Attributor &A) |
6107 | : AANoCaptureImpl(IRP, A) { |
6108 | llvm_unreachable("NoCapture is not applicable to function returns!" ); |
6109 | } |
6110 | |
6111 | /// See AbstractAttribute::initialize(...). |
6112 | void initialize(Attributor &A) override { |
6113 | llvm_unreachable("NoCapture is not applicable to function returns!" ); |
6114 | } |
6115 | |
6116 | /// See AbstractAttribute::updateImpl(...). |
6117 | ChangeStatus updateImpl(Attributor &A) override { |
6118 | llvm_unreachable("NoCapture is not applicable to function returns!" ); |
6119 | } |
6120 | |
6121 | /// See AbstractAttribute::trackStatistics() |
6122 | void trackStatistics() const override {} |
6123 | }; |
6124 | |
6125 | /// NoCapture attribute deduction for a call site return value. |
6126 | struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { |
6127 | AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) |
6128 | : AANoCaptureImpl(IRP, A) {} |
6129 | |
6130 | /// See AbstractAttribute::initialize(...). |
6131 | void initialize(Attributor &A) override { |
6132 | const Function *F = getAnchorScope(); |
6133 | // Check what state the associated function can actually capture. |
6134 | determineFunctionCaptureCapabilities(IRP: getIRPosition(), F: *F, State&: *this); |
6135 | } |
6136 | |
6137 | /// See AbstractAttribute::trackStatistics() |
6138 | void trackStatistics() const override { |
6139 | STATS_DECLTRACK_CSRET_ATTR(nocapture) |
6140 | } |
6141 | }; |
6142 | } // namespace |
6143 | |
6144 | /// ------------------ Value Simplify Attribute ---------------------------- |
6145 | |
6146 | bool ValueSimplifyStateType::unionAssumed(std::optional<Value *> Other) { |
// FIXME: Add typecast support.
6148 | SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( |
6149 | A: SimplifiedAssociatedValue, B: Other, Ty); |
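// Note: std::nullopt means no value has been determined yet, while a
// contained nullptr means the two values could not be combined, i.e., the
// union failed.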
6150 | if (SimplifiedAssociatedValue == std::optional<Value *>(nullptr)) |
6151 | return false; |
6152 | |
6153 | LLVM_DEBUG({ |
6154 | if (SimplifiedAssociatedValue) |
6155 | dbgs() << "[ValueSimplify] is assumed to be " |
6156 | << **SimplifiedAssociatedValue << "\n" ; |
6157 | else |
6158 | dbgs() << "[ValueSimplify] is assumed to be <none>\n" ; |
6159 | }); |
6160 | return true; |
6161 | } |
6162 | |
6163 | namespace { |
6164 | struct AAValueSimplifyImpl : AAValueSimplify { |
6165 | AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) |
6166 | : AAValueSimplify(IRP, A) {} |
6167 | |
6168 | /// See AbstractAttribute::initialize(...). |
6169 | void initialize(Attributor &A) override { |
6170 | if (getAssociatedValue().getType()->isVoidTy()) |
6171 | indicatePessimisticFixpoint(); |
6172 | if (A.hasSimplificationCallback(IRP: getIRPosition())) |
6173 | indicatePessimisticFixpoint(); |
6174 | } |
6175 | |
6176 | /// See AbstractAttribute::getAsStr(). |
6177 | const std::string getAsStr(Attributor *A) const override { |
6178 | LLVM_DEBUG({ |
6179 | dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " " ; |
6180 | if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue) |
6181 | dbgs() << "SAV: " << **SimplifiedAssociatedValue << " " ; |
6182 | }); |
6183 | return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple" ) |
6184 | : "not-simple" ; |
6185 | } |
6186 | |
6187 | /// See AbstractAttribute::trackStatistics() |
6188 | void trackStatistics() const override {} |
6189 | |
6190 | /// See AAValueSimplify::getAssumedSimplifiedValue() |
6191 | std::optional<Value *> |
6192 | getAssumedSimplifiedValue(Attributor &A) const override { |
6193 | return SimplifiedAssociatedValue; |
6194 | } |
6195 | |
/// Ensure the return value is \p V with type \p Ty; if that is not possible,
/// return nullptr. If \p Check is true we will only verify such an operation
/// would succeed and return a non-nullptr value if that is the case. No IR is
6199 | /// generated or modified. |
6200 | static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI, |
6201 | bool Check) { |
6202 | if (auto *TypedV = AA::getWithType(V, Ty)) |
6203 | return TypedV; |
6204 | if (CtxI && V.getType()->canLosslesslyBitCastTo(Ty: &Ty)) |
6205 | return Check ? &V |
6206 | : BitCastInst::CreatePointerBitCastOrAddrSpaceCast( |
6207 | S: &V, Ty: &Ty, Name: "" , InsertBefore: CtxI->getIterator()); |
6208 | return nullptr; |
6209 | } |
6210 | |
/// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
/// If \p Check is true we will only verify such an operation would succeed and
6213 | /// return a non-nullptr value if that is the case. No IR is generated or |
6214 | /// modified. |
6215 | static Value *reproduceInst(Attributor &A, |
6216 | const AbstractAttribute &QueryingAA, |
6217 | Instruction &I, Type &Ty, Instruction *CtxI, |
6218 | bool Check, ValueToValueMapTy &VMap) { |
6219 | assert(CtxI && "Cannot reproduce an instruction without context!" ); |
if (Check && (I.mayReadFromMemory() ||
!isSafeToSpeculativelyExecute(I: &I, CtxI, /* AC */ AC: nullptr,
/* DT */ DT: nullptr)))
6223 | return nullptr; |
6224 | for (Value *Op : I.operands()) { |
6225 | Value *NewOp = reproduceValue(A, QueryingAA, V&: *Op, Ty, CtxI, Check, VMap); |
6226 | if (!NewOp) { |
6227 | assert(Check && "Manifest of new value unexpectedly failed!" ); |
6228 | return nullptr; |
6229 | } |
6230 | if (!Check) |
6231 | VMap[Op] = NewOp; |
6232 | } |
6233 | if (Check) |
6234 | return &I; |
6235 | |
6236 | Instruction *CloneI = I.clone(); |
6237 | // TODO: Try to salvage debug information here. |
6238 | CloneI->setDebugLoc(DebugLoc()); |
6239 | VMap[&I] = CloneI; |
6240 | CloneI->insertBefore(InsertPos: CtxI->getIterator()); |
6241 | RemapInstruction(I: CloneI, VM&: VMap); |
6242 | return CloneI; |
6243 | } |
6244 | |
/// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
/// If \p Check is true we will only verify such an operation would succeed and
6247 | /// return a non-nullptr value if that is the case. No IR is generated or |
6248 | /// modified. |
6249 | static Value *reproduceValue(Attributor &A, |
6250 | const AbstractAttribute &QueryingAA, Value &V, |
6251 | Type &Ty, Instruction *CtxI, bool Check, |
6252 | ValueToValueMapTy &VMap) { |
6253 | if (const auto &NewV = VMap.lookup(Val: &V)) |
6254 | return NewV; |
6255 | bool UsedAssumedInformation = false; |
6256 | std::optional<Value *> SimpleV = A.getAssumedSimplified( |
6257 | V, AA: QueryingAA, UsedAssumedInformation, S: AA::Interprocedural); |
6258 | if (!SimpleV.has_value()) |
6259 | return PoisonValue::get(T: &Ty); |
6260 | Value *EffectiveV = &V; |
6261 | if (*SimpleV) |
6262 | EffectiveV = *SimpleV; |
6263 | if (auto *C = dyn_cast<Constant>(Val: EffectiveV)) |
6264 | return C; |
6265 | if (CtxI && AA::isValidAtPosition(VAC: AA::ValueAndContext(*EffectiveV, *CtxI), |
6266 | InfoCache&: A.getInfoCache())) |
6267 | return ensureType(A, V&: *EffectiveV, Ty, CtxI, Check); |
6268 | if (auto *I = dyn_cast<Instruction>(Val: EffectiveV)) |
6269 | if (Value *NewV = reproduceInst(A, QueryingAA, I&: *I, Ty, CtxI, Check, VMap)) |
6270 | return ensureType(A, V&: *NewV, Ty, CtxI, Check); |
6271 | return nullptr; |
6272 | } |
6273 | |
6274 | /// Return a value we can use as replacement for the associated one, or |
6275 | /// nullptr if we don't have one that makes sense. |
6276 | Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const { |
6277 | Value *NewV = SimplifiedAssociatedValue |
6278 | ? *SimplifiedAssociatedValue |
6279 | : UndefValue::get(T: getAssociatedType()); |
6280 | if (NewV && NewV != &getAssociatedValue()) { |
6281 | ValueToValueMapTy VMap; |
// First verify we can reproduce the value with the required type at the
6283 | // context location before we actually start modifying the IR. |
6284 | if (reproduceValue(A, QueryingAA: *this, V&: *NewV, Ty&: *getAssociatedType(), CtxI, |
6285 | /* CheckOnly */ Check: true, VMap)) |
6286 | return reproduceValue(A, QueryingAA: *this, V&: *NewV, Ty&: *getAssociatedType(), CtxI, |
6287 | /* CheckOnly */ Check: false, VMap); |
6288 | } |
6289 | return nullptr; |
6290 | } |
6291 | |
6292 | /// Helper function for querying AAValueSimplify and updating candidate. |
6293 | /// \param IRP The value position we are trying to unify with SimplifiedValue |
6294 | bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, |
6295 | const IRPosition &IRP, bool Simplify = true) { |
6296 | bool UsedAssumedInformation = false; |
6297 | std::optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue(); |
6298 | if (Simplify) |
6299 | QueryingValueSimplified = A.getAssumedSimplified( |
6300 | IRP, AA: QueryingAA, UsedAssumedInformation, S: AA::Interprocedural); |
6301 | return unionAssumed(Other: QueryingValueSimplified); |
6302 | } |
6303 | |
/// Returns true if a candidate was found.
6305 | template <typename AAType> bool askSimplifiedValueFor(Attributor &A) { |
6306 | if (!getAssociatedValue().getType()->isIntegerTy()) |
6307 | return false; |
6308 | |
6309 | // This will also pass the call base context. |
6310 | const auto *AA = |
6311 | A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE); |
6312 | if (!AA) |
6313 | return false; |
6314 | |
6315 | std::optional<Constant *> COpt = AA->getAssumedConstant(A); |
6316 | |
6317 | if (!COpt) { |
6318 | SimplifiedAssociatedValue = std::nullopt; |
6319 | A.recordDependence(FromAA: *AA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
6320 | return true; |
6321 | } |
6322 | if (auto *C = *COpt) { |
6323 | SimplifiedAssociatedValue = C; |
6324 | A.recordDependence(FromAA: *AA, ToAA: *this, DepClass: DepClassTy::OPTIONAL); |
6325 | return true; |
6326 | } |
6327 | return false; |
6328 | } |
6329 | |
6330 | bool askSimplifiedValueForOtherAAs(Attributor &A) { |
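// E.g., if the AAValueConstantRange query below proves the associated
// integer is always 4, the constant 4 becomes the simplified value.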
6331 | if (askSimplifiedValueFor<AAValueConstantRange>(A)) |
6332 | return true; |
6333 | if (askSimplifiedValueFor<AAPotentialConstantValues>(A)) |
6334 | return true; |
6335 | return false; |
6336 | } |
6337 | |
6338 | /// See AbstractAttribute::manifest(...). |
6339 | ChangeStatus manifest(Attributor &A) override { |
6340 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
6341 | for (auto &U : getAssociatedValue().uses()) { |
6342 | // Check if we need to adjust the insertion point to make sure the IR is |
6343 | // valid. |
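// E.g., for `%phi = phi ptr [ %v, %bb ]` the replacement value has to be
// materialized at the terminator of %bb, not in front of the PHI.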
6344 | Instruction *IP = dyn_cast<Instruction>(Val: U.getUser()); |
6345 | if (auto *PHI = dyn_cast_or_null<PHINode>(Val: IP)) |
6346 | IP = PHI->getIncomingBlock(U)->getTerminator(); |
6347 | if (auto *NewV = manifestReplacementValue(A, CtxI: IP)) { |
6348 | LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() |
6349 | << " -> " << *NewV << " :: " << *this << "\n" ); |
6350 | if (A.changeUseAfterManifest(U, NV&: *NewV)) |
6351 | Changed = ChangeStatus::CHANGED; |
6352 | } |
6353 | } |
6354 | |
6355 | return Changed | AAValueSimplify::manifest(A); |
6356 | } |
6357 | |
6358 | /// See AbstractState::indicatePessimisticFixpoint(...). |
6359 | ChangeStatus indicatePessimisticFixpoint() override { |
6360 | SimplifiedAssociatedValue = &getAssociatedValue(); |
6361 | return AAValueSimplify::indicatePessimisticFixpoint(); |
6362 | } |
6363 | }; |
6364 | |
6365 | struct AAValueSimplifyArgument final : AAValueSimplifyImpl { |
6366 | AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A) |
6367 | : AAValueSimplifyImpl(IRP, A) {} |
6368 | |
6369 | void initialize(Attributor &A) override { |
6370 | AAValueSimplifyImpl::initialize(A); |
6371 | if (A.hasAttr(IRP: getIRPosition(), |
6372 | AKs: {Attribute::InAlloca, Attribute::Preallocated, |
6373 | Attribute::StructRet, Attribute::Nest, Attribute::ByVal}, |
6374 | /* IgnoreSubsumingPositions */ true)) |
6375 | indicatePessimisticFixpoint(); |
6376 | } |
6377 | |
6378 | /// See AbstractAttribute::updateImpl(...). |
6379 | ChangeStatus updateImpl(Attributor &A) override { |
// Byval is only replaceable if it is readonly, otherwise we would write into
6381 | // the replaced value and not the copy that byval creates implicitly. |
6382 | Argument *Arg = getAssociatedArgument(); |
6383 | if (Arg->hasByValAttr()) { |
6384 | // TODO: We probably need to verify synchronization is not an issue, e.g., |
6385 | // there is no race by not copying a constant byval. |
6386 | bool IsKnown; |
6387 | if (!AA::isAssumedReadOnly(A, IRP: getIRPosition(), QueryingAA: *this, IsKnown)) |
6388 | return indicatePessimisticFixpoint(); |
6389 | } |
6390 | |
6391 | auto Before = SimplifiedAssociatedValue; |
6392 | |
6393 | auto PredForCallSite = [&](AbstractCallSite ACS) { |
6394 | const IRPosition &ACSArgPos = |
6395 | IRPosition::callsite_argument(ACS, ArgNo: getCallSiteArgNo()); |
// Check if a corresponding argument was found or if it is not
// associated (which can happen for callback calls).
6398 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) |
6399 | return false; |
6400 | |
6401 | // Simplify the argument operand explicitly and check if the result is |
// valid in the current scope. This avoids referring to simplified values
// in other functions, e.g., we don't want to say an argument in a
// static function is actually an argument in a different function.
6405 | bool UsedAssumedInformation = false; |
6406 | std::optional<Constant *> SimpleArgOp = |
6407 | A.getAssumedConstant(IRP: ACSArgPos, AA: *this, UsedAssumedInformation); |
6408 | if (!SimpleArgOp) |
6409 | return true; |
6410 | if (!*SimpleArgOp) |
6411 | return false; |
6412 | if (!AA::isDynamicallyUnique(A, QueryingAA: *this, V: **SimpleArgOp)) |
6413 | return false; |
6414 | return unionAssumed(Other: *SimpleArgOp); |
6415 | }; |
6416 | |
// Generate an answer specific to a call site context.
6418 | bool Success; |
6419 | bool UsedAssumedInformation = false; |
6420 | if (hasCallBaseContext() && |
6421 | getCallBaseContext()->getCalledOperand() == Arg->getParent()) |
6422 | Success = PredForCallSite( |
6423 | AbstractCallSite(&getCallBaseContext()->getCalledOperandUse())); |
6424 | else |
6425 | Success = A.checkForAllCallSites(Pred: PredForCallSite, QueryingAA: *this, RequireAllCallSites: true, |
6426 | UsedAssumedInformation); |
6427 | |
6428 | if (!Success) |
6429 | if (!askSimplifiedValueForOtherAAs(A)) |
6430 | return indicatePessimisticFixpoint(); |
6431 | |
6432 | // If a candidate was found in this update, return CHANGED. |
6433 | return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED |
: ChangeStatus::CHANGED;
6435 | } |
6436 | |
6437 | /// See AbstractAttribute::trackStatistics() |
6438 | void trackStatistics() const override { |
6439 | STATS_DECLTRACK_ARG_ATTR(value_simplify) |
6440 | } |
6441 | }; |
6442 | |
6443 | struct AAValueSimplifyReturned : AAValueSimplifyImpl { |
6444 | AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) |
6445 | : AAValueSimplifyImpl(IRP, A) {} |
6446 | |
6447 | /// See AAValueSimplify::getAssumedSimplifiedValue() |
6448 | std::optional<Value *> |
6449 | getAssumedSimplifiedValue(Attributor &A) const override { |
6450 | if (!isValidState()) |
6451 | return nullptr; |
6452 | return SimplifiedAssociatedValue; |
6453 | } |
6454 | |
6455 | /// See AbstractAttribute::updateImpl(...). |
6456 | ChangeStatus updateImpl(Attributor &A) override { |
6457 | auto Before = SimplifiedAssociatedValue; |
6458 | |
6459 | auto ReturnInstCB = [&](Instruction &I) { |
6460 | auto &RI = cast<ReturnInst>(Val&: I); |
6461 | return checkAndUpdate( |
6462 | A, QueryingAA: *this, |
6463 | IRP: IRPosition::value(V: *RI.getReturnValue(), CBContext: getCallBaseContext())); |
6464 | }; |
6465 | |
6466 | bool UsedAssumedInformation = false; |
6467 | if (!A.checkForAllInstructions(Pred: ReturnInstCB, QueryingAA: *this, Opcodes: {Instruction::Ret}, |
6468 | UsedAssumedInformation)) |
6469 | if (!askSimplifiedValueForOtherAAs(A)) |
6470 | return indicatePessimisticFixpoint(); |
6471 | |
6472 | // If a candidate was found in this update, return CHANGED. |
6473 | return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED |
: ChangeStatus::CHANGED;
6475 | } |
6476 | |
6477 | ChangeStatus manifest(Attributor &A) override { |
6478 | // We queried AAValueSimplify for the returned values so they will be |
6479 | // replaced if a simplified form was found. Nothing to do here. |
6480 | return ChangeStatus::UNCHANGED; |
6481 | } |
6482 | |
6483 | /// See AbstractAttribute::trackStatistics() |
6484 | void trackStatistics() const override { |
6485 | STATS_DECLTRACK_FNRET_ATTR(value_simplify) |
6486 | } |
6487 | }; |
6488 | |
6489 | struct AAValueSimplifyFloating : AAValueSimplifyImpl { |
6490 | AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) |
6491 | : AAValueSimplifyImpl(IRP, A) {} |
6492 | |
6493 | /// See AbstractAttribute::initialize(...). |
6494 | void initialize(Attributor &A) override { |
6495 | AAValueSimplifyImpl::initialize(A); |
6496 | Value &V = getAnchorValue(); |
6497 | |
// TODO: Add other cases.
6499 | if (isa<Constant>(Val: V)) |
6500 | indicatePessimisticFixpoint(); |
6501 | } |
6502 | |
6503 | /// See AbstractAttribute::updateImpl(...). |
6504 | ChangeStatus updateImpl(Attributor &A) override { |
6505 | auto Before = SimplifiedAssociatedValue; |
6506 | if (!askSimplifiedValueForOtherAAs(A)) |
6507 | return indicatePessimisticFixpoint(); |
6508 | |
6509 | // If a candidate was found in this update, return CHANGED. |
6510 | return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED |
: ChangeStatus::CHANGED;
6512 | } |
6513 | |
6514 | /// See AbstractAttribute::trackStatistics() |
6515 | void trackStatistics() const override { |
6516 | STATS_DECLTRACK_FLOATING_ATTR(value_simplify) |
6517 | } |
6518 | }; |
6519 | |
6520 | struct AAValueSimplifyFunction : AAValueSimplifyImpl { |
6521 | AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) |
6522 | : AAValueSimplifyImpl(IRP, A) {} |
6523 | |
6524 | /// See AbstractAttribute::initialize(...). |
6525 | void initialize(Attributor &A) override { |
6526 | SimplifiedAssociatedValue = nullptr; |
6527 | indicateOptimisticFixpoint(); |
6528 | } |
/// See AbstractAttribute::updateImpl(...).
6530 | ChangeStatus updateImpl(Attributor &A) override { |
6531 | llvm_unreachable( |
6532 | "AAValueSimplify(Function|CallSite)::updateImpl will not be called" ); |
6533 | } |
6534 | /// See AbstractAttribute::trackStatistics() |
6535 | void trackStatistics() const override { |
6536 | STATS_DECLTRACK_FN_ATTR(value_simplify) |
6537 | } |
6538 | }; |
6539 | |
6540 | struct AAValueSimplifyCallSite : AAValueSimplifyFunction { |
6541 | AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) |
6542 | : AAValueSimplifyFunction(IRP, A) {} |
6543 | /// See AbstractAttribute::trackStatistics() |
6544 | void trackStatistics() const override { |
6545 | STATS_DECLTRACK_CS_ATTR(value_simplify) |
6546 | } |
6547 | }; |
6548 | |
6549 | struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl { |
6550 | AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) |
6551 | : AAValueSimplifyImpl(IRP, A) {} |
6552 | |
6553 | void initialize(Attributor &A) override { |
6554 | AAValueSimplifyImpl::initialize(A); |
6555 | Function *Fn = getAssociatedFunction(); |
6556 | assert(Fn && "Did expect an associted function" ); |
6557 | for (Argument &Arg : Fn->args()) { |
6558 | if (Arg.hasReturnedAttr()) { |
6559 | auto IRP = IRPosition::callsite_argument(CB: *cast<CallBase>(Val: getCtxI()), |
6560 | ArgNo: Arg.getArgNo()); |
6561 | if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT && |
6562 | checkAndUpdate(A, QueryingAA: *this, IRP)) |
6563 | indicateOptimisticFixpoint(); |
6564 | else |
6565 | indicatePessimisticFixpoint(); |
6566 | return; |
6567 | } |
6568 | } |
6569 | } |
6570 | |
6571 | /// See AbstractAttribute::updateImpl(...). |
6572 | ChangeStatus updateImpl(Attributor &A) override { |
6573 | return indicatePessimisticFixpoint(); |
6574 | } |
6575 | |
6576 | void trackStatistics() const override { |
6577 | STATS_DECLTRACK_CSRET_ATTR(value_simplify) |
6578 | } |
6579 | }; |
6580 | |
6581 | struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { |
6582 | AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) |
6583 | : AAValueSimplifyFloating(IRP, A) {} |
6584 | |
6585 | /// See AbstractAttribute::manifest(...). |
6586 | ChangeStatus manifest(Attributor &A) override { |
6587 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
6588 | // TODO: We should avoid simplification duplication to begin with. |
6589 | auto *FloatAA = A.lookupAAFor<AAValueSimplify>( |
6590 | IRP: IRPosition::value(V: getAssociatedValue()), QueryingAA: this, DepClass: DepClassTy::NONE); |
6591 | if (FloatAA && FloatAA->getState().isValidState()) |
6592 | return Changed; |
6593 | |
6594 | if (auto *NewV = manifestReplacementValue(A, CtxI: getCtxI())) { |
6595 | Use &U = cast<CallBase>(Val: &getAnchorValue()) |
6596 | ->getArgOperandUse(i: getCallSiteArgNo()); |
6597 | if (A.changeUseAfterManifest(U, NV&: *NewV)) |
6598 | Changed = ChangeStatus::CHANGED; |
6599 | } |
6600 | |
6601 | return Changed | AAValueSimplify::manifest(A); |
6602 | } |
6603 | |
6604 | void trackStatistics() const override { |
6605 | STATS_DECLTRACK_CSARG_ATTR(value_simplify) |
6606 | } |
6607 | }; |
6608 | } // namespace |
6609 | |
6610 | /// ----------------------- Heap-To-Stack Conversion --------------------------- |
6611 | namespace { |
6612 | struct AAHeapToStackFunction final : public AAHeapToStack { |
6613 | |
6614 | struct AllocationInfo { |
6615 | /// The call that allocates the memory. |
6616 | CallBase *const CB; |
6617 | |
6618 | /// The library function id for the allocation. |
6619 | LibFunc LibraryFunctionId = NotLibFunc; |
6620 | |
6621 | /// The status with respect to a rewrite. |
6622 | enum { |
6623 | STACK_DUE_TO_USE, |
6624 | STACK_DUE_TO_FREE, |
6625 | INVALID, |
6626 | } Status = STACK_DUE_TO_USE; |
6627 | |
6628 | /// Flag to indicate if we encountered a use that might free this allocation |
6629 | /// but which is not in the deallocation infos. |
6630 | bool HasPotentiallyFreeingUnknownUses = false; |
6631 | |
6632 | /// Flag to indicate that we should place the new alloca in the function |
6633 | /// entry block rather than where the call site (CB) is. |
6634 | bool MoveAllocaIntoEntry = true; |
6635 | |
6636 | /// The set of free calls that use this allocation. |
6637 | SmallSetVector<CallBase *, 1> PotentialFreeCalls{}; |
6638 | }; |
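| // Note: updateImpl only ever moves Status forward along |
| // STACK_DUE_TO_USE -> STACK_DUE_TO_FREE -> INVALID; e.g., a potentially |
| // capturing call site use demotes an allocation from the use-based check to |
| // the free-based one, and a failed free check invalidates it for good. |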
6639 | |
6640 | struct DeallocationInfo { |
6641 | /// The call that deallocates the memory. |
6642 | CallBase *const CB; |
6643 | /// The value freed by the call. |
6644 | Value *FreedOp; |
6645 | |
6646 | /// Flag to indicate if we don't know all objects this deallocation might |
6647 | /// free. |
6648 | bool MightFreeUnknownObjects = false; |
6649 | |
6650 | /// The set of allocation calls that are potentially freed. |
6651 | SmallSetVector<CallBase *, 1> PotentialAllocationCalls{}; |
6652 | }; |
6653 | |
6654 | AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) |
6655 | : AAHeapToStack(IRP, A) {} |
6656 | |
6657 | ~AAHeapToStackFunction() { |
6658 | // Ensure we call the destructors so we release any memory allocated in |
6659 | // the sets. |
6660 | for (auto &It : AllocationInfos) |
6661 | It.second->~AllocationInfo(); |
6662 | for (auto &It : DeallocationInfos) |
6663 | It.second->~DeallocationInfo(); |
6664 | } |
6665 | |
6666 | void initialize(Attributor &A) override { |
6667 | AAHeapToStack::initialize(A); |
6668 | |
6669 | const Function *F = getAnchorScope(); |
6670 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(F: *F); |
6671 | |
6672 | auto AllocationIdentifierCB = [&](Instruction &I) { |
6673 | CallBase *CB = dyn_cast<CallBase>(Val: &I); |
6674 | if (!CB) |
6675 | return true; |
6676 | if (Value *FreedOp = getFreedOperand(CB, TLI)) { |
6677 | DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{.CB: CB, .FreedOp: FreedOp}; |
6678 | return true; |
6679 | } |
6680 | // To do heap to stack, we need to know that the allocation itself is |
6681 | // removable once uses are rewritten, and that we can initialize the |
6682 | // alloca to the same pattern as the original allocation result. |
6683 | if (isRemovableAlloc(V: CB, TLI)) { |
6684 | auto *I8Ty = Type::getInt8Ty(C&: CB->getParent()->getContext()); |
6685 | if (nullptr != getInitialValueOfAllocation(V: CB, TLI, Ty: I8Ty)) { |
6686 | AllocationInfo *AI = new (A.Allocator) AllocationInfo{.CB: CB}; |
6687 | AllocationInfos[CB] = AI; |
6688 | if (TLI) |
6689 | TLI->getLibFunc(CB: *CB, F&: AI->LibraryFunctionId); |
6690 | } |
6691 | } |
6692 | return true; |
6693 | }; |
6694 | |
6695 | bool UsedAssumedInformation = false; |
6696 | bool Success = A.checkForAllCallLikeInstructions( |
6697 | Pred: AllocationIdentifierCB, QueryingAA: *this, UsedAssumedInformation, |
6698 | /* CheckBBLivenessOnly */ false, |
6699 | /* CheckPotentiallyDead */ true); |
6700 | (void)Success; |
6701 | assert(Success && "Did not expect the call base visit callback to fail!" ); |
6702 | |
6703 | Attributor::SimplifictionCallbackTy SCB = |
6704 | [](const IRPosition &, const AbstractAttribute *, |
6705 | bool &) -> std::optional<Value *> { return nullptr; }; |
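| // Note: the callback unconditionally reports "no simplified value" |
| // (nullptr), which keeps value simplification from replacing the |
| // allocation and deallocation call results while this AA may still |
| // rewrite them. |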
6706 | for (const auto &It : AllocationInfos) |
6707 | A.registerSimplificationCallback(IRP: IRPosition::callsite_returned(CB: *It.first), |
6708 | CB: SCB); |
6709 | for (const auto &It : DeallocationInfos) |
6710 | A.registerSimplificationCallback(IRP: IRPosition::callsite_returned(CB: *It.first), |
6711 | CB: SCB); |
6712 | } |
6713 | |
6714 | const std::string getAsStr(Attributor *A) const override { |
6715 | unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; |
6716 | for (const auto &It : AllocationInfos) { |
6717 | if (It.second->Status == AllocationInfo::INVALID) |
6718 | ++NumInvalidMallocs; |
6719 | else |
6720 | ++NumH2SMallocs; |
6721 | } |
6722 | return "[H2S] Mallocs Good/Bad: " + std::to_string(val: NumH2SMallocs) + "/" + |
6723 | std::to_string(val: NumInvalidMallocs); |
6724 | } |
6725 | |
6726 | /// See AbstractAttribute::trackStatistics(). |
6727 | void trackStatistics() const override { |
6728 | STATS_DECL( |
6729 | MallocCalls, Function, |
6730 | "Number of malloc/calloc/aligned_alloc calls converted to allocas" ); |
6731 | for (const auto &It : AllocationInfos) |
6732 | if (It.second->Status != AllocationInfo::INVALID) |
6733 | ++BUILD_STAT_NAME(MallocCalls, Function); |
6734 | } |
6735 | |
6736 | bool isAssumedHeapToStack(const CallBase &CB) const override { |
6737 | if (isValidState()) |
6738 | if (AllocationInfo *AI = |
6739 | AllocationInfos.lookup(Key: const_cast<CallBase *>(&CB))) |
6740 | return AI->Status != AllocationInfo::INVALID; |
6741 | return false; |
6742 | } |
6743 | |
6744 | bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { |
6745 | if (!isValidState()) |
6746 | return false; |
6747 | |
6748 | for (const auto &It : AllocationInfos) { |
6749 | AllocationInfo &AI = *It.second; |
6750 | if (AI.Status == AllocationInfo::INVALID) |
6751 | continue; |
6752 | |
6753 | if (AI.PotentialFreeCalls.count(key: &CB)) |
6754 | return true; |
6755 | } |
6756 | |
6757 | return false; |
6758 | } |
6759 | |
6760 | ChangeStatus manifest(Attributor &A) override { |
6761 | assert(getState().isValidState() && |
6762 | "Attempted to manifest an invalid state!" ); |
6763 | |
6764 | ChangeStatus HasChanged = ChangeStatus::UNCHANGED; |
6765 | Function *F = getAnchorScope(); |
6766 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(F: *F); |
6767 | |
6768 | for (auto &It : AllocationInfos) { |
6769 | AllocationInfo &AI = *It.second; |
6770 | if (AI.Status == AllocationInfo::INVALID) |
6771 | continue; |
6772 | |
6773 | for (CallBase *FreeCall : AI.PotentialFreeCalls) { |
6774 | LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n" ); |
6775 | A.deleteAfterManifest(I&: *FreeCall); |
6776 | HasChanged = ChangeStatus::CHANGED; |
6777 | } |
6778 | |
6779 | LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB |
6780 | << "\n" ); |
6781 | |
6782 | auto Remark = [&](OptimizationRemark OR) { |
6783 | LibFunc IsAllocShared; |
6784 | if (TLI->getLibFunc(CB: *AI.CB, F&: IsAllocShared)) |
6785 | if (IsAllocShared == LibFunc___kmpc_alloc_shared) |
6786 | return OR << "Moving globalized variable to the stack." ; |
6787 | return OR << "Moving memory allocation from the heap to the stack." ; |
6788 | }; |
6789 | if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) |
6790 | A.emitRemark<OptimizationRemark>(I: AI.CB, RemarkName: "OMP110" , RemarkCB&: Remark); |
6791 | else |
6792 | A.emitRemark<OptimizationRemark>(I: AI.CB, RemarkName: "HeapToStack" , RemarkCB&: Remark); |
6793 | |
6794 | const DataLayout &DL = A.getInfoCache().getDL(); |
6795 | Value *Size; |
6796 | std::optional<APInt> SizeAPI = getSize(A, AA: *this, AI); |
6797 | if (SizeAPI) { |
6798 | Size = ConstantInt::get(Context&: AI.CB->getContext(), V: *SizeAPI); |
6799 | } else { |
6800 | LLVMContext &Ctx = AI.CB->getContext(); |
6801 | ObjectSizeOpts Opts; |
6802 | ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts); |
6803 | SizeOffsetValue SizeOffsetPair = Eval.compute(V: AI.CB); |
6804 | assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() && |
6805 | cast<ConstantInt>(SizeOffsetPair.Offset)->isZero()); |
6806 | Size = SizeOffsetPair.Size; |
6807 | } |
6808 | |
6809 | BasicBlock::iterator IP = AI.MoveAllocaIntoEntry |
6810 | ? F->getEntryBlock().begin() |
6811 | : AI.CB->getIterator(); |
6812 | |
6813 | Align Alignment(1); |
6814 | if (MaybeAlign RetAlign = AI.CB->getRetAlign()) |
6815 | Alignment = std::max(a: Alignment, b: *RetAlign); |
6816 | if (Value *Align = getAllocAlignment(V: AI.CB, TLI)) { |
6817 | std::optional<APInt> AlignmentAPI = getAPInt(A, AA: *this, V&: *Align); |
6818 | assert(AlignmentAPI && AlignmentAPI->getZExtValue() > 0 && |
6819 | "Expected an alignment during manifest!" ); |
6820 | Alignment = |
6821 | std::max(a: Alignment, b: assumeAligned(Value: AlignmentAPI->getZExtValue())); |
6822 | } |
6823 | |
6824 | // TODO: Hoist the alloca towards the function entry. |
6825 | unsigned AS = DL.getAllocaAddrSpace(); |
6826 | Instruction *Alloca = |
6827 | new AllocaInst(Type::getInt8Ty(C&: F->getContext()), AS, Size, Alignment, |
6828 | AI.CB->getName() + ".h2s" , IP); |
6829 | |
6830 | if (Alloca->getType() != AI.CB->getType()) |
6831 | Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( |
6832 | S: Alloca, Ty: AI.CB->getType(), Name: "malloc_cast" , InsertBefore: AI.CB->getIterator()); |
6833 | |
6834 | auto *I8Ty = Type::getInt8Ty(C&: F->getContext()); |
6835 | auto *InitVal = getInitialValueOfAllocation(V: AI.CB, TLI, Ty: I8Ty); |
6836 | assert(InitVal && |
6837 | "Must be able to materialize initial memory state of allocation" ); |
6838 | |
6839 | A.changeAfterManifest(IRP: IRPosition::inst(I: *AI.CB), NV&: *Alloca); |
6840 | |
6841 | if (auto *II = dyn_cast<InvokeInst>(Val: AI.CB)) { |
6842 | auto *NBB = II->getNormalDest(); |
6843 | BranchInst::Create(IfTrue: NBB, InsertBefore: AI.CB->getParent()); |
6844 | } |
6845 | A.deleteAfterManifest(I&: *AI.CB); |
6848 | |
6849 | // Initialize the alloca with the same value as used by the allocation |
6850 | // function. We can skip undef as the initial value of an alloca is |
6851 | // undef, and the memset would simply end up being DSEd. |
6852 | if (!isa<UndefValue>(Val: InitVal)) { |
6853 | IRBuilder<> Builder(Alloca->getNextNode()); |
6854 | // TODO: Use alignment above if align!=1 |
6855 | Builder.CreateMemSet(Ptr: Alloca, Val: InitVal, Size, Align: std::nullopt); |
6856 | } |
6857 | HasChanged = ChangeStatus::CHANGED; |
6858 | } |
6859 | |
6860 | return HasChanged; |
6861 | } |
6862 | |
6863 | std::optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, |
6864 | Value &V) { |
6865 | bool UsedAssumedInformation = false; |
6866 | std::optional<Constant *> SimpleV = |
6867 | A.getAssumedConstant(V, AA, UsedAssumedInformation); |
6868 | if (!SimpleV) |
6869 | return APInt(64, 0); |
6870 | if (auto *CI = dyn_cast_or_null<ConstantInt>(Val: *SimpleV)) |
6871 | return CI->getValue(); |
6872 | return std::nullopt; |
6873 | } |
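| // For illustration: if \p V simplifies to `i64 16` this returns 16; if no |
| // simplified value is available yet (std::nullopt from getAssumedConstant) |
| // it returns a placeholder of 0; and if \p V simplifies to something other |
| // than an integer constant, the result is std::nullopt. |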
6874 | |
6875 | std::optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, |
6876 | AllocationInfo &AI) { |
6877 | auto Mapper = [&](const Value *V) -> const Value * { |
6878 | bool UsedAssumedInformation = false; |
6879 | if (std::optional<Constant *> SimpleV = |
6880 | A.getAssumedConstant(V: *V, AA, UsedAssumedInformation)) |
6881 | if (*SimpleV) |
6882 | return *SimpleV; |
6883 | return V; |
6884 | }; |
6885 | |
6886 | const Function *F = getAnchorScope(); |
6887 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(F: *F); |
6888 | return getAllocSize(CB: AI.CB, TLI, Mapper); |
6889 | } |
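| // E.g., for `calloc(%n, 8)` where the Attributor has simplified %n to the |
| // constant 4, the Mapper lets getAllocSize see that constant and the |
| // result is 32; with a genuinely unknown %n this returns std::nullopt. |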
6890 | |
6891 | /// Collection of all malloc-like calls in a function with associated |
6892 | /// information. |
6893 | MapVector<CallBase *, AllocationInfo *> AllocationInfos; |
6894 | |
6895 | /// Collection of all free-like calls in a function with associated |
6896 | /// information. |
6897 | MapVector<CallBase *, DeallocationInfo *> DeallocationInfos; |
6898 | |
6899 | ChangeStatus updateImpl(Attributor &A) override; |
6900 | }; |
6901 | |
6902 | ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { |
6903 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
6904 | const Function *F = getAnchorScope(); |
6905 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(F: *F); |
6906 | |
6907 | const auto *LivenessAA = |
6908 | A.getAAFor<AAIsDead>(QueryingAA: *this, IRP: IRPosition::function(F: *F), DepClass: DepClassTy::NONE); |
6909 | |
6910 | MustBeExecutedContextExplorer *Explorer = |
6911 | A.getInfoCache().getMustBeExecutedContextExplorer(); |
6912 | |
6913 | bool StackIsAccessibleByOtherThreads = |
6914 | A.getInfoCache().stackIsAccessibleByOtherThreads(); |
6915 | |
6916 | LoopInfo *LI = |
6917 | A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F: *F); |
6918 | std::optional<bool> MayContainIrreducibleControl; |
6919 | auto IsInLoop = [&](BasicBlock &BB) { |
6920 | if (&F->getEntryBlock() == &BB) |
6921 | return false; |
6922 | if (!MayContainIrreducibleControl.has_value()) |
6923 | MayContainIrreducibleControl = mayContainIrreducibleControl(F: *F, LI); |
6924 | if (*MayContainIrreducibleControl) |
6925 | return true; |
6926 | if (!LI) |
6927 | return true; |
6928 | return LI->getLoopFor(BB: &BB) != nullptr; |
6929 | }; |
6930 | |
6931 | // Flag to ensure we update our deallocation information at most once per |
6932 | // updateImpl call and only if we use the free check reasoning. |
6933 | bool HasUpdatedFrees = false; |
6934 | |
6935 | auto UpdateFrees = [&]() { |
6936 | HasUpdatedFrees = true; |
6937 | |
6938 | for (auto &It : DeallocationInfos) { |
6939 | DeallocationInfo &DI = *It.second; |
6940 | // For now we cannot use deallocations that have unknown inputs, skip |
6941 | // them. |
6942 | if (DI.MightFreeUnknownObjects) |
6943 | continue; |
6944 | |
6945 | // No need to analyze dead calls, ignore them instead. |
6946 | bool UsedAssumedInformation = false; |
6947 | if (A.isAssumedDead(I: *DI.CB, QueryingAA: this, LivenessAA, UsedAssumedInformation, |
6948 | /* CheckBBLivenessOnly */ true)) |
6949 | continue; |
6950 | |
6951 | // Use the non-optimistic version to get the freed object. |
6952 | Value *Obj = getUnderlyingObject(V: DI.FreedOp); |
6953 | if (!Obj) { |
6954 | LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n" ); |
6955 | DI.MightFreeUnknownObjects = true; |
6956 | continue; |
6957 | } |
6958 | |
6959 | // Free of null and undef can be ignored as no-ops (or UB in the latter |
6960 | // case). |
6961 | if (isa<ConstantPointerNull>(Val: Obj) || isa<UndefValue>(Val: Obj)) |
6962 | continue; |
6963 | |
6964 | CallBase *ObjCB = dyn_cast<CallBase>(Val: Obj); |
6965 | if (!ObjCB) { |
6966 | LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj |
6967 | << "\n" ); |
6968 | DI.MightFreeUnknownObjects = true; |
6969 | continue; |
6970 | } |
6971 | |
6972 | AllocationInfo *AI = AllocationInfos.lookup(Key: ObjCB); |
6973 | if (!AI) { |
6974 | LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj |
6975 | << "\n" ); |
6976 | DI.MightFreeUnknownObjects = true; |
6977 | continue; |
6978 | } |
6979 | |
6980 | DI.PotentialAllocationCalls.insert(X: ObjCB); |
6981 | } |
6982 | }; |
6983 | |
6984 | auto FreeCheck = [&](AllocationInfo &AI) { |
6985 | // If the stack is not accessible by other threads, the "must-free" logic |
6986 | // doesn't apply as the pointer could be shared and needs to be placed in |
6987 | // "shareable" memory. |
6988 | if (!StackIsAccessibleByOtherThreads) { |
6989 | bool IsKnownNoSync; |
6990 | if (!AA::hasAssumedIRAttr<Attribute::NoSync>( |
6991 | A, QueryingAA: this, IRP: getIRPosition(), DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoSync)) { |
6992 | LLVM_DEBUG( |
6993 | dbgs() << "[H2S] found an escaping use, stack is not accessible by " |
6994 | "other threads and function is not nosync:\n" ); |
6995 | return false; |
6996 | } |
6997 | } |
6998 | if (!HasUpdatedFrees) |
6999 | UpdateFrees(); |
7000 | |
7001 | // TODO: Allow multi-exit functions that have different free calls. |
7002 | if (AI.PotentialFreeCalls.size() != 1) { |
7003 | LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but " |
7004 | << AI.PotentialFreeCalls.size() << "\n" ); |
7005 | return false; |
7006 | } |
7007 | CallBase *UniqueFree = *AI.PotentialFreeCalls.begin(); |
7008 | DeallocationInfo *DI = DeallocationInfos.lookup(Key: UniqueFree); |
7009 | if (!DI) { |
7010 | LLVM_DEBUG( |
7011 | dbgs() << "[H2S] unique free call was not known as deallocation call " |
7012 | << *UniqueFree << "\n" ); |
7013 | return false; |
7014 | } |
7015 | if (DI->MightFreeUnknownObjects) { |
7016 | LLVM_DEBUG( |
7017 | dbgs() << "[H2S] unique free call might free unknown allocations\n" ); |
7018 | return false; |
7019 | } |
7020 | if (DI->PotentialAllocationCalls.empty()) |
7021 | return true; |
7022 | if (DI->PotentialAllocationCalls.size() > 1) { |
7023 | LLVM_DEBUG(dbgs() << "[H2S] unique free call might free " |
7024 | << DI->PotentialAllocationCalls.size() |
7025 | << " different allocations\n" ); |
7026 | return false; |
7027 | } |
7028 | if (*DI->PotentialAllocationCalls.begin() != AI.CB) { |
7029 | LLVM_DEBUG( |
7030 | dbgs() |
7031 | << "[H2S] unique free call not known to free this allocation but " |
7032 | << **DI->PotentialAllocationCalls.begin() << "\n" ); |
7033 | return false; |
7034 | } |
7035 | |
7036 | // __kmpc_alloc_shared and __kmpc_free_shared are by construction matched. |
7037 | if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared) { |
7038 | Instruction *CtxI = isa<InvokeInst>(Val: AI.CB) ? AI.CB : AI.CB->getNextNode(); |
7039 | if (!Explorer || !Explorer->findInContextOf(I: UniqueFree, PP: CtxI)) { |
7040 | LLVM_DEBUG(dbgs() << "[H2S] unique free call might not be executed " |
7041 | "with the allocation " |
7042 | << *UniqueFree << "\n" ); |
7043 | return false; |
7044 | } |
7045 | } |
7046 | return true; |
7047 | }; |
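| // As a sketch of what FreeCheck rejects: an allocation with two distinct |
| // potential frees, |
| // |
| //   %p = call ptr @malloc(i64 8) |
| //   br i1 %c, label %a, label %b |
| // a: |
| //   call void @free(ptr %p) |
| //   ... |
| // b: |
| //   call void @free(ptr %p) |
| //   ... |
| // |
| // fails the unique-free requirement above, whereas a single free found in |
| // the must-be-executed context of the allocation is accepted. |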
7048 | |
7049 | auto UsesCheck = [&](AllocationInfo &AI) { |
7050 | bool ValidUsesOnly = true; |
7051 | |
7052 | auto Pred = [&](const Use &U, bool &Follow) -> bool { |
7053 | Instruction *UserI = cast<Instruction>(Val: U.getUser()); |
7054 | if (isa<LoadInst>(Val: UserI)) |
7055 | return true; |
7056 | if (auto *SI = dyn_cast<StoreInst>(Val: UserI)) { |
7057 | if (SI->getValueOperand() == U.get()) { |
7058 | LLVM_DEBUG(dbgs() |
7059 | << "[H2S] escaping store to memory: " << *UserI << "\n" ); |
7060 | ValidUsesOnly = false; |
7061 | } else { |
7062 | // A store into the malloc'ed memory is fine. |
7063 | } |
7064 | return true; |
7065 | } |
7066 | if (auto *CB = dyn_cast<CallBase>(Val: UserI)) { |
7067 | if (!CB->isArgOperand(U: &U) || CB->isLifetimeStartOrEnd()) |
7068 | return true; |
7069 | if (DeallocationInfos.count(Key: CB)) { |
7070 | AI.PotentialFreeCalls.insert(X: CB); |
7071 | return true; |
7072 | } |
7073 | |
7074 | unsigned ArgNo = CB->getArgOperandNo(U: &U); |
7075 | auto CBIRP = IRPosition::callsite_argument(CB: *CB, ArgNo); |
7076 | |
7077 | bool IsKnownNoCapture; |
7078 | bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>( |
7079 | A, QueryingAA: this, IRP: CBIRP, DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoCapture); |
7080 | |
7081 | // If a call site argument use is nofree, we are fine. |
7082 | bool IsKnownNoFree; |
7083 | bool IsAssumedNoFree = AA::hasAssumedIRAttr<Attribute::NoFree>( |
7084 | A, QueryingAA: this, IRP: CBIRP, DepClass: DepClassTy::OPTIONAL, IsKnown&: IsKnownNoFree); |
7085 | |
7086 | if (!IsAssumedNoCapture || |
7087 | (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && |
7088 | !IsAssumedNoFree)) { |
7089 | AI.HasPotentiallyFreeingUnknownUses |= !IsAssumedNoFree; |
7090 | |
7091 | // Emit a missed remark if this is missed OpenMP globalization. |
7092 | auto Remark = [&](OptimizationRemarkMissed ORM) { |
7093 | return ORM |
7094 | << "Could not move globalized variable to the stack. " |
7095 | "Variable is potentially captured in call. Mark " |
7096 | "parameter as `__attribute__((noescape))` to override." ; |
7097 | }; |
7098 | |
7099 | if (ValidUsesOnly && |
7100 | AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) |
7101 | A.emitRemark<OptimizationRemarkMissed>(I: CB, RemarkName: "OMP113" , RemarkCB&: Remark); |
7102 | |
7103 | LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n" ); |
7104 | ValidUsesOnly = false; |
7105 | } |
7106 | return true; |
7107 | } |
7108 | |
7109 | if (isa<GetElementPtrInst>(Val: UserI) || isa<BitCastInst>(Val: UserI) || |
7110 | isa<PHINode>(Val: UserI) || isa<SelectInst>(Val: UserI)) { |
7111 | Follow = true; |
7112 | return true; |
7113 | } |
7114 | // Unknown user for which we cannot track uses further (in a way that |
7115 | // makes sense). |
7116 | LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n" ); |
7117 | ValidUsesOnly = false; |
7118 | return true; |
7119 | }; |
7120 | if (!A.checkForAllUses(Pred, QueryingAA: *this, V: *AI.CB, /* CheckBBLivenessOnly */ false, |
7121 | LivenessDepClass: DepClassTy::OPTIONAL, /* IgnoreDroppableUses */ true, |
7122 | EquivalentUseCB: [&](const Use &OldU, const Use &NewU) { |
7123 | auto *SI = dyn_cast<StoreInst>(Val: OldU.getUser()); |
7124 | return !SI || StackIsAccessibleByOtherThreads || |
7125 | AA::isAssumedThreadLocalObject( |
7126 | A, Obj&: *SI->getPointerOperand(), QueryingAA: *this); |
7127 | })) |
7128 | return false; |
7129 | return ValidUsesOnly; |
7130 | }; |
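| // Minimal examples for UsesCheck: `store i64 0, ptr %p` into the |
| // allocation is fine; `store ptr %p, ptr @g` lets the pointer itself |
| // escape and invalidates the uses; and a known free-like call site is |
| // recorded in PotentialFreeCalls rather than treated as a bad user. |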
7131 | |
7132 | // The actual update starts here. We look at all allocations and depending on |
7133 | // their status perform the appropriate check(s). |
7134 | for (auto &It : AllocationInfos) { |
7135 | AllocationInfo &AI = *It.second; |
7136 | if (AI.Status == AllocationInfo::INVALID) |
7137 | continue; |
7138 | |
7139 | if (Value *Align = getAllocAlignment(V: AI.CB, TLI)) { |
7140 | std::optional<APInt> APAlign = getAPInt(A, AA: *this, V&: *Align); |
7141 | if (!APAlign) { |
7142 | // Can't generate an alloca which respects the required alignment |
7143 | // on the allocation. |
7144 | LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB |
7145 | << "\n" ); |
7146 | AI.Status = AllocationInfo::INVALID; |
7147 | Changed = ChangeStatus::CHANGED; |
7148 | continue; |
7149 | } |
7150 | if (APAlign->ugt(RHS: llvm::Value::MaximumAlignment) || |
7151 | !APAlign->isPowerOf2()) { |
7152 | LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign |
7153 | << "\n" ); |
7154 | AI.Status = AllocationInfo::INVALID; |
7155 | Changed = ChangeStatus::CHANGED; |
7156 | continue; |
7157 | } |
7158 | } |
7159 | |
7160 | std::optional<APInt> Size = getSize(A, AA: *this, AI); |
7161 | if (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && |
7162 | MaxHeapToStackSize != -1) { |
7163 | if (!Size || Size->ugt(RHS: MaxHeapToStackSize)) { |
7164 | LLVM_DEBUG({ |
7165 | if (!Size) |
7166 | dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n" ; |
7167 | else |
7168 | dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " |
7169 | << MaxHeapToStackSize << "\n" ; |
7170 | }); |
7171 | |
7172 | AI.Status = AllocationInfo::INVALID; |
7173 | Changed = ChangeStatus::CHANGED; |
7174 | continue; |
7175 | } |
7176 | } |
7177 | |
7178 | switch (AI.Status) { |
7179 | case AllocationInfo::STACK_DUE_TO_USE: |
7180 | if (UsesCheck(AI)) |
7181 | break; |
7182 | AI.Status = AllocationInfo::STACK_DUE_TO_FREE; |
7183 | [[fallthrough]]; |
7184 | case AllocationInfo::STACK_DUE_TO_FREE: |
7185 | if (FreeCheck(AI)) |
7186 | break; |
7187 | AI.Status = AllocationInfo::INVALID; |
7188 | Changed = ChangeStatus::CHANGED; |
7189 | break; |
7190 | case AllocationInfo::INVALID: |
7191 | llvm_unreachable("Invalid allocations should never reach this point!" ); |
7192 | } |
7193 | |
7194 | // Check if we still think we can move it into the entry block. If the |
7195 | // alloca comes from a converted __kmpc_alloc_shared then we can usually |
7196 | // ignore the potential complications associated with loops. |
7197 | bool IsGlobalizedLocal = |
7198 | AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared; |
7199 | if (AI.MoveAllocaIntoEntry && |
7200 | (!Size.has_value() || |
7201 | (!IsGlobalizedLocal && IsInLoop(*AI.CB->getParent())))) |
7202 | AI.MoveAllocaIntoEntry = false; |
7203 | } |
7204 | |
7205 | return Changed; |
7206 | } |
7207 | } // namespace |
7208 | |
7209 | /// ----------------------- Privatizable Pointers ------------------------------ |
7210 | namespace { |
7211 | struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { |
7212 | AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) |
7213 | : AAPrivatizablePtr(IRP, A), PrivatizableType(std::nullopt) {} |
7214 | |
7215 | ChangeStatus indicatePessimisticFixpoint() override { |
7216 | AAPrivatizablePtr::indicatePessimisticFixpoint(); |
7217 | PrivatizableType = nullptr; |
7218 | return ChangeStatus::CHANGED; |
7219 | } |
7220 | |
7221 | /// Identify the type we can choose for a private copy of the underlying |
7222 | /// argument. std::nullopt means it is not clear yet, nullptr means there is |
7223 | /// none. |
7224 | virtual std::optional<Type *> identifyPrivatizableType(Attributor &A) = 0; |
7225 | |
7226 | /// Return a privatizable type that encloses both T0 and T1. |
7227 | /// TODO: This is merely a stub for now as we should manage a mapping as well. |
7228 | std::optional<Type *> combineTypes(std::optional<Type *> T0, |
7229 | std::optional<Type *> T1) { |
7230 | if (!T0) |
7231 | return T1; |
7232 | if (!T1) |
7233 | return T0; |
7234 | if (T0 == T1) |
7235 | return T0; |
7236 | return nullptr; |
7237 | } |
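| // E.g., combineTypes(std::nullopt, i32) == i32 (first call site seen), |
| // combineTypes(i32, i32) == i32, and combineTypes(i32, float) == nullptr, |
| // which callers interpret as "no common privatizable type exists". |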
7238 | |
7239 | std::optional<Type *> getPrivatizableType() const override { |
7240 | return PrivatizableType; |
7241 | } |
7242 | |
7243 | const std::string getAsStr(Attributor *A) const override { |
7244 | return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]" ; |
7245 | } |
7246 | |
7247 | protected: |
7248 | std::optional<Type *> PrivatizableType; |
7249 | }; |
7250 | |
7251 | // TODO: Do this for call site arguments (probably also other values) as well. |
7252 | |
7253 | struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { |
7254 | AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) |
7255 | : AAPrivatizablePtrImpl(IRP, A) {} |
7256 | |
7257 | /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) |
7258 | std::optional<Type *> identifyPrivatizableType(Attributor &A) override { |
7259 | // If this is a byval argument and we know all the call sites (so we can |
7260 | // rewrite them), there is no need to check them explicitly. |
7261 | bool UsedAssumedInformation = false; |
7262 | SmallVector<Attribute, 1> Attrs; |
7263 | A.getAttrs(IRP: getIRPosition(), AKs: {Attribute::ByVal}, Attrs, |
7264 | /* IgnoreSubsumingPositions */ true); |
7265 | if (!Attrs.empty() && |
7266 | A.checkForAllCallSites(Pred: [](AbstractCallSite ACS) { return true; }, QueryingAA: *this, |
7267 | RequireAllCallSites: true, UsedAssumedInformation)) |
7268 | return Attrs[0].getValueAsType(); |
7269 | |
7270 | std::optional<Type *> Ty; |
7271 | unsigned ArgNo = getIRPosition().getCallSiteArgNo(); |
7272 | |
7273 | // Make sure the associated call site argument has the same type at all |
7274 | // call sites and that it is an allocation we know is safe to privatize; |
7275 | // for now that means we only allow alloca instructions. |
7276 | // TODO: We can additionally analyze the accesses in the callee to create |
7277 | // the type from that information instead. That is a little more |
7278 | // involved and will be done in a follow up patch. |
7279 | auto CallSiteCheck = [&](AbstractCallSite ACS) { |
7280 | IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); |
7281 | // Check if a corresponding argument was found or if it is one not |
7282 | // associated (which can happen for callback calls). |
7283 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) |
7284 | return false; |
7285 | |
7286 | // Check that all call sites agree on a type. |
7287 | auto *PrivCSArgAA = |
7288 | A.getAAFor<AAPrivatizablePtr>(QueryingAA: *this, IRP: ACSArgPos, DepClass: DepClassTy::REQUIRED); |
7289 | if (!PrivCSArgAA) |
7290 | return false; |
7291 | std::optional<Type *> CSTy = PrivCSArgAA->getPrivatizableType(); |
7292 | |
7293 | LLVM_DEBUG({ |
7294 | dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: " ; |
7295 | if (CSTy && *CSTy) |
7296 | (*CSTy)->print(dbgs()); |
7297 | else if (CSTy) |
7298 | dbgs() << "<nullptr>" ; |
7299 | else |
7300 | dbgs() << "<none>" ; |
7301 | }); |
7302 | |
7303 | Ty = combineTypes(T0: Ty, T1: CSTy); |
7304 | |
7305 | LLVM_DEBUG({ |
7306 | dbgs() << " : New Type: " ; |
7307 | if (Ty && *Ty) |
7308 | (*Ty)->print(dbgs()); |
7309 | else if (Ty) |
7310 | dbgs() << "<nullptr>" ; |
7311 | else |
7312 | dbgs() << "<none>" ; |
7313 | dbgs() << "\n" ; |
7314 | }); |
7315 | |
7316 | return !Ty || *Ty; |
7317 | }; |
7318 | |
7319 | if (!A.checkForAllCallSites(Pred: CallSiteCheck, QueryingAA: *this, RequireAllCallSites: true, |
7320 | UsedAssumedInformation)) |
7321 | return nullptr; |
7322 | return Ty; |
7323 | } |
7324 | |
7325 | /// See AbstractAttribute::updateImpl(...). |
7326 | ChangeStatus updateImpl(Attributor &A) override { |
7327 | PrivatizableType = identifyPrivatizableType(A); |
7328 | if (!PrivatizableType) |
7329 | return ChangeStatus::UNCHANGED; |
7330 | if (!*PrivatizableType) |
7331 | return indicatePessimisticFixpoint(); |
7332 | |
7333 | // The dependence is optional so we don't give up once we give up on the |
7334 | // alignment. |
7335 | A.getAAFor<AAAlign>(QueryingAA: *this, IRP: IRPosition::value(V: getAssociatedValue()), |
7336 | DepClass: DepClassTy::OPTIONAL); |
7337 | |
7338 | // Avoid arguments with padding for now. |
7339 | if (!A.hasAttr(IRP: getIRPosition(), AKs: Attribute::ByVal) && |
7340 | !isDenselyPacked(Ty: *PrivatizableType, DL: A.getInfoCache().getDL())) { |
7341 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n" ); |
7342 | return indicatePessimisticFixpoint(); |
7343 | } |
7344 | |
7345 | // Collect the types that will replace the privatizable type in the function |
7346 | // signature. |
7347 | SmallVector<Type *, 16> ReplacementTypes; |
7348 | identifyReplacementTypes(PrivType: *PrivatizableType, ReplacementTypes); |
7349 | |
7350 | // Verify callee and caller agree on how the promoted argument would be |
7351 | // passed. |
7352 | Function &Fn = *getIRPosition().getAnchorScope(); |
7353 | const auto *TTI = |
7354 | A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(F: Fn); |
7355 | if (!TTI) { |
7356 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function " |
7357 | << Fn.getName() << "\n" ); |
7358 | return indicatePessimisticFixpoint(); |
7359 | } |
7360 | |
7361 | auto CallSiteCheck = [&](AbstractCallSite ACS) { |
7362 | CallBase *CB = ACS.getInstruction(); |
7363 | return TTI->areTypesABICompatible( |
7364 | Caller: CB->getCaller(), |
7365 | Callee: dyn_cast_if_present<Function>(Val: CB->getCalledOperand()), |
7366 | Types: ReplacementTypes); |
7367 | }; |
7368 | bool UsedAssumedInformation = false; |
7369 | if (!A.checkForAllCallSites(Pred: CallSiteCheck, QueryingAA: *this, RequireAllCallSites: true, |
7370 | UsedAssumedInformation)) { |
7371 | LLVM_DEBUG( |
7372 | dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " |
7373 | << Fn.getName() << "\n" ); |
7374 | return indicatePessimisticFixpoint(); |
7375 | } |
7376 | |
7377 | // Register a rewrite of the argument. |
7378 | Argument *Arg = getAssociatedArgument(); |
7379 | if (!A.isValidFunctionSignatureRewrite(Arg&: *Arg, ReplacementTypes)) { |
7380 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n" ); |
7381 | return indicatePessimisticFixpoint(); |
7382 | } |
7383 | |
7384 | unsigned ArgNo = Arg->getArgNo(); |
7385 | |
7386 | // Helper to check if for the given call site the associated argument is |
7387 | // passed to a callback where the privatization would be different. |
7388 | auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { |
7389 | SmallVector<const Use *, 4> CallbackUses; |
7390 | AbstractCallSite::getCallbackUses(CB, CallbackUses); |
7391 | for (const Use *U : CallbackUses) { |
7392 | AbstractCallSite CBACS(U); |
7393 | assert(CBACS && CBACS.isCallbackCall()); |
7394 | for (Argument &CBArg : CBACS.getCalledFunction()->args()) { |
7395 | int CBArgNo = CBACS.getCallArgOperandNo(Arg&: CBArg); |
7396 | |
7397 | LLVM_DEBUG({ |
7398 | dbgs() |
7399 | << "[AAPrivatizablePtr] Argument " << *Arg |
7400 | << "check if can be privatized in the context of its parent (" |
7401 | << Arg->getParent()->getName() |
7402 | << ")\n[AAPrivatizablePtr] because it is an argument in a " |
7403 | "callback (" |
7404 | << CBArgNo << "@" << CBACS.getCalledFunction()->getName() |
7405 | << ")\n[AAPrivatizablePtr] " << CBArg << " : " |
7406 | << CBACS.getCallArgOperand(CBArg) << " vs " |
7407 | << CB.getArgOperand(ArgNo) << "\n" |
7408 | << "[AAPrivatizablePtr] " << CBArg << " : " |
7409 | << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n" ; |
7410 | }); |
7411 | |
7412 | if (CBArgNo != int(ArgNo)) |
7413 | continue; |
7414 | const auto *CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>( |
7415 | QueryingAA: *this, IRP: IRPosition::argument(Arg: CBArg), DepClass: DepClassTy::REQUIRED); |
7416 | if (CBArgPrivAA && CBArgPrivAA->isValidState()) { |
7417 | auto CBArgPrivTy = CBArgPrivAA->getPrivatizableType(); |
7418 | if (!CBArgPrivTy) |
7419 | continue; |
7420 | if (*CBArgPrivTy == PrivatizableType) |
7421 | continue; |
7422 | } |
7423 | |
7424 | LLVM_DEBUG({ |
7425 | dbgs() << "[AAPrivatizablePtr] Argument " << *Arg |
7426 | << " cannot be privatized in the context of its parent (" |
7427 | << Arg->getParent()->getName() |
7428 | << ")\n[AAPrivatizablePtr] because it is an argument in a " |
7429 | "callback (" |
7430 | << CBArgNo << "@" << CBACS.getCalledFunction()->getName() |
7431 | << ").\n[AAPrivatizablePtr] for which the argument " |
7432 | "privatization is not compatible.\n" ; |
7433 | }); |
7434 | return false; |
7435 | } |
7436 | } |
7437 | return true; |
7438 | }; |
7439 | |
7440 | // Helper to check if for the given call site the associated argument is |
7441 | // passed to a direct call where the privatization would be different. |
7442 | auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) { |
7443 | CallBase *DC = cast<CallBase>(Val: ACS.getInstruction()); |
7444 | int DCArgNo = ACS.getCallArgOperandNo(ArgNo); |
7445 | assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() && |
7446 | "Expected a direct call operand for callback call operand" ); |
7447 | |
7448 | Function *DCCallee = |
7449 | dyn_cast_if_present<Function>(Val: DC->getCalledOperand()); |
7450 | LLVM_DEBUG({ |
7451 | dbgs() << "[AAPrivatizablePtr] Argument " << *Arg |
7452 | << " check if be privatized in the context of its parent (" |
7453 | << Arg->getParent()->getName() |
7454 | << ")\n[AAPrivatizablePtr] because it is an argument in a " |
7455 | "direct call of (" |
7456 | << DCArgNo << "@" << DCCallee->getName() << ").\n" ; |
7457 | }); |
7458 | |
7459 | if (unsigned(DCArgNo) < DCCallee->arg_size()) { |
7460 | const auto *DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>( |
7461 | QueryingAA: *this, IRP: IRPosition::argument(Arg: *DCCallee->getArg(i: DCArgNo)), |
7462 | DepClass: DepClassTy::REQUIRED); |
7463 | if (DCArgPrivAA && DCArgPrivAA->isValidState()) { |
7464 | auto DCArgPrivTy = DCArgPrivAA->getPrivatizableType(); |
7465 | if (!DCArgPrivTy) |
7466 | return true; |
7467 | if (*DCArgPrivTy == PrivatizableType) |
7468 | return true; |
7469 | } |
7470 | } |
7471 | |
7472 | LLVM_DEBUG({ |
7473 | dbgs() << "[AAPrivatizablePtr] Argument " << *Arg |
7474 | << " cannot be privatized in the context of its parent (" |
7475 | << Arg->getParent()->getName() |
7476 | << ")\n[AAPrivatizablePtr] because it is an argument in a " |
7477 | "direct call of (" |
7478 | << ACS.getInstruction()->getCalledOperand()->getName() |
7479 | << ").\n[AAPrivatizablePtr] for which the argument " |
7480 | "privatization is not compatible.\n" ; |
7481 | }); |
7482 | return false; |
7483 | }; |
7484 | |
7485 | // Helper to check if the associated argument is used at the given abstract |
7486 | // call site in a way that is incompatible with the privatization assumed |
7487 | // here. |
7488 | auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) { |
7489 | if (ACS.isDirectCall()) |
7490 | return IsCompatiblePrivArgOfCallback(*ACS.getInstruction()); |
7491 | if (ACS.isCallbackCall()) |
7492 | return IsCompatiblePrivArgOfDirectCS(ACS); |
7493 | return false; |
7494 | }; |
7495 | |
7496 | if (!A.checkForAllCallSites(Pred: IsCompatiblePrivArgOfOtherCallSite, QueryingAA: *this, RequireAllCallSites: true, |
7497 | UsedAssumedInformation)) |
7498 | return indicatePessimisticFixpoint(); |
7499 | |
7500 | return ChangeStatus::UNCHANGED; |
7501 | } |
7502 | |
7503 | /// Given a type to privatize, \p PrivType, collect the constituent types |
7504 | /// (which are used) in \p ReplacementTypes. |
7505 | static void |
7506 | identifyReplacementTypes(Type *PrivType, |
7507 | SmallVectorImpl<Type *> &ReplacementTypes) { |
7508 | // TODO: For now we expand the privatization type to the fullest which can |
7509 | // lead to dead arguments that need to be removed later. |
7510 | assert(PrivType && "Expected privatizable type!" ); |
7511 | |
7512 | // Traverse the type, extract constituent types on the outermost level. |
7513 | if (auto *PrivStructType = dyn_cast<StructType>(Val: PrivType)) { |
7514 | for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) |
7515 | ReplacementTypes.push_back(Elt: PrivStructType->getElementType(N: u)); |
7516 | } else if (auto *PrivArrayType = dyn_cast<ArrayType>(Val: PrivType)) { |
7517 | ReplacementTypes.append(NumInputs: PrivArrayType->getNumElements(), |
7518 | Elt: PrivArrayType->getElementType()); |
7519 | } else { |
7520 | ReplacementTypes.push_back(Elt: PrivType); |
7521 | } |
7522 | } |
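| // E.g., `{ i32, ptr }` expands to the two element types i32 and ptr, |
| // `[3 x i64]` to three i64 entries, and a scalar type such as `double` is |
| // passed through unchanged. |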
7523 | |
7524 | /// Initialize \p Base according to the type \p PrivType at position \p IP. |
7525 | /// The values needed are taken from the arguments of \p F starting at |
7526 | /// position \p ArgNo. |
7527 | static void createInitialization(Type *PrivType, Value &Base, Function &F, |
7528 | unsigned ArgNo, BasicBlock::iterator IP) { |
7529 | assert(PrivType && "Expected privatizable type!" ); |
7530 | |
7531 | IRBuilder<NoFolder> IRB(IP->getParent(), IP); |
7532 | const DataLayout &DL = F.getDataLayout(); |
7533 | |
7534 | // Traverse the type, build GEPs and stores. |
7535 | if (auto *PrivStructType = dyn_cast<StructType>(Val: PrivType)) { |
7536 | const StructLayout *PrivStructLayout = DL.getStructLayout(Ty: PrivStructType); |
7537 | for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { |
7538 | Value *Ptr = |
7539 | constructPointer(Ptr: &Base, Offset: PrivStructLayout->getElementOffset(Idx: u), IRB); |
7540 | new StoreInst(F.getArg(i: ArgNo + u), Ptr, IP); |
7541 | } |
7542 | } else if (auto *PrivArrayType = dyn_cast<ArrayType>(Val: PrivType)) { |
7543 | Type *PointeeTy = PrivArrayType->getElementType(); |
7544 | uint64_t PointeeTySize = DL.getTypeStoreSize(Ty: PointeeTy); |
7545 | for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { |
7546 | Value *Ptr = constructPointer(Ptr: &Base, Offset: u * PointeeTySize, IRB); |
7547 | new StoreInst(F.getArg(i: ArgNo + u), Ptr, IP); |
7548 | } |
7549 | } else { |
7550 | new StoreInst(F.getArg(i: ArgNo), &Base, IP); |
7551 | } |
7552 | } |
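| // Roughly, for PrivType == { i32, i64 } and ArgNo == 1 this emits (byte |
| // offsets come from the struct layout and depend on the data layout): |
| // |
| //   store i32 %arg1, ptr %base |
| //   %f1 = getelementptr i8, ptr %base, i64 8 |
| //   store i64 %arg2, ptr %f1 |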
7553 | |
7554 | /// Extract values from \p Base according to the type \p PrivType at the |
7555 | /// call position \p ACS. The values are appended to \p ReplacementValues. |
7556 | void createReplacementValues(Align Alignment, Type *PrivType, |
7557 | AbstractCallSite ACS, Value *Base, |
7558 | SmallVectorImpl<Value *> &ReplacementValues) { |
7559 | assert(Base && "Expected base value!" ); |
7560 | assert(PrivType && "Expected privatizable type!" ); |
7561 | Instruction *IP = ACS.getInstruction(); |
7562 | |
7563 | IRBuilder<NoFolder> IRB(IP); |
7564 | const DataLayout &DL = IP->getDataLayout(); |
7565 | |
7566 | // Traverse the type, build GEPs and loads. |
7567 | if (auto *PrivStructType = dyn_cast<StructType>(Val: PrivType)) { |
7568 | const StructLayout *PrivStructLayout = DL.getStructLayout(Ty: PrivStructType); |
7569 | for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { |
7570 | Type *PointeeTy = PrivStructType->getElementType(N: u); |
7571 | Value *Ptr = |
7572 | constructPointer(Ptr: Base, Offset: PrivStructLayout->getElementOffset(Idx: u), IRB); |
7573 | LoadInst *L = new LoadInst(PointeeTy, Ptr, "" , IP->getIterator()); |
7574 | L->setAlignment(Alignment); |
7575 | ReplacementValues.push_back(Elt: L); |
7576 | } |
7577 | } else if (auto *PrivArrayType = dyn_cast<ArrayType>(Val: PrivType)) { |
7578 | Type *PointeeTy = PrivArrayType->getElementType(); |
7579 | uint64_t PointeeTySize = DL.getTypeStoreSize(Ty: PointeeTy); |
7580 | for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { |
7581 | Value *Ptr = constructPointer(Ptr: Base, Offset: u * PointeeTySize, IRB); |
7582 | LoadInst *L = new LoadInst(PointeeTy, Ptr, "" , IP->getIterator()); |
7583 | L->setAlignment(Alignment); |
7584 | ReplacementValues.push_back(Elt: L); |
7585 | } |
7586 | } else { |
7587 | LoadInst *L = new LoadInst(PrivType, Base, "" , IP->getIterator()); |
7588 | L->setAlignment(Alignment); |
7589 | ReplacementValues.push_back(Elt: L); |
7590 | } |
7591 | } |
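| // Mirroring createInitialization, for PrivType == [2 x i32] this emits, |
| // directly before the call site, a load of each element at the queried |
| // alignment and appends the loaded values to \p ReplacementValues, so the |
| // callee receives the elements as scalar arguments. |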
7592 | |
7593 | /// See AbstractAttribute::manifest(...) |
7594 | ChangeStatus manifest(Attributor &A) override { |
7595 | if (!PrivatizableType) |
7596 | return ChangeStatus::UNCHANGED; |
7597 | assert(*PrivatizableType && "Expected privatizable type!" ); |
7598 | |
7599 | // Collect all tail calls in the function as we cannot allow new allocas to |
7600 | // escape into tail recursion. |
7601 | // TODO: Be smarter about new allocas escaping into tail calls. |
7602 | SmallVector<CallInst *, 16> TailCalls; |
7603 | bool UsedAssumedInformation = false; |
7604 | if (!A.checkForAllInstructions( |
7605 | Pred: [&](Instruction &I) { |
7606 | CallInst &CI = cast<CallInst>(Val&: I); |
7607 | if (CI.isTailCall()) |
7608 | TailCalls.push_back(Elt: &CI); |
7609 | return true; |
7610 | }, |
7611 | QueryingAA: *this, Opcodes: {Instruction::Call}, UsedAssumedInformation)) |
7612 | return ChangeStatus::UNCHANGED; |
7613 | |
7614 | Argument *Arg = getAssociatedArgument(); |
7615 | // Query AAAlign attribute for alignment of associated argument to |
7616 | // determine the best alignment of loads. |
7617 | const auto *AlignAA = |
7618 | A.getAAFor<AAAlign>(QueryingAA: *this, IRP: IRPosition::value(V: *Arg), DepClass: DepClassTy::NONE); |
7619 | |
7620 | // Callback to repair the associated function. A new alloca is placed at the |
7621 | // beginning and initialized with the values passed through arguments. The |
7622 | // new alloca replaces the use of the old pointer argument. |
7623 | Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = |
7624 | [=](const Attributor::ArgumentReplacementInfo &ARI, |
7625 | Function &ReplacementFn, Function::arg_iterator ArgIt) { |
7626 | BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); |
7627 | BasicBlock::iterator IP = EntryBB.getFirstInsertionPt(); |
7628 | const DataLayout &DL = IP->getDataLayout(); |
7629 | unsigned AS = DL.getAllocaAddrSpace(); |
7630 | Instruction *AI = new AllocaInst(*PrivatizableType, AS, |
7631 | Arg->getName() + ".priv" , IP); |
7632 | createInitialization(PrivType: *PrivatizableType, Base&: *AI, F&: ReplacementFn, |
7633 | ArgNo: ArgIt->getArgNo(), IP); |
7634 | |
7635 | if (AI->getType() != Arg->getType()) |
7636 | AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( |
7637 | S: AI, Ty: Arg->getType(), Name: "" , InsertBefore: IP); |
7638 | Arg->replaceAllUsesWith(V: AI); |
7639 | |
7640 | for (CallInst *CI : TailCalls) |
7641 | CI->setTailCall(false); |
7642 | }; |
7643 | |
7644 | // Callback to repair a call site of the associated function. The elements |
7645 | // of the privatizable type are loaded prior to the call and passed to the |
7646 | // new function version. |
7647 | Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = |
7648 | [=](const Attributor::ArgumentReplacementInfo &ARI, |
7649 | AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) { |
7650 | // When no alignment is specified for the load instruction, |
7651 | // natural alignment is assumed. |
7652 | createReplacementValues( |
7653 | Alignment: AlignAA ? AlignAA->getAssumedAlign() : Align(0), |
7654 | PrivType: *PrivatizableType, ACS, |
7655 | Base: ACS.getCallArgOperand(ArgNo: ARI.getReplacedArg().getArgNo()), |
7656 | ReplacementValues&: NewArgOperands); |
7657 | }; |
7658 | |
7659 | // Collect the types that will replace the privatizable type in the function |
7660 | // signature. |
7661 | SmallVector<Type *, 16> ReplacementTypes; |
7662 | identifyReplacementTypes(PrivType: *PrivatizableType, ReplacementTypes); |
7663 | |
7664 | // Register a rewrite of the argument. |
7665 | if (A.registerFunctionSignatureRewrite(Arg&: *Arg, ReplacementTypes, |
7666 | CalleeRepairCB: std::move(FnRepairCB), |
7667 | ACSRepairCB: std::move(ACSRepairCB))) |
7668 | return ChangeStatus::CHANGED; |
7669 | return ChangeStatus::UNCHANGED; |
7670 | } |
7671 | |
7672 | /// See AbstractAttribute::trackStatistics() |
7673 | void trackStatistics() const override { |
7674 | STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); |
7675 | } |
7676 | }; |
7677 | |
7678 | struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { |
7679 | AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) |
7680 | : AAPrivatizablePtrImpl(IRP, A) {} |
7681 | |
7682 | /// See AbstractAttribute::initialize(...). |
7683 | void initialize(Attributor &A) override { |
7684 | // TODO: We can privatize more than arguments. |
7685 | indicatePessimisticFixpoint(); |
7686 | } |
7687 | |
7688 | ChangeStatus updateImpl(Attributor &A) override { |
7689 | llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" |
7690 | "updateImpl will not be called" ); |
7691 | } |
7692 | |
7693 | /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) |
7694 | std::optional<Type *> identifyPrivatizableType(Attributor &A) override { |
7695 | Value *Obj = getUnderlyingObject(V: &getAssociatedValue()); |
7696 | if (!Obj) { |
7697 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n" ); |
7698 | return nullptr; |
7699 | } |
7700 | |
7701 | if (auto *AI = dyn_cast<AllocaInst>(Val: Obj)) |
7702 | if (auto *CI = dyn_cast<ConstantInt>(Val: AI->getArraySize())) |
7703 | if (CI->isOne()) |
7704 | return AI->getAllocatedType(); |
7705 | if (auto *Arg = dyn_cast<Argument>(Val: Obj)) { |
7706 | auto *PrivArgAA = A.getAAFor<AAPrivatizablePtr>( |
7707 | QueryingAA: *this, IRP: IRPosition::argument(Arg: *Arg), DepClass: DepClassTy::REQUIRED); |
7708 | if (PrivArgAA && PrivArgAA->isAssumedPrivatizablePtr()) |
7709 | return PrivArgAA->getPrivatizableType(); |
7710 | } |
7711 | |
7712 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " |
7713 | "alloca nor privatizable argument: " |
7714 | << *Obj << "!\n" ); |
7715 | return nullptr; |
7716 | } |
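| // E.g., a pointer rooted at `%s = alloca %struct.S` (array size one) |
| // yields %struct.S, while an alloca with a non-one array size, or any |
| // underlying object other than an alloca or a privatizable argument, |
| // yields nullptr. |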
7717 | |
7718 | /// See AbstractAttribute::trackStatistics() |
7719 | void trackStatistics() const override { |
7720 | STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); |
7721 | } |
7722 | }; |
7723 | |
7724 | struct AAPrivatizablePtrCallSiteArgument final |
7725 | : public AAPrivatizablePtrFloating { |
7726 | AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) |
7727 | : AAPrivatizablePtrFloating(IRP, A) {} |
7728 | |
7729 | /// See AbstractAttribute::initialize(...). |
7730 | void initialize(Attributor &A) override { |
7731 | if (A.hasAttr(IRP: getIRPosition(), AKs: Attribute::ByVal)) |
7732 | indicateOptimisticFixpoint(); |
7733 | } |
7734 | |
7735 | /// See AbstractAttribute::updateImpl(...). |
7736 | ChangeStatus updateImpl(Attributor &A) override { |
7737 | PrivatizableType = identifyPrivatizableType(A); |
7738 | if (!PrivatizableType) |
7739 | return ChangeStatus::UNCHANGED; |
7740 | if (!*PrivatizableType) |
7741 | return indicatePessimisticFixpoint(); |
7742 | |
7743 | const IRPosition &IRP = getIRPosition(); |
7744 | bool IsKnownNoCapture; |
7745 | bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>( |
7746 | A, QueryingAA: this, IRP, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoCapture); |
7747 | if (!IsAssumedNoCapture) { |
7748 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n" ); |
7749 | return indicatePessimisticFixpoint(); |
7750 | } |
7751 | |
7752 | bool IsKnownNoAlias; |
7753 | if (!AA::hasAssumedIRAttr<Attribute::NoAlias>( |
7754 | A, QueryingAA: this, IRP, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoAlias)) { |
7755 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n" ); |
7756 | return indicatePessimisticFixpoint(); |
7757 | } |
7758 | |
7759 | bool IsKnown; |
7760 | if (!AA::isAssumedReadOnly(A, IRP, QueryingAA: *this, IsKnown)) { |
7761 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n" ); |
7762 | return indicatePessimisticFixpoint(); |
7763 | } |
7764 | |
7765 | return ChangeStatus::UNCHANGED; |
7766 | } |
7767 | |
7768 | /// See AbstractAttribute::trackStatistics() |
7769 | void trackStatistics() const override { |
7770 | STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); |
7771 | } |
7772 | }; |
7773 | |
7774 | struct AAPrivatizablePtrCallSiteReturned final |
7775 | : public AAPrivatizablePtrFloating { |
7776 | AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) |
7777 | : AAPrivatizablePtrFloating(IRP, A) {} |
7778 | |
7779 | /// See AbstractAttribute::initialize(...). |
7780 | void initialize(Attributor &A) override { |
7781 | // TODO: We can privatize more than arguments. |
7782 | indicatePessimisticFixpoint(); |
7783 | } |
7784 | |
7785 | /// See AbstractAttribute::trackStatistics() |
7786 | void trackStatistics() const override { |
7787 | STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); |
7788 | } |
7789 | }; |
7790 | |
7791 | struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { |
7792 | AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) |
7793 | : AAPrivatizablePtrFloating(IRP, A) {} |
7794 | |
7795 | /// See AbstractAttribute::initialize(...). |
7796 | void initialize(Attributor &A) override { |
7797 | // TODO: We can privatize more than arguments. |
7798 | indicatePessimisticFixpoint(); |
7799 | } |
7800 | |
7801 | /// See AbstractAttribute::trackStatistics() |
7802 | void trackStatistics() const override { |
7803 | STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); |
7804 | } |
7805 | }; |
7806 | } // namespace |
7807 | |
7808 | /// -------------------- Memory Behavior Attributes ---------------------------- |
7809 | /// Includes read-none, read-only, and write-only. |
7810 | /// ---------------------------------------------------------------------------- |
7811 | namespace { |
7812 | struct AAMemoryBehaviorImpl : public AAMemoryBehavior { |
7813 | AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) |
7814 | : AAMemoryBehavior(IRP, A) {} |
7815 | |
7816 | /// See AbstractAttribute::initialize(...). |
7817 | void initialize(Attributor &A) override { |
7818 | intersectAssumedBits(BitsEncoding: BEST_STATE); |
7819 | getKnownStateFromValue(A, IRP: getIRPosition(), State&: getState()); |
7820 | AAMemoryBehavior::initialize(A); |
7821 | } |
7822 | |
7823 | /// Return the memory behavior information encoded in the IR for \p IRP. |
7824 | static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, |
7825 | BitIntegerState &State, |
7826 | bool IgnoreSubsumingPositions = false) { |
7827 | SmallVector<Attribute, 2> Attrs; |
7828 | A.getAttrs(IRP, AKs: AttrKinds, Attrs, IgnoreSubsumingPositions); |
7829 | for (const Attribute &Attr : Attrs) { |
7830 | switch (Attr.getKindAsEnum()) { |
7831 | case Attribute::ReadNone: |
7832 | State.addKnownBits(Bits: NO_ACCESSES); |
7833 | break; |
7834 | case Attribute::ReadOnly: |
7835 | State.addKnownBits(Bits: NO_WRITES); |
7836 | break; |
7837 | case Attribute::WriteOnly: |
7838 | State.addKnownBits(Bits: NO_READS); |
7839 | break; |
7840 | default: |
7841 | llvm_unreachable("Unexpected attribute!" ); |
7842 | } |
7843 | } |
7844 | |
7845 | if (auto *I = dyn_cast<Instruction>(Val: &IRP.getAnchorValue())) { |
7846 | if (!I->mayReadFromMemory()) |
7847 | State.addKnownBits(Bits: NO_READS); |
7848 | if (!I->mayWriteToMemory()) |
7849 | State.addKnownBits(Bits: NO_WRITES); |
7850 | } |
7851 | } |
7852 | |
7853 | /// See AbstractAttribute::getDeducedAttributes(...). |
7854 | void getDeducedAttributes(Attributor &A, LLVMContext &Ctx, |
7855 | SmallVectorImpl<Attribute> &Attrs) const override { |
7856 | assert(Attrs.size() == 0); |
7857 | if (isAssumedReadNone()) |
7858 | Attrs.push_back(Elt: Attribute::get(Context&: Ctx, Kind: Attribute::ReadNone)); |
7859 | else if (isAssumedReadOnly()) |
7860 | Attrs.push_back(Elt: Attribute::get(Context&: Ctx, Kind: Attribute::ReadOnly)); |
7861 | else if (isAssumedWriteOnly()) |
7862 | Attrs.push_back(Elt: Attribute::get(Context&: Ctx, Kind: Attribute::WriteOnly)); |
7863 | assert(Attrs.size() <= 1); |
7864 | } |
7865 | |
7866 | /// See AbstractAttribute::manifest(...). |
7867 | ChangeStatus manifest(Attributor &A) override { |
7868 | const IRPosition &IRP = getIRPosition(); |
7869 | |
7870 | if (A.hasAttr(IRP, AKs: Attribute::ReadNone, |
7871 | /* IgnoreSubsumingPositions */ true)) |
7872 | return ChangeStatus::UNCHANGED; |
7873 | |
7874 | // Check if we would improve the existing attributes first. |
7875 | SmallVector<Attribute, 4> DeducedAttrs; |
7876 | getDeducedAttributes(A, Ctx&: IRP.getAnchorValue().getContext(), Attrs&: DeducedAttrs); |
7877 | if (llvm::all_of(Range&: DeducedAttrs, P: [&](const Attribute &Attr) { |
7878 | return A.hasAttr(IRP, AKs: Attr.getKindAsEnum(), |
7879 | /* IgnoreSubsumingPositions */ true); |
7880 | })) |
7881 | return ChangeStatus::UNCHANGED; |
7882 | |
7883 | // Clear existing attributes. |
7884 | A.removeAttrs(IRP, AttrKinds); |
7885 | // Clear conflicting writable attribute. |
7886 | if (isAssumedReadOnly()) |
7887 | A.removeAttrs(IRP, AttrKinds: Attribute::Writable); |
7888 | |
7889 | // Use the generic manifest method. |
7890 | return IRAttribute::manifest(A); |
7891 | } |
7892 | |
7893 | /// See AbstractState::getAsStr(). |
7894 | const std::string getAsStr(Attributor *A) const override { |
7895 | if (isAssumedReadNone()) |
7896 | return "readnone" ; |
7897 | if (isAssumedReadOnly()) |
7898 | return "readonly" ; |
7899 | if (isAssumedWriteOnly()) |
7900 | return "writeonly" ; |
7901 | return "may-read/write" ; |
7902 | } |
7903 | |
7904 | /// The set of IR attributes AAMemoryBehavior deals with. |
7905 | static const Attribute::AttrKind AttrKinds[3]; |
7906 | }; |
7907 | |
7908 | const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { |
7909 | Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; |
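| // Note: the bits compose, i.e. NO_ACCESSES == NO_READS | NO_WRITES, so a |
| // position that provably never writes manifests `readonly`, one that never |
| // reads manifests `writeonly`, and one with no accesses at all manifests |
| // `readnone` (see getDeducedAttributes above). |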
7910 | |
7911 | /// Memory behavior attribute for a floating value. |
7912 | struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { |
7913 | AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) |
7914 | : AAMemoryBehaviorImpl(IRP, A) {} |
7915 | |
7916 | /// See AbstractAttribute::updateImpl(...). |
7917 | ChangeStatus updateImpl(Attributor &A) override; |
7918 | |
7919 | /// See AbstractAttribute::trackStatistics() |
7920 | void trackStatistics() const override { |
7921 | if (isAssumedReadNone()) |
7922 | STATS_DECLTRACK_FLOATING_ATTR(readnone) |
7923 | else if (isAssumedReadOnly()) |
7924 | STATS_DECLTRACK_FLOATING_ATTR(readonly) |
7925 | else if (isAssumedWriteOnly()) |
7926 | STATS_DECLTRACK_FLOATING_ATTR(writeonly) |
7927 | } |
7928 | |
7929 | private: |
7930 | /// Return true if users of \p UserI might access the underlying |
7931 | /// variable/location described by \p U and should therefore be analyzed. |
7932 | bool followUsersOfUseIn(Attributor &A, const Use &U, |
7933 | const Instruction *UserI); |
7934 | |
7935 | /// Update the state according to the effect of use \p U in \p UserI. |
7936 | void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI); |
7937 | }; |
7938 | |
7939 | /// Memory behavior attribute for a function argument. |
7940 | struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { |
7941 | AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) |
7942 | : AAMemoryBehaviorFloating(IRP, A) {} |
7943 | |
7944 | /// See AbstractAttribute::initialize(...). |
7945 | void initialize(Attributor &A) override { |
7946 | intersectAssumedBits(BitsEncoding: BEST_STATE); |
7947 | const IRPosition &IRP = getIRPosition(); |
7948 | // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we |
7949 | // can query it when we use has/getAttr. That would allow us to reuse the |
7950 | // initialize of the base class here. |
7951 | bool HasByVal = A.hasAttr(IRP, AKs: {Attribute::ByVal}, |
7952 | /* IgnoreSubsumingPositions */ true); |
7953 | getKnownStateFromValue(A, IRP, State&: getState(), |
7954 | /* IgnoreSubsumingPositions */ HasByVal); |
7955 | } |
7956 | |
7957 | ChangeStatus manifest(Attributor &A) override { |
7958 | // TODO: Pointer arguments are not supported on vectors of pointers yet. |
7959 | if (!getAssociatedValue().getType()->isPointerTy()) |
7960 | return ChangeStatus::UNCHANGED; |
7961 | |
7962 | // TODO: From readattrs.ll: "inalloca parameters are always |
7963 | // considered written" |
7964 | if (A.hasAttr(IRP: getIRPosition(), |
7965 | AKs: {Attribute::InAlloca, Attribute::Preallocated})) { |
7966 | removeKnownBits(BitsEncoding: NO_WRITES); |
7967 | removeAssumedBits(BitsEncoding: NO_WRITES); |
7968 | } |
7969 | A.removeAttrs(IRP: getIRPosition(), AttrKinds); |
7970 | return AAMemoryBehaviorFloating::manifest(A); |
7971 | } |
7972 | |
7973 | /// See AbstractAttribute::trackStatistics() |
7974 | void trackStatistics() const override { |
7975 | if (isAssumedReadNone()) |
7976 | STATS_DECLTRACK_ARG_ATTR(readnone) |
7977 | else if (isAssumedReadOnly()) |
7978 | STATS_DECLTRACK_ARG_ATTR(readonly) |
7979 | else if (isAssumedWriteOnly()) |
7980 | STATS_DECLTRACK_ARG_ATTR(writeonly) |
7981 | } |
7982 | }; |
7983 | |
7984 | struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { |
7985 | AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) |
7986 | : AAMemoryBehaviorArgument(IRP, A) {} |
7987 | |
7988 | /// See AbstractAttribute::initialize(...). |
7989 | void initialize(Attributor &A) override { |
7990 | // If we don't have an associated attribute this is either a variadic call |
7991 | // or an indirect call, either way, nothing to do here. |
7992 | Argument *Arg = getAssociatedArgument(); |
7993 | if (!Arg) { |
7994 | indicatePessimisticFixpoint(); |
7995 | return; |
7996 | } |
7997 | if (Arg->hasByValAttr()) { |
7998 | addKnownBits(Bits: NO_WRITES); |
7999 | removeKnownBits(BitsEncoding: NO_READS); |
8000 | removeAssumedBits(BitsEncoding: NO_READS); |
8001 | } |
8002 | AAMemoryBehaviorArgument::initialize(A); |
8003 | if (getAssociatedFunction()->isDeclaration()) |
8004 | indicatePessimisticFixpoint(); |
8005 | } |
8006 | |
8007 | /// See AbstractAttribute::updateImpl(...). |
8008 | ChangeStatus updateImpl(Attributor &A) override { |
8009 | // TODO: Once we have call site specific value information we can provide |
8010 | // call site specific liveness liveness information and then it makes |
8011 | // sense to specialize attributes for call sites arguments instead of |
8012 | // redirecting requests to the callee argument. |
8013 | Argument *Arg = getAssociatedArgument(); |
8014 | const IRPosition &ArgPos = IRPosition::argument(Arg: *Arg); |
8015 | auto *ArgAA = |
8016 | A.getAAFor<AAMemoryBehavior>(QueryingAA: *this, IRP: ArgPos, DepClass: DepClassTy::REQUIRED); |
8017 | if (!ArgAA) |
8018 | return indicatePessimisticFixpoint(); |
8019 | return clampStateAndIndicateChange(S&: getState(), R: ArgAA->getState()); |
8020 | } |
8021 | |
8022 | /// See AbstractAttribute::trackStatistics() |
8023 | void trackStatistics() const override { |
8024 | if (isAssumedReadNone()) |
8025 | STATS_DECLTRACK_CSARG_ATTR(readnone) |
8026 | else if (isAssumedReadOnly()) |
8027 | STATS_DECLTRACK_CSARG_ATTR(readonly) |
8028 | else if (isAssumedWriteOnly()) |
8029 | STATS_DECLTRACK_CSARG_ATTR(writeonly) |
8030 | } |
8031 | }; |
8032 | |
8033 | /// Memory behavior attribute for a call site return position. |
8034 | struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating { |
8035 | AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A) |
8036 | : AAMemoryBehaviorFloating(IRP, A) {} |
8037 | |
8038 | /// See AbstractAttribute::initialize(...). |
8039 | void initialize(Attributor &A) override { |
8040 | AAMemoryBehaviorImpl::initialize(A); |
8041 | } |
8042 | /// See AbstractAttribute::manifest(...). |
8043 | ChangeStatus manifest(Attributor &A) override { |
8044 | // We do not annotate returned values. |
8045 | return ChangeStatus::UNCHANGED; |
8046 | } |
8047 | |
8048 | /// See AbstractAttribute::trackStatistics() |
8049 | void trackStatistics() const override {} |
8050 | }; |
8051 | |
8052 | /// An AA to represent the memory behavior function attributes. |
8053 | struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl { |
8054 | AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A) |
8055 | : AAMemoryBehaviorImpl(IRP, A) {} |
8056 | |
8057 | /// See AbstractAttribute::updateImpl(Attributor &A). |
8058 | ChangeStatus updateImpl(Attributor &A) override; |
8059 | |
8060 | /// See AbstractAttribute::manifest(...). |
8061 | ChangeStatus manifest(Attributor &A) override { |
8062 | // TODO: It would be better to merge this with AAMemoryLocation, so that |
8063 | // we could determine read/write per location. This would also have the |
8064 | // benefit of only one place trying to manifest the memory attribute. |
8065 | Function &F = cast<Function>(Val&: getAnchorValue()); |
8066 | MemoryEffects ME = MemoryEffects::unknown(); |
8067 | if (isAssumedReadNone()) |
8068 | ME = MemoryEffects::none(); |
8069 | else if (isAssumedReadOnly()) |
8070 | ME = MemoryEffects::readOnly(); |
8071 | else if (isAssumedWriteOnly()) |
8072 | ME = MemoryEffects::writeOnly(); |
8073 | |
8074 | A.removeAttrs(IRP: getIRPosition(), AttrKinds); |
8075 | // Clear conflicting writable attribute. |
8076 | if (ME.onlyReadsMemory()) |
8077 | for (Argument &Arg : F.args()) |
8078 | A.removeAttrs(IRP: IRPosition::argument(Arg), AttrKinds: Attribute::Writable); |
8079 | return A.manifestAttrs(IRP: getIRPosition(), |
8080 | DeducedAttrs: Attribute::getWithMemoryEffects(Context&: F.getContext(), ME)); |
8081 | } |
8082 | |
8083 | /// See AbstractAttribute::trackStatistics() |
8084 | void trackStatistics() const override { |
8085 | if (isAssumedReadNone()) |
8086 | STATS_DECLTRACK_FN_ATTR(readnone) |
8087 | else if (isAssumedReadOnly()) |
8088 | STATS_DECLTRACK_FN_ATTR(readonly) |
8089 | else if (isAssumedWriteOnly()) |
8090 | STATS_DECLTRACK_FN_ATTR(writeonly) |
8091 | } |
8092 | }; |
8093 | |
8094 | /// AAMemoryBehavior attribute for call sites. |
8095 | struct AAMemoryBehaviorCallSite final |
8096 | : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl> { |
8097 | AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A) |
8098 | : AACalleeToCallSite<AAMemoryBehavior, AAMemoryBehaviorImpl>(IRP, A) {} |
8099 | |
8100 | /// See AbstractAttribute::manifest(...). |
8101 | ChangeStatus manifest(Attributor &A) override { |
8102 | // TODO: Deduplicate this with AAMemoryBehaviorFunction. |
8103 | CallBase &CB = cast<CallBase>(Val&: getAnchorValue()); |
8104 | MemoryEffects ME = MemoryEffects::unknown(); |
8105 | if (isAssumedReadNone()) |
8106 | ME = MemoryEffects::none(); |
8107 | else if (isAssumedReadOnly()) |
8108 | ME = MemoryEffects::readOnly(); |
8109 | else if (isAssumedWriteOnly()) |
8110 | ME = MemoryEffects::writeOnly(); |
8111 | |
8112 | A.removeAttrs(IRP: getIRPosition(), AttrKinds); |
8113 | // Clear conflicting writable attribute. |
8114 | if (ME.onlyReadsMemory()) |
8115 | for (Use &U : CB.args()) |
8116 | A.removeAttrs(IRP: IRPosition::callsite_argument(CB, ArgNo: U.getOperandNo()), |
8117 | AttrKinds: Attribute::Writable); |
8118 | return A.manifestAttrs( |
8119 | IRP: getIRPosition(), DeducedAttrs: Attribute::getWithMemoryEffects(Context&: CB.getContext(), ME)); |
8120 | } |
8121 | |
8122 | /// See AbstractAttribute::trackStatistics() |
8123 | void trackStatistics() const override { |
8124 | if (isAssumedReadNone()) |
8125 | STATS_DECLTRACK_CS_ATTR(readnone) |
8126 | else if (isAssumedReadOnly()) |
8127 | STATS_DECLTRACK_CS_ATTR(readonly) |
8128 | else if (isAssumedWriteOnly()) |
8129 | STATS_DECLTRACK_CS_ATTR(writeonly) |
8130 | } |
8131 | }; |
8132 | |
8133 | ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) { |
8134 | |
8135 | // The current assumed state used to determine a change. |
8136 | auto AssumedState = getAssumed(); |
8137 | |
8138 | auto CheckRWInst = [&](Instruction &I) { |
8139 | // If the instruction has an own memory behavior state, use it to restrict |
8140 | // the local state. No further analysis is required as the other memory |
8141 | // state is as optimistic as it gets. |
8142 | if (const auto *CB = dyn_cast<CallBase>(Val: &I)) { |
8143 | const auto *MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( |
8144 | QueryingAA: *this, IRP: IRPosition::callsite_function(CB: *CB), DepClass: DepClassTy::REQUIRED); |
8145 | if (MemBehaviorAA) { |
8146 | intersectAssumedBits(BitsEncoding: MemBehaviorAA->getAssumed()); |
8147 | return !isAtFixpoint(); |
8148 | } |
8149 | } |
8150 | |
8151 | // Remove access kind modifiers if necessary. |
8152 | if (I.mayReadFromMemory()) |
8153 | removeAssumedBits(BitsEncoding: NO_READS); |
8154 | if (I.mayWriteToMemory()) |
8155 | removeAssumedBits(BitsEncoding: NO_WRITES); |
8156 | return !isAtFixpoint(); |
8157 | }; |
8158 | |
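  // If not every read/write instruction in the body can be inspected (e.g.,
  // because the function is not fully known to the Attributor), the walk
  // below fails and we conservatively fall back to the pessimistic state.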
  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                          UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto *FnMemAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
    if (FnMemAA) {
      FnMemAssumedState = FnMemAA->getAssumed();
      S.addKnownBits(FnMemAA->getKnown());
      if ((S.getAssumed() & FnMemAA->getAssumed()) == S.getAssumed())
        return ChangeStatus::UNCHANGED;
    }
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
  bool IsKnownNoCapture;
  const AANoCapture *ArgNoCaptureAA = nullptr;
  bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>(
      A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture, false,
      &ArgNoCaptureAA);

  if (!IsAssumedNoCapture &&
      (!ArgNoCaptureAA || !ArgNoCaptureAA->isAssumedNoCaptureMaybeReturned())) {
    S.intersectAssumedBits(FnMemAssumedState);
    return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                          : ChangeStatus::UNCHANGED;
  }

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
    Instruction *UserI = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
                      << " \n");

    // Droppable users, e.g., llvm::assume, do not actually perform any action.
    if (UserI->isDroppable())
      return true;

    // Check if the users of UserI should also be visited.
    Follow = followUsersOfUseIn(A, U, UserI);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);

    return !isAtFixpoint();
  };

  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
                                                  const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, no need to
  // follow the users of the load.
  if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
    return false;

  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
  const auto *CB = dyn_cast<CallBase>(UserI);
  if (!CB || !CB->isArgOperand(&U))
    return true;

  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might return the argument "through return", which we allow and for
  // which we need to check call users.
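  // For example (illustrative IR): in `%q = call ptr @identity(ptr %p)` the
  // callee may return %p, so accesses through %q are really accesses through
  // %p, which is why the users of the call must be visited as well.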
  if (U.get()->getType()->isPointerTy()) {
    unsigned ArgNo = CB->getArgOperandNo(&U);
    bool IsKnownNoCapture;
    return !AA::hasAssumedIRAttr<Attribute::Captures>(
        A, this, IRPosition::callsite_argument(*CB, ArgNo),
        DepClassTy::OPTIONAL, IsKnownNoCapture);
  }

  return true;
}

void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
                                            const Instruction *UserI) {
  assert(UserI->mayReadOrWriteMemory());

  switch (UserI->getOpcode()) {
  default:
    // TODO: Handle all atomics and other side-effect operations we know of.
    break;
  case Instruction::Load:
    // Loads cause the NO_READS property to disappear.
    removeAssumedBits(NO_READS);
    return;

  case Instruction::Store:
    // Stores cause the NO_WRITES property to disappear if the use is the
    // pointer operand. Note that while capturing was taken care of somewhere
    // else we need to deal with stores of the value that is not looked
    // through.
    if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
      removeAssumedBits(NO_WRITES);
    else
      indicatePessimisticFixpoint();
    return;

  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke: {
    // For call sites we look at the argument memory behavior attribute (this
    // could be recursive!) in order to restrict our own state.
    const auto *CB = cast<CallBase>(UserI);

    // Give up on operand bundles.
    if (CB->isBundleOperand(&U)) {
      indicatePessimisticFixpoint();
      return;
    }

    // Calling a function does read the function pointer, maybe write it if the
    // function is self-modifying.
    if (CB->isCallee(&U)) {
      removeAssumedBits(NO_READS);
      break;
    }

    // Adjust the possible access behavior based on the information on the
    // argument.
    IRPosition Pos;
    if (U.get()->getType()->isPointerTy())
      Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
    else
      Pos = IRPosition::callsite_function(*CB);
    const auto *MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
    if (!MemBehaviorAA)
      break;
    // "assumed" has at most the same bits as the MemBehaviorAA assumed
    // and at least "known".
    intersectAssumedBits(MemBehaviorAA->getAssumed());
    return;
  }
  };

  // Generally, look at the "may-properties" and adjust the assumed state if we
  // did not trigger special handling before.
  if (UserI->mayReadFromMemory())
    removeAssumedBits(NO_READS);
  if (UserI->mayWriteToMemory())
    removeAssumedBits(NO_WRITES);
}
} // namespace

/// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblememorargmemonly
/// ----------------------------------------------------------------------------

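// For example, a state in which only the NO_LOCAL_MEM and NO_ARGUMENT_MEM bits
// are cleared (i.e., stack and argument memory may still be accessed) prints
// as "memory:stack,argument" below.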
std::string AAMemoryLocation::getMemoryLocationsAsStr(
    AAMemoryLocation::MemoryLocationsKind MLK) {
  if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
    return "all memory";
  if (MLK == AAMemoryLocation::NO_LOCATIONS)
    return "no memory";
  std::string S = "memory:";
  if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
    S += "stack,";
  if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
    S += "constant,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
    S += "internal global,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
    S += "external global,";
  if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
    S += "argument,";
  if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
    S += "inaccessible,";
  if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
    S += "malloced,";
  if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
    S += "unknown,";
  S.pop_back();
  return S;
}

namespace {
struct AAMemoryLocationImpl : public AAMemoryLocation {

  AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
    AccessKind2Accesses.fill(nullptr);
  }

  ~AAMemoryLocationImpl() {
    // The AccessSets are allocated via a BumpPtrAllocator, we call
    // the destructor manually.
    for (AccessSet *AS : AccessKind2Accesses)
      if (AS)
        AS->~AccessSet();
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(A, getIRPosition(), getState());
    AAMemoryLocation::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblememorargmemonly` as we might break it via interprocedural
    // constant propagation. It is unclear if this is the best way but it is
    // unlikely this will cause real performance problems. If we are deriving
    // attributes for the anchor function we even remove the attribute in
    // addition to ignoring it.
    // TODO: A better way to handle this would be to add ~NO_GLOBAL_MEM /
    // MemoryEffects::Other as a possible location.
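    // Roughly, an existing `memory(argmem: readwrite)` attribute is mapped
    // via inverseLocation(NO_ARGUMENT_MEM, ...) below; under that mapping
    // every location other than argument-pointee memory (and, depending on
    // the flags, local and constant memory) becomes known-not-accessed.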
    bool UseArgMemOnly = true;
    Function *AnchorFn = IRP.getAnchorScope();
    if (AnchorFn && A.isRunOn(*AnchorFn))
      UseArgMemOnly = !AnchorFn->hasLocalLinkage();

    SmallVector<Attribute, 2> Attrs;
    A.getAttrs(IRP, {Attribute::Memory}, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      // TODO: We can map MemoryEffects to Attributor locations more precisely.
      MemoryEffects ME = Attr.getMemoryEffects();
      if (ME.doesNotAccessMemory()) {
        State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
        continue;
      }
      if (ME.onlyAccessesInaccessibleMem()) {
        State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
        continue;
      }
      if (ME.onlyAccessesArgPointees()) {
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
        else {
          // Remove location information, only keep read/write info.
          ME = MemoryEffects(ME.getModRef());
          A.manifestAttrs(IRP,
                          Attribute::getWithMemoryEffects(
                              IRP.getAnchorValue().getContext(), ME),
                          /*ForceReplace*/ true);
        }
        continue;
      }
      if (ME.onlyAccessesInaccessibleOrArgMem()) {
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(
              NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
        else {
          // Remove location information, only keep read/write info.
          ME = MemoryEffects(ME.getModRef());
          A.manifestAttrs(IRP,
                          Attribute::getWithMemoryEffects(
                              IRP.getAnchorValue().getContext(), ME),
                          /*ForceReplace*/ true);
        }
        continue;
      }
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: We can map Attributor locations to MemoryEffects more precisely.
    assert(Attrs.size() == 0);
    if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
      if (isAssumedReadNone())
        Attrs.push_back(
            Attribute::getWithMemoryEffects(Ctx, MemoryEffects::none()));
      else if (isAssumedInaccessibleMemOnly())
        Attrs.push_back(Attribute::getWithMemoryEffects(
            Ctx, MemoryEffects::inaccessibleMemOnly()));
      else if (isAssumedArgMemOnly())
        Attrs.push_back(
            Attribute::getWithMemoryEffects(Ctx, MemoryEffects::argMemOnly()));
      else if (isAssumedInaccessibleOrArgMemOnly())
        Attrs.push_back(Attribute::getWithMemoryEffects(
            Ctx, MemoryEffects::inaccessibleOrArgMemOnly()));
    }
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // TODO: If AAMemoryLocation and AAMemoryBehavior are merged, we could
    // provide per-location modref information here.
    const IRPosition &IRP = getIRPosition();

    SmallVector<Attribute, 1> DeducedAttrs;
    getDeducedAttributes(A, IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (DeducedAttrs.size() != 1)
      return ChangeStatus::UNCHANGED;
    MemoryEffects ME = DeducedAttrs[0].getMemoryEffects();

    return A.manifestAttrs(IRP, Attribute::getWithMemoryEffects(
                                    IRP.getAnchorValue().getContext(), ME));
  }

  /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
  bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind RequestedMLK) const override {
    if (!isValidState())
      return false;

    MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
    if (AssumedMLK == NO_LOCATIONS)
      return true;

    unsigned Idx = 0;
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
         CurMLK *= 2, ++Idx) {
      if (CurMLK & RequestedMLK)
        continue;

      if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
        for (const AccessInfo &AI : *Accesses)
          if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
            return false;
    }

    return true;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint this instruction will
    // become an access for all potential access kinds:
    // TODO: Add pointers for argmemonly and globals to improve the results of
    // checkForAllAccessesToMemoryKind.
    bool Changed = false;
    MemoryLocationsKind KnownMLK = getKnown();
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
      if (!(CurMLK & KnownMLK))
        updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
                                  getAccessKindFromInst(I));
    return AAMemoryLocation::indicatePessimisticFixpoint();
  }

protected:
  /// Helper struct to tie together an instruction that has a read or write
  /// effect with the pointer it accesses (if any).
  struct AccessInfo {

    /// The instruction that caused the access.
    const Instruction *I;

    /// The base pointer that is accessed, or null if unknown.
    const Value *Ptr;

    /// The kind of access (read/write/read+write).
    AccessKind Kind;

    bool operator==(const AccessInfo &RHS) const {
      return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
    }
    bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
      if (LHS.I != RHS.I)
        return LHS.I < RHS.I;
      if (LHS.Ptr != RHS.Ptr)
        return LHS.Ptr < RHS.Ptr;
      if (LHS.Kind != RHS.Kind)
        return LHS.Kind < RHS.Kind;
      return false;
    }
  };

  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
  /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
  using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
  std::array<AccessSet *, llvm::CTLog2<VALID_STATE>()> AccessKind2Accesses;
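  // Since each NO_* kind is a single bit, the accesses for, e.g., local
  // (stack) memory are found at AccessKind2Accesses[Log2_32(NO_LOCAL_MEM)].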

  /// Categorize the pointer arguments of CB that might access memory in
  /// AccessedLoc and update the state and access map accordingly.
  void
  categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
                                     AAMemoryLocation::StateType &AccessedLocs,
                                     bool &Changed);

  /// Return the kind(s) of location that may be accessed by \p V.
  AAMemoryLocation::MemoryLocationsKind
  categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);

  /// Return the access kind as determined by \p I.
  AccessKind getAccessKindFromInst(const Instruction *I) {
    AccessKind AK = READ_WRITE;
    if (I) {
      AK = I->mayReadFromMemory() ? READ : NONE;
      AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
    }
    return AK;
  }

  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    if (MLK == NO_UNKOWN_MEM)
      MLK = NO_LOCATIONS;
    State.removeAssumedBits(MLK);
  }

  /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed,
                          unsigned AccessAS = 0);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;
};

void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed, unsigned AccessAS) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  auto Pred = [&](Value &Obj) {
    unsigned ObjectAS = Obj.getType()->getPointerAddressSpace();
    // TODO: recognize the TBAA used for constant accesses.
    MemoryLocationsKind MLK = NO_LOCATIONS;

    // Filter accesses to constant (GPU) memory if we have an AS at the access
    // site or the object is known to actually have the associated AS.
    if ((AccessAS == (unsigned)AA::GPUAddressSpace::Constant ||
         (ObjectAS == (unsigned)AA::GPUAddressSpace::Constant &&
          isIdentifiedObject(&Obj))) &&
        AA::isGPU(*I.getModule()))
      return true;

    if (isa<UndefValue>(&Obj))
      return true;
    if (isa<Argument>(&Obj)) {
      // TODO: For now we do not treat byval arguments as local copies
      // performed on the call edge, though, we should. To make that happen we
      // need to teach various passes, e.g., DSE, about the copy effect of a
      // byval. That would also allow us to mark functions only accessing
      // byval arguments as readnone again, arguably their accesses have no
      // effect outside of the function, like accesses to allocas.
      MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(&Obj)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we won't either. Constants defined by TBAA are
      // similar. (We know we do not write it because it is constant.)
      if (auto *GVar = dyn_cast<GlobalVariable>(GV))
        if (GVar->isConstant())
          return true;

      if (GV->hasLocalLinkage())
        MLK = NO_GLOBAL_INTERNAL_MEM;
      else
        MLK = NO_GLOBAL_EXTERNAL_MEM;
    } else if (isa<ConstantPointerNull>(&Obj) &&
               (!NullPointerIsDefined(getAssociatedFunction(), AccessAS) ||
                !NullPointerIsDefined(getAssociatedFunction(), ObjectAS))) {
      return true;
    } else if (isa<AllocaInst>(&Obj)) {
      MLK = NO_LOCAL_MEM;
    } else if (const auto *CB = dyn_cast<CallBase>(&Obj)) {
      bool IsKnownNoAlias;
      if (AA::hasAssumedIRAttr<Attribute::NoAlias>(
              A, this, IRPosition::callsite_returned(*CB),
              DepClassTy::OPTIONAL, IsKnownNoAlias))
        MLK = NO_MALLOCED_MEM;
      else
        MLK = NO_UNKOWN_MEM;
    } else {
      MLK = NO_UNKOWN_MEM;
    }

    assert(MLK != NO_LOCATIONS && "No location specified!");
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
                      << Obj << " -> " << getMemoryLocationsAsStr(MLK) << "\n");
    updateStateAndAccessesMap(State, MLK, &I, &Obj, Changed,
                              getAccessKindFromInst(&I));

    return true;
  };

  const auto *AA = A.getAAFor<AAUnderlyingObjects>(
      *this, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
  if (!AA || !AA->forallUnderlyingObjects(Pred, AA::Intraprocedural)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
    return;
  }

  LLVM_DEBUG(
      dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
             << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
}

void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
    Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
    bool &Changed) {
  for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {

    // Skip non-pointer arguments.
    const Value *ArgOp = CB.getArgOperand(ArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      continue;

    // Skip readnone arguments.
    const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
    const auto *ArgOpMemLocationAA =
        A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);

    if (ArgOpMemLocationAA && ArgOpMemLocationAA->isAssumedReadNone())
      continue;

    // Categorize potentially accessed pointer arguments as if there was an
    // access instruction with them as pointer.
    categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
  }
}

AAMemoryLocation::MemoryLocationsKind
AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
                                                  bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
                    << I << "\n");

  AAMemoryLocation::StateType AccessedLocs;
  AccessedLocs.intersectAssumedBits(NO_LOCATIONS);

  if (auto *CB = dyn_cast<CallBase>(&I)) {

    // First check if we assume any accessed memory is visible.
    const auto *CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
        *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
                      << " [" << CBMemLocationAA << "]\n");
    if (!CBMemLocationAA) {
      updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr,
                                Changed, getAccessKindFromInst(&I));
      return NO_UNKOWN_MEM;
    }

    if (CBMemLocationAA->isAssumedReadNone())
      return NO_LOCATIONS;

    if (CBMemLocationAA->isAssumedInaccessibleMemOnly()) {
      updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
                                Changed, getAccessKindFromInst(&I));
      return AccessedLocs.getAssumed();
    }

    uint32_t CBAssumedNotAccessedLocs =
        CBMemLocationAA->getAssumedNotAccessedLocation();

    // Set the argmemonly and global bit as we handle them separately below.
    uint32_t CBAssumedNotAccessedLocsNoArgMem =
        CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;

    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
      if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
        continue;
      updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
                                getAccessKindFromInst(&I));
    }

    // Now handle global memory if it might be accessed. This is slightly
    // tricky as NO_GLOBAL_MEM has multiple bits set.
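    // Concretely, NO_GLOBAL_MEM covers both NO_GLOBAL_INTERNAL_MEM and
    // NO_GLOBAL_EXTERNAL_MEM, so it cannot be handled by the single-bit loop
    // above.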
    bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
    if (HasGlobalAccesses) {
      auto AccessPred = [&](const Instruction *, const Value *Ptr,
                            AccessKind Kind, MemoryLocationsKind MLK) {
        updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
                                  getAccessKindFromInst(&I));
        return true;
      };
      if (!CBMemLocationAA->checkForAllAccessesToMemoryKind(
              AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
        return AccessedLocs.getWorstState();
    }

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    // Now handle argument memory if it might be accessed.
    bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
    if (HasArgAccesses)
      categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    return AccessedLocs.getAssumed();
  }

  if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
               << I << " [" << *Ptr << "]\n");
    categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed,
                       Ptr->getType()->getPointerAddressSpace());
    return AccessedLocs.getAssumed();
  }

  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
                    << I << "\n");
  updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
                            getAccessKindFromInst(&I));
  return AccessedLocs.getAssumed();
}

/// An AA to represent the memory location function attributes.
struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
  AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {

    const auto *MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (MemBehaviorAA && MemBehaviorAA->isAssumedReadNone()) {
      if (MemBehaviorAA->isKnownReadNone())
        return indicateOptimisticFixpoint();
      assert(isAssumedReadNone() &&
             "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
      A.recordDependence(*MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    // The current assumed state used to determine a change.
    auto AssumedState = getAssumed();
    bool Changed = false;

    auto CheckRWInst = [&](Instruction &I) {
      MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
                        << ": " << getMemoryLocationsAsStr(MLK) << "\n");
      removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // thus once we don't actually exclude any memory locations in the state.
      return getAssumedNotAccessedLocation() != VALID_STATE;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                            UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    Changed |= AssumedState != getAssumed();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(argmemonly)
    else if (isAssumedInaccessibleMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
    else if (isAssumedInaccessibleOrArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
  }
};

/// AAMemoryLocation attribute for call sites.
struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
  AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto *FnAA =
        A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
    if (!FnAA)
      return indicatePessimisticFixpoint();
    bool Changed = false;
    auto AccessPred = [&](const Instruction *I, const Value *Ptr,
                          AccessKind Kind, MemoryLocationsKind MLK) {
      updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
                                getAccessKindFromInst(I));
      return true;
    };
    if (!FnAA->checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
      return indicatePessimisticFixpoint();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
  }
};
} // namespace

/// ------------------ denormal-fp-math Attribute -------------------------

namespace {
struct AADenormalFPMathImpl : public AADenormalFPMath {
  AADenormalFPMathImpl(const IRPosition &IRP, Attributor &A)
      : AADenormalFPMath(IRP, A) {}

  const std::string getAsStr(Attributor *A) const override {
    std::string Str("AADenormalFPMath[");
    raw_string_ostream OS(Str);

    DenormalState Known = getKnown();
    if (Known.Mode.isValid())
      OS << "denormal-fp-math=" << Known.Mode;
    else
      OS << "invalid";

    if (Known.ModeF32.isValid())
      OS << " denormal-fp-math-f32=" << Known.ModeF32;
    OS << ']';
    return Str;
  }
};

struct AADenormalFPMathFunction final : AADenormalFPMathImpl {
  AADenormalFPMathFunction(const IRPosition &IRP, Attributor &A)
      : AADenormalFPMathImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    DenormalMode Mode = F->getDenormalModeRaw();
    DenormalMode ModeF32 = F->getDenormalModeF32Raw();

    // TODO: Handling this here prevents handling the case where a callee has
    // a fixed denormal-fp-math with dynamic denormal-fp-math-f32, but is
    // called from a function with a fully fixed mode.
    if (ModeF32 == DenormalMode::getInvalid())
      ModeF32 = Mode;
    Known = DenormalState{Mode, ModeF32};
    if (isModeFixed())
      indicateFixpoint();
  }

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto CheckCallSite = [=, &Change, &A](AbstractCallSite CS) {
      Function *Caller = CS.getInstruction()->getFunction();
      LLVM_DEBUG(dbgs() << "[AADenormalFPMath] Call " << Caller->getName()
                        << "->" << getAssociatedFunction()->getName() << '\n');

      const auto *CallerInfo = A.getAAFor<AADenormalFPMath>(
          *this, IRPosition::function(*Caller), DepClassTy::REQUIRED);
      if (!CallerInfo)
        return false;

      Change = Change | clampStateAndIndicateChange(this->getState(),
                                                    CallerInfo->getState());
      return true;
    };

    bool AllCallSitesKnown = true;
    if (!A.checkForAllCallSites(CheckCallSite, *this, true, AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    if (Change == ChangeStatus::CHANGED && isModeFixed())
      indicateFixpoint();
    return Change;
  }

  ChangeStatus manifest(Attributor &A) override {
    LLVMContext &Ctx = getAssociatedFunction()->getContext();

    SmallVector<Attribute, 2> AttrToAdd;
    SmallVector<StringRef, 2> AttrToRemove;
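    // Note: DenormalMode::str() produces the "<output>,<input>" form used by
    // the string attribute, e.g. "preserve-sign,preserve-sign"; the IEEE
    // default needs no attribute at all, hence the removals below.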
    if (Known.Mode == DenormalMode::getDefault()) {
      AttrToRemove.push_back("denormal-fp-math");
    } else {
      AttrToAdd.push_back(
          Attribute::get(Ctx, "denormal-fp-math", Known.Mode.str()));
    }

    if (Known.ModeF32 != Known.Mode) {
      AttrToAdd.push_back(
          Attribute::get(Ctx, "denormal-fp-math-f32", Known.ModeF32.str()));
    } else {
      AttrToRemove.push_back("denormal-fp-math-f32");
    }

    auto &IRP = getIRPosition();

    // TODO: There should be a combined add and remove API.
    return A.removeAttrs(IRP, AttrToRemove) |
           A.manifestAttrs(IRP, AttrToAdd, /*ForceReplace=*/true);
  }

  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(denormal_fp_math)
  }
};
} // namespace

/// ------------------ Value Constant Range Attribute -------------------------

namespace {
struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (A.hasSimplificationCallback(getIRPosition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // Intersect a range given by SCEV.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Intersect a range given by LVI.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr(Attributor *A) const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << "range(" << getBitWidth() << ")<";
    getKnown().print(OS);
    OS << " / ";
    getAssumed().print(OS);
    OS << ">";
    return Str;
  }

  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }

  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromSCEV(Attributor &A,
                           const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }

  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<Instruction *>(CtxI),
                                 /*UndefAllowed*/ false);
  }

  /// Return true if \p CtxI is valid for querying outside analyses.
  /// This basically makes sure we do not ask intra-procedural analyses
  /// about a context in the wrong function or a context that violates
  /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
  /// if the original context of this AA is OK or should be considered invalid.
  bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
                                               const Instruction *CtxI,
                                               bool AllowAACtxI) const {
    if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
      return false;

    // Our context might be in a different function; no intra-procedural
    // analysis (neither ScalarEvolution nor LazyValueInfo) can handle that.
    if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
      return false;

    // If the context is not dominated by the value there are paths to the
    // context that do not define the value. This cannot be handled by
    // LazyValueInfo so we need to bail.
    if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
      InformationCache &InfoCache = A.getInfoCache();
      const DominatorTree *DT =
          InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
              *I->getFunction());
      return DT && DT->dominates(I, CtxI);
    }

    return true;
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getKnown();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumption.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known
    //       to evolve to x^2 + x, then we can say that y is in [2, 12].
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
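    // The two metadata operands encode the half-open interval [Lower, Upper),
    // e.g., !{i32 0, i32 10} for [0, 10).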
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }

  /// Return true if \p Assumed is included in \p KnownRanges.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in the IR, we give up on annotating
    // the assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    // we can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

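    // E.g., with an existing !range !{i32 0, i32 100}, an assumed range of
    // [10, 20) is strictly contained in [0, 100) and therefore "better".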
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }

  /// Helper function to set range metadata.
  static bool
  setRangeMetadataIfisBetterRange(Instruction *I,
                                  const ConstantRange &AssumedConstantRange) {
    auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
    if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
      if (!AssumedConstantRange.isEmptySet()) {
        I->setMetadata(LLVMContext::MD_range,
                       getMDNodeForConstantRange(I->getType(), I->getContext(),
                                                 AssumedConstantRange));
        return true;
      }
    }
    return false;
  }

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
    assert(!AssumedConstantRange.isFullSet() && "Invalid state");

    auto &V = getAssociatedValue();
    if (!AssumedConstantRange.isEmptySet() &&
        !AssumedConstantRange.isSingleElement()) {
      if (Instruction *I = dyn_cast<Instruction>(&V)) {
        assert(I == getCtxI() && "Should not annotate an instruction which is "
                                 "not the context instruction");
        if (isa<CallInst>(I) || isa<LoadInst>(I))
          if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
            Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed;
  }
};

struct AAValueConstantRangeArgument final
    : AAArgumentFromCallSiteArguments<
          AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
          true /* BridgeCallBaseContext */> {
  using Base = AAArgumentFromCallSiteArguments<
      AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
      true /* BridgeCallBaseContext */>;
  AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_range)
  }
};

struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true> {
  using Base =
      AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true>;
  AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (!A.isFunctionIPOAmendable(*getAssociatedFunction()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueConstantRangeImpl::initialize(A);
    if (isAtFixpoint())
      return;

    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      // Collapse the undef state to 0.
      unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<CallBase>(&V))
      return;

    if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
      return;

    // If it is a load instruction with range metadata, use it.
9312 | if (LoadInst *LI = dyn_cast<LoadInst>(Val: &V)) |
9313 | if (auto *RangeMD = LI->getMetadata(KindID: LLVMContext::MD_range)) { |
9314 | intersectKnown(R: getConstantRangeFromMetadata(RangeMD: *RangeMD)); |
9315 | return; |
9316 | } |
9317 | |
9318 | // We can work with PHI and select instructions as we traverse their operands
9319 | // during the update.
9320 | if (isa<SelectInst>(Val: V) || isa<PHINode>(Val: V)) |
9321 | return; |
9322 | |
9323 | // Otherwise we give up. |
9324 | indicatePessimisticFixpoint(); |
9325 | |
9326 | LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " |
9327 | << getAssociatedValue() << "\n" ); |
9328 | } |
9329 | |
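    | /// Union into \p T the range assumed for \p BinOp at \p CtxI, derived from
    | /// the ranges of its (simplified) operands. Queried operand AAs are appended
    | /// to \p QuerriedAAs so the caller can detect circular reasoning. Returns
    | /// false if no range can be derived and the state has to be given up on.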
9330 | bool calculateBinaryOperator( |
9331 | Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, |
9332 | const Instruction *CtxI, |
9333 | SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { |
9334 | Value *LHS = BinOp->getOperand(i_nocapture: 0); |
9335 | Value *RHS = BinOp->getOperand(i_nocapture: 1); |
9336 | |
9337 | // Simplify the operands first. |
9338 | bool UsedAssumedInformation = false; |
9339 | const auto &SimplifiedLHS = A.getAssumedSimplified( |
9340 | IRP: IRPosition::value(V: *LHS, CBContext: getCallBaseContext()), AA: *this, |
9341 | UsedAssumedInformation, S: AA::Interprocedural); |
9342 | if (!SimplifiedLHS.has_value()) |
9343 | return true; |
9344 | if (!*SimplifiedLHS) |
9345 | return false; |
9346 | LHS = *SimplifiedLHS; |
9347 | |
9348 | const auto &SimplifiedRHS = A.getAssumedSimplified( |
9349 | IRP: IRPosition::value(V: *RHS, CBContext: getCallBaseContext()), AA: *this, |
9350 | UsedAssumedInformation, S: AA::Interprocedural); |
9351 | if (!SimplifiedRHS.has_value()) |
9352 | return true; |
9353 | if (!*SimplifiedRHS) |
9354 | return false; |
9355 | RHS = *SimplifiedRHS; |
9356 | |
9357 | // TODO: Allow non integers as well. |
9358 | if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) |
9359 | return false; |
9360 | |
9361 | auto *LHSAA = A.getAAFor<AAValueConstantRange>( |
9362 | QueryingAA: *this, IRP: IRPosition::value(V: *LHS, CBContext: getCallBaseContext()), |
9363 | DepClass: DepClassTy::REQUIRED); |
9364 | if (!LHSAA) |
9365 | return false; |
9366 | QuerriedAAs.push_back(Elt: LHSAA); |
9367 | auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI); |
9368 | |
9369 | auto *RHSAA = A.getAAFor<AAValueConstantRange>( |
9370 | QueryingAA: *this, IRP: IRPosition::value(V: *RHS, CBContext: getCallBaseContext()), |
9371 | DepClass: DepClassTy::REQUIRED); |
9372 | if (!RHSAA) |
9373 | return false; |
9374 | QuerriedAAs.push_back(Elt: RHSAA); |
9375 | auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI); |
9376 | |
9377 | auto AssumedRange = LHSAARange.binaryOp(BinOp: BinOp->getOpcode(), Other: RHSAARange); |
9378 | |
9379 | T.unionAssumed(R: AssumedRange); |
9380 | |
9381 | // TODO: Track a known state too. |
9382 | |
9383 | return T.isValidState(); |
9384 | } |
9385 | |
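    | /// Union into \p T the range assumed for \p CastI, computed by applying the
    | /// cast operation to the range of its (simplified) operand.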
9386 | bool calculateCastInst( |
9387 | Attributor &A, CastInst *CastI, IntegerRangeState &T, |
9388 | const Instruction *CtxI, |
9389 | SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { |
9390 | assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!" ); |
9391 | // TODO: Allow non integers as well. |
9392 | Value *OpV = CastI->getOperand(i_nocapture: 0); |
9393 | |
9394 | // Simplify the operand first. |
9395 | bool UsedAssumedInformation = false; |
9396 | const auto &SimplifiedOpV = A.getAssumedSimplified( |
9397 | IRP: IRPosition::value(V: *OpV, CBContext: getCallBaseContext()), AA: *this, |
9398 | UsedAssumedInformation, S: AA::Interprocedural); |
9399 | if (!SimplifiedOpV.has_value()) |
9400 | return true; |
9401 | if (!*SimplifiedOpV) |
9402 | return false; |
9403 | OpV = *SimplifiedOpV; |
9404 | |
9405 | if (!OpV->getType()->isIntegerTy()) |
9406 | return false; |
9407 | |
9408 | auto *OpAA = A.getAAFor<AAValueConstantRange>( |
9409 | QueryingAA: *this, IRP: IRPosition::value(V: *OpV, CBContext: getCallBaseContext()), |
9410 | DepClass: DepClassTy::REQUIRED); |
9411 | if (!OpAA) |
9412 | return false; |
9413 | QuerriedAAs.push_back(Elt: OpAA); |
9414 | T.unionAssumed(R: OpAA->getAssumed().castOp(CastOp: CastI->getOpcode(), |
9415 | BitWidth: getState().getBitWidth())); |
9416 | return T.isValidState(); |
9417 | } |
9418 | |
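    | /// Union into \p T the boolean range assumed for \p CmpI: a singleton true or
    | /// false range if the operand ranges prove the predicate, and the full 1-bit
    | /// range otherwise.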
9419 | bool |
9420 | calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, |
9421 | const Instruction *CtxI, |
9422 | SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { |
9423 | Value *LHS = CmpI->getOperand(i_nocapture: 0); |
9424 | Value *RHS = CmpI->getOperand(i_nocapture: 1); |
9425 | |
9426 | // Simplify the operands first. |
9427 | bool UsedAssumedInformation = false; |
9428 | const auto &SimplifiedLHS = A.getAssumedSimplified( |
9429 | IRP: IRPosition::value(V: *LHS, CBContext: getCallBaseContext()), AA: *this, |
9430 | UsedAssumedInformation, S: AA::Interprocedural); |
9431 | if (!SimplifiedLHS.has_value()) |
9432 | return true; |
9433 | if (!*SimplifiedLHS) |
9434 | return false; |
9435 | LHS = *SimplifiedLHS; |
9436 | |
9437 | const auto &SimplifiedRHS = A.getAssumedSimplified( |
9438 | IRP: IRPosition::value(V: *RHS, CBContext: getCallBaseContext()), AA: *this, |
9439 | UsedAssumedInformation, S: AA::Interprocedural); |
9440 | if (!SimplifiedRHS.has_value()) |
9441 | return true; |
9442 | if (!*SimplifiedRHS) |
9443 | return false; |
9444 | RHS = *SimplifiedRHS; |
9445 | |
9446 | // TODO: Allow non integers as well. |
9447 | if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) |
9448 | return false; |
9449 | |
9450 | auto *LHSAA = A.getAAFor<AAValueConstantRange>( |
9451 | QueryingAA: *this, IRP: IRPosition::value(V: *LHS, CBContext: getCallBaseContext()), |
9452 | DepClass: DepClassTy::REQUIRED); |
9453 | if (!LHSAA) |
9454 | return false; |
9455 | QuerriedAAs.push_back(Elt: LHSAA); |
9456 | auto *RHSAA = A.getAAFor<AAValueConstantRange>( |
9457 | QueryingAA: *this, IRP: IRPosition::value(V: *RHS, CBContext: getCallBaseContext()), |
9458 | DepClass: DepClassTy::REQUIRED); |
9459 | if (!RHSAA) |
9460 | return false; |
9461 | QuerriedAAs.push_back(Elt: RHSAA); |
9462 | auto LHSAARange = LHSAA->getAssumedConstantRange(A, CtxI); |
9463 | auto RHSAARange = RHSAA->getAssumedConstantRange(A, CtxI); |
9464 | |
9465 | // If one of them is the empty set, we cannot decide.
9466 | if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) |
9467 | return true; |
9468 | |
9469 | bool MustTrue = false, MustFalse = false; |
9470 | |
9471 | auto AllowedRegion = |
9472 | ConstantRange::makeAllowedICmpRegion(Pred: CmpI->getPredicate(), Other: RHSAARange); |
9473 | |
9474 | if (AllowedRegion.intersectWith(CR: LHSAARange).isEmptySet()) |
9475 | MustFalse = true; |
9476 | |
9477 | if (LHSAARange.icmp(Pred: CmpI->getPredicate(), Other: RHSAARange)) |
9478 | MustTrue = true; |
9479 | |
9480 | assert((!MustTrue || !MustFalse) && |
9481 | "Either MustTrue or MustFalse should be false!" ); |
9482 | |
9483 | if (MustTrue) |
9484 | T.unionAssumed(R: ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); |
9485 | else if (MustFalse) |
9486 | T.unionAssumed(R: ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); |
9487 | else |
9488 | T.unionAssumed(R: ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); |
9489 | |
9490 | LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " after " |
9491 | << (MustTrue ? "true" : (MustFalse ? "false" : "unknown" )) |
9492 | << ": " << T << "\n\t" << *LHSAA << "\t<op>\n\t" |
9493 | << *RHSAA); |
9494 | |
9495 | // TODO: Track a known state too. |
9496 | return T.isValidState(); |
9497 | } |
9498 | |
9499 | /// See AbstractAttribute::updateImpl(...). |
9500 | ChangeStatus updateImpl(Attributor &A) override { |
9501 | |
9502 | IntegerRangeState T(getBitWidth()); |
9503 | auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool { |
9504 | Instruction *I = dyn_cast<Instruction>(Val: &V); |
9505 | if (!I || isa<CallBase>(Val: I)) { |
9506 | |
9507 | // Simplify the operand first. |
9508 | bool UsedAssumedInformation = false; |
9509 | const auto &SimplifiedOpV = A.getAssumedSimplified( |
9510 | IRP: IRPosition::value(V, CBContext: getCallBaseContext()), AA: *this, |
9511 | UsedAssumedInformation, S: AA::Interprocedural); |
9512 | if (!SimplifiedOpV.has_value()) |
9513 | return true; |
9514 | if (!*SimplifiedOpV) |
9515 | return false; |
9516 | Value *VPtr = *SimplifiedOpV; |
9517 | |
9518 | // If the value is not an instruction, query the Attributor for an AA of the value.
9519 | const auto *AA = A.getAAFor<AAValueConstantRange>( |
9520 | QueryingAA: *this, IRP: IRPosition::value(V: *VPtr, CBContext: getCallBaseContext()), |
9521 | DepClass: DepClassTy::REQUIRED); |
9522 | |
9523 | // The clamp operator is not used so that we can utilize the program point CtxI.
9524 | if (AA) |
9525 | T.unionAssumed(R: AA->getAssumedConstantRange(A, CtxI)); |
9526 | else |
9527 | return false; |
9528 | |
9529 | return T.isValidState(); |
9530 | } |
9531 | |
9532 | SmallVector<const AAValueConstantRange *, 4> QuerriedAAs; |
9533 | if (auto *BinOp = dyn_cast<BinaryOperator>(Val: I)) { |
9534 | if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs)) |
9535 | return false; |
9536 | } else if (auto *CmpI = dyn_cast<CmpInst>(Val: I)) { |
9537 | if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs)) |
9538 | return false; |
9539 | } else if (auto *CastI = dyn_cast<CastInst>(Val: I)) { |
9540 | if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs)) |
9541 | return false; |
9542 | } else { |
9543 | // Give up on other instructions.
9544 | // TODO: Add support for other instructions.
9545 | |
9546 | T.indicatePessimisticFixpoint(); |
9547 | return false; |
9548 | } |
9549 | |
9550 | // Catch circular reasoning in a pessimistic way for now. |
9551 | // TODO: Check how the range evolves and if we stripped anything, see also |
9552 | // AADereferenceable or AAAlign for similar situations. |
9553 | for (const AAValueConstantRange *QueriedAA : QuerriedAAs) { |
9554 | if (QueriedAA != this) |
9555 | continue; |
9556 | // If we are in a steady state we do not need to worry.
9557 | if (T.getAssumed() == getState().getAssumed()) |
9558 | continue; |
9559 | T.indicatePessimisticFixpoint(); |
9560 | } |
9561 | |
9562 | return T.isValidState(); |
9563 | }; |
9564 | |
9565 | if (!VisitValueCB(getAssociatedValue(), getCtxI())) |
9566 | return indicatePessimisticFixpoint(); |
9567 | |
9568 | // Ensure that long def-use chains can't cause circular reasoning either by |
9569 | // introducing a cutoff below. |
9570 | if (clampStateAndIndicateChange(S&: getState(), R: T) == ChangeStatus::UNCHANGED) |
9571 | return ChangeStatus::UNCHANGED; |
9572 | if (++NumChanges > MaxNumChanges) { |
9573 | LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges |
9574 | << " changes but only " << MaxNumChanges
9575 | << " are allowed to avoid cyclic reasoning.\n" );
9576 | return indicatePessimisticFixpoint(); |
9577 | } |
9578 | return ChangeStatus::CHANGED; |
9579 | } |
9580 | |
9581 | /// See AbstractAttribute::trackStatistics() |
9582 | void trackStatistics() const override { |
9583 | STATS_DECLTRACK_FLOATING_ATTR(value_range) |
9584 | } |
9585 | |
9586 | /// Tracker to bail after too many widening steps of the constant range. |
9587 | int NumChanges = 0; |
9588 | |
9589 | /// Upper bound for the number of allowed changes (=widening steps) for the |
9590 | /// constant range before we give up. |
9591 | static constexpr int MaxNumChanges = 5; |
9592 | }; |
9593 | |
9594 | struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { |
9595 | AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) |
9596 | : AAValueConstantRangeImpl(IRP, A) {} |
9597 | |
9598 | /// See AbstractAttribute::updateImpl(...).
9599 | ChangeStatus updateImpl(Attributor &A) override { |
9600 | llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " |
9601 | "not be called" ); |
9602 | } |
9603 | |
9604 | /// See AbstractAttribute::trackStatistics() |
9605 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } |
9606 | }; |
9607 | |
9608 | struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { |
9609 | AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) |
9610 | : AAValueConstantRangeFunction(IRP, A) {} |
9611 | |
9612 | /// See AbstractAttribute::trackStatistics() |
9613 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } |
9614 | }; |
9615 | |
9616 | struct AAValueConstantRangeCallSiteReturned |
9617 | : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl, |
9618 | AAValueConstantRangeImpl::StateType, |
9619 | /* IntroduceCallBaseContext */ true> { |
9620 | AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) |
9621 | : AACalleeToCallSite<AAValueConstantRange, AAValueConstantRangeImpl, |
9622 | AAValueConstantRangeImpl::StateType, |
9623 | /* IntroduceCallBaseContext */ true>(IRP, A) {} |
9624 | |
9625 | /// See AbstractAttribute::initialize(...). |
9626 | void initialize(Attributor &A) override { |
9627 | // If it is a call instruction with range metadata, use the metadata.
9628 | if (CallInst *CI = dyn_cast<CallInst>(Val: &getAssociatedValue())) |
9629 | if (auto *RangeMD = CI->getMetadata(KindID: LLVMContext::MD_range)) |
9630 | intersectKnown(R: getConstantRangeFromMetadata(RangeMD: *RangeMD)); |
9631 | |
9632 | AAValueConstantRangeImpl::initialize(A); |
9633 | } |
9634 | |
9635 | /// See AbstractAttribute::trackStatistics() |
9636 | void trackStatistics() const override { |
9637 | STATS_DECLTRACK_CSRET_ATTR(value_range) |
9638 | } |
9639 | }; |
9640 | struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { |
9641 | AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) |
9642 | : AAValueConstantRangeFloating(IRP, A) {} |
9643 | |
9644 | /// See AbstractAttribute::manifest() |
9645 | ChangeStatus manifest(Attributor &A) override { |
9646 | return ChangeStatus::UNCHANGED; |
9647 | } |
9648 | |
9649 | /// See AbstractAttribute::trackStatistics() |
9650 | void trackStatistics() const override { |
9651 | STATS_DECLTRACK_CSARG_ATTR(value_range) |
9652 | } |
9653 | }; |
9654 | } // namespace |
9655 | |
9656 | /// ------------------ Potential Values Attribute ------------------------- |
9657 | |
9658 | namespace { |
9659 | struct AAPotentialConstantValuesImpl : AAPotentialConstantValues { |
9660 | using StateType = PotentialConstantIntValuesState; |
9661 | |
9662 | AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A) |
9663 | : AAPotentialConstantValues(IRP, A) {} |
9664 | |
9665 | /// See AbstractAttribute::initialize(..). |
9666 | void initialize(Attributor &A) override { |
9667 | if (A.hasSimplificationCallback(IRP: getIRPosition())) |
9668 | indicatePessimisticFixpoint(); |
9669 | else |
9670 | AAPotentialConstantValues::initialize(A); |
9671 | } |
9672 | |
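    | /// Collect into \p S the assumed constant integer values of \p IRP, falling
    | /// back to the AAPotentialConstantValues AA if value simplification fails.
    | /// \p ContainsUndef is set if only undef values were seen, and \p ForSelf
    | /// prevents a recursive query for the position itself. Returns false if the
    | /// potential values cannot be enumerated.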
9673 | bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S, |
9674 | bool &ContainsUndef, bool ForSelf) { |
9675 | SmallVector<AA::ValueAndContext> Values; |
9676 | bool UsedAssumedInformation = false; |
9677 | if (!A.getAssumedSimplifiedValues(IRP, AA: *this, Values, S: AA::Interprocedural, |
9678 | UsedAssumedInformation)) { |
9679 | // Avoid recursion when the caller is computing constant values for this |
9680 | // IRP itself. |
9681 | if (ForSelf) |
9682 | return false; |
9683 | if (!IRP.getAssociatedType()->isIntegerTy()) |
9684 | return false; |
9685 | auto *PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>( |
9686 | QueryingAA: *this, IRP, DepClass: DepClassTy::REQUIRED); |
9687 | if (!PotentialValuesAA || !PotentialValuesAA->getState().isValidState()) |
9688 | return false; |
9689 | ContainsUndef = PotentialValuesAA->getState().undefIsContained(); |
9690 | S = PotentialValuesAA->getState().getAssumedSet(); |
9691 | return true; |
9692 | } |
9693 | |
9694 | // Copy all the constant values, except UndefValue. ContainsUndef is true |
9695 | // iff Values contains only UndefValue instances. If there are other known |
9696 | // constants, then UndefValue is dropped. |
9697 | ContainsUndef = false; |
9698 | for (auto &It : Values) { |
9699 | if (isa<UndefValue>(Val: It.getValue())) { |
9700 | ContainsUndef = true; |
9701 | continue; |
9702 | } |
9703 | auto *CI = dyn_cast<ConstantInt>(Val: It.getValue()); |
9704 | if (!CI) |
9705 | return false; |
9706 | S.insert(X: CI->getValue()); |
9707 | } |
9708 | ContainsUndef &= S.empty(); |
9709 | |
9710 | return true; |
9711 | } |
9712 | |
9713 | /// See AbstractAttribute::getAsStr(). |
9714 | const std::string getAsStr(Attributor *A) const override { |
9715 | std::string Str; |
9716 | llvm::raw_string_ostream OS(Str); |
9717 | OS << getState(); |
9718 | return Str; |
9719 | } |
9720 | |
9721 | /// See AbstractAttribute::updateImpl(...). |
9722 | ChangeStatus updateImpl(Attributor &A) override { |
9723 | return indicatePessimisticFixpoint(); |
9724 | } |
9725 | }; |
9726 | |
9727 | struct AAPotentialConstantValuesArgument final |
9728 | : AAArgumentFromCallSiteArguments<AAPotentialConstantValues, |
9729 | AAPotentialConstantValuesImpl, |
9730 | PotentialConstantIntValuesState> { |
9731 | using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues, |
9732 | AAPotentialConstantValuesImpl, |
9733 | PotentialConstantIntValuesState>; |
9734 | AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A) |
9735 | : Base(IRP, A) {} |
9736 | |
9737 | /// See AbstractAttribute::trackStatistics() |
9738 | void trackStatistics() const override { |
9739 | STATS_DECLTRACK_ARG_ATTR(potential_values) |
9740 | } |
9741 | }; |
9742 | |
9743 | struct AAPotentialConstantValuesReturned |
9744 | : AAReturnedFromReturnedValues<AAPotentialConstantValues, |
9745 | AAPotentialConstantValuesImpl> { |
9746 | using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues, |
9747 | AAPotentialConstantValuesImpl>; |
9748 | AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A) |
9749 | : Base(IRP, A) {} |
9750 | |
9751 | void initialize(Attributor &A) override { |
9752 | if (!A.isFunctionIPOAmendable(F: *getAssociatedFunction())) |
9753 | indicatePessimisticFixpoint(); |
9754 | Base::initialize(A); |
9755 | } |
9756 | |
9757 | /// See AbstractAttribute::trackStatistics() |
9758 | void trackStatistics() const override { |
9759 | STATS_DECLTRACK_FNRET_ATTR(potential_values) |
9760 | } |
9761 | }; |
9762 | |
9763 | struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { |
9764 | AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A) |
9765 | : AAPotentialConstantValuesImpl(IRP, A) {} |
9766 | |
9767 | /// See AbstractAttribute::initialize(..). |
9768 | void initialize(Attributor &A) override { |
9769 | AAPotentialConstantValuesImpl::initialize(A); |
9770 | if (isAtFixpoint()) |
9771 | return; |
9772 | |
9773 | Value &V = getAssociatedValue(); |
9774 | |
9775 | if (auto *C = dyn_cast<ConstantInt>(Val: &V)) { |
9776 | unionAssumed(C: C->getValue()); |
9777 | indicateOptimisticFixpoint(); |
9778 | return; |
9779 | } |
9780 | |
9781 | if (isa<UndefValue>(Val: &V)) { |
9782 | unionAssumedWithUndef(); |
9783 | indicateOptimisticFixpoint(); |
9784 | return; |
9785 | } |
9786 | |
9787 | if (isa<BinaryOperator>(Val: &V) || isa<ICmpInst>(Val: &V) || isa<CastInst>(Val: &V)) |
9788 | return; |
9789 | |
9790 | if (isa<SelectInst>(Val: V) || isa<PHINode>(Val: V) || isa<LoadInst>(Val: V)) |
9791 | return; |
9792 | |
9793 | indicatePessimisticFixpoint(); |
9794 | |
9795 | LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: " |
9796 | << getAssociatedValue() << "\n" ); |
9797 | } |
9798 | |
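    | /// Evaluate the predicate of \p ICI on the concrete operand pair \p LHS and
    | /// \p RHS.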
9799 | static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS, |
9800 | const APInt &RHS) { |
9801 | return ICmpInst::compare(LHS, RHS, Pred: ICI->getPredicate()); |
9802 | } |
9803 | |
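    | /// Evaluate the integer cast \p CI on the concrete source value \p Src and
    | /// return the result, truncated or extended to \p ResultBitWidth.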
9804 | static APInt calculateCastInst(const CastInst *CI, const APInt &Src, |
9805 | uint32_t ResultBitWidth) { |
9806 | Instruction::CastOps CastOp = CI->getOpcode(); |
9807 | switch (CastOp) { |
9808 | default: |
9809 | llvm_unreachable("unsupported or not integer cast" ); |
9810 | case Instruction::Trunc: |
9811 | return Src.trunc(width: ResultBitWidth); |
9812 | case Instruction::SExt: |
9813 | return Src.sext(width: ResultBitWidth); |
9814 | case Instruction::ZExt: |
9815 | return Src.zext(width: ResultBitWidth); |
9816 | case Instruction::BitCast: |
9817 | return Src; |
9818 | } |
9819 | } |
9820 | |
9821 | static APInt calculateBinaryOperator(const BinaryOperator *BinOp, |
9822 | const APInt &LHS, const APInt &RHS, |
9823 | bool &SkipOperation, bool &Unsupported) { |
9824 | Instruction::BinaryOps BinOpcode = BinOp->getOpcode(); |
9825 | // Unsupported is set to true when the binary operator is not supported. |
9826 | // SkipOperation is set to true when UB occurs with the given operand pair
9827 | // (LHS, RHS). |
9828 | // TODO: we should look at nsw and nuw keywords to handle operations |
9829 | // that create poison or undef value. |
9830 | switch (BinOpcode) { |
9831 | default: |
9832 | Unsupported = true; |
9833 | return LHS; |
9834 | case Instruction::Add: |
9835 | return LHS + RHS; |
9836 | case Instruction::Sub: |
9837 | return LHS - RHS; |
9838 | case Instruction::Mul: |
9839 | return LHS * RHS; |
9840 | case Instruction::UDiv: |
9841 | if (RHS.isZero()) { |
9842 | SkipOperation = true; |
9843 | return LHS; |
9844 | } |
9845 | return LHS.udiv(RHS); |
9846 | case Instruction::SDiv: |
9847 | if (RHS.isZero()) { |
9848 | SkipOperation = true; |
9849 | return LHS; |
9850 | } |
9851 | return LHS.sdiv(RHS); |
9852 | case Instruction::URem: |
9853 | if (RHS.isZero()) { |
9854 | SkipOperation = true; |
9855 | return LHS; |
9856 | } |
9857 | return LHS.urem(RHS); |
9858 | case Instruction::SRem: |
9859 | if (RHS.isZero()) { |
9860 | SkipOperation = true; |
9861 | return LHS; |
9862 | } |
9863 | return LHS.srem(RHS); |
9864 | case Instruction::Shl: |
9865 | return LHS.shl(ShiftAmt: RHS); |
9866 | case Instruction::LShr: |
9867 | return LHS.lshr(ShiftAmt: RHS); |
9868 | case Instruction::AShr: |
9869 | return LHS.ashr(ShiftAmt: RHS); |
9870 | case Instruction::And: |
9871 | return LHS & RHS; |
9872 | case Instruction::Or: |
9873 | return LHS | RHS; |
9874 | case Instruction::Xor: |
9875 | return LHS ^ RHS; |
9876 | } |
9877 | } |
9878 | |
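    | /// Evaluate \p BinOp on the concrete operand pair \p LHS and \p RHS and
    | /// union the result into the assumed set. Returns false if the operator is
    | /// unsupported or the state became invalid.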
9879 | bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, |
9880 | const APInt &LHS, const APInt &RHS) { |
9881 | bool SkipOperation = false; |
9882 | bool Unsupported = false; |
9883 | APInt Result = |
9884 | calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); |
9885 | if (Unsupported) |
9886 | return false; |
9887 | // If SkipOperation is true, we can ignore this operand pair (L, R). |
9888 | if (!SkipOperation) |
9889 | unionAssumed(C: Result); |
9890 | return isValidState(); |
9891 | } |
9892 | |
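    | /// Update the assumed set for \p ICI by evaluating its predicate on every
    | /// combination of potential operand values, treating an all-undef operand
    | /// as zero.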
9893 | ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { |
9894 | auto AssumedBefore = getAssumed(); |
9895 | Value *LHS = ICI->getOperand(i_nocapture: 0); |
9896 | Value *RHS = ICI->getOperand(i_nocapture: 1); |
9897 | |
9898 | bool LHSContainsUndef = false, RHSContainsUndef = false; |
9899 | SetTy LHSAAPVS, RHSAAPVS; |
9900 | if (!fillSetWithConstantValues(A, IRP: IRPosition::value(V: *LHS), S&: LHSAAPVS, |
9901 | ContainsUndef&: LHSContainsUndef, /* ForSelf */ false) || |
9902 | !fillSetWithConstantValues(A, IRP: IRPosition::value(V: *RHS), S&: RHSAAPVS, |
9903 | ContainsUndef&: RHSContainsUndef, /* ForSelf */ false)) |
9904 | return indicatePessimisticFixpoint(); |
9905 | |
9906 | // TODO: make use of undef flag to limit potential values aggressively. |
9907 | bool MaybeTrue = false, MaybeFalse = false; |
9908 | const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); |
9909 | if (LHSContainsUndef && RHSContainsUndef) { |
9910 | // The result of any comparison between undefs can be soundly replaced |
9911 | // with undef. |
9912 | unionAssumedWithUndef(); |
9913 | } else if (LHSContainsUndef) { |
9914 | for (const APInt &R : RHSAAPVS) { |
9915 | bool CmpResult = calculateICmpInst(ICI, LHS: Zero, RHS: R); |
9916 | MaybeTrue |= CmpResult; |
9917 | MaybeFalse |= !CmpResult; |
9918 | if (MaybeTrue & MaybeFalse) |
9919 | return indicatePessimisticFixpoint(); |
9920 | } |
9921 | } else if (RHSContainsUndef) { |
9922 | for (const APInt &L : LHSAAPVS) { |
9923 | bool CmpResult = calculateICmpInst(ICI, LHS: L, RHS: Zero); |
9924 | MaybeTrue |= CmpResult; |
9925 | MaybeFalse |= !CmpResult; |
9926 | if (MaybeTrue & MaybeFalse) |
9927 | return indicatePessimisticFixpoint(); |
9928 | } |
9929 | } else { |
9930 | for (const APInt &L : LHSAAPVS) { |
9931 | for (const APInt &R : RHSAAPVS) { |
9932 | bool CmpResult = calculateICmpInst(ICI, LHS: L, RHS: R); |
9933 | MaybeTrue |= CmpResult; |
9934 | MaybeFalse |= !CmpResult; |
9935 | if (MaybeTrue & MaybeFalse) |
9936 | return indicatePessimisticFixpoint(); |
9937 | } |
9938 | } |
9939 | } |
9940 | if (MaybeTrue) |
9941 | unionAssumed(C: APInt(/* numBits */ 1, /* val */ 1)); |
9942 | if (MaybeFalse) |
9943 | unionAssumed(C: APInt(/* numBits */ 1, /* val */ 0)); |
9944 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED |
9945 | : ChangeStatus::CHANGED; |
9946 | } |
9947 | |
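    | /// Update the assumed set for \p SI. If the condition simplifies to a
    | /// constant, only the selected operand contributes; otherwise the potential
    | /// values of both operands are unioned.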
9948 | ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { |
9949 | auto AssumedBefore = getAssumed(); |
9950 | Value *LHS = SI->getTrueValue(); |
9951 | Value *RHS = SI->getFalseValue(); |
9952 | |
9953 | bool UsedAssumedInformation = false; |
9954 | std::optional<Constant *> C = A.getAssumedConstant( |
9955 | V: *SI->getCondition(), AA: *this, UsedAssumedInformation); |
9956 | |
9957 | // Check if we only need one operand. |
9958 | bool OnlyLeft = false, OnlyRight = false; |
9959 | if (C && *C && (*C)->isOneValue()) |
9960 | OnlyLeft = true; |
9961 | else if (C && *C && (*C)->isZeroValue()) |
9962 | OnlyRight = true; |
9963 | |
9964 | bool LHSContainsUndef = false, RHSContainsUndef = false; |
9965 | SetTy LHSAAPVS, RHSAAPVS; |
9966 | if (!OnlyRight && |
9967 | !fillSetWithConstantValues(A, IRP: IRPosition::value(V: *LHS), S&: LHSAAPVS, |
9968 | ContainsUndef&: LHSContainsUndef, /* ForSelf */ false)) |
9969 | return indicatePessimisticFixpoint(); |
9970 | |
9971 | if (!OnlyLeft && |
9972 | !fillSetWithConstantValues(A, IRP: IRPosition::value(V: *RHS), S&: RHSAAPVS, |
9973 | ContainsUndef&: RHSContainsUndef, /* ForSelf */ false)) |
9974 | return indicatePessimisticFixpoint(); |
9975 | |
9976 | if (OnlyLeft || OnlyRight) { |
9977 | // select (true/false), lhs, rhs |
9978 | auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS; |
9979 | auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef; |
9980 | |
9981 | if (Undef) |
9982 | unionAssumedWithUndef(); |
9983 | else { |
9984 | for (const auto &It : *OpAA) |
9985 | unionAssumed(C: It); |
9986 | } |
9987 | |
9988 | } else if (LHSContainsUndef && RHSContainsUndef) { |
9989 | // select i1 *, undef, undef => undef
9990 | unionAssumedWithUndef(); |
9991 | } else { |
9992 | for (const auto &It : LHSAAPVS) |
9993 | unionAssumed(C: It); |
9994 | for (const auto &It : RHSAAPVS) |
9995 | unionAssumed(C: It); |
9996 | } |
9997 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED |
9998 | : ChangeStatus::CHANGED; |
9999 | } |
10000 | |
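     | /// Update the assumed set for the integer cast \p CI by casting every
     | /// potential source value to the destination bit width.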
10001 | ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { |
10002 | auto AssumedBefore = getAssumed(); |
10003 | if (!CI->isIntegerCast()) |
10004 | return indicatePessimisticFixpoint(); |
10005 | assert(CI->getNumOperands() == 1 && "Expected cast to be unary!" ); |
10006 | uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); |
10007 | Value *Src = CI->getOperand(i_nocapture: 0); |
10008 | |
10009 | bool SrcContainsUndef = false; |
10010 | SetTy SrcPVS; |
10011 | if (!fillSetWithConstantValues(A, IRP: IRPosition::value(V: *Src), S&: SrcPVS, |
10012 | ContainsUndef&: SrcContainsUndef, /* ForSelf */ false)) |
10013 | return indicatePessimisticFixpoint(); |
10014 | |
10015 | if (SrcContainsUndef) |
10016 | unionAssumedWithUndef(); |
10017 | else { |
10018 | for (const APInt &S : SrcPVS) { |
10019 | APInt T = calculateCastInst(CI, Src: S, ResultBitWidth); |
10020 | unionAssumed(C: T); |
10021 | } |
10022 | } |
10023 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED |
10024 | : ChangeStatus::CHANGED; |
10025 | } |
10026 | |
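     | /// Update the assumed set for \p BinOp by evaluating it on every combination
     | /// of potential operand values, treating an all-undef operand as zero.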
10027 | ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { |
10028 | auto AssumedBefore = getAssumed(); |
10029 | Value *LHS = BinOp->getOperand(i_nocapture: 0); |
10030 | Value *RHS = BinOp->getOperand(i_nocapture: 1); |
10031 | |
10032 | bool LHSContainsUndef = false, RHSContainsUndef = false; |
10033 | SetTy LHSAAPVS, RHSAAPVS; |
10034 | if (!fillSetWithConstantValues(A, IRP: IRPosition::value(V: *LHS), S&: LHSAAPVS, |
10035 | ContainsUndef&: LHSContainsUndef, /* ForSelf */ false) || |
10036 | !fillSetWithConstantValues(A, IRP: IRPosition::value(V: *RHS), S&: RHSAAPVS, |
10037 | ContainsUndef&: RHSContainsUndef, /* ForSelf */ false)) |
10038 | return indicatePessimisticFixpoint(); |
10039 | |
10040 | const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); |
10041 | |
10042 | // TODO: make use of undef flag to limit potential values aggressively. |
10043 | if (LHSContainsUndef && RHSContainsUndef) { |
10044 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, LHS: Zero, RHS: Zero)) |
10045 | return indicatePessimisticFixpoint(); |
10046 | } else if (LHSContainsUndef) { |
10047 | for (const APInt &R : RHSAAPVS) { |
10048 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, LHS: Zero, RHS: R)) |
10049 | return indicatePessimisticFixpoint(); |
10050 | } |
10051 | } else if (RHSContainsUndef) { |
10052 | for (const APInt &L : LHSAAPVS) { |
10053 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, LHS: L, RHS: Zero)) |
10054 | return indicatePessimisticFixpoint(); |
10055 | } |
10056 | } else { |
10057 | for (const APInt &L : LHSAAPVS) { |
10058 | for (const APInt &R : RHSAAPVS) { |
10059 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, LHS: L, RHS: R)) |
10060 | return indicatePessimisticFixpoint(); |
10061 | } |
10062 | } |
10063 | } |
10064 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED |
10065 | : ChangeStatus::CHANGED; |
10066 | } |
10067 | |
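     | /// Update the assumed set for \p Inst (a PHI or load) from the potential
     | /// constant values of the instruction itself.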
10068 | ChangeStatus updateWithInstruction(Attributor &A, Instruction *Inst) { |
10069 | auto AssumedBefore = getAssumed(); |
10070 | SetTy Incoming; |
10071 | bool ContainsUndef; |
10072 | if (!fillSetWithConstantValues(A, IRP: IRPosition::value(V: *Inst), S&: Incoming, |
10073 | ContainsUndef, /* ForSelf */ true)) |
10074 | return indicatePessimisticFixpoint(); |
10075 | if (ContainsUndef) { |
10076 | unionAssumedWithUndef(); |
10077 | } else { |
10078 | for (const auto &It : Incoming) |
10079 | unionAssumed(C: It); |
10080 | } |
10081 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED |
10082 | : ChangeStatus::CHANGED; |
10083 | } |
10084 | |
10085 | /// See AbstractAttribute::updateImpl(...). |
10086 | ChangeStatus updateImpl(Attributor &A) override { |
10087 | Value &V = getAssociatedValue(); |
10088 | Instruction *I = dyn_cast<Instruction>(Val: &V); |
10089 | |
10090 | if (auto *ICI = dyn_cast<ICmpInst>(Val: I)) |
10091 | return updateWithICmpInst(A, ICI); |
10092 | |
10093 | if (auto *SI = dyn_cast<SelectInst>(Val: I)) |
10094 | return updateWithSelectInst(A, SI); |
10095 | |
10096 | if (auto *CI = dyn_cast<CastInst>(Val: I)) |
10097 | return updateWithCastInst(A, CI); |
10098 | |
10099 | if (auto *BinOp = dyn_cast<BinaryOperator>(Val: I)) |
10100 | return updateWithBinaryOperator(A, BinOp); |
10101 | |
10102 | if (isa<PHINode>(Val: I) || isa<LoadInst>(Val: I)) |
10103 | return updateWithInstruction(A, Inst: I); |
10104 | |
10105 | return indicatePessimisticFixpoint(); |
10106 | } |
10107 | |
10108 | /// See AbstractAttribute::trackStatistics() |
10109 | void trackStatistics() const override { |
10110 | STATS_DECLTRACK_FLOATING_ATTR(potential_values) |
10111 | } |
10112 | }; |
10113 | |
10114 | struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl { |
10115 | AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A) |
10116 | : AAPotentialConstantValuesImpl(IRP, A) {} |
10117 | |
10118 | /// See AbstractAttribute::updateImpl(...).
10119 | ChangeStatus updateImpl(Attributor &A) override { |
10120 | llvm_unreachable( |
10121 | "AAPotentialConstantValues(Function|CallSite)::updateImpl will " |
10122 | "not be called" ); |
10123 | } |
10124 | |
10125 | /// See AbstractAttribute::trackStatistics() |
10126 | void trackStatistics() const override { |
10127 | STATS_DECLTRACK_FN_ATTR(potential_values) |
10128 | } |
10129 | }; |
10130 | |
10131 | struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction { |
10132 | AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A) |
10133 | : AAPotentialConstantValuesFunction(IRP, A) {} |
10134 | |
10135 | /// See AbstractAttribute::trackStatistics() |
10136 | void trackStatistics() const override { |
10137 | STATS_DECLTRACK_CS_ATTR(potential_values) |
10138 | } |
10139 | }; |
10140 | |
10141 | struct AAPotentialConstantValuesCallSiteReturned |
10142 | : AACalleeToCallSite<AAPotentialConstantValues, |
10143 | AAPotentialConstantValuesImpl> { |
10144 | AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP, |
10145 | Attributor &A) |
10146 | : AACalleeToCallSite<AAPotentialConstantValues, |
10147 | AAPotentialConstantValuesImpl>(IRP, A) {} |
10148 | |
10149 | /// See AbstractAttribute::trackStatistics() |
10150 | void trackStatistics() const override { |
10151 | STATS_DECLTRACK_CSRET_ATTR(potential_values) |
10152 | } |
10153 | }; |
10154 | |
10155 | struct AAPotentialConstantValuesCallSiteArgument |
10156 | : AAPotentialConstantValuesFloating { |
10157 | AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP, |
10158 | Attributor &A) |
10159 | : AAPotentialConstantValuesFloating(IRP, A) {} |
10160 | |
10161 | /// See AbstractAttribute::initialize(..). |
10162 | void initialize(Attributor &A) override { |
10163 | AAPotentialConstantValuesImpl::initialize(A); |
10164 | if (isAtFixpoint()) |
10165 | return; |
10166 | |
10167 | Value &V = getAssociatedValue(); |
10168 | |
10169 | if (auto *C = dyn_cast<ConstantInt>(Val: &V)) { |
10170 | unionAssumed(C: C->getValue()); |
10171 | indicateOptimisticFixpoint(); |
10172 | return; |
10173 | } |
10174 | |
10175 | if (isa<UndefValue>(Val: &V)) { |
10176 | unionAssumedWithUndef(); |
10177 | indicateOptimisticFixpoint(); |
10178 | return; |
10179 | } |
10180 | } |
10181 | |
10182 | /// See AbstractAttribute::updateImpl(...). |
10183 | ChangeStatus updateImpl(Attributor &A) override { |
10184 | Value &V = getAssociatedValue(); |
10185 | auto AssumedBefore = getAssumed(); |
10186 | auto *AA = A.getAAFor<AAPotentialConstantValues>( |
10187 | QueryingAA: *this, IRP: IRPosition::value(V), DepClass: DepClassTy::REQUIRED); |
10188 | if (!AA) |
10189 | return indicatePessimisticFixpoint(); |
10190 | const auto &S = AA->getAssumed(); |
10191 | unionAssumed(PVS: S); |
10192 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED |
10193 | : ChangeStatus::CHANGED; |
10194 | } |
10195 | |
10196 | /// See AbstractAttribute::trackStatistics() |
10197 | void trackStatistics() const override { |
10198 | STATS_DECLTRACK_CSARG_ATTR(potential_values) |
10199 | } |
10200 | }; |
10201 | } // namespace |
10202 | |
10203 | /// ------------------------ NoUndef Attribute --------------------------------- |
10204 | bool AANoUndef::isImpliedByIR(Attributor &A, const IRPosition &IRP, |
10205 | Attribute::AttrKind ImpliedAttributeKind, |
10206 | bool IgnoreSubsumingPositions) { |
10207 | assert(ImpliedAttributeKind == Attribute::NoUndef && |
10208 | "Unexpected attribute kind" ); |
10209 | if (A.hasAttr(IRP, AKs: {Attribute::NoUndef}, IgnoreSubsumingPositions, |
10210 | ImpliedAttributeKind: Attribute::NoUndef)) |
10211 | return true; |
10212 | |
10213 | Value &Val = IRP.getAssociatedValue(); |
10214 | if (IRP.getPositionKind() != IRPosition::IRP_RETURNED && |
10215 | isGuaranteedNotToBeUndefOrPoison(V: &Val)) { |
10216 | LLVMContext &Ctx = Val.getContext(); |
10217 | A.manifestAttrs(IRP, DeducedAttrs: Attribute::get(Context&: Ctx, Kind: Attribute::NoUndef)); |
10218 | return true; |
10219 | } |
10220 | |
10221 | return false; |
10222 | } |
10223 | |
10224 | namespace { |
10225 | struct AANoUndefImpl : AANoUndef { |
10226 | AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {} |
10227 | |
10228 | /// See AbstractAttribute::initialize(...). |
10229 | void initialize(Attributor &A) override { |
10230 | Value &V = getAssociatedValue(); |
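     | // A value that is literally undef can never be noundef.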
10231 | if (isa<UndefValue>(Val: V)) |
10232 | indicatePessimisticFixpoint(); |
10233 | assert(!isImpliedByIR(A, getIRPosition(), Attribute::NoUndef)); |
10234 | } |
10235 | |
10236 | /// See followUsesInMBEC |
10237 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
10238 | AANoUndef::StateType &State) { |
10239 | const Value *UseV = U->get(); |
10240 | const DominatorTree *DT = nullptr; |
10241 | AssumptionCache *AC = nullptr; |
10242 | InformationCache &InfoCache = A.getInfoCache(); |
10243 | if (Function *F = getAnchorScope()) { |
10244 | DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(F: *F); |
10245 | AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(F: *F); |
10246 | } |
10247 | State.setKnown(isGuaranteedNotToBeUndefOrPoison(V: UseV, AC, CtxI: I, DT)); |
10248 | bool TrackUse = false; |
10249 | // Track use for instructions which must produce undef or poison bits when |
10250 | // at least one operand contains such bits. |
10251 | if (isa<CastInst>(Val: *I) || isa<GetElementPtrInst>(Val: *I)) |
10252 | TrackUse = true; |
10253 | return TrackUse; |
10254 | } |
10255 | |
10256 | /// See AbstractAttribute::getAsStr(). |
10257 | const std::string getAsStr(Attributor *A) const override { |
10258 | return getAssumed() ? "noundef" : "may-undef-or-poison" ; |
10259 | } |
10260 | |
10261 | ChangeStatus manifest(Attributor &A) override { |
10262 | // We don't manifest the noundef attribute for dead positions because the
10263 | // values associated with dead positions would be replaced with undef
10264 | // values.
10265 | bool UsedAssumedInformation = false; |
10266 | if (A.isAssumedDead(IRP: getIRPosition(), QueryingAA: nullptr, FnLivenessAA: nullptr, |
10267 | UsedAssumedInformation)) |
10268 | return ChangeStatus::UNCHANGED; |
10269 | // A position for which simplification does not yield any value is
10270 | // considered to be dead. We don't manifest noundef in such positions for
10271 | // the same reason as above.
10272 | if (!A.getAssumedSimplified(IRP: getIRPosition(), AA: *this, UsedAssumedInformation, |
10273 | S: AA::Interprocedural) |
10274 | .has_value()) |
10275 | return ChangeStatus::UNCHANGED; |
10276 | return AANoUndef::manifest(A); |
10277 | } |
10278 | }; |
10279 | |
10280 | struct AANoUndefFloating : public AANoUndefImpl { |
10281 | AANoUndefFloating(const IRPosition &IRP, Attributor &A) |
10282 | : AANoUndefImpl(IRP, A) {} |
10283 | |
10284 | /// See AbstractAttribute::initialize(...). |
10285 | void initialize(Attributor &A) override { |
10286 | AANoUndefImpl::initialize(A); |
10287 | if (!getState().isAtFixpoint() && getAnchorScope() && |
10288 | !getAnchorScope()->isDeclaration()) |
10289 | if (Instruction *CtxI = getCtxI()) |
10290 | followUsesInMBEC(AA&: *this, A, S&: getState(), CtxI&: *CtxI); |
10291 | } |
10292 | |
10293 | /// See AbstractAttribute::updateImpl(...). |
10294 | ChangeStatus updateImpl(Attributor &A) override { |
10295 | auto VisitValueCB = [&](const IRPosition &IRP) -> bool { |
10296 | bool IsKnownNoUndef; |
10297 | return AA::hasAssumedIRAttr<Attribute::NoUndef>( |
10298 | A, QueryingAA: this, IRP, DepClass: DepClassTy::REQUIRED, IsKnown&: IsKnownNoUndef); |
10299 | }; |
10300 | |
10301 | bool Stripped; |
10302 | bool UsedAssumedInformation = false; |
10303 | Value *AssociatedValue = &getAssociatedValue(); |
10304 | SmallVector<AA::ValueAndContext> Values; |
10305 | if (!A.getAssumedSimplifiedValues(IRP: getIRPosition(), AA: *this, Values, |
10306 | S: AA::AnyScope, UsedAssumedInformation)) |
10307 | Stripped = false; |
10308 | else |
10309 | Stripped = |
10310 | Values.size() != 1 || Values.front().getValue() != AssociatedValue; |
10311 | |
10312 | if (!Stripped) { |
10313 | // If we haven't stripped anything we might still be able to use a |
10314 | // different AA, but only if the IRP changes. Effectively when we |
10315 | // interpret this not as a call site value but as a floating/argument |
10316 | // value. |
10317 | const IRPosition AVIRP = IRPosition::value(V: *AssociatedValue); |
10318 | if (AVIRP == getIRPosition() || !VisitValueCB(AVIRP)) |
10319 | return indicatePessimisticFixpoint(); |
10320 | return ChangeStatus::UNCHANGED; |
10321 | } |
10322 | |
10323 | for (const auto &VAC : Values) |
10324 | if (!VisitValueCB(IRPosition::value(V: *VAC.getValue()))) |
10325 | return indicatePessimisticFixpoint(); |
10326 | |
10327 | return ChangeStatus::UNCHANGED; |
10328 | } |
10329 | |
10330 | /// See AbstractAttribute::trackStatistics() |
10331 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } |
10332 | }; |
10333 | |
10334 | struct AANoUndefReturned final |
10335 | : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> { |
10336 | AANoUndefReturned(const IRPosition &IRP, Attributor &A) |
10337 | : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {} |
10338 | |
10339 | /// See AbstractAttribute::trackStatistics() |
10340 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } |
10341 | }; |
10342 | |
10343 | struct AANoUndefArgument final |
10344 | : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> { |
10345 | AANoUndefArgument(const IRPosition &IRP, Attributor &A) |
10346 | : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {} |
10347 | |
10348 | /// See AbstractAttribute::trackStatistics() |
10349 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) } |
10350 | }; |
10351 | |
10352 | struct AANoUndefCallSiteArgument final : AANoUndefFloating { |
10353 | AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A) |
10354 | : AANoUndefFloating(IRP, A) {} |
10355 | |
10356 | /// See AbstractAttribute::trackStatistics() |
10357 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) } |
10358 | }; |
10359 | |
10360 | struct AANoUndefCallSiteReturned final |
10361 | : AACalleeToCallSite<AANoUndef, AANoUndefImpl> { |
10362 | AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A) |
10363 | : AACalleeToCallSite<AANoUndef, AANoUndefImpl>(IRP, A) {} |
10364 | |
10365 | /// See AbstractAttribute::trackStatistics() |
10366 | void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) } |
10367 | }; |
10368 | |
10369 | /// ------------------------ NoFPClass Attribute ------------------------------- |
10370 | |
10371 | struct AANoFPClassImpl : AANoFPClass { |
10372 | AANoFPClassImpl(const IRPosition &IRP, Attributor &A) : AANoFPClass(IRP, A) {} |
10373 | |
10374 | void initialize(Attributor &A) override { |
10375 | const IRPosition &IRP = getIRPosition(); |
10376 | |
10377 | Value &V = IRP.getAssociatedValue(); |
10378 | if (isa<UndefValue>(Val: V)) { |
10379 | indicateOptimisticFixpoint(); |
10380 | return; |
10381 | } |
10382 | |
10383 | SmallVector<Attribute> Attrs; |
10384 | A.getAttrs(IRP: getIRPosition(), AKs: {Attribute::NoFPClass}, Attrs, IgnoreSubsumingPositions: false); |
10385 | for (const auto &Attr : Attrs) { |
10386 | addKnownBits(Bits: Attr.getNoFPClass()); |
10387 | } |
10388 | |
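     | // For returned positions the associated value is the function itself rather
     | // than a floating-point IR value, so known FP classes are only computed from
     | // the value for the other position kinds.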
10389 | const DataLayout &DL = A.getDataLayout(); |
10390 | if (getPositionKind() != IRPosition::IRP_RETURNED) { |
10391 | KnownFPClass KnownFPClass = computeKnownFPClass(V: &V, DL); |
10392 | addKnownBits(Bits: ~KnownFPClass.KnownFPClasses); |
10393 | } |
10394 | |
10395 | if (Instruction *CtxI = getCtxI()) |
10396 | followUsesInMBEC(AA&: *this, A, S&: getState(), CtxI&: *CtxI); |
10397 | } |
10398 | |
10399 | /// See followUsesInMBEC |
10400 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
10401 | AANoFPClass::StateType &State) { |
10402 | // TODO: Determine what instructions can be looked through. |
10403 | auto *CB = dyn_cast<CallBase>(Val: I); |
10404 | if (!CB) |
10405 | return false; |
10406 | |
10407 | if (!CB->isArgOperand(U)) |
10408 | return false; |
10409 | |
10410 | unsigned ArgNo = CB->getArgOperandNo(U); |
10411 | IRPosition IRP = IRPosition::callsite_argument(CB: *CB, ArgNo); |
10412 | if (auto *NoFPAA = A.getAAFor<AANoFPClass>(QueryingAA: *this, IRP, DepClass: DepClassTy::NONE)) |
10413 | State.addKnownBits(Bits: NoFPAA->getState().getKnown()); |
10414 | return false; |
10415 | } |
10416 | |
10417 | const std::string getAsStr(Attributor *A) const override { |
10418 | std::string Result = "nofpclass" ; |
10419 | raw_string_ostream OS(Result); |
10420 | OS << getKnownNoFPClass() << '/' << getAssumedNoFPClass(); |
10421 | return Result; |
10422 | } |
10423 | |
10424 | void getDeducedAttributes(Attributor &A, LLVMContext &Ctx, |
10425 | SmallVectorImpl<Attribute> &Attrs) const override { |
10426 | Attrs.emplace_back(Args: Attribute::getWithNoFPClass(Context&: Ctx, Mask: getAssumedNoFPClass())); |
10427 | } |
10428 | }; |
10429 | |
10430 | struct AANoFPClassFloating : public AANoFPClassImpl { |
10431 | AANoFPClassFloating(const IRPosition &IRP, Attributor &A) |
10432 | : AANoFPClassImpl(IRP, A) {} |
10433 | |
10434 | /// See AbstractAttribute::updateImpl(...). |
10435 | ChangeStatus updateImpl(Attributor &A) override { |
10436 | SmallVector<AA::ValueAndContext> Values; |
10437 | bool UsedAssumedInformation = false; |
10438 | if (!A.getAssumedSimplifiedValues(IRP: getIRPosition(), AA: *this, Values, |
10439 | S: AA::AnyScope, UsedAssumedInformation)) { |
10440 | Values.push_back(Elt: {getAssociatedValue(), getCtxI()}); |
10441 | } |
10442 | |
10443 | StateType T; |
10444 | auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool { |
10445 | const auto *AA = A.getAAFor<AANoFPClass>(QueryingAA: *this, IRP: IRPosition::value(V), |
10446 | DepClass: DepClassTy::REQUIRED); |
10447 | if (!AA || this == AA) { |
10448 | T.indicatePessimisticFixpoint(); |
10449 | } else { |
10450 | const AANoFPClass::StateType &S = |
10451 | static_cast<const AANoFPClass::StateType &>(AA->getState()); |
10452 | T ^= S; |
10453 | } |
10454 | return T.isValidState(); |
10455 | }; |
10456 | |
10457 | for (const auto &VAC : Values) |
10458 | if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI())) |
10459 | return indicatePessimisticFixpoint(); |
10460 | |
10461 | return clampStateAndIndicateChange(S&: getState(), R: T); |
10462 | } |
10463 | |
10464 | /// See AbstractAttribute::trackStatistics() |
10465 | void trackStatistics() const override { |
10466 | STATS_DECLTRACK_FNRET_ATTR(nofpclass) |
10467 | } |
10468 | }; |
10469 | |
10470 | struct AANoFPClassReturned final |
10471 | : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl, |
10472 | AANoFPClassImpl::StateType, false, |
10473 | Attribute::None, false> { |
10474 | AANoFPClassReturned(const IRPosition &IRP, Attributor &A) |
10475 | : AAReturnedFromReturnedValues<AANoFPClass, AANoFPClassImpl, |
10476 | AANoFPClassImpl::StateType, false, |
10477 | Attribute::None, false>(IRP, A) {} |
10478 | |
10479 | /// See AbstractAttribute::trackStatistics() |
10480 | void trackStatistics() const override { |
10481 | STATS_DECLTRACK_FNRET_ATTR(nofpclass) |
10482 | } |
10483 | }; |
10484 | |
10485 | struct AANoFPClassArgument final |
10486 | : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl> { |
10487 | AANoFPClassArgument(const IRPosition &IRP, Attributor &A) |
10488 | : AAArgumentFromCallSiteArguments<AANoFPClass, AANoFPClassImpl>(IRP, A) {} |
10489 | |
10490 | /// See AbstractAttribute::trackStatistics() |
10491 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofpclass) } |
10492 | }; |
10493 | |
10494 | struct AANoFPClassCallSiteArgument final : AANoFPClassFloating { |
10495 | AANoFPClassCallSiteArgument(const IRPosition &IRP, Attributor &A) |
10496 | : AANoFPClassFloating(IRP, A) {} |
10497 | |
10498 | /// See AbstractAttribute::trackStatistics() |
10499 | void trackStatistics() const override { |
10500 | STATS_DECLTRACK_CSARG_ATTR(nofpclass) |
10501 | } |
10502 | }; |
10503 | |
10504 | struct AANoFPClassCallSiteReturned final |
10505 | : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl> { |
10506 | AANoFPClassCallSiteReturned(const IRPosition &IRP, Attributor &A) |
10507 | : AACalleeToCallSite<AANoFPClass, AANoFPClassImpl>(IRP, A) {} |
10508 | |
10509 | /// See AbstractAttribute::trackStatistics() |
10510 | void trackStatistics() const override { |
10511 | STATS_DECLTRACK_CSRET_ATTR(nofpclass) |
10512 | } |
10513 | }; |
10514 | |
10515 | struct AACallEdgesImpl : public AACallEdges { |
10516 | AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {} |
10517 | |
10518 | const SetVector<Function *> &getOptimisticEdges() const override { |
10519 | return CalledFunctions; |
10520 | } |
10521 | |
10522 | bool hasUnknownCallee() const override { return HasUnknownCallee; } |
10523 | |
10524 | bool hasNonAsmUnknownCallee() const override { |
10525 | return HasUnknownCalleeNonAsm; |
10526 | } |
10527 | |
10528 | const std::string getAsStr(Attributor *A) const override { |
10529 | return "CallEdges[" + std::to_string(val: HasUnknownCallee) + "," + |
10530 | std::to_string(val: CalledFunctions.size()) + "]" ; |
10531 | } |
10532 | |
10533 | void trackStatistics() const override {} |
10534 | |
10535 | protected: |
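     | /// Add \p Fn to the set of potentially called functions and signal a change
     | /// if it was not in the set before.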
10536 | void addCalledFunction(Function *Fn, ChangeStatus &Change) { |
10537 | if (CalledFunctions.insert(X: Fn)) { |
10538 | Change = ChangeStatus::CHANGED; |
10539 | LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName() |
10540 | << "\n" ); |
10541 | } |
10542 | } |
10543 | |
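     | /// Record that this position may call an unknown callee; \p NonAsm signals
     | /// that the unknown callee is not inline assembly.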
10544 | void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) { |
10545 | if (!HasUnknownCallee) |
10546 | Change = ChangeStatus::CHANGED; |
10547 | if (NonAsm && !HasUnknownCalleeNonAsm) |
10548 | Change = ChangeStatus::CHANGED; |
10549 | HasUnknownCalleeNonAsm |= NonAsm; |
10550 | HasUnknownCallee = true; |
10551 | } |
10552 | |
10553 | private: |
10554 | /// Optimistic set of functions that might be called by this position. |
10555 | SetVector<Function *> CalledFunctions; |
10556 | |
10557 | /// Is there any call with an unknown callee.
10558 | bool HasUnknownCallee = false;
10559 | 
10560 | /// Is there any call with an unknown callee, excluding any inline asm.
10561 | bool HasUnknownCalleeNonAsm = false; |
10562 | }; |
10563 | |
10564 | struct AACallEdgesCallSite : public AACallEdgesImpl { |
10565 | AACallEdgesCallSite(const IRPosition &IRP, Attributor &A) |
10566 | : AACallEdgesImpl(IRP, A) {} |
10567 | /// See AbstractAttribute::updateImpl(...). |
10568 | ChangeStatus updateImpl(Attributor &A) override { |
10569 | ChangeStatus Change = ChangeStatus::UNCHANGED; |
10570 | |
10571 | auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool { |
10572 | if (Function *Fn = dyn_cast<Function>(Val: &V)) { |
10573 | addCalledFunction(Fn, Change); |
10574 | } else { |
10575 | LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n" ); |
10576 | setHasUnknownCallee(NonAsm: true, Change); |
10577 | } |
10578 | |
10579 | // Explore all values. |
10580 | return true; |
10581 | }; |
10582 | |
10583 | SmallVector<AA::ValueAndContext> Values; |
10584 | // Process any value that we might call. |
10585 | auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) { |
10586 | if (isa<Constant>(Val: V)) { |
10587 | VisitValue(*V, CtxI); |
10588 | return; |
10589 | } |
10590 | |
10591 | bool UsedAssumedInformation = false; |
10592 | Values.clear(); |
10593 | if (!A.getAssumedSimplifiedValues(IRP: IRPosition::value(V: *V), AA: *this, Values, |
10594 | S: AA::AnyScope, UsedAssumedInformation)) { |
10595 | Values.push_back(Elt: {*V, CtxI}); |
10596 | } |
10597 | for (auto &VAC : Values) |
10598 | VisitValue(*VAC.getValue(), VAC.getCtxI()); |
10599 | }; |
10600 | |
10601 | CallBase *CB = cast<CallBase>(Val: getCtxI()); |
10602 | |
10603 | if (auto *IA = dyn_cast<InlineAsm>(Val: CB->getCalledOperand())) { |
10604 | if (IA->hasSideEffects() && |
10605 | !hasAssumption(F: *CB->getCaller(), AssumptionStr: "ompx_no_call_asm" ) && |
10606 | !hasAssumption(CB: *CB, AssumptionStr: "ompx_no_call_asm" )) { |
10607 | setHasUnknownCallee(NonAsm: false, Change); |
10608 | } |
10609 | return Change; |
10610 | } |
10611 | |
10612 | if (CB->isIndirectCall()) |
10613 | if (auto *IndirectCallAA = A.getAAFor<AAIndirectCallInfo>( |
10614 | QueryingAA: *this, IRP: getIRPosition(), DepClass: DepClassTy::OPTIONAL)) |
10615 | if (IndirectCallAA->foreachCallee( |
10616 | CB: [&](Function *Fn) { return VisitValue(*Fn, CB); })) |
10617 | return Change; |
10618 | |
10619 | // The simplest case: handle the called operand directly.
10620 | ProcessCalledOperand(CB->getCalledOperand(), CB); |
10621 | |
10622 | // Process callback functions. |
10623 | SmallVector<const Use *, 4u> CallbackUses; |
10624 | AbstractCallSite::getCallbackUses(CB: *CB, CallbackUses); |
10625 | for (const Use *U : CallbackUses) |
10626 | ProcessCalledOperand(U->get(), CB); |
10627 | |
10628 | return Change; |
10629 | } |
10630 | }; |
10631 | |
struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);

      auto *CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      if (!CBEdges)
        return false;
      if (CBEdges->hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges->hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges->getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation,
                                           /* CheckBBLivenessOnly */ true)) {
      // If we haven't looked at all call like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};
10671 | |
10672 | /// -------------------AAInterFnReachability Attribute-------------------------- |
10673 | |
10674 | struct AAInterFnReachabilityFunction |
10675 | : public CachedReachabilityAA<AAInterFnReachability, Function> { |
10676 | using Base = CachedReachabilityAA<AAInterFnReachability, Function>; |
10677 | AAInterFnReachabilityFunction(const IRPosition &IRP, Attributor &A) |
10678 | : Base(IRP, A) {} |
10679 | |
  bool instructionCanReach(
      Attributor &A, const Instruction &From, const Function &To,
      const AA::InstExclusionSetTy *ExclusionSet) const override {
    assert(From.getFunction() == getAnchorScope() && "Queried the wrong AA!");
    auto *NonConstThis = const_cast<AAInterFnReachabilityFunction *>(this);

    RQITy StackRQI(A, From, To, ExclusionSet, false);
    typename RQITy::Reachable Result;
    if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
      return NonConstThis->isReachableImpl(A, StackRQI,
                                           /*IsTemporaryRQI=*/true);
    return Result == RQITy::Reachable::Yes;
  }

  bool isReachableImpl(Attributor &A, RQITy &RQI,
                       bool IsTemporaryRQI) override {
    const Instruction *EntryI =
        &RQI.From->getFunction()->getEntryBlock().front();
    if (EntryI != RQI.From &&
        !instructionCanReach(A, *EntryI, *RQI.To, nullptr))
      return rememberResult(A, RQITy::Reachable::No, RQI, false,
                            IsTemporaryRQI);

    auto CheckReachableCallBase = [&](CallBase *CB) {
      auto *CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
      if (!CBEdges || !CBEdges->getState().isValidState())
        return false;
      // TODO Check To backwards in this case.
      if (CBEdges->hasUnknownCallee())
        return false;

      for (Function *Fn : CBEdges->getOptimisticEdges()) {
        if (Fn == RQI.To)
          return false;

        if (Fn->isDeclaration()) {
          if (Fn->hasFnAttribute(Attribute::NoCallback))
            continue;
          // TODO Check To backwards in this case.
          return false;
        }

        if (Fn == getAnchorScope()) {
          if (EntryI == RQI.From)
            continue;
          return false;
        }

        const AAInterFnReachability *InterFnReachability =
            A.getAAFor<AAInterFnReachability>(*this, IRPosition::function(*Fn),
                                              DepClassTy::OPTIONAL);

        const Instruction &FnFirstInst = Fn->getEntryBlock().front();
        if (!InterFnReachability ||
            InterFnReachability->instructionCanReach(A, FnFirstInst, *RQI.To,
                                                     RQI.ExclusionSet))
          return false;
      }
      return true;
    };

    const auto *IntraFnReachability = A.getAAFor<AAIntraFnReachability>(
        *this, IRPosition::function(*RQI.From->getFunction()),
        DepClassTy::OPTIONAL);

    // Determine call-like instructions that we can reach from the inst.
    auto CheckCallBase = [&](Instruction &CBInst) {
      // There are usually fewer nodes in the call graph; check inter-function
      // reachability first.
      if (CheckReachableCallBase(cast<CallBase>(&CBInst)))
        return true;
      return IntraFnReachability && !IntraFnReachability->isAssumedReachable(
                                        A, *RQI.From, CBInst, RQI.ExclusionSet);
    };

    bool UsedExclusionSet = /* conservative */ true;
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckCallBase, *this,
                                           UsedAssumedInformation,
                                           /* CheckBBLivenessOnly */ true))
      return rememberResult(A, RQITy::Reachable::Yes, RQI, UsedExclusionSet,
                            IsTemporaryRQI);

    return rememberResult(A, RQITy::Reachable::No, RQI, UsedExclusionSet,
                          IsTemporaryRQI);
  }
10767 | |
10768 | void trackStatistics() const override {} |
10769 | }; |
10770 | } // namespace |
10771 | |
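/// Ask \p AAType for an assumed constant at position \p IRP. Only integer
/// types are handled; std::nullopt means the answer is still pending, while
/// nullptr means no constant could be deduced.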
10772 | template <typename AAType> |
10773 | static std::optional<Constant *> |
10774 | askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA, |
10775 | const IRPosition &IRP, Type &Ty) { |
10776 | if (!Ty.isIntegerTy()) |
10777 | return nullptr; |
10778 | |
10779 | // This will also pass the call base context. |
10780 | const auto *AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE); |
10781 | if (!AA) |
10782 | return nullptr; |
10783 | |
10784 | std::optional<Constant *> COpt = AA->getAssumedConstant(A); |
10785 | |
  if (!COpt.has_value()) {
    A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
    return std::nullopt;
  }
  if (auto *C = *COpt) {
    A.recordDependence(*AA, QueryingAA, DepClassTy::OPTIONAL);
    return C;
  }
10794 | return nullptr; |
10795 | } |
10796 | |
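/// Combine all values in \p Values into a single value via the AA value
/// lattice: roughly, the unique value all entries agree on, or nullptr when
/// they conflict. An empty \p Values set yields undef.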
10797 | Value *AAPotentialValues::getSingleValue( |
10798 | Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, |
10799 | SmallVectorImpl<AA::ValueAndContext> &Values) { |
10800 | Type &Ty = *IRP.getAssociatedType(); |
10801 | std::optional<Value *> V; |
10802 | for (auto &It : Values) { |
    V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
    if (V.has_value() && !*V)
      break;
  }
  if (!V.has_value())
    return UndefValue::get(&Ty);
10809 | return *V; |
10810 | } |
10811 | |
10812 | namespace { |
10813 | struct AAPotentialValuesImpl : AAPotentialValues { |
10814 | using StateType = PotentialLLVMValuesState; |
10815 | |
10816 | AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A) |
10817 | : AAPotentialValues(IRP, A) {} |
10818 | |
10819 | /// See AbstractAttribute::initialize(..). |
10820 | void initialize(Attributor &A) override { |
    if (A.hasSimplificationCallback(getIRPosition())) {
      indicatePessimisticFixpoint();
      return;
    }
    Value *Stripped = getAssociatedValue().stripPointerCasts();
    if (isa<Constant>(Stripped) && !isa<ConstantExpr>(Stripped)) {
      addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
               getAnchorScope());
10829 | indicateOptimisticFixpoint(); |
10830 | return; |
10831 | } |
10832 | AAPotentialValues::initialize(A); |
10833 | } |
10834 | |
10835 | /// See AbstractAttribute::getAsStr(). |
10836 | const std::string getAsStr(Attributor *A) const override { |
10837 | std::string Str; |
10838 | llvm::raw_string_ostream OS(Str); |
10839 | OS << getState(); |
10840 | return Str; |
10841 | } |
10842 | |
10843 | template <typename AAType> |
10844 | static std::optional<Value *> askOtherAA(Attributor &A, |
10845 | const AbstractAttribute &AA, |
10846 | const IRPosition &IRP, Type &Ty) { |
    if (isa<Constant>(IRP.getAssociatedValue()))
      return &IRP.getAssociatedValue();
    std::optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
    if (!C)
      return std::nullopt;
    if (*C)
      if (auto *CC = AA::getWithType(**C, Ty))
10854 | return CC; |
10855 | return nullptr; |
10856 | } |
10857 | |
10858 | virtual void addValue(Attributor &A, StateType &State, Value &V, |
10859 | const Instruction *CtxI, AA::ValueScope S, |
10860 | Function *AnchorScope) const { |
10861 | |
10862 | IRPosition ValIRP = IRPosition::value(V); |
    if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
      for (const auto &U : CB->args()) {
        if (U.get() != &V)
          continue;
        ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
        break;
      }
    }

    Value *VPtr = &V;
    if (ValIRP.getAssociatedType()->isIntegerTy()) {
      Type &Ty = *getAssociatedType();
      std::optional<Value *> SimpleV =
          askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
      if (SimpleV.has_value() && !*SimpleV) {
        auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
            *this, ValIRP, DepClassTy::OPTIONAL);
        if (PotentialConstantsAA && PotentialConstantsAA->isValidState()) {
          for (const auto &It : PotentialConstantsAA->getAssumedSet())
            State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
          if (PotentialConstantsAA->undefIsContained())
            State.unionAssumed({{*UndefValue::get(&Ty), nullptr}, S});
          return;
        }
      }
      if (!SimpleV.has_value())
        return;

      if (*SimpleV)
        VPtr = *SimpleV;
    }

    if (isa<ConstantInt>(VPtr))
      CtxI = nullptr;
    if (!AA::isValidInScope(*VPtr, AnchorScope))
      S = AA::ValueScope(S | AA::Interprocedural);

    State.unionAssumed({{*VPtr, CtxI}, S});
10901 | } |
10902 | |
10903 | /// Helper struct to tie a value+context pair together with the scope for |
10904 | /// which this is the simplified version. |
10905 | struct ItemInfo { |
10906 | AA::ValueAndContext I; |
10907 | AA::ValueScope S; |
10908 | |
10909 | bool operator==(const ItemInfo &II) const { |
10910 | return II.I == I && II.S == S; |
10911 | }; |
10912 | bool operator<(const ItemInfo &II) const { |
      return std::tie(I, S) < std::tie(II.I, II.S);
10914 | }; |
10915 | }; |
10916 | |
10917 | bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) { |
10918 | SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap; |
10919 | for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) { |
10920 | if (!(CS & S)) |
10921 | continue; |
10922 | |
10923 | bool UsedAssumedInformation = false; |
10924 | SmallVector<AA::ValueAndContext> Values; |
      if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
                                        UsedAssumedInformation))
        return false;

      for (auto &It : Values)
        ValueScopeMap[It] += CS;
    }
    for (auto &It : ValueScopeMap)
      addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
               AA::ValueScope(It.second), getAnchorScope());
10935 | |
10936 | return true; |
10937 | } |
10938 | |
10939 | void giveUpOnIntraprocedural(Attributor &A) { |
    auto NewS = StateType::getBestState(getState());
    for (const auto &It : getAssumedSet()) {
      if (It.second == AA::Intraprocedural)
        continue;
      addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
               AA::Interprocedural, getAnchorScope());
    }
    assert(!undefIsContained() && "Undef should be an explicit value!");
    addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
             getAnchorScope());
    getState() = NewS;
10951 | } |
10952 | |
10953 | /// See AbstractState::indicatePessimisticFixpoint(...). |
10954 | ChangeStatus indicatePessimisticFixpoint() override { |
    getState() = StateType::getBestState(getState());
    getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
10957 | AAPotentialValues::indicateOptimisticFixpoint(); |
10958 | return ChangeStatus::CHANGED; |
10959 | } |
10960 | |
10961 | /// See AbstractAttribute::updateImpl(...). |
10962 | ChangeStatus updateImpl(Attributor &A) override { |
10963 | return indicatePessimisticFixpoint(); |
10964 | } |
10965 | |
10966 | /// See AbstractAttribute::manifest(...). |
10967 | ChangeStatus manifest(Attributor &A) override { |
10968 | SmallVector<AA::ValueAndContext> Values; |
10969 | for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) { |
10970 | Values.clear(); |
10971 | if (!getAssumedSimplifiedValues(A, Values, S)) |
10972 | continue; |
10973 | Value &OldV = getAssociatedValue(); |
      if (isa<UndefValue>(OldV))
        continue;
      Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
      if (!NewV || NewV == &OldV)
        continue;
      if (getCtxI() &&
          !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
        continue;
      if (A.changeAfterManifest(getIRPosition(), *NewV))
        return ChangeStatus::CHANGED;
10983 | return ChangeStatus::CHANGED; |
10984 | } |
10985 | return ChangeStatus::UNCHANGED; |
10986 | } |
10987 | |
10988 | bool getAssumedSimplifiedValues( |
10989 | Attributor &A, SmallVectorImpl<AA::ValueAndContext> &Values, |
10990 | AA::ValueScope S, bool RecurseForSelectAndPHI = false) const override { |
10991 | if (!isValidState()) |
10992 | return false; |
10993 | bool UsedAssumedInformation = false; |
10994 | for (const auto &It : getAssumedSet()) |
10995 | if (It.second & S) { |
        if (RecurseForSelectAndPHI && (isa<PHINode>(It.first.getValue()) ||
                                       isa<SelectInst>(It.first.getValue()))) {
          if (A.getAssumedSimplifiedValues(
                  IRPosition::inst(*cast<Instruction>(It.first.getValue())),
                  this, Values, S, UsedAssumedInformation))
            continue;
        }
        Values.push_back(It.first);
      }
    assert(!undefIsContained() && "Undef should be an explicit value!");
11006 | return true; |
11007 | } |
11008 | }; |
11009 | |
11010 | struct AAPotentialValuesFloating : AAPotentialValuesImpl { |
11011 | AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A) |
11012 | : AAPotentialValuesImpl(IRP, A) {} |
11013 | |
11014 | /// See AbstractAttribute::updateImpl(...). |
11015 | ChangeStatus updateImpl(Attributor &A) override { |
11016 | auto AssumedBefore = getAssumed(); |
11017 | |
    genericValueTraversal(A, &getAssociatedValue());
11019 | |
11020 | return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED |
11021 | : ChangeStatus::CHANGED; |
11022 | } |
11023 | |
11024 | /// Helper struct to remember which AAIsDead instances we actually used. |
11025 | struct LivenessInfo { |
11026 | const AAIsDead *LivenessAA = nullptr; |
11027 | bool AnyDead = false; |
11028 | }; |
11029 | |
11030 | /// Check if \p Cmp is a comparison we can simplify. |
11031 | /// |
11032 | /// We handle multiple cases, one in which at least one operand is an |
11033 | /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other |
11034 | /// operand. Return true if successful, in that case Worklist will be updated. |
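  /// For example, given `%c = icmp eq ptr %p, null` where `%p` is assumed
  /// non-null, `%c` simplifies to `false` (and to `true` for the `ne`
  /// predicate); illustrative IR, not taken from a test case.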
11035 | bool handleCmp(Attributor &A, Value &Cmp, Value *LHS, Value *RHS, |
11036 | CmpInst::Predicate Pred, ItemInfo II, |
11037 | SmallVectorImpl<ItemInfo> &Worklist) { |
11038 | |
11039 | // Simplify the operands first. |
11040 | bool UsedAssumedInformation = false; |
11041 | SmallVector<AA::ValueAndContext> LHSValues, RHSValues; |
    auto GetSimplifiedValues = [&](Value &V,
                                   SmallVector<AA::ValueAndContext> &Values) {
      if (!A.getAssumedSimplifiedValues(
              IRPosition::value(V, getCallBaseContext()), this, Values,
              AA::Intraprocedural, UsedAssumedInformation)) {
        Values.clear();
        Values.push_back(AA::ValueAndContext{V, II.I.getCtxI()});
      }
      return Values.empty();
    };
    if (GetSimplifiedValues(*LHS, LHSValues))
      return true;
    if (GetSimplifiedValues(*RHS, RHSValues))
      return true;

    LLVMContext &Ctx = LHS->getContext();

    InformationCache &InfoCache = A.getInfoCache();
    Instruction *CmpI = dyn_cast<Instruction>(&Cmp);
    Function *F = CmpI ? CmpI->getFunction() : nullptr;
    const auto *DT =
        F ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F)
          : nullptr;
    const auto *TLI =
        F ? A.getInfoCache().getTargetLibraryInfoForFunction(*F) : nullptr;
    auto *AC =
        F ? InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F)
          : nullptr;

    const DataLayout &DL = A.getDataLayout();
    SimplifyQuery Q(DL, TLI, DT, AC, CmpI);

    auto CheckPair = [&](Value &LHSV, Value &RHSV) {
      if (isa<UndefValue>(LHSV) || isa<UndefValue>(RHSV)) {
        addValue(A, getState(), *UndefValue::get(Cmp.getType()),
                 /* CtxI */ nullptr, II.S, getAnchorScope());
        return true;
      }

      // Handle the trivial case first in which we don't even need to think
      // about null or non-null.
      if (&LHSV == &RHSV &&
          (CmpInst::isTrueWhenEqual(Pred) || CmpInst::isFalseWhenEqual(Pred))) {
        Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
                                          CmpInst::isTrueWhenEqual(Pred));
        addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
                 getAnchorScope());
        return true;
      }

      auto *TypedLHS = AA::getWithType(LHSV, *LHS->getType());
      auto *TypedRHS = AA::getWithType(RHSV, *RHS->getType());
      if (TypedLHS && TypedRHS) {
        Value *NewV = simplifyCmpInst(Pred, TypedLHS, TypedRHS, Q);
        if (NewV && NewV != &Cmp) {
          addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
                   getAnchorScope());
          return true;
        }
      }

      // From now on we only handle equalities (==, !=).
      if (!CmpInst::isEquality(Pred))
        return false;

      bool LHSIsNull = isa<ConstantPointerNull>(LHSV);
      bool RHSIsNull = isa<ConstantPointerNull>(RHSV);
      if (!LHSIsNull && !RHSIsNull)
        return false;

      // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
      // non-nullptr operand and if we assume it's non-null we can conclude the
      // result of the comparison.
      assert((LHSIsNull || RHSIsNull) &&
             "Expected nullptr versus non-nullptr comparison at this point");

      // The index is the operand that we assume is not null.
      unsigned PtrIdx = LHSIsNull;
      bool IsKnownNonNull;
      bool IsAssumedNonNull = AA::hasAssumedIRAttr<Attribute::NonNull>(
          A, this, IRPosition::value(*(PtrIdx ? &RHSV : &LHSV)),
          DepClassTy::REQUIRED, IsKnownNonNull);
      if (!IsAssumedNonNull)
        return false;

      // The new value depends on the predicate, true for != and false for ==.
      Constant *NewV =
          ConstantInt::get(Type::getInt1Ty(Ctx), Pred == CmpInst::ICMP_NE);
      addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
               getAnchorScope());
      return true;
    };
11134 | |
11135 | for (auto &LHSValue : LHSValues) |
11136 | for (auto &RHSValue : RHSValues) |
11137 | if (!CheckPair(*LHSValue.getValue(), *RHSValue.getValue())) |
11138 | return false; |
11139 | return true; |
11140 | } |
11141 | |
11142 | bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II, |
11143 | SmallVectorImpl<ItemInfo> &Worklist) { |
11144 | const Instruction *CtxI = II.I.getCtxI(); |
11145 | bool UsedAssumedInformation = false; |
11146 | |
    std::optional<Constant *> C =
        A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
    bool NoValueYet = !C.has_value();
    if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
      return true;
    if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
      if (CI->isZero())
        Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
      else
        Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
    } else if (&SI == &getAssociatedValue()) {
      // We could not simplify the condition, assume both values.
      Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
      Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
    } else {
      std::optional<Value *> SimpleV = A.getAssumedSimplified(
          IRPosition::inst(SI), *this, UsedAssumedInformation, II.S);
      if (!SimpleV.has_value())
        return true;
      if (*SimpleV) {
        addValue(A, getState(), **SimpleV, CtxI, II.S, getAnchorScope());
        return true;
      }
      return false;
    }
11172 | return true; |
11173 | } |
11174 | |
11175 | bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II, |
11176 | SmallVectorImpl<ItemInfo> &Worklist) { |
11177 | SmallSetVector<Value *, 4> PotentialCopies; |
11178 | SmallSetVector<Instruction *, 4> PotentialValueOrigins; |
11179 | bool UsedAssumedInformation = false; |
    if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
                                        PotentialValueOrigins, *this,
                                        UsedAssumedInformation,
                                        /* OnlyExact */ true)) {
      LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
                           "loaded values for load instruction "
                        << LI << "\n");
      return false;
    }
11189 | |
11190 | // Do not simplify loads that are only used in llvm.assume if we cannot also |
11191 | // remove all stores that may feed into the load. The reason is that the |
11192 | // assume is probably worth something as long as the stores are around. |
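    // For example, if `store i1 true, ptr %p` feeds `%v = load i1, ptr %p`
    // and `%v` is only used by `llvm.assume(i1 %v)`, we may replace the load
    // only if the store can be removed as well (illustrative IR).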
    InformationCache &InfoCache = A.getInfoCache();
    if (InfoCache.isOnlyUsedByAssume(LI)) {
      if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
            if (!I || isa<AssumeInst>(I))
              return true;
            if (auto *SI = dyn_cast<StoreInst>(I))
              return A.isAssumedDead(SI->getOperandUse(0), this,
                                     /* LivenessAA */ nullptr,
                                     UsedAssumedInformation,
                                     /* CheckBBLivenessOnly */ false);
            return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
                                   UsedAssumedInformation,
                                   /* CheckBBLivenessOnly */ false);
          })) {
        LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is only used by assumes "
                             "and we cannot delete all the stores: "
                          << LI << "\n");
        return false;
      }
    }

    // Values have to be dynamically unique or we lose the fact that a
    // single llvm::Value might represent two runtime values (e.g.,
    // stack locations in different recursive calls).
    const Instruction *CtxI = II.I.getCtxI();
    bool ScopeIsLocal = (II.S & AA::Intraprocedural);
    bool AllLocal = ScopeIsLocal;
    bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
      AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
      return AA::isDynamicallyUnique(A, *this, *PC);
    });
    if (!DynamicallyUnique) {
      LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
                           "values are dynamically unique: "
                        << LI << "\n");
      return false;
    }

    for (auto *PotentialCopy : PotentialCopies) {
      if (AllLocal) {
        Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
      } else {
        Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
      }
    }
    if (!AllLocal && ScopeIsLocal)
      addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
    return true;
11241 | } |
11242 | |
11243 | bool handlePHINode( |
11244 | Attributor &A, PHINode &PHI, ItemInfo II, |
11245 | SmallVectorImpl<ItemInfo> &Worklist, |
11246 | SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) { |
    auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
      LivenessInfo &LI = LivenessAAs[&F];
      if (!LI.LivenessAA)
        LI.LivenessAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
                                             DepClassTy::NONE);
      return LI;
    };

    if (&PHI == &getAssociatedValue()) {
      LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
      const auto *CI =
          A.getInfoCache().getAnalysisResultForFunction<CycleAnalysis>(
              *PHI.getFunction());

      Cycle *C = nullptr;
      bool CyclePHI = mayBeInCycle(CI, &PHI, /* HeaderOnly */ true, &C);
      for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
        if (LI.LivenessAA &&
            LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
          LI.AnyDead = true;
          continue;
        }
        Value *V = PHI.getIncomingValue(u);
        if (V == &PHI)
          continue;
11273 | |
11274 | // If the incoming value is not the PHI but an instruction in the same |
11275 | // cycle we might have multiple versions of it flying around. |
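        // For example, an induction variable update `%iv.next = add i32 %iv, 1`
        // inside the loop takes a different value on every iteration, so we
        // must not treat it as a single simplified value (illustrative IR).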
        if (CyclePHI && isa<Instruction>(V) &&
            (!C || C->contains(cast<Instruction>(V)->getParent())))
          return false;

        Worklist.push_back({{*V, IncomingBB->getTerminator()}, II.S});
      }
      return true;
    }

    bool UsedAssumedInformation = false;
    std::optional<Value *> SimpleV = A.getAssumedSimplified(
        IRPosition::inst(PHI), *this, UsedAssumedInformation, II.S);
    if (!SimpleV.has_value())
      return true;
    if (!(*SimpleV))
      return false;
    addValue(A, getState(), **SimpleV, &PHI, II.S, getAnchorScope());
11293 | return true; |
11294 | } |
11295 | |
  /// Use the generic, non-optimistic InstSimplify functionality if we managed to
11297 | /// simplify any operand of the instruction \p I. Return true if successful, |
11298 | /// in that case Worklist will be updated. |
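  /// For example, if the operand `%x` of `%a = add i32 %x, %y` is assumed to
  /// simplify to `0`, InstSimplify folds the whole instruction to `%y`
  /// (illustrative IR).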
11299 | bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II, |
11300 | SmallVectorImpl<ItemInfo> &Worklist) { |
11301 | bool SomeSimplified = false; |
11302 | bool UsedAssumedInformation = false; |
11303 | |
11304 | SmallVector<Value *, 8> NewOps(I.getNumOperands()); |
11305 | int Idx = 0; |
11306 | for (Value *Op : I.operands()) { |
      const auto &SimplifiedOp = A.getAssumedSimplified(
          IRPosition::value(*Op, getCallBaseContext()), *this,
          UsedAssumedInformation, AA::Intraprocedural);
11310 | // If we are not sure about any operand we are not sure about the entire |
11311 | // instruction, we'll wait. |
11312 | if (!SimplifiedOp.has_value()) |
11313 | return true; |
11314 | |
11315 | if (*SimplifiedOp) |
11316 | NewOps[Idx] = *SimplifiedOp; |
11317 | else |
11318 | NewOps[Idx] = Op; |
11319 | |
11320 | SomeSimplified |= (NewOps[Idx] != Op); |
11321 | ++Idx; |
11322 | } |
11323 | |
11324 | // We won't bother with the InstSimplify interface if we didn't simplify any |
11325 | // operand ourselves. |
11326 | if (!SomeSimplified) |
11327 | return false; |
11328 | |
11329 | InformationCache &InfoCache = A.getInfoCache(); |
11330 | Function *F = I.getFunction(); |
    const auto *DT =
        InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
    auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);

    const DataLayout &DL = I.getDataLayout();
    SimplifyQuery Q(DL, TLI, DT, AC, &I);
    Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q);
11339 | if (!NewV || NewV == &I) |
11340 | return false; |
11341 | |
11342 | LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to " |
                      << *NewV << "\n");
    Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
11345 | return true; |
11346 | } |
11347 | |
11348 | bool simplifyInstruction( |
11349 | Attributor &A, Instruction &I, ItemInfo II, |
11350 | SmallVectorImpl<ItemInfo> &Worklist, |
11351 | SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) { |
    if (auto *CI = dyn_cast<CmpInst>(&I))
      return handleCmp(A, *CI, CI->getOperand(0), CI->getOperand(1),
                       CI->getPredicate(), II, Worklist);

    switch (I.getOpcode()) {
    case Instruction::Select:
      return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
    case Instruction::PHI:
      return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
    case Instruction::Load:
      return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
    default:
      return handleGenericInst(A, I, II, Worklist);
    };
11366 | return false; |
11367 | } |
11368 | |
11369 | void genericValueTraversal(Attributor &A, Value *InitialV) { |
11370 | SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs; |
11371 | |
11372 | SmallSet<ItemInfo, 16> Visited; |
11373 | SmallVector<ItemInfo, 16> Worklist; |
    Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
11375 | |
11376 | int Iteration = 0; |
11377 | do { |
11378 | ItemInfo II = Worklist.pop_back_val(); |
11379 | Value *V = II.I.getValue(); |
11380 | assert(V); |
11381 | const Instruction *CtxI = II.I.getCtxI(); |
11382 | AA::ValueScope S = II.S; |
11383 | |
11384 | // Check if we should process the current value. To prevent endless |
11385 | // recursion keep a record of the values we followed! |
      if (!Visited.insert(II).second)
11387 | continue; |
11388 | |
11389 | // Make sure we limit the compile time for complex expressions. |
11390 | if (Iteration++ >= MaxPotentialValuesIterations) { |
11391 | LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: " |
                          << Iteration << "!\n");
        addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11394 | continue; |
11395 | } |
11396 | |
11397 | // Explicitly look through calls with a "returned" attribute if we do |
11398 | // not have a pointer as stripPointerCasts only works on them. |
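      // For example, `%r = call i32 @f(i32 returned %x)` is guaranteed to
      // return `%x`, so the traversal can continue with `%x` (illustrative
      // IR).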
      Value *NewV = nullptr;
      if (V->getType()->isPointerTy()) {
        NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
      } else {
        if (auto *CB = dyn_cast<CallBase>(V))
          if (auto *Callee =
                  dyn_cast_if_present<Function>(CB->getCalledOperand())) {
            for (Argument &Arg : Callee->args())
              if (Arg.hasReturnedAttr()) {
                NewV = CB->getArgOperand(Arg.getArgNo());
                break;
              }
          }
      }
      if (NewV && NewV != V) {
        Worklist.push_back({{*NewV, CtxI}, S});
11415 | continue; |
11416 | } |
11417 | |
      if (auto *I = dyn_cast<Instruction>(V)) {
        if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
11420 | continue; |
11421 | } |
11422 | |
      if (V != InitialV || isa<Argument>(V))
        if (recurseForValue(A, IRPosition::value(*V), II.S))
11425 | continue; |
11426 | |
11427 | // If we haven't stripped anything we give up. |
11428 | if (V == InitialV && CtxI == getCtxI()) { |
11429 | indicatePessimisticFixpoint(); |
11430 | return; |
11431 | } |
11432 | |
      addValue(A, getState(), *V, CtxI, S, getAnchorScope());
11434 | } while (!Worklist.empty()); |
11435 | |
    // If we actually used liveness information, we have to record a
    // dependence.
    for (auto &It : LivenessAAs)
      if (It.second.AnyDead)
        A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
11441 | } |
11442 | |
11443 | /// See AbstractAttribute::trackStatistics() |
11444 | void trackStatistics() const override { |
11445 | STATS_DECLTRACK_FLOATING_ATTR(potential_values) |
11446 | } |
11447 | }; |
11448 | |
11449 | struct AAPotentialValuesArgument final : AAPotentialValuesImpl { |
11450 | using Base = AAPotentialValuesImpl; |
11451 | AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A) |
11452 | : Base(IRP, A) {} |
11453 | |
11454 | /// See AbstractAttribute::initialize(..). |
11455 | void initialize(Attributor &A) override { |
    auto &Arg = cast<Argument>(getAssociatedValue());
11457 | if (Arg.hasPointeeInMemoryValueAttr()) |
11458 | indicatePessimisticFixpoint(); |
11459 | } |
11460 | |
11461 | /// See AbstractAttribute::updateImpl(...). |
11462 | ChangeStatus updateImpl(Attributor &A) override { |
11463 | auto AssumedBefore = getAssumed(); |
11464 | |
11465 | unsigned ArgNo = getCalleeArgNo(); |
11466 | |
11467 | bool UsedAssumedInformation = false; |
11468 | SmallVector<AA::ValueAndContext> Values; |
11469 | auto CallSitePred = [&](AbstractCallSite ACS) { |
11470 | const auto CSArgIRP = IRPosition::callsite_argument(ACS, ArgNo); |
11471 | if (CSArgIRP.getPositionKind() == IRP_INVALID) |
11472 | return false; |
11473 | |
      if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
                                        AA::Interprocedural,
                                        UsedAssumedInformation))
11477 | return false; |
11478 | |
11479 | return isValidState(); |
11480 | }; |
11481 | |
    if (!A.checkForAllCallSites(CallSitePred, *this,
11483 | /* RequireAllCallSites */ true, |
11484 | UsedAssumedInformation)) |
11485 | return indicatePessimisticFixpoint(); |
11486 | |
11487 | Function *Fn = getAssociatedFunction(); |
11488 | bool AnyNonLocal = false; |
11489 | for (auto &It : Values) { |
      if (isa<Constant>(It.getValue())) {
        addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
                 getAnchorScope());
        continue;
      }
      if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
        return indicatePessimisticFixpoint();

      if (auto *Arg = dyn_cast<Argument>(It.getValue()))
        if (Arg->getParent() == Fn) {
          addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
                   getAnchorScope());
          continue;
        }
      addValue(A, getState(), *It.getValue(), It.getCtxI(),
               AA::Interprocedural, getAnchorScope());
      AnyNonLocal = true;
    }
    assert(!undefIsContained() && "Undef should be an explicit value!");
11509 | if (AnyNonLocal) |
11510 | giveUpOnIntraprocedural(A); |
11511 | |
11512 | return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED |
11513 | : ChangeStatus::CHANGED; |
11514 | } |
11515 | |
11516 | /// See AbstractAttribute::trackStatistics() |
11517 | void trackStatistics() const override { |
11518 | STATS_DECLTRACK_ARG_ATTR(potential_values) |
11519 | } |
11520 | }; |
11521 | |
11522 | struct AAPotentialValuesReturned : public AAPotentialValuesFloating { |
11523 | using Base = AAPotentialValuesFloating; |
11524 | AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A) |
11525 | : Base(IRP, A) {} |
11526 | |
11527 | /// See AbstractAttribute::initialize(..). |
11528 | void initialize(Attributor &A) override { |
11529 | Function *F = getAssociatedFunction(); |
11530 | if (!F || F->isDeclaration() || F->getReturnType()->isVoidTy()) { |
11531 | indicatePessimisticFixpoint(); |
11532 | return; |
11533 | } |
11534 | |
11535 | for (Argument &Arg : F->args()) |
11536 | if (Arg.hasReturnedAttr()) { |
        addValue(A, getState(), Arg, nullptr, AA::AnyScope, F);
11538 | ReturnedArg = &Arg; |
11539 | break; |
11540 | } |
    if (!A.isFunctionIPOAmendable(*F) ||
        A.hasSimplificationCallback(getIRPosition())) {
11543 | if (!ReturnedArg) |
11544 | indicatePessimisticFixpoint(); |
11545 | else |
11546 | indicateOptimisticFixpoint(); |
11547 | } |
11548 | } |
11549 | |
11550 | /// See AbstractAttribute::updateImpl(...). |
11551 | ChangeStatus updateImpl(Attributor &A) override { |
11552 | auto AssumedBefore = getAssumed(); |
11553 | bool UsedAssumedInformation = false; |
11554 | |
11555 | SmallVector<AA::ValueAndContext> Values; |
11556 | Function *AnchorScope = getAnchorScope(); |
11557 | auto HandleReturnedValue = [&](Value &V, Instruction *CtxI, |
11558 | bool AddValues) { |
11559 | for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) { |
11560 | Values.clear(); |
        if (!A.getAssumedSimplifiedValues(IRPosition::value(V), this, Values, S,
                                          UsedAssumedInformation,
                                          /* RecurseForSelectAndPHI */ true))
11564 | return false; |
11565 | if (!AddValues) |
11566 | continue; |
11567 | |
11568 | bool AllInterAreIntra = false; |
11569 | if (S == AA::Interprocedural) |
11570 | AllInterAreIntra = |
            llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
              return AA::isValidInScope(*VAC.getValue(), AnchorScope);
11573 | }); |
11574 | |
11575 | for (const AA::ValueAndContext &VAC : Values) { |
        addValue(A, getState(), *VAC.getValue(),
                 VAC.getCtxI() ? VAC.getCtxI() : CtxI,
                 AllInterAreIntra ? AA::AnyScope : S, AnchorScope);
11579 | } |
11580 | if (AllInterAreIntra) |
11581 | break; |
11582 | } |
11583 | return true; |
11584 | }; |
11585 | |
11586 | if (ReturnedArg) { |
11587 | HandleReturnedValue(*ReturnedArg, nullptr, true); |
11588 | } else { |
11589 | auto RetInstPred = [&](Instruction &RetI) { |
11590 | bool AddValues = true; |
        if (isa<PHINode>(RetI.getOperand(0)) ||
            isa<SelectInst>(RetI.getOperand(0))) {
          addValue(A, getState(), *RetI.getOperand(0), &RetI, AA::AnyScope,
                   AnchorScope);
          AddValues = false;
        }
        return HandleReturnedValue(*RetI.getOperand(0), &RetI, AddValues);
11598 | }; |
11599 | |
      if (!A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11601 | UsedAssumedInformation, |
11602 | /* CheckBBLivenessOnly */ true)) |
11603 | return indicatePessimisticFixpoint(); |
11604 | } |
11605 | |
11606 | return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED |
11607 | : ChangeStatus::CHANGED; |
11608 | } |
11609 | |
11610 | ChangeStatus manifest(Attributor &A) override { |
11611 | if (ReturnedArg) |
11612 | return ChangeStatus::UNCHANGED; |
11613 | SmallVector<AA::ValueAndContext> Values; |
    if (!getAssumedSimplifiedValues(A, Values, AA::ValueScope::Intraprocedural,
                                    /* RecurseForSelectAndPHI */ true))
11616 | return ChangeStatus::UNCHANGED; |
    Value *NewVal = getSingleValue(A, *this, getIRPosition(), Values);
11618 | if (!NewVal) |
11619 | return ChangeStatus::UNCHANGED; |
11620 | |
11621 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
    if (auto *Arg = dyn_cast<Argument>(NewVal)) {
      STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                      "Number of functions with unique return");
      Changed |= A.manifestAttrs(
          IRPosition::argument(*Arg),
          {Attribute::get(Arg->getContext(), Attribute::Returned)});
11628 | STATS_DECLTRACK_ARG_ATTR(returned); |
11629 | } |
11630 | |
11631 | auto RetInstPred = [&](Instruction &RetI) { |
      Value *RetOp = RetI.getOperand(0);
      if (isa<UndefValue>(RetOp) || RetOp == NewVal)
        return true;
      if (AA::isValidAtPosition({*NewVal, RetI}, A.getInfoCache()))
        if (A.changeUseAfterManifest(RetI.getOperandUse(0), *NewVal))
11637 | Changed = ChangeStatus::CHANGED; |
11638 | return true; |
11639 | }; |
11640 | bool UsedAssumedInformation = false; |
    (void)A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
11642 | UsedAssumedInformation, |
11643 | /* CheckBBLivenessOnly */ true); |
11644 | return Changed; |
11645 | } |
11646 | |
11647 | ChangeStatus indicatePessimisticFixpoint() override { |
11648 | return AAPotentialValues::indicatePessimisticFixpoint(); |
11649 | } |
11650 | |
11651 | /// See AbstractAttribute::trackStatistics() |
11652 | void trackStatistics() const override{ |
11653 | STATS_DECLTRACK_FNRET_ATTR(potential_values)} |
11654 | |
  /// The argument with an existing `returned` attribute.
11656 | Argument *ReturnedArg = nullptr; |
11657 | }; |
11658 | |
11659 | struct AAPotentialValuesFunction : AAPotentialValuesImpl { |
11660 | AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A) |
11661 | : AAPotentialValuesImpl(IRP, A) {} |
11662 | |
11663 | /// See AbstractAttribute::updateImpl(...). |
11664 | ChangeStatus updateImpl(Attributor &A) override { |
11665 | llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will " |
11666 | "not be called" ); |
11667 | } |
11668 | |
11669 | /// See AbstractAttribute::trackStatistics() |
11670 | void trackStatistics() const override { |
11671 | STATS_DECLTRACK_FN_ATTR(potential_values) |
11672 | } |
11673 | }; |
11674 | |
11675 | struct AAPotentialValuesCallSite : AAPotentialValuesFunction { |
11676 | AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A) |
11677 | : AAPotentialValuesFunction(IRP, A) {} |
11678 | |
11679 | /// See AbstractAttribute::trackStatistics() |
11680 | void trackStatistics() const override { |
11681 | STATS_DECLTRACK_CS_ATTR(potential_values) |
11682 | } |
11683 | }; |
11684 | |
11685 | struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl { |
11686 | AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) |
11687 | : AAPotentialValuesImpl(IRP, A) {} |
11688 | |
11689 | /// See AbstractAttribute::updateImpl(...). |
11690 | ChangeStatus updateImpl(Attributor &A) override { |
11691 | auto AssumedBefore = getAssumed(); |
11692 | |
11693 | Function *Callee = getAssociatedFunction(); |
11694 | if (!Callee) |
11695 | return indicatePessimisticFixpoint(); |
11696 | |
11697 | bool UsedAssumedInformation = false; |
    auto *CB = cast<CallBase>(getCtxI());
    if (CB->isMustTailCall() &&
        !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
11701 | UsedAssumedInformation)) |
11702 | return indicatePessimisticFixpoint(); |
11703 | |
11704 | Function *Caller = CB->getCaller(); |
11705 | |
11706 | auto AddScope = [&](AA::ValueScope S) { |
11707 | SmallVector<AA::ValueAndContext> Values; |
      if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
                                        Values, S, UsedAssumedInformation))
11710 | return false; |
11711 | |
11712 | for (auto &It : Values) { |
11713 | Value *V = It.getValue(); |
        std::optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
            V, *CB, *this, UsedAssumedInformation);
11716 | if (!CallerV.has_value()) { |
11717 | // Nothing to do as long as no value was determined. |
11718 | continue; |
11719 | } |
11720 | V = *CallerV ? *CallerV : V; |
        if (*CallerV && AA::isDynamicallyUnique(A, *this, *V)) {
          if (recurseForValue(A, IRPosition::value(*V), S))
11723 | continue; |
11724 | } |
        if (S == AA::Intraprocedural && !AA::isValidInScope(*V, Caller)) {
11726 | giveUpOnIntraprocedural(A); |
11727 | return true; |
11728 | } |
        addValue(A, getState(), *V, CB, S, getAnchorScope());
11730 | } |
11731 | return true; |
11732 | }; |
11733 | if (!AddScope(AA::Intraprocedural)) |
11734 | return indicatePessimisticFixpoint(); |
11735 | if (!AddScope(AA::Interprocedural)) |
11736 | return indicatePessimisticFixpoint(); |
11737 | return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED |
11738 | : ChangeStatus::CHANGED; |
11739 | } |
11740 | |
11741 | ChangeStatus indicatePessimisticFixpoint() override { |
11742 | return AAPotentialValues::indicatePessimisticFixpoint(); |
11743 | } |
11744 | |
11745 | /// See AbstractAttribute::trackStatistics() |
11746 | void trackStatistics() const override { |
11747 | STATS_DECLTRACK_CSRET_ATTR(potential_values) |
11748 | } |
11749 | }; |
11750 | |
11751 | struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating { |
11752 | AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A) |
11753 | : AAPotentialValuesFloating(IRP, A) {} |
11754 | |
11755 | /// See AbstractAttribute::trackStatistics() |
11756 | void trackStatistics() const override { |
11757 | STATS_DECLTRACK_CSARG_ATTR(potential_values) |
11758 | } |
11759 | }; |
11760 | } // namespace |
11761 | |
11762 | /// ---------------------- Assumption Propagation ------------------------------ |
11763 | namespace { |
11764 | struct AAAssumptionInfoImpl : public AAAssumptionInfo { |
11765 | AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A, |
11766 | const DenseSet<StringRef> &Known) |
11767 | : AAAssumptionInfo(IRP, A, Known) {} |
11768 | |
11769 | /// See AbstractAttribute::manifest(...). |
11770 | ChangeStatus manifest(Attributor &A) override { |
11771 | // Don't manifest a universal set if it somehow made it here. |
11772 | if (getKnown().isUniversal()) |
11773 | return ChangeStatus::UNCHANGED; |
11774 | |
11775 | const IRPosition &IRP = getIRPosition(); |
11776 | SmallVector<StringRef, 0> Set(getAssumed().getSet().begin(), |
11777 | getAssumed().getSet().end()); |
    llvm::sort(Set);
    return A.manifestAttrs(IRP,
                           Attribute::get(IRP.getAnchorValue().getContext(),
                                          AssumptionAttrKey,
                                          llvm::join(Set, ",")),
                           /*ForceReplace=*/true);
11784 | } |
11785 | |
11786 | bool hasAssumption(const StringRef Assumption) const override { |
11787 | return isValidState() && setContains(Assumption); |
11788 | } |
11789 | |
11790 | /// See AbstractAttribute::getAsStr() |
11791 | const std::string getAsStr(Attributor *A) const override { |
11792 | const SetContents &Known = getKnown(); |
11793 | const SetContents &Assumed = getAssumed(); |
11794 | |
11795 | SmallVector<StringRef, 0> Set(Known.getSet().begin(), Known.getSet().end()); |
    llvm::sort(Set);
    const std::string KnownStr = llvm::join(Set, ",");

    std::string AssumedStr = "Universal";
    if (!Assumed.isUniversal()) {
      Set.assign(Assumed.getSet().begin(), Assumed.getSet().end());
      AssumedStr = llvm::join(Set, ",");
    }
    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
11805 | } |
11806 | }; |
11807 | |
11808 | /// Propagates assumption information from parent functions to all of their |
11809 | /// successors. An assumption can be propagated if the containing function |
11810 | /// dominates the called function. |
11811 | /// |
11812 | /// We start with a "known" set of assumptions already valid for the associated |
11813 | /// function and an "assumed" set that initially contains all possible |
11814 | /// assumptions. The assumed set is inter-procedurally updated by narrowing its |
11815 | /// contents as concrete values are known. The concrete values are seeded by the |
/// first nodes that are either entries into the call graph or contain no
11817 | /// assumptions. Each node is updated as the intersection of the assumed state |
11818 | /// with all of its predecessors. |
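/// For example, if every caller of a function carries the assumption string
/// "ompx_no_call_asm", the function's assumed set retains it; if any caller
/// lacks it, the intersection drops it (illustrative scenario).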
11819 | struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl { |
11820 | AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A) |
11821 | : AAAssumptionInfoImpl(IRP, A, |
                             getAssumptions(*IRP.getAssociatedFunction())) {}
11823 | |
11824 | /// See AbstractAttribute::updateImpl(...). |
11825 | ChangeStatus updateImpl(Attributor &A) override { |
11826 | bool Changed = false; |
11827 | |
11828 | auto CallSitePred = [&](AbstractCallSite ACS) { |
11829 | const auto *AssumptionAA = A.getAAFor<AAAssumptionInfo>( |
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
11832 | if (!AssumptionAA) |
11833 | return false; |
11834 | // Get the set of assumptions shared by all of this function's callers. |
      Changed |= getIntersection(AssumptionAA->getAssumed());
11836 | return !getAssumed().empty() || !getKnown().empty(); |
11837 | }; |
11838 | |
11839 | bool UsedAssumedInformation = false; |
11840 | // Get the intersection of all assumptions held by this node's predecessors. |
11841 | // If we don't know all the call sites then this is either an entry into the |
11842 | // call graph or an empty node. This node is known to only contain its own |
11843 | // assumptions and can be propagated to its successors. |
    if (!A.checkForAllCallSites(CallSitePred, *this, true,
11845 | UsedAssumedInformation)) |
11846 | return indicatePessimisticFixpoint(); |
11847 | |
11848 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
11849 | } |
11850 | |
11851 | void trackStatistics() const override {} |
11852 | }; |
11853 | |
11854 | /// Assumption Info defined for call sites. |
11855 | struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl { |
11856 | |
11857 | AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A) |
11858 | : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {} |
11859 | |
11860 | /// See AbstractAttribute::initialize(...). |
11861 | void initialize(Attributor &A) override { |
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11864 | } |
11865 | |
11866 | /// See AbstractAttribute::updateImpl(...). |
11867 | ChangeStatus updateImpl(Attributor &A) override { |
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto *AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
11871 | if (!AssumptionAA) |
11872 | return indicatePessimisticFixpoint(); |
    bool Changed = getIntersection(AssumptionAA->getAssumed());
11874 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
11875 | } |
11876 | |
11877 | /// See AbstractAttribute::trackStatistics() |
11878 | void trackStatistics() const override {} |
11879 | |
11880 | private: |
  /// Helper to initialize the known set with all the assumptions this call
  /// and the callee contain.
11883 | DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) { |
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (const Function *F = CB.getCaller())
      set_union(Assumptions, getAssumptions(*F));
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
11890 | return Assumptions; |
11891 | } |
11892 | }; |
11893 | } // namespace |
11894 | |
11895 | AACallGraphNode *AACallEdgeIterator::operator*() const { |
11896 | return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>( |
      A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
11898 | } |
11899 | |
void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
11901 | |
11902 | /// ------------------------ UnderlyingObjects --------------------------------- |
11903 | |
11904 | namespace { |
11905 | struct AAUnderlyingObjectsImpl |
11906 | : StateWrapper<BooleanState, AAUnderlyingObjects> { |
11907 | using BaseTy = StateWrapper<BooleanState, AAUnderlyingObjects>; |
11908 | AAUnderlyingObjectsImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {} |
11909 | |
11910 | /// See AbstractAttribute::getAsStr(). |
11911 | const std::string getAsStr(Attributor *A) const override { |
11912 | if (!isValidState()) |
11913 | return "<invalid>" ; |
11914 | std::string Str; |
11915 | llvm::raw_string_ostream OS(Str); |
11916 | OS << "underlying objects: inter " << InterAssumedUnderlyingObjects.size() |
11917 | << " objects, intra " << IntraAssumedUnderlyingObjects.size() |
11918 | << " objects.\n" ; |
11919 | if (!InterAssumedUnderlyingObjects.empty()) { |
11920 | OS << "inter objects:\n" ; |
11921 | for (auto *Obj : InterAssumedUnderlyingObjects) |
11922 | OS << *Obj << '\n'; |
11923 | } |
11924 | if (!IntraAssumedUnderlyingObjects.empty()) { |
11925 | OS << "intra objects:\n" ; |
11926 | for (auto *Obj : IntraAssumedUnderlyingObjects) |
11927 | OS << *Obj << '\n'; |
11928 | } |
11929 | return Str; |
11930 | } |
11931 | |
11932 | /// See AbstractAttribute::trackStatistics() |
11933 | void trackStatistics() const override {} |
11934 | |
11935 | /// See AbstractAttribute::updateImpl(...). |
11936 | ChangeStatus updateImpl(Attributor &A) override { |
11937 | auto &Ptr = getAssociatedValue(); |
11938 | |
11939 | bool UsedAssumedInformation = false; |
11940 | auto DoUpdate = [&](SmallSetVector<Value *, 8> &UnderlyingObjects, |
11941 | AA::ValueScope Scope) { |
11942 | SmallPtrSet<Value *, 8> SeenObjects; |
11943 | SmallVector<AA::ValueAndContext> Values; |
11944 | |
11945 | if (!A.getAssumedSimplifiedValues(IRP: IRPosition::value(V: Ptr), AA: *this, Values, |
11946 | S: Scope, UsedAssumedInformation)) |
11947 | return UnderlyingObjects.insert(X: &Ptr); |
11948 | |
11949 | bool Changed = false; |
11950 | |
11951 | for (unsigned I = 0; I < Values.size(); ++I) { |
11952 | auto &VAC = Values[I]; |
11953 | auto *Obj = VAC.getValue(); |
11954 | Value *UO = getUnderlyingObject(V: Obj); |
11955 | if (!SeenObjects.insert(Ptr: UO ? UO : Obj).second) |
11956 | continue; |
11957 | if (UO && UO != Obj) { |
11958 | if (isa<AllocaInst>(Val: UO) || isa<GlobalValue>(Val: UO)) { |
11959 | Changed |= UnderlyingObjects.insert(X: UO); |
11960 | continue; |
11961 | } |
11962 | |
11963 | const auto *OtherAA = A.getAAFor<AAUnderlyingObjects>( |
11964 | QueryingAA: *this, IRP: IRPosition::value(V: *UO), DepClass: DepClassTy::OPTIONAL); |
11965 | auto Pred = [&](Value &V) { |
11966 | if (&V == UO) |
11967 | Changed |= UnderlyingObjects.insert(X: UO); |
11968 | else |
11969 | Values.emplace_back(Args&: V, Args: nullptr); |
11970 | return true; |
11971 | }; |
11972 | |
11973 | if (!OtherAA || !OtherAA->forallUnderlyingObjects(Pred, Scope)) |
11974 | llvm_unreachable( |
11975 | "The forall call should not return false at this position" ); |
11976 | UsedAssumedInformation |= !OtherAA->getState().isAtFixpoint(); |
11977 | continue; |
11978 | } |
11979 | |
        if (isa<SelectInst>(Obj)) {
          Changed |= handleIndirect(A, *Obj, UnderlyingObjects, Scope,
                                    UsedAssumedInformation);
11983 | continue; |
11984 | } |
        if (auto *PHI = dyn_cast<PHINode>(Obj)) {
          // Explicitly look through PHIs as we do not care about dynamic
          // uniqueness.
          for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
            Changed |=
                handleIndirect(A, *PHI->getIncomingValue(u), UnderlyingObjects,
                               Scope, UsedAssumedInformation);
11992 | } |
11993 | continue; |
11994 | } |
11995 | |
        Changed |= UnderlyingObjects.insert(Obj);
11997 | } |
11998 | |
11999 | return Changed; |
12000 | }; |
12001 | |
12002 | bool Changed = false; |
12003 | Changed |= DoUpdate(IntraAssumedUnderlyingObjects, AA::Intraprocedural); |
12004 | Changed |= DoUpdate(InterAssumedUnderlyingObjects, AA::Interprocedural); |
12005 | if (!UsedAssumedInformation) |
12006 | indicateOptimisticFixpoint(); |
12007 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
12008 | } |
12009 | |
12010 | bool forallUnderlyingObjects( |
12011 | function_ref<bool(Value &)> Pred, |
12012 | AA::ValueScope Scope = AA::Interprocedural) const override { |
12013 | if (!isValidState()) |
12014 | return Pred(getAssociatedValue()); |
12015 | |
12016 | auto &AssumedUnderlyingObjects = Scope == AA::Intraprocedural |
12017 | ? IntraAssumedUnderlyingObjects |
12018 | : InterAssumedUnderlyingObjects; |
12019 | for (Value *Obj : AssumedUnderlyingObjects) |
12020 | if (!Pred(*Obj)) |
12021 | return false; |
12022 | |
12023 | return true; |
12024 | } |
12025 | |
12026 | private: |
12027 | /// Handle the case where the value is not the actual underlying value, such |
12028 | /// as a phi node or a select instruction. |
12029 | bool handleIndirect(Attributor &A, Value &V, |
12030 | SmallSetVector<Value *, 8> &UnderlyingObjects, |
12031 | AA::ValueScope Scope, bool &UsedAssumedInformation) { |
12032 | bool Changed = false; |
    const auto *AA = A.getAAFor<AAUnderlyingObjects>(
        *this, IRPosition::value(V), DepClassTy::OPTIONAL);
    auto Pred = [&](Value &V) {
      Changed |= UnderlyingObjects.insert(&V);
12037 | return true; |
12038 | }; |
12039 | if (!AA || !AA->forallUnderlyingObjects(Pred, Scope)) |
12040 | llvm_unreachable( |
12041 | "The forall call should not return false at this position" ); |
12042 | UsedAssumedInformation |= !AA->getState().isAtFixpoint(); |
12043 | return Changed; |
12044 | } |
12045 | |
  /// All the underlying objects collected so far via intraprocedural scope.
  SmallSetVector<Value *, 8> IntraAssumedUnderlyingObjects;
  /// All the underlying objects collected so far via interprocedural scope.
  SmallSetVector<Value *, 8> InterAssumedUnderlyingObjects;
12050 | }; |
12051 | |
12052 | struct AAUnderlyingObjectsFloating final : AAUnderlyingObjectsImpl { |
12053 | AAUnderlyingObjectsFloating(const IRPosition &IRP, Attributor &A) |
12054 | : AAUnderlyingObjectsImpl(IRP, A) {} |
12055 | }; |
12056 | |
12057 | struct AAUnderlyingObjectsArgument final : AAUnderlyingObjectsImpl { |
12058 | AAUnderlyingObjectsArgument(const IRPosition &IRP, Attributor &A) |
12059 | : AAUnderlyingObjectsImpl(IRP, A) {} |
12060 | }; |
12061 | |
12062 | struct AAUnderlyingObjectsCallSite final : AAUnderlyingObjectsImpl { |
12063 | AAUnderlyingObjectsCallSite(const IRPosition &IRP, Attributor &A) |
12064 | : AAUnderlyingObjectsImpl(IRP, A) {} |
12065 | }; |
12066 | |
12067 | struct AAUnderlyingObjectsCallSiteArgument final : AAUnderlyingObjectsImpl { |
12068 | AAUnderlyingObjectsCallSiteArgument(const IRPosition &IRP, Attributor &A) |
12069 | : AAUnderlyingObjectsImpl(IRP, A) {} |
12070 | }; |
12071 | |
12072 | struct AAUnderlyingObjectsReturned final : AAUnderlyingObjectsImpl { |
12073 | AAUnderlyingObjectsReturned(const IRPosition &IRP, Attributor &A) |
12074 | : AAUnderlyingObjectsImpl(IRP, A) {} |
12075 | }; |
12076 | |
12077 | struct AAUnderlyingObjectsCallSiteReturned final : AAUnderlyingObjectsImpl { |
12078 | AAUnderlyingObjectsCallSiteReturned(const IRPosition &IRP, Attributor &A) |
12079 | : AAUnderlyingObjectsImpl(IRP, A) {} |
12080 | }; |
12081 | |
12082 | struct AAUnderlyingObjectsFunction final : AAUnderlyingObjectsImpl { |
12083 | AAUnderlyingObjectsFunction(const IRPosition &IRP, Attributor &A) |
12084 | : AAUnderlyingObjectsImpl(IRP, A) {} |
12085 | }; |
12086 | } // namespace |
12087 | |
12088 | /// ------------------------ Global Value Info ------------------------------- |
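//
// For illustration (hypothetical IR; @g and @f are invented names): given
//
// ```
//   call void @f(ptr @g)
//   define internal void @f(ptr %arg) { ... }
// ```
//
// the use of @g in the call is followed into the matching argument %arg of
// @f, so the collected use set is transitive across IPO-amendable calls.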
12089 | namespace { |
12090 | struct AAGlobalValueInfoFloating : public AAGlobalValueInfo { |
12091 | AAGlobalValueInfoFloating(const IRPosition &IRP, Attributor &A) |
12092 | : AAGlobalValueInfo(IRP, A) {} |
12093 | |
12094 | /// See AbstractAttribute::initialize(...). |
12095 | void initialize(Attributor &A) override {} |
12096 | |
12097 | bool checkUse(Attributor &A, const Use &U, bool &Follow, |
12098 | SmallVectorImpl<const Value *> &Worklist) { |
    Instruction *UInst = dyn_cast<Instruction>(U.getUser());
12100 | if (!UInst) { |
12101 | Follow = true; |
12102 | return true; |
12103 | } |
12104 | |
12105 | LLVM_DEBUG(dbgs() << "[AAGlobalValueInfo] Check use: " << *U.get() << " in " |
12106 | << *UInst << "\n" ); |
12107 | |
    if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
      int Idx = &Cmp->getOperandUse(0) == &U;
      if (isa<Constant>(Cmp->getOperand(Idx)))
12111 | return true; |
12112 | return U == &getAnchorValue(); |
12113 | } |
12114 | |
12115 | // Explicitly catch return instructions. |
    if (isa<ReturnInst>(UInst)) {
      auto CallSitePred = [&](AbstractCallSite ACS) {
        Worklist.push_back(ACS.getInstruction());
12119 | return true; |
12120 | }; |
12121 | bool UsedAssumedInformation = false; |
12122 | // TODO: We should traverse the uses or add a "non-call-site" CB. |
      if (!A.checkForAllCallSites(CallSitePred, *UInst->getFunction(),
                                  /*RequireAllCallSites=*/true, this,
12125 | UsedAssumedInformation)) |
12126 | return false; |
12127 | return true; |
12128 | } |
12129 | |
12130 | // For now we only use special logic for call sites. However, the tracker |
12131 | // itself knows about a lot of other non-capturing cases already. |
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB)
      return false;
    // Direct calls are OK uses.
    if (CB->isCallee(&U))
      return true;
    // Non-argument uses are scary.
    if (!CB->isArgOperand(&U))
      return false;
    // TODO: Iterate callees.
    auto *Fn = dyn_cast<Function>(CB->getCalledOperand());
    if (!Fn || !A.isFunctionIPOAmendable(*Fn))
      return false;

    unsigned ArgNo = CB->getArgOperandNo(&U);
    Worklist.push_back(Fn->getArg(ArgNo));
12148 | return true; |
12149 | } |
12150 | |
12151 | ChangeStatus updateImpl(Attributor &A) override { |
12152 | unsigned NumUsesBefore = Uses.size(); |
12153 | |
12154 | SmallPtrSet<const Value *, 8> Visited; |
12155 | SmallVector<const Value *> Worklist; |
    Worklist.push_back(&getAnchorValue());
12157 | |
12158 | auto UsePred = [&](const Use &U, bool &Follow) -> bool { |
      Uses.insert(&U);
12160 | // TODO(captures): Make this more precise. |
12161 | UseCaptureInfo CI = DetermineUseCaptureKind(U, /*Base=*/nullptr); |
12162 | if (CI.isPassthrough()) { |
12163 | Follow = true; |
12164 | return true; |
12165 | } |
12166 | return checkUse(A, U, Follow, Worklist); |
12167 | }; |
12168 | auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) { |
      Uses.insert(&OldU);
12170 | return true; |
12171 | }; |
12172 | |
12173 | while (!Worklist.empty()) { |
12174 | const Value *V = Worklist.pop_back_val(); |
      if (!Visited.insert(V).second)
        continue;
      if (!A.checkForAllUses(UsePred, *this, *V,
                             /* CheckBBLivenessOnly */ true,
                             DepClassTy::OPTIONAL,
                             /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
12181 | return indicatePessimisticFixpoint(); |
12182 | } |
12183 | } |
12184 | |
12185 | return Uses.size() == NumUsesBefore ? ChangeStatus::UNCHANGED |
12186 | : ChangeStatus::CHANGED; |
12187 | } |
12188 | |
12189 | bool isPotentialUse(const Use &U) const override { |
    return !isValidState() || Uses.contains(&U);
12191 | } |
12192 | |
12193 | /// See AbstractAttribute::manifest(...). |
12194 | ChangeStatus manifest(Attributor &A) override { |
12195 | return ChangeStatus::UNCHANGED; |
12196 | } |
12197 | |
12198 | /// See AbstractAttribute::getAsStr(). |
12199 | const std::string getAsStr(Attributor *A) const override { |
12200 | return "[" + std::to_string(val: Uses.size()) + " uses]" ; |
12201 | } |
12202 | |
12203 | void trackStatistics() const override { |
12204 | STATS_DECLTRACK_FLOATING_ATTR(GlobalValuesTracked); |
12205 | } |
12206 | |
12207 | private: |
12208 | /// Set of (transitive) uses of this GlobalValue. |
12209 | SmallPtrSet<const Use *, 8> Uses; |
12210 | }; |
12211 | } // namespace |
12212 | |
12213 | /// ------------------------ Indirect Call Info ------------------------------- |
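//
// For illustration, the !callees metadata consumed below looks roughly like
// this (hypothetical IR; %fp, @a, and @b are invented names):
//
// ```
//   call void %fp(i32 %x), !callees !0
//   ...
//   !0 = !{ptr @a, ptr @b}
// ```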
12214 | namespace { |
12215 | struct AAIndirectCallInfoCallSite : public AAIndirectCallInfo { |
12216 | AAIndirectCallInfoCallSite(const IRPosition &IRP, Attributor &A) |
12217 | : AAIndirectCallInfo(IRP, A) {} |
12218 | |
12219 | /// See AbstractAttribute::initialize(...). |
12220 | void initialize(Attributor &A) override { |
    auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees);
12222 | if (!MD && !A.isClosedWorldModule()) |
12223 | return; |
12224 | |
12225 | if (MD) { |
12226 | for (const auto &Op : MD->operands()) |
        if (Function *Callee = mdconst::dyn_extract_or_null<Function>(Op))
          PotentialCallees.insert(Callee);
12229 | } else if (A.isClosedWorldModule()) { |
12230 | ArrayRef<Function *> IndirectlyCallableFunctions = |
12231 | A.getInfoCache().getIndirectlyCallableFunctions(A); |
      PotentialCallees.insert_range(IndirectlyCallableFunctions);
12233 | } |
12234 | |
12235 | if (PotentialCallees.empty()) |
12236 | indicateOptimisticFixpoint(); |
12237 | } |
12238 | |
12239 | ChangeStatus updateImpl(Attributor &A) override { |
    CallBase *CB = cast<CallBase>(getCtxI());
12241 | const Use &CalleeUse = CB->getCalledOperandUse(); |
12242 | Value *FP = CB->getCalledOperand(); |
12243 | |
12244 | SmallSetVector<Function *, 4> AssumedCalleesNow; |
12245 | bool AllCalleesKnownNow = AllCalleesKnown; |
12246 | |
12247 | auto CheckPotentialCalleeUse = [&](Function &PotentialCallee, |
12248 | bool &UsedAssumedInformation) { |
      const auto *GIAA = A.getAAFor<AAGlobalValueInfo>(
          *this, IRPosition::value(PotentialCallee), DepClassTy::OPTIONAL);
      if (!GIAA || GIAA->isPotentialUse(CalleeUse))
12252 | return true; |
12253 | UsedAssumedInformation = !GIAA->isAtFixpoint(); |
12254 | return false; |
12255 | }; |
12256 | |
12257 | auto AddPotentialCallees = [&]() { |
12258 | for (auto *PotentialCallee : PotentialCallees) { |
12259 | bool UsedAssumedInformation = false; |
12260 | if (CheckPotentialCalleeUse(*PotentialCallee, UsedAssumedInformation)) |
          AssumedCalleesNow.insert(PotentialCallee);
12262 | } |
12263 | }; |
12264 | |
    // Use simplification to find potential callees; if !callees was present,
    // fall back to that set if necessary.
12267 | bool UsedAssumedInformation = false; |
12268 | SmallVector<AA::ValueAndContext> Values; |
    if (!A.getAssumedSimplifiedValues(IRPosition::value(*FP), this, Values,
                                      AA::ValueScope::AnyScope,
12271 | UsedAssumedInformation)) { |
12272 | if (PotentialCallees.empty()) |
12273 | return indicatePessimisticFixpoint(); |
12274 | AddPotentialCallees(); |
12275 | } |
12276 | |
12277 | // Try to find a reason for \p Fn not to be a potential callee. If none was |
12278 | // found, add it to the assumed callees set. |
12279 | auto CheckPotentialCallee = [&](Function &Fn) { |
      if (!PotentialCallees.empty() && !PotentialCallees.count(&Fn))
12281 | return false; |
12282 | |
12283 | auto &CachedResult = FilterResults[&Fn]; |
12284 | if (CachedResult.has_value()) |
12285 | return CachedResult.value(); |
12286 | |
12287 | bool UsedAssumedInformation = false; |
12288 | if (!CheckPotentialCalleeUse(Fn, UsedAssumedInformation)) { |
12289 | if (!UsedAssumedInformation) |
12290 | CachedResult = false; |
12291 | return false; |
12292 | } |
12293 | |
12294 | int NumFnArgs = Fn.arg_size(); |
12295 | int NumCBArgs = CB->arg_size(); |
12296 | |
12297 | // Check if any excess argument (which we fill up with poison) is known to |
12298 | // be UB on undef. |
12299 | for (int I = NumCBArgs; I < NumFnArgs; ++I) { |
12300 | bool IsKnown = false; |
12301 | if (AA::hasAssumedIRAttr<Attribute::NoUndef>( |
              A, this, IRPosition::argument(*Fn.getArg(I)),
              DepClassTy::OPTIONAL, IsKnown)) {
12304 | if (IsKnown) |
12305 | CachedResult = false; |
12306 | return false; |
12307 | } |
12308 | } |
12309 | |
12310 | CachedResult = true; |
12311 | return true; |
12312 | }; |
12313 | |
    // Check the simplification result, prune known-UB callees, and restrict
    // the result to the !callees set, if present.
12316 | for (auto &VAC : Values) { |
      if (isa<UndefValue>(VAC.getValue()))
        continue;
      if (isa<ConstantPointerNull>(VAC.getValue()) &&
          VAC.getValue()->getType()->getPointerAddressSpace() == 0)
        continue;
      // TODO: Check for known UB, e.g., poison + noundef.
      if (auto *VACFn = dyn_cast<Function>(VAC.getValue())) {
        if (CheckPotentialCallee(*VACFn))
          AssumedCalleesNow.insert(VACFn);
12326 | continue; |
12327 | } |
12328 | if (!PotentialCallees.empty()) { |
12329 | AddPotentialCallees(); |
12330 | break; |
12331 | } |
12332 | AllCalleesKnownNow = false; |
12333 | } |
12334 | |
12335 | if (AssumedCalleesNow == AssumedCallees && |
12336 | AllCalleesKnown == AllCalleesKnownNow) |
12337 | return ChangeStatus::UNCHANGED; |
12338 | |
    std::swap(AssumedCallees, AssumedCalleesNow);
12340 | AllCalleesKnown = AllCalleesKnownNow; |
12341 | return ChangeStatus::CHANGED; |
12342 | } |
12343 | |
12344 | /// See AbstractAttribute::manifest(...). |
12345 | ChangeStatus manifest(Attributor &A) override { |
12346 | // If we can't specialize at all, give up now. |
12347 | if (!AllCalleesKnown && AssumedCallees.empty()) |
12348 | return ChangeStatus::UNCHANGED; |
12349 | |
    CallBase *CB = cast<CallBase>(getCtxI());
    bool UsedAssumedInformation = false;
    if (A.isAssumedDead(*CB, this, /*LivenessAA=*/nullptr,
12353 | UsedAssumedInformation)) |
12354 | return ChangeStatus::UNCHANGED; |
12355 | |
12356 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
12357 | Value *FP = CB->getCalledOperand(); |
12358 | if (FP->getType()->getPointerAddressSpace()) |
      FP = new AddrSpaceCastInst(FP, PointerType::get(FP->getContext(), 0),
                                 FP->getName() + ".as0", CB->getIterator());
12361 | |
12362 | bool CBIsVoid = CB->getType()->isVoidTy(); |
12363 | BasicBlock::iterator IP = CB->getIterator(); |
12364 | FunctionType *CSFT = CB->getFunctionType(); |
12365 | SmallVector<Value *> CSArgs(CB->args()); |
12366 | |
12367 | // If we know all callees and there are none, the call site is (effectively) |
12368 | // dead (or UB). |
12369 | if (AssumedCallees.empty()) { |
      assert(AllCalleesKnown &&
             "Expected all callees to be known if there are none.");
      A.changeToUnreachableAfterManifest(CB);
12373 | return ChangeStatus::CHANGED; |
12374 | } |
12375 | |
12376 | // Special handling for the single callee case. |
12377 | if (AllCalleesKnown && AssumedCallees.size() == 1) { |
12378 | auto *NewCallee = AssumedCallees.front(); |
      if (isLegalToPromote(*CB, NewCallee)) {
        promoteCall(*CB, NewCallee, nullptr);
12381 | NumIndirectCallsPromoted++; |
12382 | return ChangeStatus::CHANGED; |
12383 | } |
12384 | Instruction *NewCall = |
          CallInst::Create(FunctionCallee(CSFT, NewCallee), CSArgs,
                           CB->getName(), CB->getIterator());
      if (!CBIsVoid)
        A.changeAfterManifest(IRPosition::callsite_returned(*CB), *NewCall);
      A.deleteAfterManifest(*CB);
12390 | return ChangeStatus::CHANGED; |
12391 | } |
12392 | |
12393 | // For each potential value we create a conditional |
12394 | // |
12395 | // ``` |
12396 | // if (ptr == value) value(args); |
12397 | // else ... |
12398 | // ``` |
12399 | // |
12400 | bool SpecializedForAnyCallees = false; |
12401 | bool SpecializedForAllCallees = AllCalleesKnown; |
12402 | ICmpInst *LastCmp = nullptr; |
12403 | SmallVector<Function *, 8> SkippedAssumedCallees; |
12404 | SmallVector<std::pair<CallInst *, Instruction *>> NewCalls; |
12405 | for (Function *NewCallee : AssumedCallees) { |
      if (!A.shouldSpecializeCallSiteForCallee(*this, *CB, *NewCallee,
                                               AssumedCallees.size())) {
        SkippedAssumedCallees.push_back(NewCallee);
12409 | SpecializedForAllCallees = false; |
12410 | continue; |
12411 | } |
12412 | SpecializedForAnyCallees = true; |
12413 | |
12414 | LastCmp = new ICmpInst(IP, llvm::CmpInst::ICMP_EQ, FP, NewCallee); |
12415 | Instruction *ThenTI = |
          SplitBlockAndInsertIfThen(LastCmp, IP, /* Unreachable */ false);
      BasicBlock *CBBB = CB->getParent();
      A.registerManifestAddedBasicBlock(*ThenTI->getParent());
      A.registerManifestAddedBasicBlock(*IP->getParent());
      auto *SplitTI = cast<BranchInst>(LastCmp->getNextNode());
      BasicBlock *ElseBB;
      if (&*IP == CB) {
        ElseBB = BasicBlock::Create(ThenTI->getContext(), "",
                                    ThenTI->getFunction(), CBBB);
        A.registerManifestAddedBasicBlock(*ElseBB);
        IP = BranchInst::Create(CBBB, ElseBB)->getIterator();
        SplitTI->replaceUsesOfWith(CBBB, ElseBB);
      } else {
        ElseBB = IP->getParent();
        ThenTI->replaceUsesOfWith(ElseBB, CBBB);
12431 | } |
12432 | CastInst *RetBC = nullptr; |
12433 | CallInst *NewCall = nullptr; |
      if (isLegalToPromote(*CB, NewCallee)) {
        auto *CBClone = cast<CallBase>(CB->clone());
        CBClone->insertBefore(ThenTI->getIterator());
        NewCall = &cast<CallInst>(promoteCall(*CBClone, NewCallee, &RetBC));
        NumIndirectCallsPromoted++;
      } else {
        NewCall = CallInst::Create(FunctionCallee(CSFT, NewCallee), CSArgs,
                                   CB->getName(), ThenTI->getIterator());
      }
      NewCalls.push_back({NewCall, RetBC});
12444 | } |
12445 | |
12446 | auto AttachCalleeMetadata = [&](CallBase &IndirectCB) { |
12447 | if (!AllCalleesKnown) |
12448 | return ChangeStatus::UNCHANGED; |
12449 | MDBuilder MDB(IndirectCB.getContext()); |
      MDNode *Callees = MDB.createCallees(SkippedAssumedCallees);
      IndirectCB.setMetadata(LLVMContext::MD_callees, Callees);
12452 | return ChangeStatus::CHANGED; |
12453 | }; |
12454 | |
12455 | if (!SpecializedForAnyCallees) |
12456 | return AttachCalleeMetadata(*CB); |
12457 | |
    // Check if we still need the fallback indirect call.
12459 | if (SpecializedForAllCallees) { |
      LastCmp->replaceAllUsesWith(ConstantInt::getTrue(LastCmp->getContext()));
12461 | LastCmp->eraseFromParent(); |
12462 | new UnreachableInst(IP->getContext(), IP); |
12463 | IP->eraseFromParent(); |
12464 | } else { |
      auto *CBClone = cast<CallInst>(CB->clone());
      CBClone->setName(CB->getName());
      CBClone->insertBefore(*IP->getParent(), IP);
      NewCalls.push_back({CBClone, nullptr});
12469 | AttachCalleeMetadata(*CBClone); |
12470 | } |
12471 | |
12472 | // Check if we need a PHI to merge the results. |
12473 | if (!CBIsVoid) { |
      auto *PHI = PHINode::Create(CB->getType(), NewCalls.size(),
                                  CB->getName() + ".phi",
                                  CB->getParent()->getFirstInsertionPt());
12477 | for (auto &It : NewCalls) { |
12478 | CallBase *NewCall = It.first; |
12479 | Instruction *CallRet = It.second ? It.second : It.first; |
12480 | if (CallRet->getType() == CB->getType()) |
          PHI->addIncoming(CallRet, CallRet->getParent());
        else if (NewCall->getType()->isVoidTy())
          PHI->addIncoming(PoisonValue::get(CB->getType()),
                           NewCall->getParent());
        else
          llvm_unreachable("Call return should match or be void!");
      }
      A.changeAfterManifest(IRPosition::callsite_returned(*CB), *PHI);
12489 | } |
12490 | |
    A.deleteAfterManifest(*CB);
12492 | Changed = ChangeStatus::CHANGED; |
12493 | |
12494 | return Changed; |
12495 | } |
12496 | |
12497 | /// See AbstractAttribute::getAsStr(). |
12498 | const std::string getAsStr(Attributor *A) const override { |
12499 | return std::string(AllCalleesKnown ? "eliminate" : "specialize" ) + |
12500 | " indirect call site with " + std::to_string(val: AssumedCallees.size()) + |
12501 | " functions" ; |
12502 | } |
12503 | |
12504 | void trackStatistics() const override { |
12505 | if (AllCalleesKnown) { |
12506 | STATS_DECLTRACK( |
12507 | Eliminated, CallSites, |
12508 | "Number of indirect call sites eliminated via specialization" ) |
12509 | } else { |
12510 | STATS_DECLTRACK(Specialized, CallSites, |
12511 | "Number of indirect call sites specialized" ) |
12512 | } |
12513 | } |
12514 | |
12515 | bool foreachCallee(function_ref<bool(Function *)> CB) const override { |
    return isValidState() && AllCalleesKnown && all_of(AssumedCallees, CB);
12517 | } |
12518 | |
12519 | private: |
12520 | /// Map to remember filter results. |
12521 | DenseMap<Function *, std::optional<bool>> FilterResults; |
12522 | |
  /// If the !callees metadata was present, this set will contain all potential
  /// callees (superset).
12525 | SmallSetVector<Function *, 4> PotentialCallees; |
12526 | |
  /// This set contains all currently assumed callees, which might grow over
12528 | /// time. |
12529 | SmallSetVector<Function *, 4> AssumedCallees; |
12530 | |
12531 | /// Flag to indicate if all possible callees are in the AssumedCallees set or |
12532 | /// if there could be others. |
12533 | bool AllCalleesKnown = true; |
12534 | }; |
12535 | } // namespace |
12536 | |
12537 | /// --------------------- Invariant Load Pointer ------------------------------- |
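//
// For illustration (hypothetical IR; %p and %v are invented names), a pointer
// proven invariant lets manifest(...) tag each load of it:
//
// ```
//   %v = load i32, ptr %p, align 4, !invariant.load !0
// ```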
12538 | namespace { |
12539 | |
12540 | struct AAInvariantLoadPointerImpl |
12541 | : public StateWrapper<BitIntegerState<uint8_t, 15>, |
12542 | AAInvariantLoadPointer> { |
12543 | |
12544 | enum { |
12545 | // pointer does not alias within the bounds of the function |
12546 | IS_NOALIAS = 1 << 0, |
12547 | // pointer is not involved in any effectful instructions within the bounds |
12548 | // of the function |
12549 | IS_NOEFFECT = 1 << 1, |
12550 | // loads are invariant within the bounds of the function |
12551 | IS_LOCALLY_INVARIANT = 1 << 2, |
12552 | // memory lifetime is constrained within the bounds of the function |
12553 | IS_LOCALLY_CONSTRAINED = 1 << 3, |
12554 | |
12555 | IS_BEST_STATE = IS_NOALIAS | IS_NOEFFECT | IS_LOCALLY_INVARIANT | |
12556 | IS_LOCALLY_CONSTRAINED, |
12557 | }; |
12558 | static_assert(getBestState() == IS_BEST_STATE, "Unexpected best state" ); |
12559 | |
12560 | using Base = |
12561 | StateWrapper<BitIntegerState<uint8_t, 15>, AAInvariantLoadPointer>; |
12562 | |
  // The BitIntegerState is optimistic about IS_NOALIAS and IS_NOEFFECT, but
  // pessimistic about the locally-invariant and locally-constrained bits.
12565 | AAInvariantLoadPointerImpl(const IRPosition &IRP, Attributor &A) |
12566 | : Base(IRP) {} |
12567 | |
12568 | bool isKnownInvariant() const final { |
    return isKnownLocallyInvariant() && isKnown(IS_LOCALLY_CONSTRAINED);
12570 | } |
12571 | |
12572 | bool isKnownLocallyInvariant() const final { |
    if (isKnown(IS_LOCALLY_INVARIANT))
      return true;
    return isKnown(IS_NOALIAS | IS_NOEFFECT);
12576 | } |
12577 | |
12578 | bool isAssumedInvariant() const final { |
    return isAssumedLocallyInvariant() && isAssumed(IS_LOCALLY_CONSTRAINED);
12580 | } |
12581 | |
12582 | bool isAssumedLocallyInvariant() const final { |
    if (isAssumed(IS_LOCALLY_INVARIANT))
      return true;
    return isAssumed(IS_NOALIAS | IS_NOEFFECT);
12586 | } |
12587 | |
12588 | ChangeStatus updateImpl(Attributor &A) override { |
12589 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
12590 | |
12591 | Changed |= updateNoAlias(A); |
    if (requiresNoAlias() && !isAssumed(IS_NOALIAS))
12593 | return indicatePessimisticFixpoint(); |
12594 | |
12595 | Changed |= updateNoEffect(A); |
12596 | |
12597 | Changed |= updateLocalInvariance(A); |
12598 | |
12599 | return Changed; |
12600 | } |
12601 | |
12602 | ChangeStatus manifest(Attributor &A) override { |
12603 | if (!isKnownInvariant()) |
12604 | return ChangeStatus::UNCHANGED; |
12605 | |
12606 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
12607 | const Value *Ptr = &getAssociatedValue(); |
12608 | const auto TagInvariantLoads = [&](const Use &U, bool &) { |
12609 | if (U.get() != Ptr) |
12610 | return true; |
      auto *I = dyn_cast<Instruction>(U.getUser());
12612 | if (!I) |
12613 | return true; |
12614 | |
      // Ensure that we only change uses from the corresponding callgraph SCC
      // in the case that the AA isn't run on the entire module.
      if (!A.isRunOn(I->getFunction()))
12618 | return true; |
12619 | |
      if (I->hasMetadata(LLVMContext::MD_invariant_load))
12621 | return true; |
12622 | |
      if (auto *LI = dyn_cast<LoadInst>(I)) {
        LI->setMetadata(LLVMContext::MD_invariant_load,
                        MDNode::get(LI->getContext(), {}));
12626 | Changed = ChangeStatus::CHANGED; |
12627 | } |
12628 | return true; |
12629 | }; |
12630 | |
    (void)A.checkForAllUses(TagInvariantLoads, *this, *Ptr);
12632 | return Changed; |
12633 | } |
12634 | |
12635 | /// See AbstractAttribute::getAsStr(). |
12636 | const std::string getAsStr(Attributor *) const override { |
12637 | if (isKnownInvariant()) |
12638 | return "load-invariant pointer" ; |
12639 | return "non-invariant pointer" ; |
12640 | } |
12641 | |
12642 | /// See AbstractAttribute::trackStatistics(). |
12643 | void trackStatistics() const override {} |
12644 | |
12645 | private: |
  /// Return true if noalias is required for the pointer to be invariant.
12647 | bool requiresNoAlias() const { |
12648 | switch (getPositionKind()) { |
12649 | default: |
12650 | // Conservatively default to require noalias. |
12651 | return true; |
12652 | case IRP_FLOAT: |
12653 | case IRP_RETURNED: |
12654 | case IRP_CALL_SITE: |
12655 | return false; |
12656 | case IRP_CALL_SITE_RETURNED: { |
      const auto &CB = cast<CallBase>(getAnchorValue());
      return !isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
          &CB, /*MustPreserveNullness=*/false);
12660 | } |
12661 | case IRP_ARGUMENT: { |
12662 | const Function *F = getAssociatedFunction(); |
12663 | assert(F && "no associated function for argument" ); |
12664 | return !isCallableCC(CC: F->getCallingConv()); |
12665 | } |
12666 | } |
12667 | } |
12668 | |
12669 | bool isExternal() const { |
12670 | const Function *F = getAssociatedFunction(); |
12671 | if (!F) |
12672 | return true; |
    return isCallableCC(F->getCallingConv()) &&
12674 | getPositionKind() != IRP_CALL_SITE_RETURNED; |
12675 | } |
12676 | |
12677 | ChangeStatus updateNoAlias(Attributor &A) { |
    if (isKnown(IS_NOALIAS) || !isAssumed(IS_NOALIAS))
12679 | return ChangeStatus::UNCHANGED; |
12680 | |
12681 | // Try to use AANoAlias. |
    if (const auto *ANoAlias = A.getOrCreateAAFor<AANoAlias>(
            getIRPosition(), this, DepClassTy::REQUIRED)) {
      if (ANoAlias->isKnownNoAlias()) {
        addKnownBits(IS_NOALIAS);
        return ChangeStatus::CHANGED;
      }

      if (!ANoAlias->isAssumedNoAlias()) {
        removeAssumedBits(IS_NOALIAS);
12691 | return ChangeStatus::CHANGED; |
12692 | } |
12693 | |
12694 | return ChangeStatus::UNCHANGED; |
12695 | } |
12696 | |
12697 | // Try to infer noalias from argument attribute, since it is applicable for |
12698 | // the duration of the function. |
12699 | if (const Argument *Arg = getAssociatedArgument()) { |
12700 | if (Arg->hasNoAliasAttr()) { |
        addKnownBits(IS_NOALIAS);
12702 | return ChangeStatus::UNCHANGED; |
12703 | } |
12704 | |
12705 | // Noalias information is not provided, and cannot be inferred, |
12706 | // so we conservatively assume the pointer aliases. |
      removeAssumedBits(IS_NOALIAS);
12708 | return ChangeStatus::CHANGED; |
12709 | } |
12710 | |
12711 | return ChangeStatus::UNCHANGED; |
12712 | } |
12713 | |
12714 | ChangeStatus updateNoEffect(Attributor &A) { |
    if (isKnown(IS_NOEFFECT) || !isAssumed(IS_NOEFFECT))
12716 | return ChangeStatus::UNCHANGED; |
12717 | |
12718 | if (!getAssociatedFunction()) |
12719 | return indicatePessimisticFixpoint(); |
12720 | |
    if (isa<AllocaInst>(&getAssociatedValue()))
12722 | return indicatePessimisticFixpoint(); |
12723 | |
12724 | const auto HasNoEffectLoads = [&](const Use &U, bool &) { |
      const auto *LI = dyn_cast<LoadInst>(U.getUser());
12726 | return !LI || !LI->mayHaveSideEffects(); |
12727 | }; |
    if (!A.checkForAllUses(HasNoEffectLoads, *this, getAssociatedValue()))
12729 | return indicatePessimisticFixpoint(); |
12730 | |
    if (const auto *AMemoryBehavior = A.getOrCreateAAFor<AAMemoryBehavior>(
            getIRPosition(), this, DepClassTy::REQUIRED)) {
      // For non-instructions, try to use AAMemoryBehavior to infer the
      // readonly attribute.
      if (!AMemoryBehavior->isAssumedReadOnly())
12736 | return indicatePessimisticFixpoint(); |
12737 | |
12738 | if (AMemoryBehavior->isKnownReadOnly()) { |
        addKnownBits(IS_NOEFFECT);
12740 | return ChangeStatus::UNCHANGED; |
12741 | } |
12742 | |
12743 | return ChangeStatus::UNCHANGED; |
12744 | } |
12745 | |
12746 | if (const Argument *Arg = getAssociatedArgument()) { |
12747 | if (Arg->onlyReadsMemory()) { |
        addKnownBits(IS_NOEFFECT);
12749 | return ChangeStatus::UNCHANGED; |
12750 | } |
12751 | |
12752 | // Readonly information is not provided, and cannot be inferred from |
12753 | // AAMemoryBehavior. |
12754 | return indicatePessimisticFixpoint(); |
12755 | } |
12756 | |
12757 | return ChangeStatus::UNCHANGED; |
12758 | } |
12759 | |
12760 | ChangeStatus updateLocalInvariance(Attributor &A) { |
    if (isKnown(IS_LOCALLY_INVARIANT) || !isAssumed(IS_LOCALLY_INVARIANT))
12762 | return ChangeStatus::UNCHANGED; |
12763 | |
    // Try to infer invariance from the underlying objects.
    const auto *AUO = A.getOrCreateAAFor<AAUnderlyingObjects>(
        getIRPosition(), this, DepClassTy::REQUIRED);
12767 | if (!AUO) |
12768 | return ChangeStatus::UNCHANGED; |
12769 | |
12770 | bool UsedAssumedInformation = false; |
12771 | const auto IsLocallyInvariantLoadIfPointer = [&](const Value &V) { |
12772 | if (!V.getType()->isPointerTy()) |
12773 | return true; |
12774 | const auto *IsInvariantLoadPointer = |
          A.getOrCreateAAFor<AAInvariantLoadPointer>(IRPosition::value(V), this,
                                                     DepClassTy::REQUIRED);
12777 | // Conservatively fail if invariance cannot be inferred. |
12778 | if (!IsInvariantLoadPointer) |
12779 | return false; |
12780 | |
12781 | if (IsInvariantLoadPointer->isKnownLocallyInvariant()) |
12782 | return true; |
12783 | if (!IsInvariantLoadPointer->isAssumedLocallyInvariant()) |
12784 | return false; |
12785 | |
12786 | UsedAssumedInformation = true; |
12787 | return true; |
12788 | }; |
    if (!AUO->forallUnderlyingObjects(IsLocallyInvariantLoadIfPointer))
12790 | return indicatePessimisticFixpoint(); |
12791 | |
    if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue())) {
      if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
              CB, /*MustPreserveNullness=*/false)) {
12795 | for (const Value *Arg : CB->args()) { |
12796 | if (!IsLocallyInvariantLoadIfPointer(*Arg)) |
12797 | return indicatePessimisticFixpoint(); |
12798 | } |
12799 | } |
12800 | } |
12801 | |
12802 | if (!UsedAssumedInformation) { |
12803 | // Pointer is known and not just assumed to be locally invariant. |
      addKnownBits(IS_LOCALLY_INVARIANT);
12805 | return ChangeStatus::CHANGED; |
12806 | } |
12807 | |
12808 | return ChangeStatus::UNCHANGED; |
12809 | } |
12810 | }; |
12811 | |
12812 | struct AAInvariantLoadPointerFloating final : AAInvariantLoadPointerImpl { |
12813 | AAInvariantLoadPointerFloating(const IRPosition &IRP, Attributor &A) |
12814 | : AAInvariantLoadPointerImpl(IRP, A) {} |
12815 | }; |
12816 | |
12817 | struct AAInvariantLoadPointerReturned final : AAInvariantLoadPointerImpl { |
12818 | AAInvariantLoadPointerReturned(const IRPosition &IRP, Attributor &A) |
12819 | : AAInvariantLoadPointerImpl(IRP, A) {} |
12820 | |
12821 | void initialize(Attributor &) override { |
    removeAssumedBits(IS_LOCALLY_CONSTRAINED);
12823 | } |
12824 | }; |
12825 | |
12826 | struct AAInvariantLoadPointerCallSiteReturned final |
12827 | : AAInvariantLoadPointerImpl { |
12828 | AAInvariantLoadPointerCallSiteReturned(const IRPosition &IRP, Attributor &A) |
12829 | : AAInvariantLoadPointerImpl(IRP, A) {} |
12830 | |
12831 | void initialize(Attributor &A) override { |
12832 | const Function *F = getAssociatedFunction(); |
12833 | assert(F && "no associated function for return from call" ); |
12834 | |
12835 | if (!F->isDeclaration() && !F->isIntrinsic()) |
12836 | return AAInvariantLoadPointerImpl::initialize(A); |
12837 | |
    const auto &CB = cast<CallBase>(getAnchorValue());
    if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
            &CB, /*MustPreserveNullness=*/false))
12841 | return AAInvariantLoadPointerImpl::initialize(A); |
12842 | |
12843 | if (F->onlyReadsMemory() && F->hasNoSync()) |
12844 | return AAInvariantLoadPointerImpl::initialize(A); |
12845 | |
12846 | // At this point, the function is opaque, so we conservatively assume |
12847 | // non-invariance. |
12848 | indicatePessimisticFixpoint(); |
12849 | } |
12850 | }; |
12851 | |
12852 | struct AAInvariantLoadPointerArgument final : AAInvariantLoadPointerImpl { |
12853 | AAInvariantLoadPointerArgument(const IRPosition &IRP, Attributor &A) |
12854 | : AAInvariantLoadPointerImpl(IRP, A) {} |
12855 | |
12856 | void initialize(Attributor &) override { |
12857 | const Function *F = getAssociatedFunction(); |
12858 | assert(F && "no associated function for argument" ); |
12859 | |
    if (!isCallableCC(F->getCallingConv())) {
      addKnownBits(IS_LOCALLY_CONSTRAINED);
12862 | return; |
12863 | } |
12864 | |
12865 | if (!F->hasLocalLinkage()) |
      removeAssumedBits(IS_LOCALLY_CONSTRAINED);
12867 | } |
12868 | }; |
12869 | |
12870 | struct AAInvariantLoadPointerCallSiteArgument final |
12871 | : AAInvariantLoadPointerImpl { |
12872 | AAInvariantLoadPointerCallSiteArgument(const IRPosition &IRP, Attributor &A) |
12873 | : AAInvariantLoadPointerImpl(IRP, A) {} |
12874 | }; |
12875 | } // namespace |
12876 | |
12877 | /// ------------------------ Address Space ------------------------------------ |
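//
// For illustration (hypothetical IR; %p and %q are invented names, flat AS
// assumed to be 0): once the underlying object of a flat pointer is known to
// live in a specific address space, accesses are rewritten to use it, so
//
// ```
//   %p = addrspacecast ptr addrspace(3) %q to ptr
//   store i32 0, ptr %p
// ```
//
// becomes a direct `store i32 0, ptr addrspace(3) %q`.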
12878 | namespace { |
12879 | |
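/// Rewrite the pointer operand of \p MemInst (a load, store, or atomic
/// instruction) at use \p U: either directly to \p OriginalValue when
/// \p UseOriginalValue is set, or through a newly inserted addrspacecast of
/// \p OriginalValue to \p NewPtrTy. Volatile accesses are only rewritten when
/// the target reports a volatile variant in the new address space. Returns
/// true if a change was made.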
12880 | template <typename InstType> |
12881 | static bool makeChange(Attributor &A, InstType *MemInst, const Use &U, |
12882 | Value *OriginalValue, PointerType *NewPtrTy, |
12883 | bool UseOriginalValue) { |
12884 | if (U.getOperandNo() != InstType::getPointerOperandIndex()) |
12885 | return false; |
12886 | |
12887 | if (MemInst->isVolatile()) { |
12888 | auto *TTI = A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>( |
12889 | *MemInst->getFunction()); |
12890 | unsigned NewAS = NewPtrTy->getPointerAddressSpace(); |
12891 | if (!TTI || !TTI->hasVolatileVariant(MemInst, NewAS)) |
12892 | return false; |
12893 | } |
12894 | |
12895 | if (UseOriginalValue) { |
    A.changeUseAfterManifest(const_cast<Use &>(U), *OriginalValue);
12897 | return true; |
12898 | } |
12899 | |
12900 | Instruction *CastInst = new AddrSpaceCastInst(OriginalValue, NewPtrTy); |
12901 | CastInst->insertBefore(MemInst->getIterator()); |
  A.changeUseAfterManifest(const_cast<Use &>(U), *CastInst);
12903 | return true; |
12904 | } |
12905 | |
12906 | struct AAAddressSpaceImpl : public AAAddressSpace { |
12907 | AAAddressSpaceImpl(const IRPosition &IRP, Attributor &A) |
12908 | : AAAddressSpace(IRP, A) {} |
12909 | |
12910 | uint32_t getAddressSpace() const override { |
12911 | assert(isValidState() && "the AA is invalid" ); |
12912 | return AssumedAddressSpace; |
12913 | } |
12914 | |
12915 | /// See AbstractAttribute::initialize(...). |
12916 | void initialize(Attributor &A) override { |
12917 | assert(getAssociatedType()->isPtrOrPtrVectorTy() && |
12918 | "Associated value is not a pointer" ); |
12919 | |
12920 | if (!A.getInfoCache().getFlatAddressSpace().has_value()) { |
12921 | indicatePessimisticFixpoint(); |
12922 | return; |
12923 | } |
12924 | |
12925 | unsigned FlatAS = A.getInfoCache().getFlatAddressSpace().value(); |
12926 | unsigned AS = getAssociatedType()->getPointerAddressSpace(); |
12927 | if (AS != FlatAS) { |
12928 | [[maybe_unused]] bool R = takeAddressSpace(AS); |
12929 | assert(R && "The take should happen" ); |
12930 | indicateOptimisticFixpoint(); |
12931 | } |
12932 | } |
12933 | |
12934 | ChangeStatus updateImpl(Attributor &A) override { |
12935 | uint32_t OldAddressSpace = AssumedAddressSpace; |
12936 | unsigned FlatAS = A.getInfoCache().getFlatAddressSpace().value(); |
12937 | |
12938 | auto CheckAddressSpace = [&](Value &Obj) { |
12939 | // Ignore undef. |
      if (isa<UndefValue>(&Obj))
12941 | return true; |
12942 | |
12943 | // If the object already has a non-flat address space, we simply take it. |
12944 | unsigned ObjAS = Obj.getType()->getPointerAddressSpace(); |
12945 | if (ObjAS != FlatAS) |
        return takeAddressSpace(ObjAS);
12947 | |
12948 | // At this point, we know Obj is in the flat address space. For a final |
12949 | // attempt, we want to use getAssumedAddrSpace, but first we must get the |
12950 | // associated function, if possible. |
12951 | Function *F = nullptr; |
      if (auto *Arg = dyn_cast<Argument>(&Obj))
        F = Arg->getParent();
      else if (auto *I = dyn_cast<Instruction>(&Obj))
12955 | F = I->getFunction(); |
12956 | |
12957 | // Use getAssumedAddrSpace if the associated function exists. |
12958 | if (F) { |
12959 | auto *TTI = |
            A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(*F);
        unsigned AssumedAS = TTI->getAssumedAddrSpace(&Obj);
        if (AssumedAS != ~0U)
          return takeAddressSpace(AssumedAS);
12964 | } |
12965 | |
12966 | // Now we can't do anything else but to take the flat AS. |
      return takeAddressSpace(FlatAS);
12968 | }; |
12969 | |
    auto *AUO = A.getOrCreateAAFor<AAUnderlyingObjects>(getIRPosition(), this,
                                                        DepClassTy::REQUIRED);
    if (!AUO->forallUnderlyingObjects(CheckAddressSpace))
12973 | return indicatePessimisticFixpoint(); |
12974 | |
12975 | return OldAddressSpace == AssumedAddressSpace ? ChangeStatus::UNCHANGED |
12976 | : ChangeStatus::CHANGED; |
12977 | } |
12978 | |
12979 | /// See AbstractAttribute::manifest(...). |
12980 | ChangeStatus manifest(Attributor &A) override { |
12981 | unsigned NewAS = getAddressSpace(); |
12982 | |
12983 | if (NewAS == InvalidAddressSpace || |
12984 | NewAS == getAssociatedType()->getPointerAddressSpace()) |
12985 | return ChangeStatus::UNCHANGED; |
12986 | |
12987 | unsigned FlatAS = A.getInfoCache().getFlatAddressSpace().value(); |
12988 | |
12989 | Value *AssociatedValue = &getAssociatedValue(); |
    Value *OriginalValue = peelAddrspacecast(AssociatedValue, FlatAS);
12991 | |
12992 | PointerType *NewPtrTy = |
        PointerType::get(getAssociatedType()->getContext(), NewAS);
12994 | bool UseOriginalValue = |
12995 | OriginalValue->getType()->getPointerAddressSpace() == NewAS; |
12996 | |
12997 | bool Changed = false; |
12998 | |
12999 | auto Pred = [&](const Use &U, bool &) { |
13000 | if (U.get() != AssociatedValue) |
13001 | return true; |
      auto *Inst = dyn_cast<Instruction>(U.getUser());
13003 | if (!Inst) |
13004 | return true; |
      // This is a workaround to make sure we only change uses from the
      // corresponding CGSCC if the AA is run on CGSCC instead of the entire
      // module.
      if (!A.isRunOn(Inst->getFunction()))
13008 | return true; |
      if (auto *LI = dyn_cast<LoadInst>(Inst)) {
        Changed |=
            makeChange(A, LI, U, OriginalValue, NewPtrTy, UseOriginalValue);
      } else if (auto *SI = dyn_cast<StoreInst>(Inst)) {
        Changed |=
            makeChange(A, SI, U, OriginalValue, NewPtrTy, UseOriginalValue);
      } else if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
        Changed |=
            makeChange(A, RMW, U, OriginalValue, NewPtrTy, UseOriginalValue);
      } else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
        Changed |=
            makeChange(A, CmpX, U, OriginalValue, NewPtrTy, UseOriginalValue);
13021 | } |
13022 | return true; |
13023 | }; |
13024 | |
    // It doesn't matter if we can't check all uses as we can simply
    // conservatively ignore those that cannot be visited.
    (void)A.checkForAllUses(Pred, *this, getAssociatedValue(),
13028 | /* CheckBBLivenessOnly */ true); |
13029 | |
13030 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
13031 | } |
13032 | |
13033 | /// See AbstractAttribute::getAsStr(). |
13034 | const std::string getAsStr(Attributor *A) const override { |
13035 | if (!isValidState()) |
13036 | return "addrspace(<invalid>)" ; |
13037 | return "addrspace(" + |
13038 | (AssumedAddressSpace == InvalidAddressSpace |
13039 | ? "none" |
                : std::to_string(AssumedAddressSpace)) +
           ")";
13042 | } |
13043 | |
13044 | private: |
13045 | uint32_t AssumedAddressSpace = InvalidAddressSpace; |
13046 | |
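  /// Record \p AS as the assumed address space if none was set yet; otherwise
  /// return whether it matches the already assumed one.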
13047 | bool takeAddressSpace(uint32_t AS) { |
13048 | if (AssumedAddressSpace == InvalidAddressSpace) { |
13049 | AssumedAddressSpace = AS; |
13050 | return true; |
13051 | } |
13052 | return AssumedAddressSpace == AS; |
13053 | } |
13054 | |
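  /// Peel a leading addrspacecast (instruction or constant expression) off
  /// \p V, e.g. `addrspacecast ptr addrspace(3) %q to ptr` yields %q. Only
  /// non-flat -> flat casts are expected here (see the asserts below).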
13055 | static Value *peelAddrspacecast(Value *V, unsigned FlatAS) { |
    if (auto *I = dyn_cast<AddrSpaceCastInst>(V)) {
      assert(I->getSrcAddressSpace() != FlatAS &&
             "there should not be flat AS -> non-flat AS");
13059 | return I->getPointerOperand(); |
13060 | } |
    if (auto *C = dyn_cast<ConstantExpr>(V))
      if (C->getOpcode() == Instruction::AddrSpaceCast) {
        assert(C->getOperand(0)->getType()->getPointerAddressSpace() !=
                   FlatAS &&
               "there should not be flat AS -> non-flat AS X");
        return C->getOperand(0);
13067 | } |
13068 | return V; |
13069 | } |
13070 | }; |
13071 | |
13072 | struct AAAddressSpaceFloating final : AAAddressSpaceImpl { |
13073 | AAAddressSpaceFloating(const IRPosition &IRP, Attributor &A) |
13074 | : AAAddressSpaceImpl(IRP, A) {} |
13075 | |
13076 | void trackStatistics() const override { |
13077 | STATS_DECLTRACK_FLOATING_ATTR(addrspace); |
13078 | } |
13079 | }; |
13080 | |
13081 | struct AAAddressSpaceReturned final : AAAddressSpaceImpl { |
13082 | AAAddressSpaceReturned(const IRPosition &IRP, Attributor &A) |
13083 | : AAAddressSpaceImpl(IRP, A) {} |
13084 | |
13085 | /// See AbstractAttribute::initialize(...). |
13086 | void initialize(Attributor &A) override { |
    // TODO: we don't rewrite the returned value for now because it will need
    // to rewrite the function signature and all call sites.
13089 | (void)indicatePessimisticFixpoint(); |
13090 | } |
13091 | |
13092 | void trackStatistics() const override { |
13093 | STATS_DECLTRACK_FNRET_ATTR(addrspace); |
13094 | } |
13095 | }; |
13096 | |
13097 | struct AAAddressSpaceCallSiteReturned final : AAAddressSpaceImpl { |
13098 | AAAddressSpaceCallSiteReturned(const IRPosition &IRP, Attributor &A) |
13099 | : AAAddressSpaceImpl(IRP, A) {} |
13100 | |
13101 | void trackStatistics() const override { |
13102 | STATS_DECLTRACK_CSRET_ATTR(addrspace); |
13103 | } |
13104 | }; |
13105 | |
13106 | struct AAAddressSpaceArgument final : AAAddressSpaceImpl { |
13107 | AAAddressSpaceArgument(const IRPosition &IRP, Attributor &A) |
13108 | : AAAddressSpaceImpl(IRP, A) {} |
13109 | |
13110 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(addrspace); } |
13111 | }; |
13112 | |
13113 | struct AAAddressSpaceCallSiteArgument final : AAAddressSpaceImpl { |
13114 | AAAddressSpaceCallSiteArgument(const IRPosition &IRP, Attributor &A) |
13115 | : AAAddressSpaceImpl(IRP, A) {} |
13116 | |
13117 | /// See AbstractAttribute::initialize(...). |
13118 | void initialize(Attributor &A) override { |
13119 | // TODO: we don't rewrite call site argument for now because it will need to |
13120 | // rewrite the function signature of the callee. |
13121 | (void)indicatePessimisticFixpoint(); |
13122 | } |
13123 | |
13124 | void trackStatistics() const override { |
13125 | STATS_DECLTRACK_CSARG_ATTR(addrspace); |
13126 | } |
13127 | }; |
13128 | } // namespace |
13129 | |
/// ------------------------ Allocation Info -----------------------------------
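//
// For illustration (hypothetical IR; %buf is an invented name): if pointer
// info proves that only the first 4 bytes of
//
// ```
//   %buf = alloca [64 x i8]
// ```
//
// are ever accessed, manifest(...) replaces the allocation with the smaller
// `alloca i8, i32 4`.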
13131 | namespace { |
13132 | struct AAAllocationInfoImpl : public AAAllocationInfo { |
13133 | AAAllocationInfoImpl(const IRPosition &IRP, Attributor &A) |
13134 | : AAAllocationInfo(IRP, A) {} |
13135 | |
13136 | std::optional<TypeSize> getAllocatedSize() const override { |
13137 | assert(isValidState() && "the AA is invalid" ); |
13138 | return AssumedAllocatedSize; |
13139 | } |
13140 | |
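  /// Return the statically known initial allocation size of \p I (currently
  /// only allocas), or std::nullopt if it is unknown.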
13141 | std::optional<TypeSize> findInitialAllocationSize(Instruction *I, |
13142 | const DataLayout &DL) { |
13143 | |
    // TODO: implement case for malloc-like instructions
13145 | switch (I->getOpcode()) { |
13146 | case Instruction::Alloca: { |
      AllocaInst *AI = cast<AllocaInst>(I);
13148 | return AI->getAllocationSize(DL); |
13149 | } |
13150 | default: |
13151 | return std::nullopt; |
13152 | } |
13153 | } |
13154 | |
13155 | ChangeStatus updateImpl(Attributor &A) override { |
13156 | |
13157 | const IRPosition &IRP = getIRPosition(); |
13158 | Instruction *I = IRP.getCtxI(); |
13159 | |
    // TODO: update check for malloc-like calls
    if (!isa<AllocaInst>(I))
13162 | return indicatePessimisticFixpoint(); |
13163 | |
13164 | bool IsKnownNoCapture; |
13165 | if (!AA::hasAssumedIRAttr<Attribute::Captures>( |
            A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture))
13167 | return indicatePessimisticFixpoint(); |
13168 | |
13169 | const AAPointerInfo *PI = |
        A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
13171 | |
13172 | if (!PI) |
13173 | return indicatePessimisticFixpoint(); |
13174 | |
13175 | if (!PI->getState().isValidState() || PI->reachesReturn()) |
13176 | return indicatePessimisticFixpoint(); |
13177 | |
13178 | const DataLayout &DL = A.getDataLayout(); |
13179 | const auto AllocationSize = findInitialAllocationSize(I, DL); |
13180 | |
13181 | // If allocation size is nullopt, we give up. |
13182 | if (!AllocationSize) |
13183 | return indicatePessimisticFixpoint(); |
13184 | |
    // For zero-sized allocations we give up, since we cannot reduce the
    // allocation size any further.
13187 | if (*AllocationSize == 0) |
13188 | return indicatePessimisticFixpoint(); |
13189 | |
13190 | int64_t BinSize = PI->numOffsetBins(); |
13191 | |
13192 | // TODO: implement for multiple bins |
13193 | if (BinSize > 1) |
13194 | return indicatePessimisticFixpoint(); |
13195 | |
13196 | if (BinSize == 0) { |
13197 | auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false)); |
      if (!changeAllocationSize(NewAllocationSize))
13199 | return ChangeStatus::UNCHANGED; |
13200 | return ChangeStatus::CHANGED; |
13201 | } |
13202 | |
13203 | // TODO: refactor this to be part of multiple bin case |
13204 | const auto &It = PI->begin(); |
13205 | |
13206 | // TODO: handle if Offset is not zero |
13207 | if (It->first.Offset != 0) |
13208 | return indicatePessimisticFixpoint(); |
13209 | |
13210 | uint64_t SizeOfBin = It->first.Offset + It->first.Size; |
13211 | |
13212 | if (SizeOfBin >= *AllocationSize) |
13213 | return indicatePessimisticFixpoint(); |
13214 | |
13215 | auto NewAllocationSize = |
13216 | std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false)); |
13217 | |
    if (!changeAllocationSize(NewAllocationSize))
13219 | return ChangeStatus::UNCHANGED; |
13220 | |
13221 | return ChangeStatus::CHANGED; |
13222 | } |
13223 | |
13224 | /// See AbstractAttribute::manifest(...). |
13225 | ChangeStatus manifest(Attributor &A) override { |
13226 | |
13227 | assert(isValidState() && |
13228 | "Manifest should only be called if the state is valid." ); |
13229 | |
13230 | Instruction *I = getIRPosition().getCtxI(); |
13231 | |
13232 | auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue(); |
13233 | |
13234 | unsigned long NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8; |
13235 | |
13236 | switch (I->getOpcode()) { |
    // TODO: add case for malloc-like calls
13238 | case Instruction::Alloca: { |
13239 | |
      AllocaInst *AI = cast<AllocaInst>(I);

      Type *CharType = Type::getInt8Ty(I->getContext());

      auto *NumBytesToValue =
          ConstantInt::get(I->getContext(), APInt(32, NumBytesToAllocate));

      BasicBlock::iterator InsertPt = AI->getIterator();
      InsertPt = std::next(InsertPt);
      AllocaInst *NewAllocaInst =
          new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
                         AI->getAlign(), AI->getName(), InsertPt);

      if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
13254 | return ChangeStatus::CHANGED; |
13255 | |
13256 | break; |
13257 | } |
13258 | default: |
13259 | break; |
13260 | } |
13261 | |
13262 | return ChangeStatus::UNCHANGED; |
13263 | } |
13264 | |
13265 | /// See AbstractAttribute::getAsStr(). |
13266 | const std::string getAsStr(Attributor *A) const override { |
13267 | if (!isValidState()) |
13268 | return "allocationinfo(<invalid>)" ; |
13269 | return "allocationinfo(" + |
13270 | (AssumedAllocatedSize == HasNoAllocationSize |
13271 | ? "none" |
                : std::to_string(AssumedAllocatedSize->getFixedValue())) +
           ")";
13274 | } |
13275 | |
13276 | private: |
13277 | std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize; |
13278 | |
  // Maintain the computed allocation size of the object.
  // Returns true if the size of the allocation was modified.
13281 | bool changeAllocationSize(std::optional<TypeSize> Size) { |
13282 | if (AssumedAllocatedSize == HasNoAllocationSize || |
13283 | AssumedAllocatedSize != Size) { |
13284 | AssumedAllocatedSize = Size; |
13285 | return true; |
13286 | } |
13287 | return false; |
13288 | } |
13289 | }; |
13290 | |
13291 | struct AAAllocationInfoFloating : AAAllocationInfoImpl { |
13292 | AAAllocationInfoFloating(const IRPosition &IRP, Attributor &A) |
13293 | : AAAllocationInfoImpl(IRP, A) {} |
13294 | |
13295 | void trackStatistics() const override { |
13296 | STATS_DECLTRACK_FLOATING_ATTR(allocationinfo); |
13297 | } |
13298 | }; |
13299 | |
13300 | struct AAAllocationInfoReturned : AAAllocationInfoImpl { |
13301 | AAAllocationInfoReturned(const IRPosition &IRP, Attributor &A) |
13302 | : AAAllocationInfoImpl(IRP, A) {} |
13303 | |
13304 | /// See AbstractAttribute::initialize(...). |
13305 | void initialize(Attributor &A) override { |
    // TODO: we don't rewrite the returned value for now because it will need
    // to rewrite the function signature and all call sites.
13308 | (void)indicatePessimisticFixpoint(); |
13309 | } |
13310 | |
13311 | void trackStatistics() const override { |
13312 | STATS_DECLTRACK_FNRET_ATTR(allocationinfo); |
13313 | } |
13314 | }; |
13315 | |
13316 | struct AAAllocationInfoCallSiteReturned : AAAllocationInfoImpl { |
13317 | AAAllocationInfoCallSiteReturned(const IRPosition &IRP, Attributor &A) |
13318 | : AAAllocationInfoImpl(IRP, A) {} |
13319 | |
13320 | void trackStatistics() const override { |
13321 | STATS_DECLTRACK_CSRET_ATTR(allocationinfo); |
13322 | } |
13323 | }; |
13324 | |
13325 | struct AAAllocationInfoArgument : AAAllocationInfoImpl { |
13326 | AAAllocationInfoArgument(const IRPosition &IRP, Attributor &A) |
13327 | : AAAllocationInfoImpl(IRP, A) {} |
13328 | |
13329 | void trackStatistics() const override { |
13330 | STATS_DECLTRACK_ARG_ATTR(allocationinfo); |
13331 | } |
13332 | }; |
13333 | |
13334 | struct AAAllocationInfoCallSiteArgument : AAAllocationInfoImpl { |
13335 | AAAllocationInfoCallSiteArgument(const IRPosition &IRP, Attributor &A) |
13336 | : AAAllocationInfoImpl(IRP, A) {} |
13337 | |
13338 | /// See AbstractAttribute::initialize(...). |
  void initialize(Attributor &A) override {
    (void)indicatePessimisticFixpoint();
13342 | } |
13343 | |
13344 | void trackStatistics() const override { |
13345 | STATS_DECLTRACK_CSARG_ATTR(allocationinfo); |
13346 | } |
13347 | }; |
13348 | } // namespace |
13349 | |
13350 | const char AANoUnwind::ID = 0; |
13351 | const char AANoSync::ID = 0; |
13352 | const char AANoFree::ID = 0; |
13353 | const char AANonNull::ID = 0; |
13354 | const char AAMustProgress::ID = 0; |
13355 | const char AANoRecurse::ID = 0; |
13356 | const char AANonConvergent::ID = 0; |
13357 | const char AAWillReturn::ID = 0; |
13358 | const char AAUndefinedBehavior::ID = 0; |
13359 | const char AANoAlias::ID = 0; |
13360 | const char AAIntraFnReachability::ID = 0; |
13361 | const char AANoReturn::ID = 0; |
13362 | const char AAIsDead::ID = 0; |
13363 | const char AADereferenceable::ID = 0; |
13364 | const char AAAlign::ID = 0; |
13365 | const char AAInstanceInfo::ID = 0; |
13366 | const char AANoCapture::ID = 0; |
13367 | const char AAValueSimplify::ID = 0; |
13368 | const char AAHeapToStack::ID = 0; |
13369 | const char AAPrivatizablePtr::ID = 0; |
13370 | const char AAMemoryBehavior::ID = 0; |
13371 | const char AAMemoryLocation::ID = 0; |
13372 | const char AAValueConstantRange::ID = 0; |
13373 | const char AAPotentialConstantValues::ID = 0; |
13374 | const char AAPotentialValues::ID = 0; |
13375 | const char AANoUndef::ID = 0; |
13376 | const char AANoFPClass::ID = 0; |
13377 | const char AACallEdges::ID = 0; |
13378 | const char AAInterFnReachability::ID = 0; |
13379 | const char AAPointerInfo::ID = 0; |
13380 | const char AAAssumptionInfo::ID = 0; |
13381 | const char AAUnderlyingObjects::ID = 0; |
13382 | const char AAInvariantLoadPointer::ID = 0; |
13383 | const char AAAddressSpace::ID = 0; |
13384 | const char AAAllocationInfo::ID = 0; |
13385 | const char AAIndirectCallInfo::ID = 0; |
13386 | const char AAGlobalValueInfo::ID = 0; |
13387 | const char AADenormalFPMath::ID = 0; |
13388 | |
// Macro magic to create the static createForPosition generator functions for
// attributes that follow the naming scheme.
13391 | |
13392 | #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \ |
13393 | case IRPosition::PK: \ |
13394 | llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!"); |
13395 | |
13396 | #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \ |
13397 | case IRPosition::PK: \ |
13398 | AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \ |
13399 | ++NumAAs; \ |
13400 | break; |
13401 | |
13402 | #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ |
13403 | CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ |
13404 | CLASS *AA = nullptr; \ |
13405 | switch (IRP.getPositionKind()) { \ |
13406 | SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ |
13407 | SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ |
13408 | SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ |
13409 | SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ |
13410 | SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ |
13411 | SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ |
13412 | SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ |
13413 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ |
13414 | } \ |
13415 | return *AA; \ |
13416 | } |
13417 | |
13418 | #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ |
13419 | CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ |
13420 | CLASS *AA = nullptr; \ |
13421 | switch (IRP.getPositionKind()) { \ |
13422 | SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ |
13423 | SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \ |
13424 | SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ |
13425 | SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ |
13426 | SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ |
13427 | SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ |
13428 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ |
13429 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ |
13430 | } \ |
13431 | return *AA; \ |
13432 | } |
13433 | |
13434 | #define CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(POS, SUFFIX, CLASS) \ |
13435 | CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ |
13436 | CLASS *AA = nullptr; \ |
13437 | switch (IRP.getPositionKind()) { \ |
13438 | SWITCH_PK_CREATE(CLASS, IRP, POS, SUFFIX) \ |
13439 | default: \ |
13440 | llvm_unreachable("Cannot create " #CLASS " for position otherthan " #POS \ |
13441 | " position!"); \ |
13442 | } \ |
13443 | return *AA; \ |
13444 | } |
13445 | |
13446 | #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ |
13447 | CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ |
13448 | CLASS *AA = nullptr; \ |
13449 | switch (IRP.getPositionKind()) { \ |
13450 | SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ |
13451 | SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ |
13452 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ |
13453 | SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ |
13454 | SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ |
13455 | SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ |
13456 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ |
13457 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ |
13458 | } \ |
13459 | return *AA; \ |
13460 | } |
13461 | |
13462 | #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ |
13463 | CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ |
13464 | CLASS *AA = nullptr; \ |
13465 | switch (IRP.getPositionKind()) { \ |
13466 | SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ |
13467 | SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ |
13468 | SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ |
13469 | SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ |
13470 | SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ |
13471 | SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ |
13472 | SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ |
13473 | SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ |
13474 | } \ |
13475 | return *AA; \ |
13476 | } |
13477 | |
13478 | #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ |
13479 | CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ |
13480 | CLASS *AA = nullptr; \ |
13481 | switch (IRP.getPositionKind()) { \ |
13482 | SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ |
13483 | SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ |
13484 | SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ |
13485 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ |
13486 | SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ |
13487 | SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ |
13488 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ |
13489 | SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ |
13490 | } \ |
13491 | return *AA; \ |
13492 | } |
13493 | |
13494 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) |
13495 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync) |
13496 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse) |
13497 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn) |
13498 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn) |
13499 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation) |
13500 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges) |
13501 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo) |
13502 | CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMustProgress) |
13503 | |
13504 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull) |
13505 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias) |
13506 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr) |
13507 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable) |
13508 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign) |
13509 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo) |
13510 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture) |
13511 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange) |
13512 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues) |
13513 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues) |
13514 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef) |
13515 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFPClass) |
13516 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo) |
13517 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInvariantLoadPointer) |
13518 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAddressSpace) |
13519 | CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAllocationInfo) |
13520 | |
13521 | CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify) |
13522 | CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead) |
13523 | CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree) |
13524 | CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUnderlyingObjects) |
13525 | |
13526 | CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(IRP_CALL_SITE, CallSite, |
13527 | AAIndirectCallInfo) |
13528 | CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION(IRP_FLOAT, Floating, |
13529 | AAGlobalValueInfo) |
13530 | |
13531 | CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack) |
13532 | CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior) |
13533 | CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonConvergent) |
13534 | CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIntraFnReachability) |
13535 | CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInterFnReachability) |
13536 | CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADenormalFPMath) |
13537 | |
13538 | CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior) |
13539 | |
13540 | #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION |
13541 | #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION |
13542 | #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION |
13543 | #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION |
13544 | #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION |
13545 | #undef CREATE_ABSTRACT_ATTRIBUTE_FOR_ONE_POSITION |
13546 | #undef SWITCH_PK_CREATE |
13547 | #undef SWITCH_PK_INV |
13548 | |