//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
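    // For example, with Alignment == 8 a constant GEP offset of 24 passes the
    // divisibility check above (24 % 8 == 0), so proving Base dereferenceable
    // for 24 + Size bytes at 8-byte alignment proves the GEP dereferenceable
    // for Size bytes at the same alignment.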

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context instruction,
    // in which case we don't know if the dereferenceability info still holds.
    // We don't bother handling allocas here, as they aren't speculatable
    // anyway.
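    // E.g. a pointer produced by
    //   %p = load ptr, ptr %q, !dereferenceable !0   ; !0 = !{i64 8}
    // is only known to have 8 dereferenceable bytes on paths where that load
    // actually executed, hence the dominance check against CtxI below.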
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }

  /// TODO: Refactor this function to be able to search independently for
  /// dereferenceability and alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
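    // E.g. if V is a call known to return a 64-byte object, any access with
    // (accumulated offset + access size) <= 64 bytes can be justified here,
    // but only once the non-null and not-freed checks below also succeed.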
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  // Dereferenceable information from assumptions is only valid if the value
  // cannot be freed between the assumption and use. For now just use the
  // information for values that cannot be freed in the function.
  // TODO: More precisely check if the pointer can be freed between assumption
  // and use.
  if (CtxI && AC && !V->canBeFreed()) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume if needed.
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    bool IsAligned = V->getPointerAlignment(DL) >= Alignment;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI, DT))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
              if (IsAligned && DerefRK &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed, so stop looking.
              return false;  // Other assumes may have better information, so
                             // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  const Align Alignment = LI->getAlign();
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
        &DT);

  const SCEV *PtrScev = SE.getSCEV(Ptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);

  // Check to see if we have a repeating access pattern and it's possible
  // to prove all accesses are well aligned.
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: Generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;
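
  // E.g. an i32 load (EltSize == 4) with 4-byte alignment and a stride of
  // +4 or -4 bytes passes both checks above; a 2-byte stride would be
  // rejected as a potentially overlapping access.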

  const SCEV *MaxBECount =
      Predicates ? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
                 : SE.getConstantMaxBackedgeTakenCount(L);
  const SCEV *BECount = Predicates
                            ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
                            : SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;

  const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
      L, PtrScev, LI->getType(), BECount, MaxBECount, &SE, nullptr);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // Try to get the access size.
  const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  if (isa<SCEVCouldNotCompute>(PtrDiff))
    return false;
  APInt MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff);

  Value *Base = nullptr;
  APInt AccessSize;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = MaxPtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;

    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    if (!Offset || !NewBase)
      return false;

    // The code below assumes the offset is unsigned, but GEP offsets are
    // treated as signed, so we can end up with a signed value here too. For
    // example, suppose the initial PHI value is (i8 255); the offset will be
    // treated as (i8 -1) and sign-extended to (i64 -1).
    if (Offset->getAPInt().isNegative())
      return false;

    // For the moment, restrict ourselves to the case where the offset is a
    // multiple of the requested alignment and the base is aligned.
    // TODO: Generalize if a case is found which warrants it.
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    AccessSize = MaxPtrDiff + Offset->getAPInt();
    Base = NewBase->getValue();
  } else
    return false;

  Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt();
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size, const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for an
    // unconditional load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
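  // E.g. if the block already contains "store i32 0, ptr %p" before ScanFrom,
  // a 4-byte, sufficiently aligned load of %p cannot introduce a new trap, so
  // it is safe to speculate.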
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
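// E.g. a 4-byte load of (p + 0) and a 4-byte store to (p + 8) share the base
// p and have the disjoint constant-offset ranges [0, 4) and [8, 12), so the
// store cannot clobber the loaded bytes.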
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

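    // E.g. forwarding memset(p, 0xAB, 8) to a 4-byte load yields the splat
    // constant 0xABABABAB; for loads narrower than one byte (e.g. i1) the
    // memset value is truncated instead.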
    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }
      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
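  // Deferring the mod/ref queries this way means alias analysis is only
  // consulted for the instructions between the load and a found candidate,
  // and not at all when no candidate value is available.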
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
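// E.g. a pointer that (possibly through phis/selects) only flows into icmp or
// ptrtoint instructions is observed by address only, so replacing it with a
// pointer known to be equal is treated as safe here.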
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}

// Returns true if `To` is a null pointer, a constant dereferenceable pointer,
// or both pointers have the same underlying object.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}

bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!To->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}

bool llvm::isDereferenceableReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
}
839 | |