//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

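/// Return true if the address formed by Base plus the byte Offset is known to
/// be aligned to at least Alignment. For example, a base with known alignment
/// 16 and a constant offset of 32 satisfies a requested alignment of 8:
/// 16 >= 8 and 32 is a multiple of 16.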
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  return BA >= Alignment && Offset.isAligned(BA);
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
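    // For example, with a requested alignment of 8, a GEP at constant offset
    // 24 accessing 4 bytes is handled by asking whether the base is
    // dereferenceable for 24 + 4 = 28 bytes; since 24 is a multiple of 8, an
    // 8-byte-aligned base makes the GEP 8-byte aligned as well.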

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

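  // Check whether V itself carries an explicit dereferenceable /
  // dereferenceable_or_null amount, e.g. from a parameter or call-site
  // attribute. For example, an argument declared 'dereferenceable(16)'
  // typically reports 16 known bytes without needing a separate non-null
  // proof, while 'dereferenceable_or_null(16)' also requires showing the
  // pointer is non-zero at CtxI.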
  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull ||
        isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI))) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call,
                                                        /*MustPreserveNullness=*/true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
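    // For example, a call to malloc(64) recognized through TLI establishes a
    // minimum object size of 64 bytes, but the result must still be proven
    // non-null (and not freed) before an access through it can be speculated.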
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume.
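    /// For example, an assume such as
    ///   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16),
    ///                                    "dereferenceable"(ptr %p, i64 32)]
    /// proves a 32-byte access through %p aligned to 16, provided the assume
    /// is valid at CtxI.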
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI, DT))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false; // Other assumes may have better information, so
                            // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT,
                                             AssumptionCache *AC) {
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;

  // TODO: Handle overlapping accesses.
  // We should be computing AccessSize as (TC - 1) * Step + EltSize.
  if (EltSize.sgt(Step->getAPInt()))
    return false;

  // Compute the total access size for access patterns with unit stride and
  // patterns with gaps. For patterns with unit stride, Step and EltSize are
  // the same. For patterns with gaps (i.e. non-unit stride), we are accessing
  // EltSize bytes at every Step.
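  // For example, a stride-2 i32 access pattern (Step == 8, EltSize == 4) with
  // a maximum trip count of 100 is treated as touching the first
  // 100 * 8 == 800 bytes from the start of the object, even though only half
  // of those bytes are actually read.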
  APInt AccessSize = TC * Step->getAPInt();

  assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
         "implied by addrec definition");
  Value *Base = nullptr;
  if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
    Base = StartS->getValue();
  } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
    // Handle (NewBase + offset) as start value.
    const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
    if (StartS->getNumOperands() == 2 && Offset && NewBase) {
      // The following code assumes the offset is unsigned, but GEP
      // offsets are treated as signed so we can end up with a signed value
      // here too. For example, suppose the initial PHI value is (i8 255),
      // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
      if (Offset->getAPInt().isNegative())
        return false;

      // For the moment, restrict ourselves to the case where the offset is a
      // multiple of the requested alignment and the base is aligned.
      // TODO: generalize if a case is found that warrants it.
      if (Offset->getAPInt().urem(Alignment.value()) != 0)
        return false;
      Base = NewBase->getValue();
      bool Overflow = false;
      AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
      if (Overflow)
        return false;
    }
  }

  if (!Base)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found that warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size, const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
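  // For example, if an aligned store to the same address of at least as many
  // bytes already executes earlier in this block, the new load cannot
  // introduce a trap that would not have happened anyway.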
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned> llvm::DefMaxInstsToScan(
    "available-load-scan-limit", cl::init(6), cl::Hidden,
    cl::desc("Use this to specify the default maximum number of instructions "
             "to scan backward from a given instruction, when searching for "
             "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA,
                                   IsLoad, NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
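// For example, a load of i32 from (p + 8) and a store of i64 to p cover the
// byte ranges [8, 12) and [0, 8), which do not intersect, so the store cannot
// clobber the loaded bytes.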
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

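    // If the stored value is a constant at least as wide as the load, try to
    // fold the in-memory bytes into a constant of the load's type (e.g. an
    // i64 store of 0 feeding an i32 load folds to i32 0).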
    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

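    // Splat the memset byte to the width of the load. For example, a
    // memset(p, 0xAB, 16) feeding an i32 load produces the constant
    // 0xABABABAB; a load narrower than one byte just truncates the value.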
    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom,
    unsigned MaxInstsToScan, BatchAAResults *AA, bool *IsLoadCSE,
    unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

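    // Point ScanFrom back at Inst before inspecting it; the early returns
    // below deliberately leave ScanFrom one past the instruction that stopped
    // the scan.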
    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
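// For example, a pointer whose only (transitive) uses are 'icmp eq ptr %p, %q'
// or 'ptrtoint ptr %p to i64' only observes the address value, so it can be
// replaced by a pointer that merely compares equal.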
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}

// Returns true if `To` is a null pointer, a constant dereferenceable pointer,
// or both pointers have the same underlying object.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}

bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!To->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}

bool llvm::isDereferenceableReadOnlyLoop(Loop *L, ScalarEvolution *SE,
                                         DominatorTree *DT,
                                         AssumptionCache *AC) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
}