//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}

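/// Return true if, possibly using align/dereferenceable assumptions valid at
/// \p CtxI, \p Ptr is known to be aligned to \p Alignment and dereferenceable
/// for a size accepted by \p CheckSize.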
static bool isDereferenceableAndAlignedPointerViaAssumption(
    const Value *Ptr, Align Alignment,
    function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
    const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
    const DominatorTree *DT) {
  if (!CtxI)
    return false;
  // Look through assumes to see if both dereferenceability and alignment can
  // be proven by an assume if needed.
  RetainedKnowledge AlignRK;
  RetainedKnowledge DerefRK;
  bool PtrCanBeFreed = Ptr->canBeFreed();
  bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
  return getKnowledgeForValue(
      Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
      [&](RetainedKnowledge RK, Instruction *Assume, auto) {
        if (!isValidAssumeForContext(Assume, CtxI, DT))
          return false;
        if (RK.AttrKind == Attribute::Alignment)
          AlignRK = std::max(AlignRK, RK);

        // Dereferenceable information from assumptions is only valid if the
        // value cannot be freed between the assumption and use.
        if ((!PtrCanBeFreed || willNotFreeBetween(Assume, CtxI)) &&
            RK.AttrKind == Attribute::Dereferenceable)
          DerefRK = std::max(DerefRK, RK);
        IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
        if (IsAligned && DerefRK && CheckSize(DerefRK))
          return true; // We have found what we needed so we stop looking.
        return false;  // Other assumes may have better information, so
                       // keep looking.
      });
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(BC->getOperand(0), Alignment,
                                                Size, DL, CtxI, AC, DT, TLI,
                                                Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context instruction,
    // in which case we don't know if the dereferenceability info still holds.
    // We don't bother handling allocas here, as they aren't speculatable
    // anyway.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }

  // TODO: Refactor this function so that it can search independently for the
  // dereferenceability and alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP =
            getArgumentAliasingToReturnedPointer(Call,
                                                 /*MustPreserveNullness=*/true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  return AC && isDereferenceableAndAlignedPointerViaAssumption(
                   V, Alignment,
                   [Size](const RetainedKnowledge &RK) {
                     return RK.ArgValue >= Size.getZExtValue();
                   },
                   DL, CtxI, AC, DT);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<CastInst>(A) || isa<PHINode>(A) || isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

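// Implementation note: a loop-invariant address is handled by a plain
// dereferenceability check at the loop header. Otherwise the pointer must be
// an affine AddRec in the loop, and the byte range it covers across the
// (symbolic) maximum backedge-taken count must fit into the dereferenceable
// region of the underlying base, proven either statically or via
// align/dereferenceable assumptions.
//
// A hypothetical IR shape this can prove safe, assuming %base is known to be
// align(4) and dereferenceable(4096) and the trip count is at most 1024:
//   %addr = getelementptr inbounds i32, ptr %base, i64 %iv
//   %val = load i32, ptr %addr, align 4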
bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  const Align Alignment = LI->getAlign();
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
        &DT);

  const SCEV *PtrScev = SE.getSCEV(Ptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);

  // Check to see if we have a repeating access pattern and it's possible
  // to prove all accesses are well aligned.
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;

  const SCEV *MaxBECount =
      Predicates
          ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
          : SE.getSymbolicMaxBackedgeTakenCount(L);
  const SCEV *BECount = Predicates
                            ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
                            : SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;
  std::optional<ScalarEvolution::LoopGuards> LoopGuards;
  const auto &[AccessStart, AccessEnd] =
      getStartAndEndForAccess(L, PtrScev, LI->getType(), BECount, MaxBECount,
                              &SE, nullptr, &DT, AC, LoopGuards);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // Try to get the access size.
  const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  if (isa<SCEVCouldNotCompute>(PtrDiff))
    return false;

  if (!LoopGuards)
    LoopGuards.emplace(
        ScalarEvolution::LoopGuards::collect(AddRec->getLoop(), SE));

  APInt MaxPtrDiff =
      SE.getUnsignedRangeMax(SE.applyLoopGuards(PtrDiff, *LoopGuards));

  Value *Base = nullptr;
  APInt AccessSize;
  const SCEV *AccessSizeSCEV = nullptr;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = MaxPtrDiff;
    AccessSizeSCEV = PtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;

    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    if (!Offset || !NewBase)
      return false;

    // The code below assumes the offset is unsigned, but GEP offsets are
    // treated as signed so we can end up with a signed value here too. For
    // example, suppose the initial PHI value is (i8 255), the offset will be
    // treated as (i8 -1) and sign-extended to (i64 -1).
    if (Offset->getAPInt().isNegative())
      return false;

    // For the moment, restrict ourselves to the case where the offset is a
    // multiple of the requested alignment and the base is aligned.
    // TODO: generalize if a case is found which warrants it.
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    bool Overflow = false;
    AccessSize = MaxPtrDiff.uadd_ov(Offset->getAPInt(), Overflow);
    if (Overflow)
      return false;
    AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
    Base = NewBase->getValue();
  } else
    return false;

  Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
  if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
    if (isa<BranchInst>(LoopPred->getTerminator()))
      CtxI = LoopPred->getTerminator();
  }
  return isDereferenceableAndAlignedPointerViaAssumption(
             Base, Alignment,
             [&SE, AccessSizeSCEV, &LoopGuards](const RetainedKnowledge &RK) {
               return SE.isKnownPredicate(
                   CmpInst::ICMP_ULE,
                   SE.applyLoopGuards(AccessSizeSCEV, *LoopGuards),
                   SE.applyLoopGuards(SE.getSCEV(RK.IRArgValue), *LoopGuards));
             },
             DL, CtxI, AC, &DT) ||
         isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            CtxI, AC, &DT);
}

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size, const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for an
    // unconditional load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default maximum number of instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

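// Thin wrapper over findAvailablePtrLoadStore: refuse to CSE loads stronger
// than unordered, then scan backwards from ScanFrom for an equivalent load or
// a store to the same location.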
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

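// If Inst makes the value that would be loaded from Ptr with type AccessTy
// available, return that value; otherwise return nullptr. Handles forwarding
// from earlier loads and stores of the same address as well as from constant
// memsets covering the loaded bytes.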
static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // Handle offsets.
    int64_t StoreOffset = 0, LoadOffset = 0;
    const Value *StoreBase =
        GetPointerBaseWithConstantOffset(MSI->getDest(), StoreOffset, DL);
    const Value *LoadBase =
        GetPointerBaseWithConstantOffset(Ptr, LoadOffset, DL);
    if (StoreBase != LoadBase || LoadOffset < StoreOffset)
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize + (LoadOffset - StoreOffset) * 8))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}

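// Scan backwards from ScanFrom within ScanBB for an instruction that already
// makes the value at Loc available. Gives up after MaxInstsToScan
// instructions, or at the first potentially clobbering write that cannot be
// disambiguated (using BatchAA when provided).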
Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

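// Variant of FindAvailableLoadedValue that scans the load's whole block: it
// first looks for an available value and only then runs the (more expensive)
// alias queries against the potentially clobbering writes seen along the way.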
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

// Returns true if a use is either in an ICmp/PtrToAddr (or a PtrToInt, when
// the pointer type has no non-address bits) or in a Phi/Select that only
// feeds into them.
static bool isPointerUseReplacable(const Use &U, bool HasNonAddressBits) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToAddrInst>(User))
      continue;
    // FIXME: The PtrToIntInst case here is not strictly correct, as it
    // changes which provenance is exposed.
    if (!HasNonAddressBits && isa<PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}

// Returns true if `To` is a null pointer, constant dereferenceable pointer or
// both pointers have the same underlying objects.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) && To->getType()->isPointerTy() &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}

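// A pointer use U may be replaced with an equal pointer To when the
// replacement is always valid (see isPointerAlwaysReplaceable), or when the
// use only observes the pointer's address rather than its provenance.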
bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  Type *Ty = To->getType();
  assert(U->getType() == Ty && "values must have matching types");
  // Not a pointer, just return true.
  if (!Ty->isPtrOrPtrVectorTy())
    return true;

  // Do not perform replacements in lifetime intrinsic arguments.
  if (isa<LifetimeIntrinsic>(U.getUser()))
    return false;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;

  bool HasNonAddressBits =
      DL.getAddressSizeInBits(Ty) != DL.getPointerTypeSizeInBits(Ty);
  return isPointerUseReplacable(U, HasNonAddressBits);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() &&
         "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPtrOrPtrVectorTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}

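// A loop is "read only" if it contains no instruction that may write to
// memory or throw, and reads memory only through loads; loads that cannot be
// shown dereferenceable and aligned across the whole loop are collected in
// NonDereferenceableAndAlignedLoads.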
bool llvm::isReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<LoadInst *> &NonDereferenceableAndAlignedLoads,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          NonDereferenceableAndAlignedLoads.push_back(LI);
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() ||
                 I.mayThrow()) {
        return false;
      }
    }
  }
  return true;
}

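// Peels constant-offset and single-variable-index GEPs off Ptr, accumulating
// them into a base pointer, an optional variable index with a scale, and a
// constant offset (all in index-type width). For example, a hypothetical
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
// would decompose into BasePtr = %base, Index = %i, Scale = 4, Offset = 0,
// assuming a DataLayout where i32 occupies 4 bytes.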
LinearExpression llvm::decomposeLinearExpression(const DataLayout &DL,
                                                 Value *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Must be called with pointer arg");

  unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
  LinearExpression Expr(Ptr, BitWidth);

  while (true) {
    auto *GEP = dyn_cast<GEPOperator>(Expr.BasePtr);
    if (!GEP || GEP->getSourceElementType()->isScalableTy())
      return Expr;

    Value *VarIndex = nullptr;
    for (Value *Index : GEP->indices()) {
      if (isa<ConstantInt>(Index))
        continue;
      // Only allow a single variable index. We do not bother to handle the
      // case of the same variable index appearing multiple times.
      if (Expr.Index || VarIndex)
        return Expr;
      VarIndex = Index;
    }

    // Don't return non-canonical indexes.
    if (VarIndex && !VarIndex->getType()->isIntegerTy(BitWidth))
      return Expr;

    // We have verified that we can fully handle this GEP, so we can update
    // Expr members past this point.
    Expr.BasePtr = GEP->getPointerOperand();
    Expr.Flags = Expr.Flags.intersectForOffsetAdd(GEP->getNoWrapFlags());
    for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
         GTI != GTE; ++GTI) {
      Value *Index = GTI.getOperand();
      if (auto *ConstOffset = dyn_cast<ConstantInt>(Index)) {
        if (ConstOffset->isZero())
          continue;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = ConstOffset->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          Expr.Offset += SL->getElementOffset(ElementIdx);
          continue;
        }
        // Truncate if type size exceeds index space.
        APInt IndexedSize(BitWidth, GTI.getSequentialElementStride(DL),
                          /*isSigned=*/false,
                          /*implicitTrunc=*/true);
        Expr.Offset += ConstOffset->getValue() * IndexedSize;
        continue;
      }

      // FIXME: Also look through a mul/shl in the index.
      assert(Expr.Index == nullptr && "Shouldn't have index yet");
      Expr.Index = Index;
      // Truncate if type size exceeds index space.
      Expr.Scale = APInt(BitWidth, GTI.getSequentialElementStride(DL),
                         /*isSigned=*/false, /*implicitTrunc=*/true);
    }
  }

  return Expr;
}
