1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/LazyValueInfo.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/Analysis/AssumeBundleQueries.h"
18#include "llvm/Analysis/AssumptionCache.h"
19#include "llvm/Analysis/ConstantFolding.h"
20#include "llvm/Analysis/InstructionSimplify.h"
21#include "llvm/Analysis/Passes.h"
22#include "llvm/Analysis/TargetLibraryInfo.h"
23#include "llvm/Analysis/ValueLattice.h"
24#include "llvm/Analysis/ValueTracking.h"
25#include "llvm/IR/AssemblyAnnotationWriter.h"
26#include "llvm/IR/CFG.h"
27#include "llvm/IR/ConstantRange.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DataLayout.h"
30#include "llvm/IR/Dominators.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instructions.h"
33#include "llvm/IR/IntrinsicInst.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/LLVMContext.h"
36#include "llvm/IR/Module.h"
37#include "llvm/IR/PatternMatch.h"
38#include "llvm/IR/ValueHandle.h"
39#include "llvm/InitializePasses.h"
40#include "llvm/Support/Debug.h"
41#include "llvm/Support/FormattedStream.h"
42#include "llvm/Support/KnownBits.h"
43#include "llvm/Support/raw_ostream.h"
44#include <optional>
45using namespace llvm;
46using namespace PatternMatch;
47
48#define DEBUG_TYPE "lazy-value-info"
49
50// This is the number of worklist items we will process to try to discover an
51// answer for a given value.
52static const unsigned MaxProcessedPerValue = 500;
53
54char LazyValueInfoWrapperPass::ID = 0;
55LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {}
56INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
57 "Lazy Value Information Analysis", false, true)
58INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
59INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
60INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
61 "Lazy Value Information Analysis", false, true)
62
63static cl::opt<bool> PerPredRanges(
64 "lvi-per-pred-ranges", cl::Hidden, cl::init(Val: false),
65 cl::desc("Enable tracking of ranges for a value in a block for"
66 "each block predecessor (default = false)"));
67
68namespace llvm {
69FunctionPass *createLazyValueInfoPass() {
70 return new LazyValueInfoWrapperPass();
71}
72} // namespace llvm
73
74AnalysisKey LazyValueAnalysis::Key;
75
76/// Returns true if this lattice value represents at most one possible value.
77/// This is as precise as any lattice value can get while still representing
78/// reachable code.
79static bool hasSingleValue(const ValueLatticeElement &Val) {
80 if (Val.isConstantRange() &&
81 Val.getConstantRange().isSingleElement())
82 // Integer constants are single element ranges
83 return true;
84 if (Val.isConstant())
85 // Non integer constants
86 return true;
87 return false;
88}
89
90//===----------------------------------------------------------------------===//
91// LazyValueInfoCache Decl
92//===----------------------------------------------------------------------===//
93
namespace {
  /// A callback value handle updates the cache when values are erased.
  class LazyValueInfoCache;
  struct LVIValueHandle final : public CallbackVH {
    // The cache that owns this handle; notified when the Value dies. Not
    // owned by the handle.
    LazyValueInfoCache *Parent;

    LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
        : CallbackVH(V), Parent(P) { }

    // Invoked by the ValueHandle machinery when the tracked Value is
    // destroyed; defined out of line after LazyValueInfoCache.
    void deleted() override;
    // RAUW is treated the same as deletion: any cached facts about the old
    // value may not hold for its replacement, so they are dropped.
    void allUsesReplacedWith(Value *V) override {
      deleted();
    }
  };
} // end anonymous namespace
109
110namespace {
111using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
112using BBLatticeElementMap =
113 SmallDenseMap<PoisoningVH<BasicBlock>, ValueLatticeElement, 4>;
114using PredecessorValueLatticeMap =
115 SmallDenseMap<AssertingVH<Value>, BBLatticeElementMap, 2>;
116
117/// This is the cache kept by LazyValueInfo which
118/// maintains information about queries across the clients' queries.
119class LazyValueInfoCache {
120 /// This is all of the cached information for one basic block. It contains
121 /// the per-value lattice elements, as well as a separate set for
122 /// overdefined values to reduce memory usage. Additionally pointers
123 /// dereferenced in the block are cached for nullability queries.
124 struct BlockCacheEntry {
125 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
126 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
127 // std::nullopt indicates that the nonnull pointers for this basic block
128 // block have not been computed yet.
129 std::optional<NonNullPointerSet> NonNullPointers;
130 // This is an extension of the above LatticeElements, caching, for each
131 // Value, a ValueLatticeElement, for each predecessor of the BB tracked by
132 // this entry.
133 std::optional<PredecessorValueLatticeMap> PredecessorLatticeElements;
134 };
135
136 /// Cached information per basic block.
137 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
138 BlockCache;
139 /// Set of value handles used to erase values from the cache on deletion.
140 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
141
142 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
143 auto It = BlockCache.find_as(Val: BB);
144 if (It == BlockCache.end())
145 return nullptr;
146 return It->second.get();
147 }
148
149 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
150 auto It = BlockCache.find_as(Val: BB);
151 if (It == BlockCache.end()) {
152 std::unique_ptr<BlockCacheEntry> BCE =
153 std::make_unique<BlockCacheEntry>();
154 if (PerPredRanges)
155 BCE->PredecessorLatticeElements =
156 std::make_optional<PredecessorValueLatticeMap>();
157 It = BlockCache.insert(KV: {BB, std::move(BCE)}).first;
158 }
159
160 return It->second.get();
161 }
162
163 void addValueHandle(Value *Val) {
164 auto HandleIt = ValueHandles.find_as(Val);
165 if (HandleIt == ValueHandles.end())
166 ValueHandles.insert(V: {Val, this});
167 }
168
169public:
170 void insertResult(Value *Val, BasicBlock *BB,
171 const ValueLatticeElement &Result) {
172 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
173
174 // Insert over-defined values into their own cache to reduce memory
175 // overhead.
176 if (Result.isOverdefined())
177 Entry->OverDefined.insert(V: Val);
178 else
179 Entry->LatticeElements.insert(KV: {Val, Result});
180
181 addValueHandle(Val);
182 }
183
184 void insertPredecessorResults(Value *Val, BasicBlock *BB,
185 BBLatticeElementMap &PredLatticeElements) {
186 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
187
188 Entry->PredecessorLatticeElements->insert(KV: {Val, PredLatticeElements});
189
190 addValueHandle(Val);
191 }
192
193 std::optional<BBLatticeElementMap>
194 getCachedPredecessorInfo(Value *V, BasicBlock *BB) const {
195 const BlockCacheEntry *Entry = getBlockEntry(BB);
196 if (!Entry)
197 return std::nullopt;
198
199 auto LatticeIt = Entry->PredecessorLatticeElements->find_as(Val: V);
200 if (LatticeIt == Entry->PredecessorLatticeElements->end())
201 return std::nullopt;
202
203 return LatticeIt->second;
204 }
205
206 std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
207 BasicBlock *BB) const {
208 const BlockCacheEntry *Entry = getBlockEntry(BB);
209 if (!Entry)
210 return std::nullopt;
211
212 if (Entry->OverDefined.count(V))
213 return ValueLatticeElement::getOverdefined();
214
215 auto LatticeIt = Entry->LatticeElements.find_as(Val: V);
216 if (LatticeIt == Entry->LatticeElements.end())
217 return std::nullopt;
218
219 return LatticeIt->second;
220 }
221
222 bool
223 isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
224 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
225 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
226 if (!Entry->NonNullPointers) {
227 Entry->NonNullPointers = InitFn(BB);
228 for (Value *V : *Entry->NonNullPointers)
229 addValueHandle(Val: V);
230 }
231
232 return Entry->NonNullPointers->count(V);
233 }
234
235 /// clear - Empty the cache.
236 void clear() {
237 BlockCache.clear();
238 ValueHandles.clear();
239 }
240
241 /// Inform the cache that a given value has been deleted.
242 void eraseValue(Value *V);
243
244 /// This is part of the update interface to inform the cache
245 /// that a block has been deleted.
246 void eraseBlock(BasicBlock *BB);
247
248 /// Updates the cache to remove any influence an overdefined value in
249 /// OldSucc might have (unless also overdefined in NewSucc). This just
250 /// flushes elements from the cache and does not add any.
251 void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
252};
253} // namespace
254
255void LazyValueInfoCache::eraseValue(Value *V) {
256 for (auto &Pair : BlockCache) {
257 Pair.second->LatticeElements.erase(Val: V);
258 Pair.second->OverDefined.erase(V);
259 if (Pair.second->NonNullPointers)
260 Pair.second->NonNullPointers->erase(V);
261 if (PerPredRanges)
262 Pair.second->PredecessorLatticeElements->erase(Val: V);
263 }
264
265 auto HandleIt = ValueHandles.find_as(Val: V);
266 if (HandleIt != ValueHandles.end())
267 ValueHandles.erase(I: HandleIt);
268}
269
// Callback fired when the tracked Value is destroyed (or RAUW'd, which
// forwards here): purge every cache entry that still refers to it.
void LVIValueHandle::deleted() {
  // This erasure deallocates *this, so it MUST happen after we're done
  // using any and all members of *this.
  Parent->eraseValue(V: *this);
}
275
276void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
277 // Clear all when a BB is removed.
278 if (PerPredRanges)
279 for (auto &Pair : BlockCache)
280 Pair.second->PredecessorLatticeElements->clear();
281 BlockCache.erase(Val: BB);
282}
283
284void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
285 BasicBlock *NewSucc) {
286 // When an edge in the graph has been threaded, values that we could not
287 // determine a value for before (i.e. were marked overdefined) may be
288 // possible to solve now. We do NOT try to proactively update these values.
289 // Instead, we clear their entries from the cache, and allow lazy updating to
290 // recompute them when needed.
291
292 // The updating process is fairly simple: we need to drop cached info
293 // for all values that were marked overdefined in OldSucc, and for those same
294 // values in any successor of OldSucc (except NewSucc) in which they were
295 // also marked overdefined.
296 std::vector<BasicBlock*> worklist;
297 worklist.push_back(x: OldSucc);
298
299 const BlockCacheEntry *Entry = getBlockEntry(BB: OldSucc);
300 if (!Entry || Entry->OverDefined.empty())
301 return; // Nothing to process here.
302 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
303 Entry->OverDefined.end());
304
305 // Use a worklist to perform a depth-first search of OldSucc's successors.
306 // NOTE: We do not need a visited list since any blocks we have already
307 // visited will have had their overdefined markers cleared already, and we
308 // thus won't loop to their successors.
309 while (!worklist.empty()) {
310 BasicBlock *ToUpdate = worklist.back();
311 worklist.pop_back();
312
313 // Skip blocks only accessible through NewSucc.
314 if (ToUpdate == NewSucc) continue;
315
316 // If a value was marked overdefined in OldSucc, and is here too...
317 auto OI = BlockCache.find_as(Val: ToUpdate);
318 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
319 continue;
320 auto &ValueSet = OI->second->OverDefined;
321
322 bool changed = false;
323 for (Value *V : ValsToClear) {
324 if (!ValueSet.erase(V))
325 continue;
326
327 // If we removed anything, then we potentially need to update
328 // blocks successors too.
329 changed = true;
330 }
331
332 if (!changed) continue;
333
334 llvm::append_range(C&: worklist, R: successors(BB: ToUpdate));
335 }
336}
337
338namespace llvm {
339namespace {
340/// An assembly annotator class to print LazyValueCache information in
341/// comments.
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
  // Implementation queried for lattice values; not owned.
  LazyValueInfoImpl *LVIImpl;
  // While analyzing which blocks we can solve values for, we need the dominator
  // information.
  DominatorTree &DT;

public:
  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
      : LVIImpl(L), DT(DTree) {}

  // AssemblyAnnotationWriter hook: emit LVI annotations at the start of each
  // basic block. Defined out of line.
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override;

  // AssemblyAnnotationWriter hook: emit LVI annotations for each
  // instruction. Defined out of line.
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override;
};
358} // namespace
359// The actual implementation of the lazy analysis and update.
class LazyValueInfoImpl {

  /// Cached results from previous queries
  LazyValueInfoCache TheCache;

  /// This stack holds the state of the value solver during a query.
  /// It basically emulates the callstack of the naive
  /// recursive value lookup process.
  SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;

  /// Keeps track of which block-value pairs are in BlockValueStack.
  DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;

  /// Push BV onto BlockValueStack unless it's already in there.
  /// Returns true on success.
  bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
    if (!BlockValueSet.insert(V: BV).second)
      return false; // It's already in the stack.

    LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
                      << BV.first->getName() << "\n");
    BlockValueStack.push_back(Elt: BV);
    return true;
  }

  AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
  const DataLayout &DL; ///< A mandatory DataLayout

  /// Declaration of the llvm.experimental.guard() intrinsic,
  /// if it exists in the module.
  Function *GuardDecl;

  // Fetch the lattice value of Val at entry to BB (refined by assumes/guards
  // at CxtI); returns nullopt if work was pushed and must be solved first.
  std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
                                                   Instruction *CxtI);
  // Lattice value of V along the CFG edge F -> T.
  std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
                                                  BasicBlock *T,
                                                  Instruction *CxtI = nullptr);

  // These methods process one work item and may add more. A false value
  // returned means that the work item was not completely processed and must
  // be revisited after going through the new items.
  bool solveBlockValue(Value *Val, BasicBlock *BB);
  // Dispatches on the kind of instruction defining Val; each solver below
  // handles one instruction class. All return nullopt when dependent work
  // was pushed onto the stack.
  std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
                                                         BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
                                                             BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
                                                            BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
                                                           BasicBlock *BB);
  std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
                                           BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
      Instruction *I, BasicBlock *BB,
      std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
          OpFn);
  std::optional<ValueLatticeElement>
  solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
                                                         BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
                                                              BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueInsertElement(InsertElementInst *IEI, BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
  bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
  // Refine BBLV in place using @llvm.assume calls and guard intrinsics that
  // hold at BBI.
  void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                     ValueLatticeElement &BBLV,
                                                     Instruction *BBI);

  // Drain BlockValueStack until empty (or the per-value work throttle is
  // hit), caching a result for every processed pair.
  void solve();

  // For the following methods, if UseBlockValue is true, the function may
  // push additional values to the worklist and return nullopt. If
  // UseBlockValue is false, it will never return nullopt.

  std::optional<ValueLatticeElement>
  getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
                                  const APInt &Offset, Instruction *CxtI,
                                  bool UseBlockValue);

  std::optional<ValueLatticeElement>
  getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
                            bool UseBlockValue);
  ValueLatticeElement getValueFromTrunc(Value *Val, TruncInst *Trunc,
                                        bool IsTrueDest);

  std::optional<ValueLatticeElement>
  getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
                        bool UseBlockValue, unsigned Depth = 0);

  std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
                                                       BasicBlock *BBFrom,
                                                       BasicBlock *BBTo,
                                                       bool UseBlockValue);

public:
  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the context instruction (if specified) or at the
  /// start of the block.
  ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
                                      Instruction *CxtI = nullptr);

  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the specified instruction using only information
  /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
  /// recursive query is performed.
  ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* that is true on the specified edge.
  ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
                                     BasicBlock *ToBB,
                                     Instruction *CxtI = nullptr);

  ValueLatticeElement getValueAtUse(const Use &U);

  /// Complete flush all previously computed values
  void clear() {
    TheCache.clear();
  }

  /// Printing the LazyValueInfo Analysis.
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
    LazyValueInfoAnnotatedWriter Writer(this, DTree);
    F.print(OS, AAW: &Writer);
  }

  /// This is part of the update interface to remove information related to this
  /// value from the cache.
  void forgetValue(Value *V) { TheCache.eraseValue(V); }

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB) {
    TheCache.eraseBlock(BB);
  }

  /// This is the update interface to inform the cache that an edge from
  /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
  void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);

  // AC and DL are required; GuardDecl may be null if the module declares no
  // llvm.experimental.guard intrinsic.
  LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
                    Function *GuardDecl)
      : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
};
509} // namespace llvm
510
511void LazyValueInfoImpl::solve() {
512 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack =
513 BlockValueStack;
514
515 unsigned processedCount = 0;
516 while (!BlockValueStack.empty()) {
517 processedCount++;
518 // Abort if we have to process too many values to get a result for this one.
519 // Because of the design of the overdefined cache currently being per-block
520 // to avoid naming-related issues (IE it wants to try to give different
521 // results for the same name in different blocks), overdefined results don't
522 // get cached globally, which in turn means we will often try to rediscover
523 // the same overdefined result again and again. Once something like
524 // PredicateInfo is used in LVI or CVP, we should be able to make the
525 // overdefined cache global, and remove this throttle.
526 if (processedCount > MaxProcessedPerValue) {
527 LLVM_DEBUG(
528 dbgs() << "Giving up on stack because we are getting too deep\n");
529 // Fill in the original values
530 while (!StartingStack.empty()) {
531 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
532 TheCache.insertResult(Val: e.second, BB: e.first,
533 Result: ValueLatticeElement::getOverdefined());
534 StartingStack.pop_back();
535 }
536 BlockValueSet.clear();
537 BlockValueStack.clear();
538 return;
539 }
540 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
541 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
542 unsigned StackSize = BlockValueStack.size();
543 (void) StackSize;
544
545 if (solveBlockValue(Val: e.second, BB: e.first)) {
546 // The work item was completely processed.
547 assert(BlockValueStack.size() == StackSize &&
548 BlockValueStack.back() == e && "Nothing should have been pushed!");
549#ifndef NDEBUG
550 std::optional<ValueLatticeElement> BBLV =
551 TheCache.getCachedValueInfo(e.second, e.first);
552 assert(BBLV && "Result should be in cache!");
553 LLVM_DEBUG(
554 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
555 << *BBLV << "\n");
556#endif
557
558 BlockValueStack.pop_back();
559 BlockValueSet.erase(V: e);
560 } else {
561 // More work needs to be done before revisiting.
562 assert(BlockValueStack.size() == StackSize + 1 &&
563 "Exactly one element should have been pushed!");
564 }
565 }
566}
567
568std::optional<ValueLatticeElement>
569LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
570 Instruction *CxtI) {
571 // If already a constant, there is nothing to compute.
572 if (Constant *VC = dyn_cast<Constant>(Val))
573 return ValueLatticeElement::get(C: VC);
574
575 if (std::optional<ValueLatticeElement> OptLatticeVal =
576 TheCache.getCachedValueInfo(V: Val, BB)) {
577 intersectAssumeOrGuardBlockValueConstantRange(Val, BBLV&: *OptLatticeVal, BBI: CxtI);
578 return OptLatticeVal;
579 }
580
581 // We have hit a cycle, assume overdefined.
582 if (!pushBlockValue(BV: { BB, Val }))
583 return ValueLatticeElement::getOverdefined();
584
585 // Yet to be resolved.
586 return std::nullopt;
587}
588
/// Derive a lattice value from range information attached to the defining
/// instruction: a range attribute on a call/invoke return, or !range
/// metadata on a call/invoke/load of integer type.
static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
  switch (BBI->getOpcode()) {
  default:
    break;
  case Instruction::Call:
  case Instruction::Invoke:
    // Prefer the range attribute on the returned value, if present.
    if (std::optional<ConstantRange> Range = cast<CallBase>(Val: BBI)->getRange())
      return ValueLatticeElement::getRange(CR: *Range);
    // Otherwise fall through: calls may also carry !range metadata, handled
    // by the same code path as loads.
    [[fallthrough]];
  case Instruction::Load:
    if (MDNode *Ranges = BBI->getMetadata(KindID: LLVMContext::MD_range))
      if (isa<IntegerType>(Val: BBI->getType())) {
        return ValueLatticeElement::getRange(
            CR: getConstantRangeFromMetadata(RangeMD: *Ranges));
      }
    break;
  };
  // Nothing known - will be intersected with other facts
  return ValueLatticeElement::getOverdefined();
}
609
610bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
611 assert(!isa<Constant>(Val) && "Value should not be constant");
612 assert(!TheCache.getCachedValueInfo(Val, BB) &&
613 "Value should not be in cache");
614
615 // Hold off inserting this value into the Cache in case we have to return
616 // false and come back later.
617 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
618 if (!Res)
619 // Work pushed, will revisit
620 return false;
621
622 TheCache.insertResult(Val, BB, Result: *Res);
623 return true;
624}
625
626std::optional<ValueLatticeElement>
627LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
628 Instruction *BBI = dyn_cast<Instruction>(Val);
629 if (!BBI || BBI->getParent() != BB)
630 return solveBlockValueNonLocal(Val, BB);
631
632 if (PHINode *PN = dyn_cast<PHINode>(Val: BBI))
633 return solveBlockValuePHINode(PN, BB);
634
635 if (auto *SI = dyn_cast<SelectInst>(Val: BBI))
636 return solveBlockValueSelect(S: SI, BB);
637
638 // If this value is a nonnull pointer, record it's range and bailout. Note
639 // that for all other pointer typed values, we terminate the search at the
640 // definition. We could easily extend this to look through geps, bitcasts,
641 // and the like to prove non-nullness, but it's not clear that's worth it
642 // compile time wise. The context-insensitive value walk done inside
643 // isKnownNonZero gets most of the profitable cases at much less expense.
644 // This does mean that we have a sensitivity to where the defining
645 // instruction is placed, even if it could legally be hoisted much higher.
646 // That is unfortunate.
647 PointerType *PT = dyn_cast<PointerType>(Val: BBI->getType());
648 if (PT && isKnownNonZero(V: BBI, Q: DL))
649 return ValueLatticeElement::getNot(C: ConstantPointerNull::get(T: PT));
650
651 if (BBI->getType()->isIntOrIntVectorTy()) {
652 if (auto *CI = dyn_cast<CastInst>(Val: BBI))
653 return solveBlockValueCast(CI, BB);
654
655 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: BBI))
656 return solveBlockValueBinaryOp(BBI: BO, BB);
657
658 if (auto *IEI = dyn_cast<InsertElementInst>(Val: BBI))
659 return solveBlockValueInsertElement(IEI, BB);
660
661 if (auto *EVI = dyn_cast<ExtractValueInst>(Val: BBI))
662 return solveBlockValueExtractValue(EVI, BB);
663
664 if (auto *II = dyn_cast<IntrinsicInst>(Val: BBI))
665 return solveBlockValueIntrinsic(II, BB);
666 }
667
668 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
669 << "' - unknown inst def found.\n");
670 return getFromRangeMetadata(BBI);
671}
672
673static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet,
674 bool IsDereferenced = true) {
675 // TODO: Use NullPointerIsDefined instead.
676 if (Ptr->getType()->getPointerAddressSpace() == 0)
677 PtrSet.insert(V: IsDereferenced ? getUnderlyingObject(V: Ptr)
678 : Ptr->stripInBoundsOffsets());
679}
680
681static void AddNonNullPointersByInstruction(
682 Instruction *I, NonNullPointerSet &PtrSet) {
683 if (LoadInst *L = dyn_cast<LoadInst>(Val: I)) {
684 AddNonNullPointer(Ptr: L->getPointerOperand(), PtrSet);
685 } else if (StoreInst *S = dyn_cast<StoreInst>(Val: I)) {
686 AddNonNullPointer(Ptr: S->getPointerOperand(), PtrSet);
687 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Val: I)) {
688 if (MI->isVolatile()) return;
689
690 // FIXME: check whether it has a valuerange that excludes zero?
691 ConstantInt *Len = dyn_cast<ConstantInt>(Val: MI->getLength());
692 if (!Len || Len->isZero()) return;
693
694 AddNonNullPointer(Ptr: MI->getRawDest(), PtrSet);
695 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Val: MI))
696 AddNonNullPointer(Ptr: MTI->getRawSource(), PtrSet);
697 } else if (auto *CB = dyn_cast<CallBase>(Val: I)) {
698 for (auto &U : CB->args()) {
699 if (U->getType()->isPointerTy() &&
700 CB->paramHasNonNullAttr(ArgNo: CB->getArgOperandNo(U: &U),
701 /*AllowUndefOrPoison=*/false))
702 AddNonNullPointer(Ptr: U.get(), PtrSet, /*IsDereferenced=*/false);
703 }
704 }
705}
706
707bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
708 if (NullPointerIsDefined(F: BB->getParent(),
709 AS: Val->getType()->getPointerAddressSpace()))
710 return false;
711
712 Val = Val->stripInBoundsOffsets();
713 return TheCache.isNonNullAtEndOfBlock(V: Val, BB, InitFn: [](BasicBlock *BB) {
714 NonNullPointerSet NonNullPointers;
715 for (Instruction &I : *BB)
716 AddNonNullPointersByInstruction(I: &I, PtrSet&: NonNullPointers);
717 return NonNullPointers;
718 });
719}
720
721std::optional<ValueLatticeElement>
722LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
723 ValueLatticeElement Result; // Start Undefined.
724
725 // If this is the entry block, we must be asking about an argument.
726 if (BB->isEntryBlock()) {
727 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
728 if (std::optional<ConstantRange> Range = cast<Argument>(Val)->getRange())
729 return ValueLatticeElement::getRange(CR: *Range);
730 return ValueLatticeElement::getOverdefined();
731 }
732
733 // Loop over all of our predecessors, merging what we know from them into
734 // result. If we encounter an unexplored predecessor, we eagerly explore it
735 // in a depth first manner. In practice, this has the effect of discovering
736 // paths we can't analyze eagerly without spending compile times analyzing
737 // other paths. This heuristic benefits from the fact that predecessors are
738 // frequently arranged such that dominating ones come first and we quickly
739 // find a path to function entry. TODO: We should consider explicitly
740 // canonicalizing to make this true rather than relying on this happy
741 // accident.
742 std::optional<BBLatticeElementMap> PredLatticeElements;
743 if (PerPredRanges)
744 PredLatticeElements = std::make_optional<BBLatticeElementMap>();
745 for (BasicBlock *Pred : predecessors(BB)) {
746 // Skip self loops.
747 if (Pred == BB)
748 continue;
749 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(V: Val, F: Pred, T: BB);
750 if (!EdgeResult)
751 // Explore that input, then return here
752 return std::nullopt;
753
754 Result.mergeIn(RHS: *EdgeResult);
755
756 // If we hit overdefined, exit early. The BlockVals entry is already set
757 // to overdefined.
758 if (Result.isOverdefined()) {
759 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
760 << "' - overdefined because of pred '"
761 << Pred->getName() << "' (non local).\n");
762 return Result;
763 }
764 if (PerPredRanges)
765 PredLatticeElements->insert(KV: {Pred, *EdgeResult});
766 }
767
768 if (PerPredRanges)
769 TheCache.insertPredecessorResults(Val, BB, PredLatticeElements&: *PredLatticeElements);
770
771 // Return the merged value, which is more precise than 'overdefined'.
772 assert(!Result.isOverdefined());
773 return Result;
774}
775
776std::optional<ValueLatticeElement>
777LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
778 ValueLatticeElement Result; // Start Undefined.
779
780 // Loop over all of our predecessors, merging what we know from them into
781 // result. See the comment about the chosen traversal order in
782 // solveBlockValueNonLocal; the same reasoning applies here.
783 std::optional<BBLatticeElementMap> PredLatticeElements;
784 if (PerPredRanges)
785 PredLatticeElements = std::make_optional<BBLatticeElementMap>();
786 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
787 BasicBlock *PhiBB = PN->getIncomingBlock(i);
788 Value *PhiVal = PN->getIncomingValue(i);
789 // Note that we can provide PN as the context value to getEdgeValue, even
790 // though the results will be cached, because PN is the value being used as
791 // the cache key in the caller.
792 std::optional<ValueLatticeElement> EdgeResult =
793 getEdgeValue(V: PhiVal, F: PhiBB, T: BB, CxtI: PN);
794 if (!EdgeResult)
795 // Explore that input, then return here
796 return std::nullopt;
797
798 Result.mergeIn(RHS: *EdgeResult);
799
800 // If we hit overdefined, exit early. The BlockVals entry is already set
801 // to overdefined.
802 if (Result.isOverdefined()) {
803 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
804 << "' - overdefined because of pred (local).\n");
805
806 return Result;
807 }
808
809 if (PerPredRanges)
810 PredLatticeElements->insert(KV: {PhiBB, *EdgeResult});
811 }
812
813 if (PerPredRanges)
814 TheCache.insertPredecessorResults(Val: PN, BB, PredLatticeElements&: *PredLatticeElements);
815
816 // Return the merged value, which is more precise than 'overdefined'.
817 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
818 return Result;
819}
820
// If we can determine a constraint on the value given conditions assumed by
// the program (llvm.assume calls and experimental guards dominating BBI),
// intersect those constraints with BBLV.
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
    Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
  // With no explicit context instruction, anchor at Val itself if it is an
  // instruction; otherwise there is no program point to scope assumes to.
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
  if (!BBI)
    return;

  BasicBlock *BB = BBI->getParent();
  for (auto &AssumeVH : AC->assumptionsFor(V: Val)) {
    if (!AssumeVH)
      continue;

    // Only check assumes in the block of the context instruction. Other
    // assumes will have already been taken into account when the value was
    // propagated from predecessor blocks.
    auto *I = cast<AssumeInst>(Val&: AssumeVH);

    if (I->getParent() != BB || !isValidAssumeForContext(I, CxtI: BBI))
      continue;

    if (AssumeVH.Index != AssumptionCache::ExprResultIdx) {
      // Val is referenced via an operand bundle (e.g. "nonnull",
      // "dereferenceable") rather than through the assume's condition.
      if (RetainedKnowledge RK = getKnowledgeFromBundle(
              Assume&: *I, BOI: I->bundle_op_info_begin()[AssumeVH.Index])) {
        if (RK.WasOn != Val)
          continue;
        switch (RK.AttrKind) {
        case Attribute::NonNull:
          BBLV = BBLV.intersect(Other: ValueLatticeElement::getNot(
              C: Constant::getNullValue(Ty: RK.WasOn->getType())));
          break;

        case Attribute::Dereferenceable:
          // Dereferenceable for a non-zero number of bytes implies non-null.
          if (auto *CI = dyn_cast<ConstantInt>(Val: RK.IRArgValue);
              CI && !CI->isZero())
            BBLV = BBLV.intersect(Other: ValueLatticeElement::getNot(
                C: Constant::getNullValue(Ty: RK.WasOn->getType())));
          break;

        default:
          break;
        }
      }
    } else {
      // The assume's condition expression itself constrains Val.
      BBLV = BBLV.intersect(Other: *getValueFromCondition(Val, Cond: I->getArgOperand(i: 0),
                                                  /*IsTrueDest*/ true,
                                                  /*UseBlockValue*/ false));
    }
  }

  // If guards are not used in the module, don't spend time looking for them
  if (GuardDecl && !GuardDecl->use_empty() &&
      BBI->getIterator() != BB->begin()) {
    // Walk backwards from BBI: any guard that already executed before BBI
    // implies its condition held on this path.
    for (Instruction &I :
         make_range(x: std::next(x: BBI->getIterator().getReverse()), y: BB->rend())) {
      Value *Cond = nullptr;
      if (match(V: &I, P: m_Intrinsic<Intrinsic::experimental_guard>(Op0: m_Value(V&: Cond))))
        BBLV = BBLV.intersect(Other: *getValueFromCondition(Val, Cond,
                                                    /*IsTrueDest*/ true,
                                                    /*UseBlockValue*/ false));
    }
  }

  if (BBLV.isOverdefined()) {
    // Check whether we're checking at the terminator, and the pointer has
    // been dereferenced in this block.
    PointerType *PTy = dyn_cast<PointerType>(Val: Val->getType());
    if (PTy && BB->getTerminator() == BBI &&
        isNonNullAtEndOfBlock(Val, BB))
      BBLV = ValueLatticeElement::getNot(C: ConstantPointerNull::get(T: PTy));
  }
}
893
// Compute the lattice value of a select: merge the two arms, after first
// tightening them via min/max/abs idioms and via the select condition itself.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
  // Recurse on our inputs if needed
  std::optional<ValueLatticeElement> OptTrueVal =
      getBlockValue(Val: SI->getTrueValue(), BB, CxtI: SI);
  if (!OptTrueVal)
    return std::nullopt;
  ValueLatticeElement &TrueVal = *OptTrueVal;

  std::optional<ValueLatticeElement> OptFalseVal =
      getBlockValue(Val: SI->getFalseValue(), BB, CxtI: SI);
  if (!OptFalseVal)
    return std::nullopt;
  ValueLatticeElement &FalseVal = *OptFalseVal;

  if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
    const ConstantRange &TrueCR = TrueVal.asConstantRange(Ty: SI->getType());
    const ConstantRange &FalseCR = FalseVal.asConstantRange(Ty: SI->getType());
    Value *LHS = nullptr;
    Value *RHS = nullptr;
    SelectPatternResult SPR = matchSelectPattern(V: SI, LHS, RHS);
    // Is this a min specifically of our two inputs? (Avoid the risk of
    // ValueTracking getting smarter looking back past our immediate inputs.)
    if (SelectPatternResult::isMinOrMax(SPF: SPR.Flavor) &&
        ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
         (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
      // Apply the matching ConstantRange min/max op to the arm ranges.
      ConstantRange ResultCR = [&]() {
        switch (SPR.Flavor) {
        default:
          llvm_unreachable("unexpected minmax type!");
        case SPF_SMIN: /// Signed minimum
          return TrueCR.smin(Other: FalseCR);
        case SPF_UMIN: /// Unsigned minimum
          return TrueCR.umin(Other: FalseCR);
        case SPF_SMAX: /// Signed maximum
          return TrueCR.smax(Other: FalseCR);
        case SPF_UMAX: /// Unsigned maximum
          return TrueCR.umax(Other: FalseCR);
        };
      }();
      return ValueLatticeElement::getRange(
          CR: ResultCR, MayIncludeUndef: TrueVal.isConstantRangeIncludingUndef() ||
                        FalseVal.isConstantRangeIncludingUndef());
    }

    // abs(x): take the absolute value of the selected arm's range.
    if (SPR.Flavor == SPF_ABS) {
      if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            CR: TrueCR.abs(), MayIncludeUndef: TrueVal.isConstantRangeIncludingUndef());
      if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            CR: FalseCR.abs(), MayIncludeUndef: FalseVal.isConstantRangeIncludingUndef());
    }

    // nabs(x) = 0 - abs(x).
    // NOTE(review): the true-arm branch pairs TrueCR with FalseVal's undef
    // flag; presumably it should mirror the SPF_ABS case — confirm.
    if (SPR.Flavor == SPF_NABS) {
      ConstantRange Zero(APInt::getZero(numBits: TrueCR.getBitWidth()));
      if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            CR: Zero.sub(Other: TrueCR.abs()), MayIncludeUndef: FalseVal.isConstantRangeIncludingUndef());
      if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            CR: Zero.sub(Other: FalseCR.abs()), MayIncludeUndef: FalseVal.isConstantRangeIncludingUndef());
    }
  }

  // Can we constrain the facts about the true and false values by using the
  // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
  // TODO: We could potentially refine an overdefined true value above.
  Value *Cond = SI->getCondition();
  // If the value is undef, a different value may be chosen in
  // the select condition.
  if (isGuaranteedNotToBeUndef(V: Cond, AC)) {
    TrueVal =
        TrueVal.intersect(Other: *getValueFromCondition(Val: SI->getTrueValue(), Cond,
                                               /*IsTrueDest*/ true,
                                               /*UseBlockValue*/ false));
    FalseVal =
        FalseVal.intersect(Other: *getValueFromCondition(Val: SI->getFalseValue(), Cond,
                                                /*IsTrueDest*/ false,
                                                /*UseBlockValue*/ false));
  }

  // The select may yield either arm, so the result is their merge (union).
  TrueVal.mergeIn(RHS: FalseVal);
  return TrueVal;
}
979
980std::optional<ConstantRange>
981LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
982 std::optional<ValueLatticeElement> OptVal = getBlockValue(Val: V, BB, CxtI);
983 if (!OptVal)
984 return std::nullopt;
985 return OptVal->asConstantRange(Ty: V->getType());
986}
987
988std::optional<ValueLatticeElement>
989LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
990 // Filter out casts we don't know how to reason about before attempting to
991 // recurse on our operand. This can cut a long search short if we know we're
992 // not going to be able to get any useful information anways.
993 switch (CI->getOpcode()) {
994 case Instruction::Trunc:
995 case Instruction::SExt:
996 case Instruction::ZExt:
997 break;
998 default:
999 // Unhandled instructions are overdefined.
1000 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1001 << "' - overdefined (unknown cast).\n");
1002 return ValueLatticeElement::getOverdefined();
1003 }
1004
1005 // Figure out the range of the LHS. If that fails, we still apply the
1006 // transfer rule on the full set since we may be able to locally infer
1007 // interesting facts.
1008 std::optional<ConstantRange> LHSRes = getRangeFor(V: CI->getOperand(i_nocapture: 0), CxtI: CI, BB);
1009 if (!LHSRes)
1010 // More work to do before applying this transfer rule.
1011 return std::nullopt;
1012 const ConstantRange &LHSRange = *LHSRes;
1013
1014 const unsigned ResultBitWidth = CI->getType()->getScalarSizeInBits();
1015
1016 // NOTE: We're currently limited by the set of operations that ConstantRange
1017 // can evaluate symbolically. Enhancing that set will allows us to analyze
1018 // more definitions.
1019 ConstantRange Res = ConstantRange::getEmpty(BitWidth: ResultBitWidth);
1020 if (auto *Trunc = dyn_cast<TruncInst>(Val: CI))
1021 Res = LHSRange.truncate(BitWidth: ResultBitWidth, NoWrapKind: Trunc->getNoWrapKind());
1022 else
1023 Res = LHSRange.castOp(CastOp: CI->getOpcode(), BitWidth: ResultBitWidth);
1024
1025 return ValueLatticeElement::getRange(CR: Res);
1026}
1027
1028std::optional<ValueLatticeElement>
1029LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
1030 Instruction *I, BasicBlock *BB,
1031 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
1032 OpFn) {
1033 Value *LHS = I->getOperand(i: 0);
1034 Value *RHS = I->getOperand(i: 1);
1035
1036 auto ThreadBinOpOverSelect =
1037 [&](Value *X, const ConstantRange &CRX, SelectInst *Y,
1038 bool XIsLHS) -> std::optional<ValueLatticeElement> {
1039 Value *Cond = Y->getCondition();
1040 // Only handle selects with constant values.
1041 Constant *TrueC = dyn_cast<Constant>(Val: Y->getTrueValue());
1042 if (!TrueC)
1043 return std::nullopt;
1044 Constant *FalseC = dyn_cast<Constant>(Val: Y->getFalseValue());
1045 if (!FalseC)
1046 return std::nullopt;
1047 if (!isGuaranteedNotToBeUndef(V: Cond, AC))
1048 return std::nullopt;
1049
1050 ConstantRange TrueX =
1051 CRX.intersectWith(CR: getValueFromCondition(Val: X, Cond, /*CondIsTrue=*/IsTrueDest: true,
1052 /*UseBlockValue=*/false)
1053 ->asConstantRange(Ty: X->getType()));
1054 ConstantRange FalseX =
1055 CRX.intersectWith(CR: getValueFromCondition(Val: X, Cond, /*CondIsTrue=*/IsTrueDest: false,
1056 /*UseBlockValue=*/false)
1057 ->asConstantRange(Ty: X->getType()));
1058 ConstantRange TrueY = TrueC->toConstantRange();
1059 ConstantRange FalseY = FalseC->toConstantRange();
1060
1061 if (XIsLHS)
1062 return ValueLatticeElement::getRange(
1063 CR: OpFn(TrueX, TrueY).unionWith(CR: OpFn(FalseX, FalseY)));
1064 return ValueLatticeElement::getRange(
1065 CR: OpFn(TrueY, TrueX).unionWith(CR: OpFn(FalseY, FalseX)));
1066 };
1067
1068 // Figure out the ranges of the operands. If that fails, use a
1069 // conservative range, but apply the transfer rule anyways. This
1070 // lets us pick up facts from expressions like "and i32 (call i32
1071 // @foo()), 32"
1072 std::optional<ConstantRange> LHSRes = getRangeFor(V: LHS, CxtI: I, BB);
1073 if (!LHSRes)
1074 return std::nullopt;
1075
1076 // Try to thread binop over rhs select
1077 if (auto *SI = dyn_cast<SelectInst>(Val: RHS)) {
1078 if (auto Res = ThreadBinOpOverSelect(LHS, *LHSRes, SI, /*XIsLHS=*/true))
1079 return *Res;
1080 }
1081
1082 std::optional<ConstantRange> RHSRes = getRangeFor(V: RHS, CxtI: I, BB);
1083 if (!RHSRes)
1084 return std::nullopt;
1085
1086 // Try to thread binop over lhs select
1087 if (auto *SI = dyn_cast<SelectInst>(Val: LHS)) {
1088 if (auto Res = ThreadBinOpOverSelect(RHS, *RHSRes, SI, /*XIsLHS=*/false))
1089 return *Res;
1090 }
1091
1092 const ConstantRange &LHSRange = *LHSRes;
1093 const ConstantRange &RHSRange = *RHSRes;
1094
1095 std::optional<ValueLatticeElement> MergedResult =
1096 ValueLatticeElement::getRange(CR: OpFn(LHSRange, RHSRange));
1097
1098 if (!PerPredRanges)
1099 return MergedResult;
1100
1101 std::optional<BBLatticeElementMap> PredLHS =
1102 TheCache.getCachedPredecessorInfo(V: LHS, BB);
1103 if (!PredLHS)
1104 return MergedResult;
1105 std::optional<BBLatticeElementMap> PredRHS =
1106 TheCache.getCachedPredecessorInfo(V: RHS, BB);
1107 if (!PredRHS)
1108 return MergedResult;
1109
1110 const BBLatticeElementMap &LHSPredMap = *PredLHS;
1111 const BBLatticeElementMap &RHSPredMap = *PredRHS;
1112
1113 BBLatticeElementMap PredLatticeElements;
1114 ValueLatticeElement OverallPredResult;
1115 for (auto *Pred : predecessors(BB)) {
1116 auto LHSIt = LHSPredMap.find_as(Val: Pred);
1117 if (LHSIt == LHSPredMap.end())
1118 return MergedResult;
1119 const ValueLatticeElement &LHSFromPred = LHSIt->second;
1120 std::optional<ConstantRange> LHSFromPredRes =
1121 LHSFromPred.asConstantRange(Ty: LHS->getType());
1122 if (!LHSFromPredRes)
1123 return MergedResult;
1124
1125 auto RHSIt = RHSPredMap.find_as(Val: Pred);
1126 if (RHSIt == RHSPredMap.end())
1127 return MergedResult;
1128 const ValueLatticeElement &RHSFromPred = RHSIt->second;
1129 std::optional<ConstantRange> RHSFromPredRes =
1130 RHSFromPred.asConstantRange(Ty: RHS->getType());
1131 if (!RHSFromPredRes)
1132 return MergedResult;
1133
1134 const ConstantRange &LHSFromPredRange = *LHSFromPredRes;
1135 const ConstantRange &RHSFromPredRange = *RHSFromPredRes;
1136 std::optional<ValueLatticeElement> PredResult =
1137 ValueLatticeElement::getRange(CR: OpFn(LHSFromPredRange, RHSFromPredRange));
1138 if (!PredResult)
1139 return MergedResult;
1140 if (PredResult->isOverdefined()) {
1141 LLVM_DEBUG(
1142 dbgs() << " pred BB '" << Pred->getName() << "' for BB '"
1143 << BB->getName()
1144 << "' overdefined. Discarding all predecessor intervals.\n");
1145 return MergedResult;
1146 }
1147 PredLatticeElements.insert(KV: {Pred, *PredResult});
1148 OverallPredResult.mergeIn(RHS: *PredResult);
1149 }
1150
1151 // If this point is reached, all predecessors for both LHS and RHS have
1152 // constant ranges previously computed. Can cache result and use the
1153 // OverallPredResult;
1154 TheCache.insertPredecessorResults(Val: I, BB, PredLatticeElements);
1155
1156 LLVM_DEBUG(dbgs() << " Using predecessor intervals, evaluated " << *I
1157 << " to: " << OverallPredResult << ".\n");
1158
1159 if (!MergedResult)
1160 return OverallPredResult;
1161
1162 LLVM_DEBUG(dbgs() << " Intersecting intervals for " << *I << ": "
1163 << OverallPredResult << " and " << MergedResult << ".\n");
1164 return MergedResult->intersect(Other: OverallPredResult);
1165}
1166
1167std::optional<ValueLatticeElement>
1168LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
1169 assert(BO->getOperand(0)->getType()->isSized() &&
1170 "all operands to binary operators are sized");
1171 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Val: BO)) {
1172 unsigned NoWrapKind = OBO->getNoWrapKind();
1173 return solveBlockValueBinaryOpImpl(
1174 I: BO, BB,
1175 OpFn: [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1176 return CR1.overflowingBinaryOp(BinOp: BO->getOpcode(), Other: CR2, NoWrapKind);
1177 });
1178 }
1179
1180 return solveBlockValueBinaryOpImpl(
1181 I: BO, BB, OpFn: [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1182 return CR1.binaryOp(BinOp: BO->getOpcode(), Other: CR2);
1183 });
1184}
1185
1186std::optional<ValueLatticeElement>
1187LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1188 BasicBlock *BB) {
1189 return solveBlockValueBinaryOpImpl(
1190 I: WO, BB, OpFn: [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1191 return CR1.binaryOp(BinOp: WO->getBinaryOp(), Other: CR2);
1192 });
1193}
1194
1195std::optional<ValueLatticeElement>
1196LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1197 ValueLatticeElement MetadataVal = getFromRangeMetadata(BBI: II);
1198 if (!ConstantRange::isIntrinsicSupported(IntrinsicID: II->getIntrinsicID())) {
1199 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1200 << "' - unknown intrinsic.\n");
1201 return MetadataVal;
1202 }
1203
1204 SmallVector<ConstantRange, 2> OpRanges;
1205 for (Value *Op : II->args()) {
1206 std::optional<ConstantRange> Range = getRangeFor(V: Op, CxtI: II, BB);
1207 if (!Range)
1208 return std::nullopt;
1209 OpRanges.push_back(Elt: *Range);
1210 }
1211
1212 return ValueLatticeElement::getRange(
1213 CR: ConstantRange::intrinsic(IntrinsicID: II->getIntrinsicID(), Ops: OpRanges))
1214 .intersect(Other: MetadataVal);
1215}
1216
// Compute the lattice value of an insertelement: merge the inserted scalar
// (treated as an implicit splat) with the source vector's value.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueInsertElement(InsertElementInst *IEI,
                                               BasicBlock *BB) {
  // Operand 1 is the inserted scalar element.
  std::optional<ValueLatticeElement> OptEltVal =
      getBlockValue(Val: IEI->getOperand(i_nocapture: 1), BB, CxtI: IEI);
  if (!OptEltVal)
    return std::nullopt;
  ValueLatticeElement &Res = *OptEltVal;

  // Operand 0 is the vector being inserted into.
  std::optional<ValueLatticeElement> OptVecVal =
      getBlockValue(Val: IEI->getOperand(i_nocapture: 0), BB, CxtI: IEI);
  if (!OptVecVal)
    return std::nullopt;

  // Bail out if the inserted element is a constant expression. Unlike other
  // ValueLattice types, these are not considered an implicit splat when a
  // vector type is used.
  // We could call ConstantFoldInsertElementInstruction here to handle these.
  if (OptEltVal->isConstant())
    return ValueLatticeElement::getOverdefined();

  Res.mergeIn(RHS: *OptVecVal);
  return Res;
}
1241
// Compute the lattice value of an extractvalue. Only the arithmetic result
// (index 0) of with.overflow intrinsics is modeled precisely; other
// aggregates are simplified away or treated as overdefined.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
                                               BasicBlock *BB) {
  // extractvalue(op.with.overflow(a, b), 0) is the wrapped arithmetic result.
  if (auto *WO = dyn_cast<WithOverflowInst>(Val: EVI->getAggregateOperand()))
    if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
      return solveBlockValueOverflowIntrinsic(WO, BB);

  // Handle extractvalue of insertvalue to allow further simplification
  // based on replaced with.overflow intrinsics.
  if (Value *V = simplifyExtractValueInst(
          Agg: EVI->getAggregateOperand(), Idxs: EVI->getIndices(),
          Q: EVI->getDataLayout()))
    return getBlockValue(Val: V, BB, CxtI: EVI);

  LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                    << "' - overdefined (unknown extractvalue).\n");
  return ValueLatticeElement::getOverdefined();
}
1260
1261static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1262 ICmpInst::Predicate Pred) {
1263 if (LHS == Val)
1264 return true;
1265
1266 // Handle range checking idiom produced by InstCombine. We will subtract the
1267 // offset from the allowed range for RHS in this case.
1268 const APInt *C;
1269 if (match(V: LHS, P: m_AddLike(L: m_Specific(V: Val), R: m_APInt(Res&: C)))) {
1270 Offset = *C;
1271 return true;
1272 }
1273
1274 // Handle the symmetric case. This appears in saturation patterns like
1275 // (x == 16) ? 16 : (x + 1).
1276 if (match(V: Val, P: m_AddLike(L: m_Specific(V: LHS), R: m_APInt(Res&: C)))) {
1277 Offset = -*C;
1278 return true;
1279 }
1280
1281 // If (x | y) < C, then (x < C) && (y < C).
1282 if (match(V: LHS, P: m_c_Or(L: m_Specific(V: Val), R: m_Value())) &&
1283 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1284 return true;
1285
1286 // If (x & y) > C, then (x > C) && (y > C).
1287 if (match(V: LHS, P: m_c_And(L: m_Specific(V: Val), R: m_Value())) &&
1288 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1289 return true;
1290
1291 return false;
1292}
1293
1294/// Get value range for a "(Val + Offset) Pred RHS" condition.
1295std::optional<ValueLatticeElement>
1296LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1297 Value *RHS,
1298 const APInt &Offset,
1299 Instruction *CxtI,
1300 bool UseBlockValue) {
1301 ConstantRange RHSRange(RHS->getType()->getScalarSizeInBits(),
1302 /*isFullSet=*/true);
1303 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: RHS)) {
1304 RHSRange = ConstantRange(CI->getValue());
1305 } else if (UseBlockValue) {
1306 std::optional<ValueLatticeElement> R =
1307 getBlockValue(Val: RHS, BB: CxtI->getParent(), CxtI);
1308 if (!R)
1309 return std::nullopt;
1310 RHSRange = R->asConstantRange(Ty: RHS->getType());
1311 }
1312
1313 ConstantRange TrueValues =
1314 ConstantRange::makeAllowedICmpRegion(Pred, Other: RHSRange);
1315 return ValueLatticeElement::getRange(CR: TrueValues.subtract(CI: Offset));
1316}
1317
1318static std::optional<ConstantRange>
1319getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1320 function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1321 bool Invert = false;
1322 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1323 Pred = ICmpInst::getInversePredicate(pred: Pred);
1324 Invert = true;
1325 }
1326 if (Pred == ICmpInst::ICMP_SLE) {
1327 Pred = ICmpInst::ICMP_SLT;
1328 if (RHS.isMaxSignedValue())
1329 return std::nullopt; // Could also return full/empty here, if we wanted.
1330 ++RHS;
1331 }
1332 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1333 if (auto CR = Fn(RHS))
1334 return Invert ? CR->inverse() : CR;
1335 return std::nullopt;
1336}
1337
1338/// Get value range for a "ctpop(Val) Pred RHS" condition.
1339static ValueLatticeElement getValueFromICmpCtpop(ICmpInst::Predicate Pred,
1340 Value *RHS) {
1341 unsigned BitWidth = RHS->getType()->getScalarSizeInBits();
1342
1343 auto *RHSConst = dyn_cast<ConstantInt>(Val: RHS);
1344 if (!RHSConst)
1345 return ValueLatticeElement::getOverdefined();
1346
1347 ConstantRange ResValRange =
1348 ConstantRange::makeExactICmpRegion(Pred, Other: RHSConst->getValue());
1349
1350 unsigned ResMin = ResValRange.getUnsignedMin().getLimitedValue(Limit: BitWidth);
1351 unsigned ResMax = ResValRange.getUnsignedMax().getLimitedValue(Limit: BitWidth);
1352
1353 APInt ValMin = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: ResMin);
1354 APInt ValMax = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: ResMax);
1355 return ValueLatticeElement::getRange(
1356 CR: ConstantRange::getNonEmpty(Lower: std::move(ValMin), Upper: ValMax + 1));
1357}
1358
/// Infer a constraint on Val from the icmp ICI holding (isTrueDest) or
/// failing on the considered edge. Returns std::nullopt only when
/// UseBlockValue is set and a required block value is not yet solved.
std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
    Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
  Value *LHS = ICI->getOperand(i_nocapture: 0);
  Value *RHS = ICI->getOperand(i_nocapture: 1);

  // Get the predicate that must hold along the considered edge.
  CmpInst::Predicate EdgePred =
      isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();

  // Val ==/!= constant pins Val to, or excludes, that constant directly.
  if (isa<Constant>(Val: RHS)) {
    if (ICI->isEquality() && LHS == Val) {
      if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(C: cast<Constant>(Val: RHS));
      else if (!isa<UndefValue>(Val: RHS))
        // "Val != undef" conveys nothing; exclude only real constants.
        return ValueLatticeElement::getNot(C: cast<Constant>(Val: RHS));
    }
  }

  // The range-based reasoning below only applies to scalar integers.
  Type *Ty = Val->getType();
  if (!Ty->isIntegerTy())
    return ValueLatticeElement::getOverdefined();

  // Try "(Val + Offset) EdgePred RHS", with Val on either side of the icmp
  // (swapping the predicate for the symmetric case).
  unsigned BitWidth = Ty->getScalarSizeInBits();
  APInt Offset(BitWidth, 0);
  if (matchICmpOperand(Offset, LHS, Val, Pred: EdgePred))
    return getValueFromSimpleICmpCondition(Pred: EdgePred, RHS, Offset, CxtI: ICI,
                                           UseBlockValue);

  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(pred: EdgePred);
  if (matchICmpOperand(Offset, LHS: RHS, Val, Pred: SwappedPred))
    return getValueFromSimpleICmpCondition(Pred: SwappedPred, RHS: LHS, Offset, CxtI: ICI,
                                           UseBlockValue);

  // ctpop(Val) Pred RHS constrains how many bits of Val can be set.
  if (match(V: LHS, P: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Specific(V: Val))))
    return getValueFromICmpCtpop(Pred: EdgePred, RHS);

  const APInt *Mask, *C;
  if (match(V: LHS, P: m_And(L: m_Specific(V: Val), R: m_APInt(Res&: Mask))) &&
      match(V: RHS, P: m_APInt(Res&: C))) {
    // If (Val & Mask) == C then all the masked bits are known and we can
    // compute a value range based on that.
    if (EdgePred == ICmpInst::ICMP_EQ) {
      KnownBits Known;
      Known.Zero = ~*C & *Mask;
      Known.One = *C & *Mask;
      return ValueLatticeElement::getRange(
          CR: ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
    }

    // (Val & Mask) != C excludes every value whose masked bits equal C.
    if (EdgePred == ICmpInst::ICMP_NE)
      return ValueLatticeElement::getRange(
          CR: ConstantRange::makeMaskNotEqualRange(Mask: *Mask, C: *C));
  }

  // If (X urem Modulus) >= C, then X >= C.
  // If trunc X >= C, then X >= C.
  // TODO: An upper bound could be computed as well.
  if (match(V: LHS, P: m_CombineOr(L: m_URem(L: m_Specific(V: Val), R: m_Value()),
                         R: m_Trunc(Op: m_Specific(V: Val)))) &&
      match(V: RHS, P: m_APInt(Res&: C))) {
    // Use the icmp region so we don't have to deal with different predicates.
    ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred: EdgePred, Other: *C);
    if (!CR.isEmptySet())
      return ValueLatticeElement::getRange(CR: ConstantRange::getNonEmpty(
          Lower: CR.getUnsignedMin().zext(width: BitWidth), Upper: APInt(BitWidth, 0)));
  }

  // Recognize:
  // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
  // Preconditions: (C << ShAmtC) >> ShAmtC == C
  const APInt *ShAmtC;
  if (CmpInst::isSigned(predicate: EdgePred) &&
      match(V: LHS, P: m_AShr(L: m_Specific(V: Val), R: m_APInt(Res&: ShAmtC))) &&
      match(V: RHS, P: m_APInt(Res&: C))) {
    auto CR = getRangeViaSLT(
        Pred: EdgePred, RHS: *C, Fn: [&](const APInt &RHS) -> std::optional<ConstantRange> {
          APInt New = RHS << *ShAmtC;
          // Bail out when the shift is lossy (precondition above fails).
          if ((New.ashr(ShiftAmt: *ShAmtC)) != RHS)
            return std::nullopt;
          return ConstantRange::getNonEmpty(
              Lower: APInt::getSignedMinValue(numBits: New.getBitWidth()), Upper: New);
        });
    if (CR)
      return ValueLatticeElement::getRange(CR: *CR);
  }

  // a - b or ptrtoint(a) - ptrtoint(b) ==/!= 0 if a ==/!= b
  Value *X, *Y;
  if (ICI->isEquality() && match(V: Val, P: m_Sub(L: m_Value(V&: X), R: m_Value(V&: Y)))) {
    // Peek through ptrtoints
    match(V: X, P: m_PtrToIntSameSize(DL, Op: m_Value(V&: X)));
    match(V: Y, P: m_PtrToIntSameSize(DL, Op: m_Value(V&: Y)));
    if ((X == LHS && Y == RHS) || (X == RHS && Y == LHS)) {
      Constant *NullVal = Constant::getNullValue(Ty: Val->getType());
      if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(C: NullVal);
      return ValueLatticeElement::getNot(C: NullVal);
    }
  }

  return ValueLatticeElement::getOverdefined();
}
1461
1462ValueLatticeElement LazyValueInfoImpl::getValueFromTrunc(Value *Val,
1463 TruncInst *Trunc,
1464 bool IsTrueDest) {
1465 assert(Trunc->getType()->isIntOrIntVectorTy(1));
1466
1467 if (Trunc->getOperand(i_nocapture: 0) != Val)
1468 return ValueLatticeElement::getOverdefined();
1469
1470 Type *Ty = Val->getType();
1471
1472 if (Trunc->hasNoUnsignedWrap()) {
1473 if (IsTrueDest)
1474 return ValueLatticeElement::get(C: ConstantInt::get(Ty, V: 1));
1475 return ValueLatticeElement::get(C: Constant::getNullValue(Ty));
1476 }
1477
1478 if (IsTrueDest)
1479 return ValueLatticeElement::getNot(C: Constant::getNullValue(Ty));
1480 return ValueLatticeElement::getNot(C: Constant::getAllOnesValue(Ty));
1481}
1482
1483// Handle conditions of the form
1484// extractvalue(op.with.overflow(%x, C), 1).
1485static ValueLatticeElement getValueFromOverflowCondition(
1486 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1487 // TODO: This only works with a constant RHS for now. We could also compute
1488 // the range of the RHS, but this doesn't fit into the current structure of
1489 // the edge value calculation.
1490 const APInt *C;
1491 if (WO->getLHS() != Val || !match(V: WO->getRHS(), P: m_APInt(Res&: C)))
1492 return ValueLatticeElement::getOverdefined();
1493
1494 // Calculate the possible values of %x for which no overflow occurs.
1495 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1496 BinOp: WO->getBinaryOp(), Other: *C, NoWrapKind: WO->getNoWrapKind());
1497
1498 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1499 // constrained to it's inverse (all values that might cause overflow).
1500 if (IsTrueDest)
1501 NWR = NWR.inverse();
1502 return ValueLatticeElement::getRange(CR: NWR);
1503}
1504
// Recursively compute the constraint on Val implied by Cond evaluating to
// IsTrueDest. Handles icmps, i1 truncs, overflow-bit extracts, and logical
// not/and/or combinations of those, up to MaxAnalysisRecursionDepth.
// Returns std::nullopt only when UseBlockValue is set and a required block
// value is not yet solved.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
                                         bool IsTrueDest, bool UseBlockValue,
                                         unsigned Depth) {
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Val: Cond))
    return getValueFromICmpCondition(Val, ICI, isTrueDest: IsTrueDest, UseBlockValue);

  if (auto *Trunc = dyn_cast<TruncInst>(Val: Cond))
    return getValueFromTrunc(Val, Trunc, IsTrueDest);

  // extractvalue(op.with.overflow(...), 1) is the overflow bit.
  if (auto *EVI = dyn_cast<ExtractValueInst>(Val: Cond))
    if (auto *WO = dyn_cast<WithOverflowInst>(Val: EVI->getAggregateOperand()))
      if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
        return getValueFromOverflowCondition(Val, WO, IsTrueDest);

  // Bound the recursion through not/and/or chains.
  if (++Depth == MaxAnalysisRecursionDepth)
    return ValueLatticeElement::getOverdefined();

  // !N being true means N is false, and vice versa.
  Value *N;
  if (match(V: Cond, P: m_Not(V: m_Value(V&: N))))
    return getValueFromCondition(Val, Cond: N, IsTrueDest: !IsTrueDest, UseBlockValue, Depth);

  Value *L, *R;
  bool IsAnd;
  if (match(V: Cond, P: m_LogicalAnd(L: m_Value(V&: L), R: m_Value(V&: R))))
    IsAnd = true;
  else if (match(V: Cond, P: m_LogicalOr(L: m_Value(V&: L), R: m_Value(V&: R))))
    IsAnd = false;
  else
    return ValueLatticeElement::getOverdefined();

  std::optional<ValueLatticeElement> LV =
      getValueFromCondition(Val, Cond: L, IsTrueDest, UseBlockValue, Depth);
  if (!LV)
    return std::nullopt;
  std::optional<ValueLatticeElement> RV =
      getValueFromCondition(Val, Cond: R, IsTrueDest, UseBlockValue, Depth);
  if (!RV)
    return std::nullopt;

  // if (L && R) -> intersect L and R
  // if (!(L || R)) -> intersect !L and !R
  // if (L || R) -> union L and R
  // if (!(L && R)) -> union !L and !R
  if (IsTrueDest ^ IsAnd) {
    // Only one of the two operand facts must hold: merge (union) them.
    LV->mergeIn(RHS: *RV);
    return *LV;
  }

  // Both operand facts hold simultaneously: intersect them.
  return LV->intersect(Other: *RV);
}
1556
1557// Return true if Usr has Op as an operand, otherwise false.
1558static bool usesOperand(User *Usr, Value *Op) {
1559 return is_contained(Range: Usr->operands(), Element: Op);
1560}
1561
1562// Return true if the instruction type of Val is supported by
1563// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1564// Call this before calling constantFoldUser() to find out if it's even worth
1565// attempting to call it.
1566static bool isOperationFoldable(User *Usr) {
1567 return isa<CastInst>(Val: Usr) || isa<BinaryOperator>(Val: Usr) || isa<FreezeInst>(Val: Usr);
1568}
1569
// Check if Usr can be simplified to an integer constant when the value of one
// of its operands Op is an integer constant OpConstVal. If so, return it as an
// lattice value range with a single element or otherwise return an overdefined
// lattice value.
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
                                            const APInt &OpConstVal,
                                            const DataLayout &DL) {
  assert(isOperationFoldable(Usr) && "Precondition");
  Constant* OpConst = Constant::getIntegerValue(Ty: Op->getType(), V: OpConstVal);
  // Check if Usr can be simplified to a constant.
  if (auto *CI = dyn_cast<CastInst>(Val: Usr)) {
    assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            Val: simplifyCastInst(CastOpc: CI->getOpcode(), Op: OpConst,
                              Ty: CI->getDestTy(), Q: DL))) {
      // Fold succeeded: represent the result as a single-element range.
      return ValueLatticeElement::getRange(CR: ConstantRange(C->getValue()));
    }
  } else if (auto *BO = dyn_cast<BinaryOperator>(Val: Usr)) {
    // Substitute OpConst for whichever operand position(s) Op occupies.
    bool Op0Match = BO->getOperand(i_nocapture: 0) == Op;
    bool Op1Match = BO->getOperand(i_nocapture: 1) == Op;
    assert((Op0Match || Op1Match) &&
           "Operand 0 nor Operand 1 isn't a match");
    Value *LHS = Op0Match ? OpConst : BO->getOperand(i_nocapture: 0);
    Value *RHS = Op1Match ? OpConst : BO->getOperand(i_nocapture: 1);
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            Val: simplifyBinOp(Opcode: BO->getOpcode(), LHS, RHS, Q: DL))) {
      return ValueLatticeElement::getRange(CR: ConstantRange(C->getValue()));
    }
  } else if (isa<FreezeInst>(Val: Usr)) {
    // freeze of a known constant is that constant itself.
    assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
    return ValueLatticeElement::getRange(CR: ConstantRange(OpConstVal));
  }
  return ValueLatticeElement::getOverdefined();
}
1604
/// Compute the value of Val on the edge BBFrom -> BBTo, using only the
/// information implied by BBFrom's terminator (a conditional branch or a
/// switch). Returns overdefined when the edge does not constrain Val, and
/// std::nullopt when the computation depends on a block value that has not
/// been solved yet -- the caller must run solve() and retry (see
/// getValueOnEdge).
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
                                     BasicBlock *BBTo, bool UseBlockValue) {
  // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
  // know that v != 0.
  if (BranchInst *BI = dyn_cast<BranchInst>(Val: BBFrom->getTerminator())) {
    // If this is a conditional branch and only one successor goes to BBTo, then
    // we may be able to infer something from the condition.
    if (BI->isConditional() &&
        BI->getSuccessor(i: 0) != BI->getSuccessor(i: 1)) {
      bool isTrueDest = BI->getSuccessor(i: 0) == BBTo;
      // When isTrueDest is false this verifies BBTo really is the false
      // successor; when isTrueDest is true the check is trivially satisfied.
      assert(BI->getSuccessor(!isTrueDest) == BBTo &&
             "BBTo isn't a successor of BBFrom");
      Value *Condition = BI->getCondition();

      // If V is the condition of the branch itself, then we know exactly what
      // it is.
      // NB: The condition on a `br` can't be a vector type.
      if (Condition == Val)
        return ValueLatticeElement::get(C: ConstantInt::get(
            Ty: Type::getInt1Ty(C&: Val->getContext()), V: isTrueDest));

      // If the condition of the branch is an equality comparison, we may be
      // able to infer the value.
      std::optional<ValueLatticeElement> Result =
          getValueFromCondition(Val, Cond: Condition, IsTrueDest: isTrueDest, UseBlockValue);
      if (!Result)
        return std::nullopt;

      if (!Result->isOverdefined())
        return Result;

      // The condition did not directly constrain Val. If Val is a user, try
      // to fold it from what the edge tells us about its operands.
      if (User *Usr = dyn_cast<User>(Val)) {
        assert(Result->isOverdefined() && "Result isn't overdefined");
        // Check with isOperationFoldable() first to avoid linearly iterating
        // over the operands unnecessarily which can be expensive for
        // instructions with many operands.
        if (isa<IntegerType>(Val: Usr->getType()) && isOperationFoldable(Usr)) {
          const DataLayout &DL = BBTo->getDataLayout();
          if (usesOperand(Usr, Op: Condition)) {
            // If Val has Condition as an operand and Val can be folded into a
            // constant with either Condition == true or Condition == false,
            // propagate the constant.
            // eg.
            // ; %Val is true on the edge to %then.
            // %Val = and i1 %Condition, true.
            // br %Condition, label %then, label %else
            APInt ConditionVal(1, isTrueDest ? 1 : 0);
            Result = constantFoldUser(Usr, Op: Condition, OpConstVal: ConditionVal, DL);
          } else if (isa<TruncInst, ZExtInst, SExtInst>(Val: Usr)) {
            // For a cast, derive the cast operand's lattice value from the
            // condition and push it through the cast.
            ValueLatticeElement OpLatticeVal =
                *getValueFromCondition(Val: Usr->getOperand(i: 0), Cond: Condition,
                                       IsTrueDest: isTrueDest, /*UseBlockValue*/ false);

            if (OpLatticeVal.isConstantRange()) {
              const unsigned ResultBitWidth =
                  Usr->getType()->getScalarSizeInBits();
              // Pass the trunc's no-wrap flags so the range truncation can
              // stay as tight as possible.
              if (auto *Trunc = dyn_cast<TruncInst>(Val: Usr))
                return ValueLatticeElement::getRange(
                    CR: OpLatticeVal.getConstantRange().truncate(
                        BitWidth: ResultBitWidth, NoWrapKind: Trunc->getNoWrapKind()));

              return ValueLatticeElement::getRange(
                  CR: OpLatticeVal.getConstantRange().castOp(
                      CastOp: cast<CastInst>(Val: Usr)->getOpcode(), BitWidth: ResultBitWidth));
            }
            if (OpLatticeVal.isConstant()) {
              Constant *C = OpLatticeVal.getConstant();
              if (auto *CastC = ConstantFoldCastOperand(
                      Opcode: cast<CastInst>(Val: Usr)->getOpcode(), C, DestTy: Usr->getType(), DL))
                return ValueLatticeElement::get(C: CastC);
            }
            return ValueLatticeElement::getOverdefined();
          } else {
            // If one of Val's operand has an inferred value, we may be able to
            // infer the value of Val.
            // eg.
            // ; %Val is 94 on the edge to %then.
            // %Val = add i8 %Op, 1
            // %Condition = icmp eq i8 %Op, 93
            // br i1 %Condition, label %then, label %else
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
              Value *Op = Usr->getOperand(i);
              ValueLatticeElement OpLatticeVal = *getValueFromCondition(
                  Val: Op, Cond: Condition, IsTrueDest: isTrueDest, /*UseBlockValue*/ false);
              if (std::optional<APInt> OpConst =
                      OpLatticeVal.asConstantInteger()) {
                Result = constantFoldUser(Usr, Op, OpConstVal: *OpConst, DL);
                break;
              }
            }
          }
        }
      }
      if (!Result->isOverdefined())
        return Result;
    }
  }

  // If the edge was formed by a switch on the value, then we may know exactly
  // what it is.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(Val: BBFrom->getTerminator())) {
    Value *Condition = SI->getCondition();
    if (!isa<IntegerType>(Val: Val->getType()))
      return ValueLatticeElement::getOverdefined();
    // The switch may constrain Val either directly (Condition == Val) or
    // through a foldable user of the condition.
    bool ValUsesConditionAndMayBeFoldable = false;
    if (Condition != Val) {
      // Check if Val has Condition as an operand.
      if (User *Usr = dyn_cast<User>(Val))
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
                                           usesOperand(Usr, Op: Condition);
      if (!ValUsesConditionAndMayBeFoldable)
        return ValueLatticeElement::getOverdefined();
    }
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
           "Condition != Val nor Val doesn't use Condition");

    bool DefaultCase = SI->getDefaultDest() == BBTo;
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
    // For the default destination start from the full set and subtract the
    // covered case values; for a normal case destination start from the empty
    // set and union in the cases that branch to BBTo.
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);

    for (auto Case : SI->cases()) {
      APInt CaseValue = Case.getCaseValue()->getValue();
      ConstantRange EdgeVal(CaseValue);
      if (ValUsesConditionAndMayBeFoldable) {
        User *Usr = cast<User>(Val);
        const DataLayout &DL = BBTo->getDataLayout();
        // Map the case value through the user to obtain Val's value for this
        // case; if any case fails to fold, the whole query is overdefined.
        ValueLatticeElement EdgeLatticeVal =
            constantFoldUser(Usr, Op: Condition, OpConstVal: CaseValue, DL);
        if (EdgeLatticeVal.isOverdefined())
          return ValueLatticeElement::getOverdefined();
        EdgeVal = EdgeLatticeVal.getConstantRange();
      }
      if (DefaultCase) {
        // It is possible that the default destination is the destination of
        // some cases. We cannot perform difference for those cases.
        // We know Condition != CaseValue in BBTo. In some cases we can use
        // this to infer Val == f(Condition) is != f(CaseValue). For now, we
        // only do this when f is identity (i.e. Val == Condition), but we
        // should be able to do this for any injective f.
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
          EdgesVals = EdgesVals.difference(CR: EdgeVal);
      } else if (Case.getCaseSuccessor() == BBTo)
        EdgesVals = EdgesVals.unionWith(CR: EdgeVal);
    }
    return ValueLatticeElement::getRange(CR: std::move(EdgesVals));
  }
  return ValueLatticeElement::getOverdefined();
}
1755
/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
/// the basic block if the edge does not constrain Val. Returns std::nullopt
/// if a required block value has not been computed yet; the caller must run
/// solve() and re-query (see getValueOnEdge).
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
                                BasicBlock *BBTo, Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(C: VC);

  std::optional<ValueLatticeElement> LocalResult =
      getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
  if (!LocalResult)
    return std::nullopt;

  // A single-value result cannot be refined further by intersecting with the
  // block value, so skip that work.
  if (hasSingleValue(Val: *LocalResult))
    // Can't get any more precise here
    return LocalResult;

  std::optional<ValueLatticeElement> OptInBlock =
      getBlockValue(Val, BB: BBFrom, CxtI: BBFrom->getTerminator());
  if (!OptInBlock)
    return std::nullopt;
  ValueLatticeElement &InBlock = *OptInBlock;

  // We can use the context instruction (generically the ultimate instruction
  // the calling pass is trying to simplify) here, even though the result of
  // this function is generally cached when called from the solve* functions
  // (and that cached result might be used with queries using a different
  // context instruction), because when this function is called from the solve*
  // functions, the context instruction is not provided. When called from
  // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
  // but then the result is not cached.
  intersectAssumeOrGuardBlockValueConstantRange(Val, BBLV&: InBlock, BBI: CxtI);

  // The final answer is the edge-local information intersected with what is
  // known about Val at the end of BBFrom.
  return LocalResult->intersect(Other: InBlock);
}
1792
1793ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1794 Instruction *CxtI) {
1795 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1796 << BB->getName() << "'\n");
1797
1798 assert(BlockValueStack.empty() && BlockValueSet.empty());
1799 std::optional<ValueLatticeElement> OptResult = getBlockValue(Val: V, BB, CxtI);
1800 if (!OptResult) {
1801 solve();
1802 OptResult = getBlockValue(Val: V, BB, CxtI);
1803 assert(OptResult && "Value not available after solving");
1804 }
1805
1806 LLVM_DEBUG(dbgs() << " Result = " << *OptResult << "\n");
1807 return *OptResult;
1808}
1809
1810ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1811 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1812 << "'\n");
1813
1814 if (auto *C = dyn_cast<Constant>(Val: V))
1815 return ValueLatticeElement::get(C);
1816
1817 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1818 if (auto *I = dyn_cast<Instruction>(Val: V))
1819 Result = getFromRangeMetadata(BBI: I);
1820 intersectAssumeOrGuardBlockValueConstantRange(Val: V, BBLV&: Result, BBI: CxtI);
1821
1822 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1823 return Result;
1824}
1825
1826ValueLatticeElement LazyValueInfoImpl::
1827getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1828 Instruction *CxtI) {
1829 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1830 << FromBB->getName() << "' to '" << ToBB->getName()
1831 << "'\n");
1832
1833 std::optional<ValueLatticeElement> Result =
1834 getEdgeValue(Val: V, BBFrom: FromBB, BBTo: ToBB, CxtI);
1835 while (!Result) {
1836 // As the worklist only explicitly tracks block values (but not edge values)
1837 // we may have to call solve() multiple times, as the edge value calculation
1838 // may request additional block values.
1839 solve();
1840 Result = getEdgeValue(Val: V, BBFrom: FromBB, BBTo: ToBB, CxtI);
1841 }
1842
1843 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1844 return *Result;
1845}
1846
/// Compute the lattice value of the value used at \p U. Starts from the block
/// value at the using instruction, then -- while the value has a single
/// (transitive) use -- intersects in conditions implied by that use's
/// position (a select arm, or a phi's incoming edge).
ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
  Value *V = U.get();
  auto *CxtI = cast<Instruction>(Val: U.getUser());
  ValueLatticeElement VL = getValueInBlock(V, BB: CxtI->getParent(), CxtI);

  // Check whether the only (possibly transitive) use of the value is in a
  // position where V can be constrained by a select or branch condition.
  const Use *CurrU = &U;
  // TODO: Increase limit?
  const unsigned MaxUsesToInspect = 3;
  for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
    std::optional<ValueLatticeElement> CondVal;
    auto *CurrI = cast<Instruction>(Val: CurrU->getUser());
    if (auto *SI = dyn_cast<SelectInst>(Val: CurrI)) {
      // If the value is undef, a different value may be chosen in
      // the select condition and at use.
      if (!isGuaranteedNotToBeUndef(V: SI->getCondition(), AC))
        break;
      // Operand 1 is the true arm and operand 2 the false arm, so the select
      // condition is known true/false respectively at this use.
      if (CurrU->getOperandNo() == 1)
        CondVal =
            *getValueFromCondition(Val: V, Cond: SI->getCondition(), /*IsTrueDest*/ true,
                                   /*UseBlockValue*/ false);
      else if (CurrU->getOperandNo() == 2)
        CondVal =
            *getValueFromCondition(Val: V, Cond: SI->getCondition(), /*IsTrueDest*/ false,
                                   /*UseBlockValue*/ false);
    } else if (auto *PHI = dyn_cast<PHINode>(Val: CurrI)) {
      // An incoming phi value is only consumed along its incoming edge, so
      // that edge's constraints apply to this use.
      // TODO: Use non-local query?
      CondVal = *getEdgeValueLocal(Val: V, BBFrom: PHI->getIncomingBlock(U: *CurrU),
                                   BBTo: PHI->getParent(), /*UseBlockValue*/ false);
    }
    if (CondVal)
      VL = VL.intersect(Other: *CondVal);

    // Only follow one-use chain, to allow direct intersection of conditions.
    // If there are multiple uses, we would have to intersect with the union of
    // all conditions at different uses.
    // Stop walking if we hit a non-speculatable instruction. Even if the
    // result is only used under a specific condition, executing the
    // instruction itself may cause side effects or UB already.
    // This also disallows looking through phi nodes: If the phi node is part
    // of a cycle, we might end up reasoning about values from different cycle
    // iterations (PR60629).
    if (!CurrI->hasOneUse() ||
        !isSafeToSpeculativelyExecuteWithVariableReplaced(
            I: CurrI, /*IgnoreUBImplyingAttrs=*/false))
      break;
    CurrU = &*CurrI->use_begin();
  }
  return VL;
}
1898
/// Update cached values after the edge PredBB -> OldSucc is redirected to
/// NewSucc. PredBB itself is not needed: the cache update only depends on the
/// old and new successor blocks.
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}
1903
1904//===----------------------------------------------------------------------===//
1905// LazyValueInfo Impl
1906//===----------------------------------------------------------------------===//
1907
1908bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1909 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1910
1911 if (auto *Impl = Info.getImpl())
1912 Impl->clear();
1913
1914 // Fully lazy.
1915 return false;
1916}
1917
1918void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1919 AU.setPreservesAll();
1920 AU.addRequired<AssumptionCacheTracker>();
1921 AU.addRequired<TargetLibraryInfoWrapperPass>();
1922}
1923
1924LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1925
1926/// This lazily constructs the LazyValueInfoImpl.
1927LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1928 if (!PImpl) {
1929 assert(M && "getCache() called with a null Module");
1930 const DataLayout &DL = M->getDataLayout();
1931 Function *GuardDecl =
1932 Intrinsic::getDeclarationIfExists(M, id: Intrinsic::experimental_guard);
1933 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1934 }
1935 return *PImpl;
1936}
1937
1938LazyValueInfoImpl *LazyValueInfo::getImpl() { return PImpl; }
1939
1940LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1941
1942void LazyValueInfo::releaseMemory() {
1943 // If the cache was allocated, free it.
1944 if (auto *Impl = getImpl()) {
1945 delete &*Impl;
1946 PImpl = nullptr;
1947 }
1948}
1949
1950bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1951 FunctionAnalysisManager::Invalidator &Inv) {
1952 // We need to invalidate if we have either failed to preserve this analyses
1953 // result directly or if any of its dependencies have been invalidated.
1954 auto PAC = PA.getChecker<LazyValueAnalysis>();
1955 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1956 return true;
1957
1958 return false;
1959}
1960
1961void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1962
1963LazyValueInfo LazyValueAnalysis::run(Function &F,
1964 FunctionAnalysisManager &FAM) {
1965 auto &AC = FAM.getResult<AssumptionAnalysis>(IR&: F);
1966
1967 return LazyValueInfo(&AC, &F.getDataLayout());
1968}
1969
1970/// Returns true if we can statically tell that this value will never be a
1971/// "useful" constant. In practice, this means we've got something like an
1972/// alloca or a malloc call for which a comparison against a constant can
1973/// only be guarding dead code. Note that we are potentially giving up some
1974/// precision in dead code (a constant result) in favour of avoiding a
1975/// expensive search for a easily answered common query.
1976static bool isKnownNonConstant(Value *V) {
1977 V = V->stripPointerCasts();
1978 // The return val of alloc cannot be a Constant.
1979 if (isa<AllocaInst>(Val: V))
1980 return true;
1981 return false;
1982}
1983
1984Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1985 // Bail out early if V is known not to be a Constant.
1986 if (isKnownNonConstant(V))
1987 return nullptr;
1988
1989 BasicBlock *BB = CxtI->getParent();
1990 ValueLatticeElement Result =
1991 getOrCreateImpl(M: BB->getModule()).getValueInBlock(V, BB, CxtI);
1992
1993 if (Result.isConstant())
1994 return Result.getConstant();
1995 if (Result.isConstantRange()) {
1996 const ConstantRange &CR = Result.getConstantRange();
1997 if (const APInt *SingleVal = CR.getSingleElement())
1998 return ConstantInt::get(Ty: V->getType(), V: *SingleVal);
1999 }
2000 return nullptr;
2001}
2002
2003ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
2004 bool UndefAllowed) {
2005 BasicBlock *BB = CxtI->getParent();
2006 ValueLatticeElement Result =
2007 getOrCreateImpl(M: BB->getModule()).getValueInBlock(V, BB, CxtI);
2008 return Result.asConstantRange(Ty: V->getType(), UndefAllowed);
2009}
2010
2011ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
2012 bool UndefAllowed) {
2013 auto *Inst = cast<Instruction>(Val: U.getUser());
2014 ValueLatticeElement Result =
2015 getOrCreateImpl(M: Inst->getModule()).getValueAtUse(U);
2016 return Result.asConstantRange(Ty: U->getType(), UndefAllowed);
2017}
2018
2019/// Determine whether the specified value is known to be a
2020/// constant on the specified edge. Return null if not.
2021Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
2022 BasicBlock *ToBB,
2023 Instruction *CxtI) {
2024 Module *M = FromBB->getModule();
2025 ValueLatticeElement Result =
2026 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2027
2028 if (Result.isConstant())
2029 return Result.getConstant();
2030 if (Result.isConstantRange()) {
2031 const ConstantRange &CR = Result.getConstantRange();
2032 if (const APInt *SingleVal = CR.getSingleElement())
2033 return ConstantInt::get(Ty: V->getType(), V: *SingleVal);
2034 }
2035 return nullptr;
2036}
2037
2038ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
2039 BasicBlock *FromBB,
2040 BasicBlock *ToBB,
2041 Instruction *CxtI) {
2042 Module *M = FromBB->getModule();
2043 ValueLatticeElement Result =
2044 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2045 // TODO: Should undef be allowed here?
2046 return Result.asConstantRange(Ty: V->getType(), /*UndefAllowed*/ true);
2047}
2048
2049static Constant *getPredicateResult(CmpInst::Predicate Pred, Constant *C,
2050 const ValueLatticeElement &Val,
2051 const DataLayout &DL) {
2052 // If we know the value is a constant, evaluate the conditional.
2053 if (Val.isConstant())
2054 return ConstantFoldCompareInstOperands(Predicate: Pred, LHS: Val.getConstant(), RHS: C, DL);
2055
2056 Type *ResTy = CmpInst::makeCmpResultType(opnd_type: C->getType());
2057 if (Val.isConstantRange()) {
2058 const ConstantRange &CR = Val.getConstantRange();
2059 ConstantRange RHS = C->toConstantRange();
2060 if (CR.icmp(Pred, Other: RHS))
2061 return ConstantInt::getTrue(Ty: ResTy);
2062 if (CR.icmp(Pred: CmpInst::getInversePredicate(pred: Pred), Other: RHS))
2063 return ConstantInt::getFalse(Ty: ResTy);
2064 return nullptr;
2065 }
2066
2067 if (Val.isNotConstant()) {
2068 // If this is an equality comparison, we can try to fold it knowing that
2069 // "V != C1".
2070 if (Pred == ICmpInst::ICMP_EQ) {
2071 // !C1 == C -> false iff C1 == C.
2072 Constant *Res = ConstantFoldCompareInstOperands(
2073 Predicate: ICmpInst::ICMP_NE, LHS: Val.getNotConstant(), RHS: C, DL);
2074 if (Res && Res->isNullValue())
2075 return ConstantInt::getFalse(Ty: ResTy);
2076 } else if (Pred == ICmpInst::ICMP_NE) {
2077 // !C1 != C -> true iff C1 == C.
2078 Constant *Res = ConstantFoldCompareInstOperands(
2079 Predicate: ICmpInst::ICMP_NE, LHS: Val.getNotConstant(), RHS: C, DL);
2080 if (Res && Res->isNullValue())
2081 return ConstantInt::getTrue(Ty: ResTy);
2082 }
2083 return nullptr;
2084 }
2085
2086 return nullptr;
2087}
2088
2089/// Determine whether the specified value comparison with a constant is known to
2090/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
2091Constant *LazyValueInfo::getPredicateOnEdge(CmpInst::Predicate Pred, Value *V,
2092 Constant *C, BasicBlock *FromBB,
2093 BasicBlock *ToBB,
2094 Instruction *CxtI) {
2095 Module *M = FromBB->getModule();
2096 ValueLatticeElement Result =
2097 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2098
2099 return getPredicateResult(Pred, C, Val: Result, DL: M->getDataLayout());
2100}
2101
/// Determine whether `V Pred C` is known true or false at \p CxtI; returns
/// the folded i1 constant or null. When \p UseBlockValue is set the full
/// block value of V is consulted; otherwise only range metadata and
/// assumptions/guards at CxtI. If that is inconclusive, the predicate is
/// re-checked one step backwards along each incoming CFG edge.
Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *V,
                                        Constant *C, Instruction *CxtI,
                                        bool UseBlockValue) {
  // Is or is not NonNull are common predicates being queried. If
  // isKnownNonZero can tell us the result of the predicate, we can
  // return it quickly. But this is only a fastpath, and falling
  // through would still be correct.
  Module *M = CxtI->getModule();
  const DataLayout &DL = M->getDataLayout();
  if (V->getType()->isPointerTy() && C->isNullValue() &&
      isKnownNonZero(V: V->stripPointerCastsSameRepresentation(), Q: DL)) {
    Type *ResTy = CmpInst::makeCmpResultType(opnd_type: C->getType());
    if (Pred == ICmpInst::ICMP_EQ)
      return ConstantInt::getFalse(Ty: ResTy);
    else if (Pred == ICmpInst::ICMP_NE)
      return ConstantInt::getTrue(Ty: ResTy);
  }

  auto &Impl = getOrCreateImpl(M);
  ValueLatticeElement Result =
      UseBlockValue ? Impl.getValueInBlock(V, BB: CxtI->getParent(), CxtI)
                    : Impl.getValueAt(V, CxtI);
  Constant *Ret = getPredicateResult(Pred, C, Val: Result, DL);
  if (Ret)
    return Ret;

  // Note: The following bit of code is somewhat distinct from the rest of LVI;
  // LVI as a whole tries to compute a lattice value which is conservatively
  // correct at a given location. In this case, we have a predicate which we
  // weren't able to prove about the merged result, and we're pushing that
  // predicate back along each incoming edge to see if we can prove it
  // separately for each input. As a motivating example, consider:
  // bb1:
  //   %v1 = ... ; constantrange<1, 5>
  //   br label %merge
  // bb2:
  //   %v2 = ... ; constantrange<10, 20>
  //   br label %merge
  // merge:
  //   %phi = phi [%v1, %v2] ; constantrange<1,20>
  //   %pred = icmp eq i32 %phi, 8
  // We can't tell from the lattice value for '%phi' that '%pred' is false
  // along each path, but by checking the predicate over each input separately,
  // we can.
  // We limit the search to one step backwards from the current BB and value.
  // We could consider extending this to search further backwards through the
  // CFG and/or value graph, but there are non-obvious compile time vs quality
  // tradeoffs.
  BasicBlock *BB = CxtI->getParent();

  // Function entry or an unreachable block. Bail to avoid confusing
  // analysis below.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return nullptr;

  // If V is a PHI node in the same block as the context, we need to ask
  // questions about the predicate as applied to the incoming value along
  // each edge. This is useful for eliminating cases where the predicate is
  // known along all incoming edges.
  if (auto *PHI = dyn_cast<PHINode>(Val: V))
    if (PHI->getParent() == BB) {
      Constant *Baseline = nullptr;
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
        Value *Incoming = PHI->getIncomingValue(i);
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
        // Note that PredBB may be BB itself.
        Constant *Result =
            getPredicateOnEdge(Pred, V: Incoming, C, FromBB: PredBB, ToBB: BB, CxtI);

        // Keep going as long as we've seen a consistent known result for
        // all inputs.
        Baseline = (i == 0) ? Result /* First iteration */
                            : (Baseline == Result ? Baseline
                                                  : nullptr); /* All others */
        if (!Baseline)
          break;
      }
      if (Baseline)
        return Baseline;
    }

  // For a comparison where the V is outside this block, it's possible
  // that we've branched on it before. Look to see if the value is known
  // on all incoming edges.
  if (!isa<Instruction>(Val: V) || cast<Instruction>(Val: V)->getParent() != BB) {
    // For predecessor edge, determine if the comparison is true or false
    // on that edge. If they're all true or all false, we can conclude
    // the value of the comparison in this block.
    Constant *Baseline = getPredicateOnEdge(Pred, V, C, FromBB: *PI, ToBB: BB, CxtI);
    if (Baseline) {
      // Check that all remaining incoming values match the first one.
      while (++PI != PE) {
        Constant *Ret = getPredicateOnEdge(Pred, V, C, FromBB: *PI, ToBB: BB, CxtI);
        if (Ret != Baseline)
          break;
      }
      // If we terminated early, then one of the values didn't match.
      if (PI == PE) {
        return Baseline;
      }
    }
  }

  return nullptr;
}
2208
2209Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *LHS,
2210 Value *RHS, Instruction *CxtI,
2211 bool UseBlockValue) {
2212 if (auto *C = dyn_cast<Constant>(Val: RHS))
2213 return getPredicateAt(Pred, V: LHS, C, CxtI, UseBlockValue);
2214 if (auto *C = dyn_cast<Constant>(Val: LHS))
2215 return getPredicateAt(Pred: CmpInst::getSwappedPredicate(pred: Pred), V: RHS, C, CxtI,
2216 UseBlockValue);
2217
2218 // Got two non-Constant values. Try to determine the comparison results based
2219 // on the block values of the two operands, e.g. because they have
2220 // non-overlapping ranges.
2221 if (UseBlockValue) {
2222 Module *M = CxtI->getModule();
2223 ValueLatticeElement L =
2224 getOrCreateImpl(M).getValueInBlock(V: LHS, BB: CxtI->getParent(), CxtI);
2225 if (L.isOverdefined())
2226 return nullptr;
2227
2228 ValueLatticeElement R =
2229 getOrCreateImpl(M).getValueInBlock(V: RHS, BB: CxtI->getParent(), CxtI);
2230 Type *Ty = CmpInst::makeCmpResultType(opnd_type: LHS->getType());
2231 return L.getCompare(Pred, Ty, Other: R, DL: M->getDataLayout());
2232 }
2233 return nullptr;
2234}
2235
/// Notify LVI that the edge PredBB -> OldSucc now targets NewSucc. Forwarded
/// only if the implementation (and hence its cache) was ever created;
/// otherwise there is nothing to update.
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                               BasicBlock *NewSucc) {
  if (auto *Impl = getImpl())
    Impl->threadEdge(PredBB, OldSucc, NewSucc);
}
2241
/// Drop any cached information about \p V (no-op if nothing was computed).
void LazyValueInfo::forgetValue(Value *V) {
  if (auto *Impl = getImpl())
    Impl->forgetValue(V);
}
2246
/// Drop any cached information about block \p BB (no-op if nothing cached).
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
  if (auto *Impl = getImpl())
    Impl->eraseBlock(BB);
}
2251
/// Clear all cached lattice values (no-op if the impl was never created).
void LazyValueInfo::clear() {
  if (auto *Impl = getImpl())
    Impl->clear();
}
2256
/// Print the LVI results for \p F to \p OS (no-op if nothing was computed).
void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
  if (auto *Impl = getImpl())
    Impl->printLVI(F, DTree, OS);
}
2261
2262// Print the LVI for the function arguments at the start of each basic block.
2263void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2264 const BasicBlock *BB, formatted_raw_ostream &OS) {
2265 // Find if there are latticevalues defined for arguments of the function.
2266 auto *F = BB->getParent();
2267 for (const auto &Arg : F->args()) {
2268 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2269 V: const_cast<Argument *>(&Arg), BB: const_cast<BasicBlock *>(BB));
2270 if (Result.isUnknown())
2271 continue;
2272 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2273 }
2274}
2275
2276// This function prints the LVI analysis for the instruction I at the beginning
2277// of various basic blocks. It relies on calculated values that are stored in
2278// the LazyValueInfoCache, and in the absence of cached values, recalculate the
2279// LazyValueInfo for `I`, and print that info.
void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {

  auto *ParentBB = I->getParent();
  // Tracks which blocks have already been printed so each block's result
  // appears at most once even when reached via multiple paths below.
  SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
  // We can generate (solve) LVI values only for blocks that are dominated by
  // the I's parent. However, to avoid generating LVI for all dominating blocks,
  // that contain redundant/uninteresting information, we print LVI for
  // blocks that may use this LVI information (such as immediate successor
  // blocks, and blocks that contain uses of `I`).
  auto printResult = [&](const BasicBlock *BB) {
    if (!BlocksContainingLVI.insert(Ptr: BB).second)
      return;
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
        V: const_cast<Instruction *>(I), BB: const_cast<BasicBlock *>(BB));
    OS << "; LatticeVal for: '" << *I << "' in BB: '";
    BB->printAsOperand(O&: OS, PrintType: false);
    OS << "' is: " << Result << "\n";
  };

  printResult(ParentBB);
  // Print the LVI analysis results for the immediate successor blocks, that
  // are dominated by `ParentBB`.
  for (const auto *BBSucc : successors(BB: ParentBB))
    if (DT.dominates(A: ParentBB, B: BBSucc))
      printResult(BBSucc);

  // Print LVI in blocks where `I` is used.
  for (const auto *U : I->users())
    if (auto *UseI = dyn_cast<Instruction>(Val: U))
      // Phi uses are only printed when ParentBB dominates the phi's block,
      // per the domination requirement stated above.
      if (!isa<PHINode>(Val: UseI) || DT.dominates(A: ParentBB, B: UseI->getParent()))
        printResult(UseI->getParent());

}
2314
2315PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2316 FunctionAnalysisManager &AM) {
2317 OS << "LVI for function '" << F.getName() << "':\n";
2318 auto &LVI = AM.getResult<LazyValueAnalysis>(IR&: F);
2319 auto &DTree = AM.getResult<DominatorTreeAnalysis>(IR&: F);
2320 LVI.printLVI(F, DTree, OS);
2321 return PreservedAnalyses::all();
2322}
2323