1//===-- ConstraintElimination.cpp - Eliminate conds using constraints. ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Eliminate conditions based on constraints collected from dominating
10// conditions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Scalar/ConstraintElimination.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/ScopeExit.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/Statistic.h"
19#include "llvm/Analysis/ConstraintSystem.h"
20#include "llvm/Analysis/GlobalsModRef.h"
21#include "llvm/Analysis/LoopInfo.h"
22#include "llvm/Analysis/MemoryBuiltins.h"
23#include "llvm/Analysis/OptimizationRemarkEmitter.h"
24#include "llvm/Analysis/ScalarEvolution.h"
25#include "llvm/Analysis/ScalarEvolutionExpressions.h"
26#include "llvm/Analysis/TargetLibraryInfo.h"
27#include "llvm/Analysis/ValueTracking.h"
28#include "llvm/IR/DataLayout.h"
29#include "llvm/IR/DebugInfo.h"
30#include "llvm/IR/Dominators.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/IRBuilder.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Module.h"
36#include "llvm/IR/PatternMatch.h"
37#include "llvm/IR/Verifier.h"
38#include "llvm/Pass.h"
39#include "llvm/Support/CommandLine.h"
40#include "llvm/Support/Debug.h"
41#include "llvm/Support/DebugCounter.h"
42#include "llvm/Support/MathExtras.h"
43#include "llvm/Transforms/Utils/Cloning.h"
44#include "llvm/Transforms/Utils/ValueMapper.h"
45
46#include <optional>
47#include <string>
48
49using namespace llvm;
50using namespace PatternMatch;
51
52#define DEBUG_TYPE "constraint-elimination"
53
// Pass statistics and debugging knobs.
STATISTIC(NumCondsRemoved, "Number of instructions removed");
// Debug counter to bisect which individual eliminations fire.
DEBUG_COUNTER(EliminatedCounter, "conds-eliminated",
              "Controls which conditions are eliminated");

// Cap on the number of rows kept in a constraint system; bounds compile time
// for pathological inputs.
static cl::opt<unsigned>
    MaxRows("constraint-elimination-max-rows", cl::init(Val: 500), cl::Hidden,
            cl::desc("Maximum number of rows to keep in constraint system"));

// When set, dump a standalone IR module for each successful transformation,
// useful for creating minimal reproducers.
static cl::opt<bool> DumpReproducers(
    "constraint-elimination-dump-reproducers", cl::init(Val: false), cl::Hidden,
    cl::desc("Dump IR to reproduce successful transformations."));

// Sentinel bounds for constants representable as 64-bit constraint
// coefficients; values at or beyond these are treated as opaque.
static int64_t MaxConstraintValue = std::numeric_limits<int64_t>::max();
static int64_t MinSignedConstraintValue = std::numeric_limits<int64_t>::min();
68
69static Instruction *getContextInstForUse(Use &U) {
70 Instruction *UserI = cast<Instruction>(Val: U.getUser());
71 if (auto *Phi = dyn_cast<PHINode>(Val: UserI))
72 UserI = Phi->getIncomingBlock(U)->getTerminator();
73 return UserI;
74}
75
76namespace {
77/// Struct to express a condition of the form %Op0 Pred %Op1.
78struct ConditionTy {
79 CmpPredicate Pred;
80 Value *Op0 = nullptr;
81 Value *Op1 = nullptr;
82
83 ConditionTy() = default;
84 ConditionTy(CmpPredicate Pred, Value *Op0, Value *Op1)
85 : Pred(Pred), Op0(Op0), Op1(Op1) {}
86};
87
88/// Represents either
89/// * a condition that holds on entry to a block (=condition fact)
90/// * an assume (=assume fact)
91/// * a use of a compare instruction to simplify.
92/// It also tracks the Dominator DFS in and out numbers for each entry.
93struct FactOrCheck {
94 enum class EntryTy {
95 ConditionFact, /// A condition that holds on entry to a block.
96 InstFact, /// A fact that holds after Inst executed (e.g. an assume or
97 /// min/mix intrinsic.
98 InstCheck, /// An instruction to simplify (e.g. an overflow math
99 /// intrinsics).
100 UseCheck /// An use of a compare instruction to simplify.
101 };
102
103 union {
104 Instruction *Inst;
105 Use *U;
106 ConditionTy Cond;
107 };
108
109 /// A pre-condition that must hold for the current fact to be added to the
110 /// system.
111 ConditionTy DoesHold;
112
113 unsigned NumIn;
114 unsigned NumOut;
115 EntryTy Ty;
116
117 FactOrCheck(EntryTy Ty, DomTreeNode *DTN, Instruction *Inst)
118 : Inst(Inst), NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
119 Ty(Ty) {}
120
121 FactOrCheck(DomTreeNode *DTN, Use *U)
122 : U(U), NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
123 Ty(EntryTy::UseCheck) {}
124
125 FactOrCheck(DomTreeNode *DTN, CmpPredicate Pred, Value *Op0, Value *Op1,
126 ConditionTy Precond = {})
127 : Cond(Pred, Op0, Op1), DoesHold(Precond), NumIn(DTN->getDFSNumIn()),
128 NumOut(DTN->getDFSNumOut()), Ty(EntryTy::ConditionFact) {}
129
130 static FactOrCheck getConditionFact(DomTreeNode *DTN, CmpPredicate Pred,
131 Value *Op0, Value *Op1,
132 ConditionTy Precond = {}) {
133 return FactOrCheck(DTN, Pred, Op0, Op1, Precond);
134 }
135
136 static FactOrCheck getInstFact(DomTreeNode *DTN, Instruction *Inst) {
137 return FactOrCheck(EntryTy::InstFact, DTN, Inst);
138 }
139
140 static FactOrCheck getCheck(DomTreeNode *DTN, Use *U) {
141 return FactOrCheck(DTN, U);
142 }
143
144 static FactOrCheck getCheck(DomTreeNode *DTN, CallInst *CI) {
145 return FactOrCheck(EntryTy::InstCheck, DTN, CI);
146 }
147
148 bool isCheck() const {
149 return Ty == EntryTy::InstCheck || Ty == EntryTy::UseCheck;
150 }
151
152 Instruction *getContextInst() const {
153 assert(!isConditionFact());
154 if (Ty == EntryTy::UseCheck)
155 return getContextInstForUse(U&: *U);
156 return Inst;
157 }
158
159 Instruction *getInstructionToSimplify() const {
160 assert(isCheck());
161 if (Ty == EntryTy::InstCheck)
162 return Inst;
163 // The use may have been simplified to a constant already.
164 return dyn_cast<Instruction>(Val&: *U);
165 }
166
167 bool isConditionFact() const { return Ty == EntryTy::ConditionFact; }
168};
169
170/// Keep state required to build worklist.
171struct State {
172 DominatorTree &DT;
173 LoopInfo &LI;
174 ScalarEvolution &SE;
175 TargetLibraryInfo &TLI;
176 SmallVector<FactOrCheck, 64> WorkList;
177
178 State(DominatorTree &DT, LoopInfo &LI, ScalarEvolution &SE,
179 TargetLibraryInfo &TLI)
180 : DT(DT), LI(LI), SE(SE), TLI(TLI) {}
181
182 /// Process block \p BB and add known facts to work-list.
183 void addInfoFor(BasicBlock &BB);
184
185 /// Try to add facts for loop inductions (AddRecs) in EQ/NE compares
186 /// controlling the loop header.
187 void addInfoForInductions(BasicBlock &BB);
188
189 /// Returns true if we can add a known condition from BB to its successor
190 /// block Succ.
191 bool canAddSuccessor(BasicBlock &BB, BasicBlock *Succ) const {
192 return DT.dominates(BBE: BasicBlockEdge(&BB, Succ), BB: Succ);
193 }
194};
195
196class ConstraintInfo;
197
198struct StackEntry {
199 unsigned NumIn;
200 unsigned NumOut;
201 bool IsSigned = false;
202 /// Variables that can be removed from the system once the stack entry gets
203 /// removed.
204 SmallVector<Value *, 2> ValuesToRelease;
205
206 StackEntry(unsigned NumIn, unsigned NumOut, bool IsSigned,
207 SmallVector<Value *, 2> ValuesToRelease)
208 : NumIn(NumIn), NumOut(NumOut), IsSigned(IsSigned),
209 ValuesToRelease(std::move(ValuesToRelease)) {}
210};
211
212struct ConstraintTy {
213 SmallVector<int64_t, 8> Coefficients;
214 SmallVector<ConditionTy, 2> Preconditions;
215
216 bool IsSigned = false;
217
218 ConstraintTy() = default;
219
220 ConstraintTy(SmallVector<int64_t, 8> Coefficients, bool IsSigned, bool IsEq,
221 bool IsNe)
222 : Coefficients(std::move(Coefficients)), IsSigned(IsSigned), IsEq(IsEq),
223 IsNe(IsNe) {}
224
225 unsigned size() const { return Coefficients.size(); }
226
227 unsigned empty() const { return Coefficients.empty(); }
228
229 /// Returns true if all preconditions for this list of constraints are
230 /// satisfied given \p Info.
231 bool isValid(const ConstraintInfo &Info) const;
232
233 bool isEq() const { return IsEq; }
234
235 bool isNe() const { return IsNe; }
236
237 /// Check if the current constraint is implied by the given ConstraintSystem.
238 ///
239 /// \return true or false if the constraint is proven to be respectively true,
240 /// or false. When the constraint cannot be proven to be either true or false,
241 /// std::nullopt is returned.
242 std::optional<bool> isImpliedBy(const ConstraintSystem &CS) const;
243
244private:
245 bool IsEq = false;
246 bool IsNe = false;
247};
248
249/// Wrapper encapsulating separate constraint systems and corresponding value
250/// mappings for both unsigned and signed information. Facts are added to and
251/// conditions are checked against the corresponding system depending on the
252/// signed-ness of their predicates. While the information is kept separate
253/// based on signed-ness, certain conditions can be transferred between the two
254/// systems.
255class ConstraintInfo {
256
257 ConstraintSystem UnsignedCS;
258 ConstraintSystem SignedCS;
259
260 const DataLayout &DL;
261
262public:
263 ConstraintInfo(const DataLayout &DL, ArrayRef<Value *> FunctionArgs)
264 : UnsignedCS(FunctionArgs), SignedCS(FunctionArgs), DL(DL) {
265 auto &Value2Index = getValue2Index(Signed: false);
266 // Add Arg > -1 constraints to unsigned system for all function arguments.
267 for (Value *Arg : FunctionArgs) {
268 ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0),
269 false, false, false);
270 VarPos.Coefficients[Value2Index[Arg]] = -1;
271 UnsignedCS.addVariableRow(R: VarPos.Coefficients);
272 }
273 }
274
275 DenseMap<Value *, unsigned> &getValue2Index(bool Signed) {
276 return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index();
277 }
278 const DenseMap<Value *, unsigned> &getValue2Index(bool Signed) const {
279 return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index();
280 }
281
282 ConstraintSystem &getCS(bool Signed) {
283 return Signed ? SignedCS : UnsignedCS;
284 }
285 const ConstraintSystem &getCS(bool Signed) const {
286 return Signed ? SignedCS : UnsignedCS;
287 }
288
289 void popLastConstraint(bool Signed) { getCS(Signed).popLastConstraint(); }
290 void popLastNVariables(bool Signed, unsigned N) {
291 getCS(Signed).popLastNVariables(N);
292 }
293
294 bool doesHold(CmpInst::Predicate Pred, Value *A, Value *B) const;
295
296 void addFact(CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn,
297 unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack);
298
299 /// Turn a comparison of the form \p Op0 \p Pred \p Op1 into a vector of
300 /// constraints, using indices from the corresponding constraint system.
301 /// New variables that need to be added to the system are collected in
302 /// \p NewVariables.
303 ConstraintTy getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
304 SmallVectorImpl<Value *> &NewVariables,
305 bool ForceSignedSystem = false) const;
306
307 /// Turns a comparison of the form \p Op0 \p Pred \p Op1 into a vector of
308 /// constraints using getConstraint. Returns an empty constraint if the result
309 /// cannot be used to query the existing constraint system, e.g. because it
310 /// would require adding new variables. Also tries to convert signed
311 /// predicates to unsigned ones if possible to allow using the unsigned system
312 /// which increases the effectiveness of the signed <-> unsigned transfer
313 /// logic.
314 ConstraintTy getConstraintForSolving(CmpInst::Predicate Pred, Value *Op0,
315 Value *Op1) const;
316
317 /// Try to add information from \p A \p Pred \p B to the unsigned/signed
318 /// system if \p Pred is signed/unsigned.
319 void transferToOtherSystem(CmpInst::Predicate Pred, Value *A, Value *B,
320 unsigned NumIn, unsigned NumOut,
321 SmallVectorImpl<StackEntry> &DFSInStack);
322
323private:
324 /// Adds facts into constraint system. \p ForceSignedSystem can be set when
325 /// the \p Pred is eq/ne, and signed constraint system is used when it's
326 /// specified.
327 void addFactImpl(CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn,
328 unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack,
329 bool ForceSignedSystem);
330};
331
332/// Represents a (Coefficient * Variable) entry after IR decomposition.
333struct DecompEntry {
334 int64_t Coefficient;
335 Value *Variable;
336
337 DecompEntry(int64_t Coefficient, Value *Variable)
338 : Coefficient(Coefficient), Variable(Variable) {}
339};
340
341/// Represents an Offset + Coefficient1 * Variable1 + ... decomposition.
342struct Decomposition {
343 int64_t Offset = 0;
344 SmallVector<DecompEntry, 3> Vars;
345
346 Decomposition(int64_t Offset) : Offset(Offset) {}
347 Decomposition(Value *V) { Vars.emplace_back(Args: 1, Args&: V); }
348 Decomposition(int64_t Offset, ArrayRef<DecompEntry> Vars)
349 : Offset(Offset), Vars(Vars) {}
350
351 /// Add \p OtherOffset and return true if the operation overflows, i.e. the
352 /// new decomposition is invalid.
353 [[nodiscard]] bool add(int64_t OtherOffset) {
354 return AddOverflow(X: Offset, Y: OtherOffset, Result&: Offset);
355 }
356
357 /// Add \p Other and return true if the operation overflows, i.e. the new
358 /// decomposition is invalid.
359 [[nodiscard]] bool add(const Decomposition &Other) {
360 if (add(OtherOffset: Other.Offset))
361 return true;
362 append_range(C&: Vars, R: Other.Vars);
363 return false;
364 }
365
366 /// Subtract \p Other and return true if the operation overflows, i.e. the new
367 /// decomposition is invalid.
368 [[nodiscard]] bool sub(const Decomposition &Other) {
369 Decomposition Tmp = Other;
370 if (Tmp.mul(Factor: -1))
371 return true;
372 if (add(OtherOffset: Tmp.Offset))
373 return true;
374 append_range(C&: Vars, R&: Tmp.Vars);
375 return false;
376 }
377
378 /// Multiply all coefficients by \p Factor and return true if the operation
379 /// overflows, i.e. the new decomposition is invalid.
380 [[nodiscard]] bool mul(int64_t Factor) {
381 if (MulOverflow(X: Offset, Y: Factor, Result&: Offset))
382 return true;
383 for (auto &Var : Vars)
384 if (MulOverflow(X: Var.Coefficient, Y: Factor, Result&: Var.Coefficient))
385 return true;
386 return false;
387 }
388};
389
390// Variable and constant offsets for a chain of GEPs, with base pointer BasePtr.
391struct OffsetResult {
392 Value *BasePtr;
393 APInt ConstantOffset;
394 SmallMapVector<Value *, APInt, 4> VariableOffsets;
395 GEPNoWrapFlags NW;
396
397 OffsetResult() : BasePtr(nullptr), ConstantOffset(0, uint64_t(0)) {}
398
399 OffsetResult(GEPOperator &GEP, const DataLayout &DL)
400 : BasePtr(GEP.getPointerOperand()), NW(GEP.getNoWrapFlags()) {
401 ConstantOffset = APInt(DL.getIndexTypeSizeInBits(Ty: BasePtr->getType()), 0);
402 }
403};
404} // namespace
405
406// Try to collect variable and constant offsets for \p GEP, partly traversing
407// nested GEPs. Returns an OffsetResult with nullptr as BasePtr of collecting
408// the offset fails.
409static OffsetResult collectOffsets(GEPOperator &GEP, const DataLayout &DL) {
410 OffsetResult Result(GEP, DL);
411 unsigned BitWidth = Result.ConstantOffset.getBitWidth();
412 if (!GEP.collectOffset(DL, BitWidth, VariableOffsets&: Result.VariableOffsets,
413 ConstantOffset&: Result.ConstantOffset))
414 return {};
415
416 // If we have a nested GEP, check if we can combine the constant offset of the
417 // inner GEP with the outer GEP.
418 if (auto *InnerGEP = dyn_cast<GetElementPtrInst>(Val: Result.BasePtr)) {
419 SmallMapVector<Value *, APInt, 4> VariableOffsets2;
420 APInt ConstantOffset2(BitWidth, 0);
421 bool CanCollectInner = InnerGEP->collectOffset(
422 DL, BitWidth, VariableOffsets&: VariableOffsets2, ConstantOffset&: ConstantOffset2);
423 // TODO: Support cases with more than 1 variable offset.
424 if (!CanCollectInner || Result.VariableOffsets.size() > 1 ||
425 VariableOffsets2.size() > 1 ||
426 (Result.VariableOffsets.size() >= 1 && VariableOffsets2.size() >= 1)) {
427 // More than 1 variable index, use outer result.
428 return Result;
429 }
430 Result.BasePtr = InnerGEP->getPointerOperand();
431 Result.ConstantOffset += ConstantOffset2;
432 if (Result.VariableOffsets.size() == 0 && VariableOffsets2.size() == 1)
433 Result.VariableOffsets = VariableOffsets2;
434 Result.NW &= InnerGEP->getNoWrapFlags();
435 }
436 return Result;
437}
438
439static Decomposition decompose(Value *V,
440 SmallVectorImpl<ConditionTy> &Preconditions,
441 bool IsSigned, const DataLayout &DL);
442
443static bool canUseSExt(ConstantInt *CI) {
444 const APInt &Val = CI->getValue();
445 return Val.sgt(RHS: MinSignedConstraintValue) && Val.slt(RHS: MaxConstraintValue);
446}
447
/// Decompose \p GEP into a constant offset plus scaled-variable terms,
/// appending any conditions that must hold for the decomposition to be valid
/// to \p Preconditions. Returns the GEP itself as an opaque variable when it
/// cannot be decomposed.
static Decomposition decomposeGEP(GEPOperator &GEP,
                                  SmallVectorImpl<ConditionTy> &Preconditions,
                                  bool IsSigned, const DataLayout &DL) {
  // Do not reason about pointers where the index size is larger than 64 bits,
  // as the coefficients used to encode constraints are 64 bit integers.
  if (DL.getIndexTypeSizeInBits(Ty: GEP.getPointerOperand()->getType()) > 64)
    return &GEP;

  assert(!IsSigned && "The logic below only supports decomposition for "
                      "unsigned predicates at the moment.");
  const auto &[BasePtr, ConstantOffset, VariableOffsets, NW] =
      collectOffsets(GEP, DL);
  // We support either plain gep nuw, or gep nusw with non-negative offset,
  // which implies gep nuw.
  if (!BasePtr || NW == GEPNoWrapFlags::none())
    return &GEP;

  // Start from "ConstantOffset + 1 * BasePtr" and fold in each scaled index.
  Decomposition Result(ConstantOffset.getSExtValue(), DecompEntry(1, BasePtr));
  for (auto [Index, Scale] : VariableOffsets) {
    auto IdxResult = decompose(V: Index, Preconditions, IsSigned, DL);
    // Any coefficient overflow invalidates the whole decomposition; fall
    // back to treating the GEP as an opaque variable.
    if (IdxResult.mul(Factor: Scale.getSExtValue()))
      return &GEP;
    if (Result.add(Other: IdxResult))
      return &GEP;

    if (!NW.hasNoUnsignedWrap()) {
      // Try to prove nuw from nusw and nneg.
      assert(NW.hasNoUnsignedSignedWrap() && "Must have nusw flag");
      if (!isKnownNonNegative(V: Index, SQ: DL))
        Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Index,
                                   Args: ConstantInt::get(Ty: Index->getType(), V: 0));
    }
  }
  return Result;
}
483
// Decomposes \p V into a constant offset plus a list of pairs { Coefficient,
// Variable }, such that the constant offset plus the sum of
// Coefficient * Variable over all pairs equals \p V. Conditions required for
// the decomposition to be valid are appended to \p Preconditions.
static Decomposition decompose(Value *V,
                               SmallVectorImpl<ConditionTy> &Preconditions,
                               bool IsSigned, const DataLayout &DL) {

  // Decompose A and B and merge the results; returns std::nullopt if the
  // merged decomposition overflows.
  auto MergeResults = [&Preconditions, IsSigned,
                       &DL](Value *A, Value *B,
                            bool IsSignedB) -> std::optional<Decomposition> {
    auto ResA = decompose(V: A, Preconditions, IsSigned, DL);
    auto ResB = decompose(V: B, Preconditions, IsSigned: IsSignedB, DL);
    if (ResA.add(Other: ResB))
      return std::nullopt;
    return ResA;
  };

  Type *Ty = V->getType()->getScalarType();
  if (Ty->isPointerTy() && !IsSigned) {
    if (auto *GEP = dyn_cast<GEPOperator>(Val: V))
      return decomposeGEP(GEP&: *GEP, Preconditions, IsSigned, DL);
    if (isa<ConstantPointerNull>(Val: V))
      return int64_t(0);

    // Any other pointer stays an opaque variable.
    return V;
  }

  // Don't handle integers > 64 bit. Our coefficients are 64-bit large, so
  // coefficient add/mul may wrap, while the operation in the full bit width
  // would not.
  if (!Ty->isIntegerTy() || Ty->getIntegerBitWidth() > 64)
    return V;

  // Decompose \p V used with a signed predicate.
  if (IsSigned) {
    if (auto *CI = dyn_cast<ConstantInt>(Val: V)) {
      if (canUseSExt(CI))
        return CI->getSExtValue();
    }
    Value *Op0;
    Value *Op1;

    // Peel off value-preserving extensions/truncations first.
    if (match(V, P: m_SExt(Op: m_Value(V&: Op0))))
      V = Op0;
    else if (match(V, P: m_NNegZExt(Op: m_Value(V&: Op0)))) {
      V = Op0;
    } else if (match(V, P: m_NSWTrunc(Op: m_Value(V&: Op0)))) {
      if (Op0->getType()->getScalarSizeInBits() <= 64)
        V = Op0;
    }

    if (match(V, P: m_NSWAdd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
      if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
        return *Decomp;
      return V;
    }

    if (match(V, P: m_NSWSub(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
      auto ResA = decompose(V: Op0, Preconditions, IsSigned, DL);
      auto ResB = decompose(V: Op1, Preconditions, IsSigned, DL);
      if (!ResA.sub(Other: ResB))
        return ResA;
      return V;
    }

    ConstantInt *CI;
    if (match(V, P: m_NSWMul(L: m_Value(V&: Op0), R: m_ConstantInt(CI))) && canUseSExt(CI)) {
      auto Result = decompose(V: Op0, Preconditions, IsSigned, DL);
      if (!Result.mul(Factor: CI->getSExtValue()))
        return Result;
      return V;
    }

    // (shl nsw x, shift) is (mul nsw x, (1<<shift)), with the exception of
    // shift == bw-1.
    if (match(V, P: m_NSWShl(L: m_Value(V&: Op0), R: m_ConstantInt(CI)))) {
      uint64_t Shift = CI->getValue().getLimitedValue();
      if (Shift < Ty->getIntegerBitWidth() - 1) {
        assert(Shift < 64 && "Would overflow");
        auto Result = decompose(V: Op0, Preconditions, IsSigned, DL);
        if (!Result.mul(Factor: int64_t(1) << Shift))
          return Result;
        return V;
      }
    }

    return V;
  }

  // From here on, decompose \p V used with an unsigned predicate.
  if (auto *CI = dyn_cast<ConstantInt>(Val: V)) {
    if (CI->uge(Num: MaxConstraintValue))
      return V;
    return int64_t(CI->getZExtValue());
  }

  // Peel off extensions/truncations, recording the non-negativity
  // preconditions needed for the narrowing to be value-preserving.
  Value *Op0;
  if (match(V, P: m_ZExt(Op: m_Value(V&: Op0)))) {
    V = Op0;
  } else if (match(V, P: m_SExt(Op: m_Value(V&: Op0)))) {
    V = Op0;
    Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Op0,
                               Args: ConstantInt::get(Ty: Op0->getType(), V: 0));
  } else if (auto *Trunc = dyn_cast<TruncInst>(Val: V)) {
    if (Trunc->getSrcTy()->getScalarSizeInBits() <= 64) {
      if (Trunc->hasNoUnsignedWrap() || Trunc->hasNoSignedWrap()) {
        V = Trunc->getOperand(i_nocapture: 0);
        if (!Trunc->hasNoUnsignedWrap())
          Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: V,
                                     Args: ConstantInt::get(Ty: V->getType(), V: 0));
      }
    }
  }

  Value *Op1;
  ConstantInt *CI;
  if (match(V, P: m_NUWAdd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
    if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
      return *Decomp;
    return V;
  }

  // x + negative-constant is handled as a subtraction guarded by the
  // precondition that x is large enough for the result not to wrap.
  if (match(V, P: m_Add(L: m_Value(V&: Op0), R: m_ConstantInt(CI))) && CI->isNegative() &&
      canUseSExt(CI)) {
    Preconditions.emplace_back(
        Args: CmpInst::ICMP_UGE, Args&: Op0,
        Args: ConstantInt::get(Ty: Op0->getType(), V: CI->getSExtValue() * -1));
    if (auto Decomp = MergeResults(Op0, CI, true))
      return *Decomp;
    return V;
  }

  // An nsw add can be used in the unsigned system when both operands are
  // non-negative (proven or added as preconditions).
  if (match(V, P: m_NSWAdd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
    if (!isKnownNonNegative(V: Op0, SQ: DL))
      Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Op0,
                                 Args: ConstantInt::get(Ty: Op0->getType(), V: 0));
    if (!isKnownNonNegative(V: Op1, SQ: DL))
      Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Op1,
                                 Args: ConstantInt::get(Ty: Op1->getType(), V: 0));

    if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
      return *Decomp;
    return V;
  }

  // Decompose or as an add if there are no common bits between the operands.
  if (match(V, P: m_DisjointOr(L: m_Value(V&: Op0), R: m_ConstantInt(CI)))) {
    if (auto Decomp = MergeResults(Op0, CI, IsSigned))
      return *Decomp;
    return V;
  }

  // shl nuw by a constant is a multiplication by a power of two.
  if (match(V, P: m_NUWShl(L: m_Value(V&: Op1), R: m_ConstantInt(CI))) && canUseSExt(CI)) {
    if (CI->getSExtValue() < 0 || CI->getSExtValue() >= 64)
      return V;
    auto Result = decompose(V: Op1, Preconditions, IsSigned, DL);
    if (!Result.mul(Factor: int64_t{1} << CI->getSExtValue()))
      return Result;
    return V;
  }

  if (match(V, P: m_NUWMul(L: m_Value(V&: Op1), R: m_ConstantInt(CI))) && canUseSExt(CI) &&
      (!CI->isNegative())) {
    auto Result = decompose(V: Op1, Preconditions, IsSigned, DL);
    if (!Result.mul(Factor: CI->getSExtValue()))
      return Result;
    return V;
  }

  if (match(V, P: m_NUWSub(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
    auto ResA = decompose(V: Op0, Preconditions, IsSigned, DL);
    auto ResB = decompose(V: Op1, Preconditions, IsSigned, DL);
    if (!ResA.sub(Other: ResB))
      return ResA;
    return V;
  }

  // Nothing matched: treat V as an opaque variable.
  return V;
}
662
// Build the coefficient row encoding "Op0 Pred Op1" over the variables of
// the matching (signed or unsigned) system. Variables not yet present in the
// system are appended to \p NewVariables. Returns an empty constraint when
// the predicate or the operand decomposition cannot be encoded.
ConstraintTy
ConstraintInfo::getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
                              SmallVectorImpl<Value *> &NewVariables,
                              bool ForceSignedSystem) const {
  assert(NewVariables.empty() && "NewVariables must be empty when passed in");
  assert((!ForceSignedSystem || CmpInst::isEquality(Pred)) &&
         "signed system can only be forced on eq/ne");

  bool IsEq = false;
  bool IsNe = false;

  // Try to convert Pred to one of ULE/ULT/SLE/SLT.
  switch (Pred) {
  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_SGT:
  case CmpInst::ICMP_SGE: {
    // Normalize "greater" forms by swapping the operands.
    Pred = CmpInst::getSwappedPredicate(pred: Pred);
    std::swap(a&: Op0, b&: Op1);
    break;
  }
  case CmpInst::ICMP_EQ:
    if (!ForceSignedSystem && match(V: Op1, P: m_Zero())) {
      // Op0 == 0 is equivalent to Op0 u<= 0 in the unsigned system.
      Pred = CmpInst::ICMP_ULE;
    } else {
      // General equality is encoded as ULE plus the IsEq flag; the solver
      // checks both directions.
      IsEq = true;
      Pred = CmpInst::ICMP_ULE;
    }
    break;
  case CmpInst::ICMP_NE:
    if (!ForceSignedSystem && match(V: Op1, P: m_Zero())) {
      // Op0 != 0 is equivalent to 0 u< Op0.
      Pred = CmpInst::getSwappedPredicate(pred: CmpInst::ICMP_UGT);
      std::swap(a&: Op0, b&: Op1);
    } else {
      // General non-equality is encoded as ULE plus the IsNe flag.
      IsNe = true;
      Pred = CmpInst::ICMP_ULE;
    }
    break;
  default:
    break;
  }

  // Only the four "less" forms can be encoded as a coefficient row.
  if (Pred != CmpInst::ICMP_ULE && Pred != CmpInst::ICMP_ULT &&
      Pred != CmpInst::ICMP_SLE && Pred != CmpInst::ICMP_SLT)
    return {};

  SmallVector<ConditionTy, 4> Preconditions;
  bool IsSigned = ForceSignedSystem || CmpInst::isSigned(predicate: Pred);
  auto &Value2Index = getValue2Index(Signed: IsSigned);
  auto ADec = decompose(V: Op0->stripPointerCastsSameRepresentation(),
                        Preconditions, IsSigned, DL);
  auto BDec = decompose(V: Op1->stripPointerCastsSameRepresentation(),
                        Preconditions, IsSigned, DL);
  int64_t Offset1 = ADec.Offset;
  int64_t Offset2 = BDec.Offset;
  // A's contribution moves to the other side of the inequality, so it is
  // negated (coefficients of A are added, B's subtracted, below).
  Offset1 *= -1;

  auto &VariablesA = ADec.Vars;
  auto &VariablesB = BDec.Vars;

  // First try to look up \p V in Value2Index and NewVariables. Otherwise add a
  // new entry to NewVariables.
  SmallDenseMap<Value *, unsigned> NewIndexMap;
  auto GetOrAddIndex = [&Value2Index, &NewVariables,
                        &NewIndexMap](Value *V) -> unsigned {
    auto V2I = Value2Index.find(Val: V);
    if (V2I != Value2Index.end())
      return V2I->second;
    auto [It, Inserted] = NewIndexMap.try_emplace(
        Key: V, Args: Value2Index.size() + NewVariables.size() + 1);
    if (Inserted)
      NewVariables.push_back(Elt: V);
    return It->second;
  };

  // Make sure all variables have entries in Value2Index or NewVariables.
  for (const auto &KV : concat<DecompEntry>(Ranges&: VariablesA, Ranges&: VariablesB))
    GetOrAddIndex(KV.Variable);

  // Build result constraint, by first adding all coefficients from A and then
  // subtracting all coefficients from B.
  ConstraintTy Res(
      SmallVector<int64_t, 8>(Value2Index.size() + NewVariables.size() + 1, 0),
      IsSigned, IsEq, IsNe);
  auto &R = Res.Coefficients;
  for (const auto &KV : VariablesA)
    R[GetOrAddIndex(KV.Variable)] += KV.Coefficient;

  for (const auto &KV : VariablesB) {
    auto &Coeff = R[GetOrAddIndex(KV.Variable)];
    if (SubOverflow(X: Coeff, Y: KV.Coefficient, Result&: Coeff))
      return {};
  }

  int64_t OffsetSum;
  if (AddOverflow(X: Offset1, Y: Offset2, Result&: OffsetSum))
    return {};
  // Strict inequalities "x < y" are encoded as "x <= y - 1".
  if (Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT)
    if (AddOverflow(X: OffsetSum, Y: int64_t(-1), Result&: OffsetSum))
      return {};
  R[0] = OffsetSum;
  Res.Preconditions = std::move(Preconditions);

  // Remove any (Coefficient, Variable) entry where the Coefficient is 0 for new
  // variables.
  while (!NewVariables.empty()) {
    int64_t Last = R.back();
    if (Last != 0)
      break;
    R.pop_back();
    Value *RemovedV = NewVariables.pop_back_val();
    NewIndexMap.erase(Val: RemovedV);
  }

  return Res;
}
779
780ConstraintTy ConstraintInfo::getConstraintForSolving(CmpInst::Predicate Pred,
781 Value *Op0,
782 Value *Op1) const {
783 Constant *NullC = Constant::getNullValue(Ty: Op0->getType());
784 // Handle trivially true compares directly to avoid adding V UGE 0 constraints
785 // for all variables in the unsigned system.
786 if ((Pred == CmpInst::ICMP_ULE && Op0 == NullC) ||
787 (Pred == CmpInst::ICMP_UGE && Op1 == NullC)) {
788 auto &Value2Index = getValue2Index(Signed: false);
789 // Return constraint that's trivially true.
790 return ConstraintTy(SmallVector<int64_t, 8>(Value2Index.size(), 0), false,
791 false, false);
792 }
793
794 // If both operands are known to be non-negative, change signed predicates to
795 // unsigned ones. This increases the reasoning effectiveness in combination
796 // with the signed <-> unsigned transfer logic.
797 if (CmpInst::isSigned(predicate: Pred) &&
798 isKnownNonNegative(V: Op0, SQ: DL, /*Depth=*/MaxAnalysisRecursionDepth - 1) &&
799 isKnownNonNegative(V: Op1, SQ: DL, /*Depth=*/MaxAnalysisRecursionDepth - 1))
800 Pred = ICmpInst::getUnsignedPredicate(Pred);
801
802 SmallVector<Value *> NewVariables;
803 ConstraintTy R = getConstraint(Pred, Op0, Op1, NewVariables);
804 if (!NewVariables.empty())
805 return {};
806 return R;
807}
808
809bool ConstraintTy::isValid(const ConstraintInfo &Info) const {
810 return Coefficients.size() > 0 &&
811 all_of(Range: Preconditions, P: [&Info](const ConditionTy &C) {
812 return Info.doesHold(Pred: C.Pred, A: C.Op0, B: C.Op1);
813 });
814}
815
816std::optional<bool>
817ConstraintTy::isImpliedBy(const ConstraintSystem &CS) const {
818 bool IsConditionImplied = CS.isConditionImplied(R: Coefficients);
819
820 if (IsEq || IsNe) {
821 auto NegatedOrEqual = ConstraintSystem::negateOrEqual(R: Coefficients);
822 bool IsNegatedOrEqualImplied =
823 !NegatedOrEqual.empty() && CS.isConditionImplied(R: NegatedOrEqual);
824
825 // In order to check that `%a == %b` is true (equality), both conditions `%a
826 // >= %b` and `%a <= %b` must hold true. When checking for equality (`IsEq`
827 // is true), we return true if they both hold, false in the other cases.
828 if (IsConditionImplied && IsNegatedOrEqualImplied)
829 return IsEq;
830
831 auto Negated = ConstraintSystem::negate(R: Coefficients);
832 bool IsNegatedImplied = !Negated.empty() && CS.isConditionImplied(R: Negated);
833
834 auto StrictLessThan = ConstraintSystem::toStrictLessThan(R: Coefficients);
835 bool IsStrictLessThanImplied =
836 !StrictLessThan.empty() && CS.isConditionImplied(R: StrictLessThan);
837
838 // In order to check that `%a != %b` is true (non-equality), either
839 // condition `%a > %b` or `%a < %b` must hold true. When checking for
840 // non-equality (`IsNe` is true), we return true if one of the two holds,
841 // false in the other cases.
842 if (IsNegatedImplied || IsStrictLessThanImplied)
843 return IsNe;
844
845 return std::nullopt;
846 }
847
848 if (IsConditionImplied)
849 return true;
850
851 auto Negated = ConstraintSystem::negate(R: Coefficients);
852 auto IsNegatedImplied = !Negated.empty() && CS.isConditionImplied(R: Negated);
853 if (IsNegatedImplied)
854 return false;
855
856 // Neither the condition nor its negated holds, did not prove anything.
857 return std::nullopt;
858}
859
860bool ConstraintInfo::doesHold(CmpInst::Predicate Pred, Value *A,
861 Value *B) const {
862 auto R = getConstraintForSolving(Pred, Op0: A, Op1: B);
863 return R.isValid(Info: *this) &&
864 getCS(Signed: R.IsSigned).isConditionImplied(R: R.Coefficients);
865}
866
// Given the fact "A Pred B" added to one system, derive facts for the other
// (signed vs. unsigned) system where the signedness change is justified by
// non-negativity of the operands.
void ConstraintInfo::transferToOtherSystem(
    CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn,
    unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack) {
  // Non-negativity can come either from the signed system or from value
  // tracking.
  auto IsKnownNonNegative = [this](Value *V) {
    return doesHold(Pred: CmpInst::ICMP_SGE, A: V, B: ConstantInt::get(Ty: V->getType(), V: 0)) ||
           isKnownNonNegative(V, SQ: DL, /*Depth=*/MaxAnalysisRecursionDepth - 1);
  };
  // Check if we can combine facts from the signed and unsigned systems to
  // derive additional facts.
  if (!A->getType()->isIntegerTy())
    return;
  // FIXME: This currently depends on the order we add facts. Ideally we
  // would first add all known facts and only then try to add additional
  // facts.
  switch (Pred) {
  default:
    break;
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    // If B is a signed positive constant, then A >=s 0 and A <s (or <=s) B.
    if (IsKnownNonNegative(B)) {
      addFact(Pred: CmpInst::ICMP_SGE, A, B: ConstantInt::get(Ty: B->getType(), V: 0), NumIn,
              NumOut, DFSInStack);
      addFact(Pred: ICmpInst::getSignedPredicate(Pred), A, B, NumIn, NumOut,
              DFSInStack);
    }
    break;
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_UGT:
    // If A is a signed positive constant, then B >=s 0 and A >s (or >=s) B.
    if (IsKnownNonNegative(A)) {
      addFact(Pred: CmpInst::ICMP_SGE, A: B, B: ConstantInt::get(Ty: B->getType(), V: 0), NumIn,
              NumOut, DFSInStack);
      addFact(Pred: ICmpInst::getSignedPredicate(Pred), A, B, NumIn, NumOut,
              DFSInStack);
    }
    break;
  case CmpInst::ICMP_SLT:
    // A >=s 0 and A <s B imply A <u B.
    if (IsKnownNonNegative(A))
      addFact(Pred: CmpInst::ICMP_ULT, A, B, NumIn, NumOut, DFSInStack);
    break;
  case CmpInst::ICMP_SGT: {
    // A >s B >=s -1 implies A >=s 0, i.e. A >=u 0.
    if (doesHold(Pred: CmpInst::ICMP_SGE, A: B, B: Constant::getAllOnesValue(Ty: B->getType())))
      addFact(Pred: CmpInst::ICMP_UGE, A, B: ConstantInt::get(Ty: B->getType(), V: 0), NumIn,
              NumOut, DFSInStack);
    // A >s B >=s 0 implies A >u B.
    if (IsKnownNonNegative(B))
      addFact(Pred: CmpInst::ICMP_UGT, A, B, NumIn, NumOut, DFSInStack);

    break;
  }
  case CmpInst::ICMP_SGE:
    // A >=s B >=s 0 implies A >=u B.
    if (IsKnownNonNegative(B))
      addFact(Pred: CmpInst::ICMP_UGE, A, B, NumIn, NumOut, DFSInStack);
    break;
  }
}
923
924#ifndef NDEBUG
925
926static void dumpConstraint(ArrayRef<int64_t> C,
927 const DenseMap<Value *, unsigned> &Value2Index) {
928 ConstraintSystem CS(Value2Index);
929 CS.addVariableRowFill(C);
930 CS.dump();
931}
932#endif
933
/// Queue facts about the induction PHI in loop header \p BB, derived from the
/// header's EQ/NE exit condition and SCEV information (e.g. PN >= Start and
/// PN < B). Facts that are only valid when the induction cannot wrap are
/// guarded by preconditions on the start value and bound.
void State::addInfoForInductions(BasicBlock &BB) {
  auto *L = LI.getLoopFor(BB: &BB);
  if (!L || L->getHeader() != &BB)
    return;

  Value *A;
  Value *B;
  CmpPredicate Pred;

  // Only handle headers terminated by a conditional branch on an icmp.
  if (!match(V: BB.getTerminator(),
             P: m_Br(C: m_ICmp(Pred, L: m_Value(V&: A), R: m_Value(V&: B)), T: m_Value(), F: m_Value())))
    return;
  // Canonicalize the compare so the induction PHI ends up in A.
  PHINode *PN = dyn_cast<PHINode>(Val: A);
  if (!PN) {
    Pred = CmpInst::getSwappedPredicate(pred: Pred);
    std::swap(a&: A, b&: B);
    PN = dyn_cast<PHINode>(Val: A);
  }

  if (!PN || PN->getParent() != &BB || PN->getNumIncomingValues() != 2 ||
      !SE.isSCEVable(Ty: PN->getType()))
    return;

  // Determine the successor that stays in the loop: for NE the true edge
  // continues iterating, for EQ the false edge does. Other predicates are
  // not handled here.
  BasicBlock *InLoopSucc = nullptr;
  if (Pred == CmpInst::ICMP_NE)
    InLoopSucc = cast<BranchInst>(Val: BB.getTerminator())->getSuccessor(i: 0);
  else if (Pred == CmpInst::ICMP_EQ)
    InLoopSucc = cast<BranchInst>(Val: BB.getTerminator())->getSuccessor(i: 1);
  else
    return;

  if (!L->contains(BB: InLoopSucc) || !L->isLoopExiting(BB: &BB) || InLoopSucc == &BB)
    return;

  // The PHI must be an add-recurrence of this loop, and the loop must have a
  // unique predecessor outside so a start value can be identified.
  auto *AR = dyn_cast_or_null<SCEVAddRecExpr>(Val: SE.getSCEV(V: PN));
  BasicBlock *LoopPred = L->getLoopPredecessor();
  if (!AR || AR->getLoop() != L || !LoopPred)
    return;

  const SCEV *StartSCEV = AR->getStart();
  Value *StartValue = nullptr;
  if (auto *C = dyn_cast<SCEVConstant>(Val: StartSCEV)) {
    StartValue = C->getValue();
  } else {
    StartValue = PN->getIncomingValueForBlock(BB: LoopPred);
    assert(SE.getSCEV(StartValue) == StartSCEV && "inconsistent start value");
  }

  DomTreeNode *DTN = DT.getNode(BB: InLoopSucc);
  auto IncUnsigned = SE.getMonotonicPredicateType(LHS: AR, Pred: CmpInst::ICMP_UGT);
  auto IncSigned = SE.getMonotonicPredicateType(LHS: AR, Pred: CmpInst::ICMP_SGT);
  bool MonotonicallyIncreasingUnsigned =
      IncUnsigned == ScalarEvolution::MonotonicallyIncreasing;
  bool MonotonicallyIncreasingSigned =
      IncSigned == ScalarEvolution::MonotonicallyIncreasing;
  // If SCEV guarantees that AR does not wrap, PN >= StartValue can be added
  // unconditionally.
  if (MonotonicallyIncreasingUnsigned)
    WorkList.push_back(
        Elt: FactOrCheck::getConditionFact(DTN, Pred: CmpInst::ICMP_UGE, Op0: PN, Op1: StartValue));
  if (MonotonicallyIncreasingSigned)
    WorkList.push_back(
        Elt: FactOrCheck::getConditionFact(DTN, Pred: CmpInst::ICMP_SGE, Op0: PN, Op1: StartValue));

  // The remaining facts require a constant step.
  APInt StepOffset;
  if (auto *C = dyn_cast<SCEVConstant>(Val: AR->getStepRecurrence(SE)))
    StepOffset = C->getAPInt();
  else
    return;

  // Make sure the bound B is loop-invariant.
  if (!L->isLoopInvariant(V: B))
    return;

  // Handle negative steps.
  if (StepOffset.isNegative()) {
    // TODO: Extend to allow steps > -1.
    if (!(-StepOffset).isOne())
      return;

    // AR may wrap.
    // Add StartValue >= PN conditional on B <= StartValue which guarantees that
    // the loop exits before wrapping with a step of -1.
    WorkList.push_back(Elt: FactOrCheck::getConditionFact(
        DTN, Pred: CmpInst::ICMP_UGE, Op0: StartValue, Op1: PN,
        Precond: ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
    WorkList.push_back(Elt: FactOrCheck::getConditionFact(
        DTN, Pred: CmpInst::ICMP_SGE, Op0: StartValue, Op1: PN,
        Precond: ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
    // Add PN > B conditional on B <= StartValue which guarantees that the loop
    // exits when reaching B with a step of -1.
    WorkList.push_back(Elt: FactOrCheck::getConditionFact(
        DTN, Pred: CmpInst::ICMP_UGT, Op0: PN, Op1: B,
        Precond: ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
    WorkList.push_back(Elt: FactOrCheck::getConditionFact(
        DTN, Pred: CmpInst::ICMP_SGT, Op0: PN, Op1: B,
        Precond: ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
    return;
  }

  // Make sure AR either steps by 1 or that the value we compare against is a
  // GEP based on the same start value and all offsets are a multiple of the
  // step size, to guarantee that the induction will reach the value.
  if (StepOffset.isZero() || StepOffset.isNegative())
    return;

  if (!StepOffset.isOne()) {
    // Check whether B-Start is known to be a multiple of StepOffset.
    const SCEV *BMinusStart = SE.getMinusSCEV(LHS: SE.getSCEV(V: B), RHS: StartSCEV);
    if (isa<SCEVCouldNotCompute>(Val: BMinusStart) ||
        !SE.getConstantMultiple(S: BMinusStart).urem(RHS: StepOffset).isZero())
      return;
  }

  // AR may wrap. Add PN >= StartValue conditional on StartValue <= B which
  // guarantees that the loop exits before wrapping in combination with the
  // restrictions on B and the step above.
  if (!MonotonicallyIncreasingUnsigned)
    WorkList.push_back(Elt: FactOrCheck::getConditionFact(
        DTN, Pred: CmpInst::ICMP_UGE, Op0: PN, Op1: StartValue,
        Precond: ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
  if (!MonotonicallyIncreasingSigned)
    WorkList.push_back(Elt: FactOrCheck::getConditionFact(
        DTN, Pred: CmpInst::ICMP_SGE, Op0: PN, Op1: StartValue,
        Precond: ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));

  WorkList.push_back(Elt: FactOrCheck::getConditionFact(
      DTN, Pred: CmpInst::ICMP_ULT, Op0: PN, Op1: B,
      Precond: ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
  WorkList.push_back(Elt: FactOrCheck::getConditionFact(
      DTN, Pred: CmpInst::ICMP_SLT, Op0: PN, Op1: B,
      Precond: ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));

  // Try to add condition from header to the dedicated exit blocks. When exiting
  // either with EQ or NE in the header, we know that the induction value must
  // be u<= B, as other exits may only exit earlier.
  assert(!StepOffset.isNegative() && "induction must be increasing");
  assert((Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) &&
         "unsupported predicate");
  ConditionTy Precond = {CmpInst::ICMP_ULE, StartValue, B};
  SmallVector<BasicBlock *> ExitBBs;
  L->getExitBlocks(ExitBlocks&: ExitBBs);
  for (BasicBlock *EB : ExitBBs) {
    // Bail out on non-dedicated exits.
    if (DT.dominates(A: &BB, B: EB)) {
      WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
          DTN: DT.getNode(BB: EB), Pred: CmpInst::ICMP_ULE, Op0: A, Op1: B, Precond));
    }
  }
}
1084
/// Derive a constraint from a known-to-execute memory access of
/// \p AccessSize bytes through \p GEP: since the access must be in bounds of
/// the underlying object, the GEP's single variable index is bounded by the
/// object size. On success, fills \p Pred, \p A, \p B with the fact
/// `Index u<= MaxIndex` and returns true.
static bool getConstraintFromMemoryAccess(GetElementPtrInst &GEP,
                                          uint64_t AccessSize,
                                          CmpPredicate &Pred, Value *&A,
                                          Value *&B, const DataLayout &DL,
                                          const TargetLibraryInfo &TLI) {
  auto Offset = collectOffsets(GEP&: cast<GEPOperator>(Val&: GEP), DL);
  // The reasoning below relies on the offset computation not wrapping.
  if (!Offset.NW.hasNoUnsignedWrap())
    return false;

  // Only a single variable index is supported.
  if (Offset.VariableOffsets.size() != 1)
    return false;

  uint64_t BitWidth = Offset.ConstantOffset.getBitWidth();
  auto &[Index, Scale] = Offset.VariableOffsets.front();
  // Bail out on non-canonical GEPs.
  if (Index->getType()->getScalarSizeInBits() != BitWidth)
    return false;

  ObjectSizeOpts Opts;
  // Workaround for gep inbounds, ptr null, idx.
  Opts.NullIsUnknownSize = true;
  // Be conservative since we are not clear on whether an out of bounds access
  // to the padding is UB or not.
  Opts.RoundToAlign = true;
  std::optional<TypeSize> Size =
      getBaseObjectSize(Ptr: Offset.BasePtr, DL, TLI: &TLI, Opts);
  if (!Size || Size->isScalable())
    return false;

  // Index * Scale + ConstOffset + AccessSize <= AllocSize
  // With nuw flag, we know that the index addition doesn't have unsigned wrap.
  // If (AllocSize - (ConstOffset + AccessSize)) wraps around, there is no valid
  // value for Index.
  APInt MaxIndex = (APInt(BitWidth, Size->getFixedValue() - AccessSize,
                          /*isSigned=*/false, /*implicitTrunc=*/true) -
                    Offset.ConstantOffset)
                       .udiv(RHS: Scale);
  Pred = ICmpInst::ICMP_ULE;
  A = Index;
  B = ConstantInt::get(Ty: Index->getType(), V: MaxIndex);
  return true;
}
1127
/// Scan \p BB and queue facts (conditions known to hold) and checks
/// (instructions that may be simplifiable) on the WorkList, to be processed
/// later in dominator-DFS order.
void State::addInfoFor(BasicBlock &BB) {
  addInfoForInductions(BB);
  auto &DL = BB.getDataLayout();

  // True as long as the current instruction is guaranteed to execute.
  bool GuaranteedToExecute = true;
  // Queue conditions and assumes.
  for (Instruction &I : BB) {
    if (auto *Cmp = dyn_cast<ICmpInst>(Val: &I)) {
      // Queue a simplification check for each use of the compare, at the
      // use's context instruction.
      for (Use &U : Cmp->uses()) {
        auto *UserI = getContextInstForUse(U);
        auto *DTN = DT.getNode(BB: UserI->getParent());
        if (!DTN)
          continue;
        WorkList.push_back(Elt: FactOrCheck::getCheck(DTN, U: &U));
      }
      continue;
    }

    // Queue a bounds fact derived from a load/store through a GEP, either
    // unconditionally (when the access is guaranteed to execute on entry to
    // BB) or as an instruction fact processed at its program point.
    auto AddFactFromMemoryAccess = [&](Value *Ptr, Type *AccessType) {
      auto *GEP = dyn_cast<GetElementPtrInst>(Val: Ptr);
      if (!GEP)
        return;
      TypeSize AccessSize = DL.getTypeStoreSize(Ty: AccessType);
      if (!AccessSize.isFixed())
        return;
      if (GuaranteedToExecute) {
        CmpPredicate Pred;
        Value *A, *B;
        if (getConstraintFromMemoryAccess(GEP&: *GEP, AccessSize: AccessSize.getFixedValue(),
                                          Pred, A, B, DL, TLI)) {
          // The memory access is guaranteed to execute when BB is entered,
          // hence the constraint holds on entry to BB.
          WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
              DTN: DT.getNode(BB: I.getParent()), Pred, Op0: A, Op1: B));
        }
      } else {
        WorkList.emplace_back(
            Args: FactOrCheck::getInstFact(DTN: DT.getNode(BB: I.getParent()), Inst: &I));
      }
    };

    if (auto *LI = dyn_cast<LoadInst>(Val: &I)) {
      if (!LI->isVolatile())
        AddFactFromMemoryAccess(LI->getPointerOperand(), LI->getAccessType());
    }
    if (auto *SI = dyn_cast<StoreInst>(Val: &I)) {
      if (!SI->isVolatile())
        AddFactFromMemoryAccess(SI->getPointerOperand(), SI->getAccessType());
    }

    auto *II = dyn_cast<IntrinsicInst>(Val: &I);
    Intrinsic::ID ID = II ? II->getIntrinsicID() : Intrinsic::not_intrinsic;
    switch (ID) {
    case Intrinsic::assume: {
      Value *A, *B;
      CmpPredicate Pred;
      if (!match(V: I.getOperand(i: 0), P: m_ICmp(Pred, L: m_Value(V&: A), R: m_Value(V&: B))))
        break;
      if (GuaranteedToExecute) {
        // The assume is guaranteed to execute when BB is entered, hence Cond
        // holds on entry to BB.
        WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
            DTN: DT.getNode(BB: I.getParent()), Pred, Op0: A, Op1: B));
      } else {
        WorkList.emplace_back(
            Args: FactOrCheck::getInstFact(DTN: DT.getNode(BB: I.getParent()), Inst: &I));
      }
      break;
    }
    // Enqueue ssub_with_overflow for simplification.
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::ucmp:
    case Intrinsic::scmp:
      WorkList.push_back(
          Elt: FactOrCheck::getCheck(DTN: DT.getNode(BB: &BB), CI: cast<CallInst>(Val: &I)));
      break;
    // Enqueue the intrinsics to add extra info.
    case Intrinsic::umin:
    case Intrinsic::umax:
    case Intrinsic::smin:
    case Intrinsic::smax:
      // TODO: handle llvm.abs as well
      WorkList.push_back(
          Elt: FactOrCheck::getCheck(DTN: DT.getNode(BB: &BB), CI: cast<CallInst>(Val: &I)));
      [[fallthrough]];
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat:
      // TODO: Check if it is possible to instead only added the min/max facts
      // when simplifying uses of the min/max intrinsics.
      if (!isGuaranteedNotToBePoison(V: &I))
        break;
      [[fallthrough]];
    case Intrinsic::abs:
      WorkList.push_back(Elt: FactOrCheck::getInstFact(DTN: DT.getNode(BB: &BB), Inst: &I));
      break;
    }

    // Add facts from unsigned division and remainder.
    // urem x, n: result < n and result <= x
    // udiv x, n: result <= x
    if (auto *BO = dyn_cast<BinaryOperator>(Val: &I)) {
      if ((BO->getOpcode() == Instruction::URem ||
           BO->getOpcode() == Instruction::UDiv) &&
          isGuaranteedNotToBePoison(V: BO))
        WorkList.push_back(Elt: FactOrCheck::getInstFact(DTN: DT.getNode(BB: &BB), Inst: BO));
    }

    // Later instructions only execute if this one transfers control to its
    // successor.
    GuaranteedToExecute &= isGuaranteedToTransferExecutionToSuccessor(I: &I);
  }

  // For switches, each case edge implies the condition equals the case value.
  if (auto *Switch = dyn_cast<SwitchInst>(Val: BB.getTerminator())) {
    for (auto &Case : Switch->cases()) {
      BasicBlock *Succ = Case.getCaseSuccessor();
      Value *V = Case.getCaseValue();
      if (!canAddSuccessor(BB, Succ))
        continue;
      WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
          DTN: DT.getNode(BB: Succ), Pred: CmpInst::ICMP_EQ, Op0: Switch->getCondition(), Op1: V));
    }
    return;
  }

  auto *Br = dyn_cast<BranchInst>(Val: BB.getTerminator());
  if (!Br || !Br->isConditional())
    return;

  Value *Cond = Br->getCondition();

  // If the condition is a chain of ORs/AND and the successor only has the
  // current block as predecessor, queue conditions for the successor.
  Value *Op0, *Op1;
  if (match(V: Cond, P: m_LogicalOr(L: m_Value(V&: Op0), R: m_Value(V&: Op1))) ||
      match(V: Cond, P: m_LogicalAnd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
    bool IsOr = match(V: Cond, P: m_LogicalOr());
    bool IsAnd = match(V: Cond, P: m_LogicalAnd());
    // If there's a select that matches both AND and OR, we need to commit to
    // one of the options. Arbitrarily pick OR.
    if (IsOr && IsAnd)
      IsAnd = false;

    // For an OR chain, all leaves are known false on the false edge; for an
    // AND chain, all leaves are known true on the true edge.
    BasicBlock *Successor = Br->getSuccessor(i: IsOr ? 1 : 0);
    if (canAddSuccessor(BB, Succ: Successor)) {
      SmallVector<Value *> CondWorkList;
      SmallPtrSet<Value *, 8> SeenCond;
      auto QueueValue = [&CondWorkList, &SeenCond](Value *V) {
        if (SeenCond.insert(Ptr: V).second)
          CondWorkList.push_back(Elt: V);
      };
      QueueValue(Op1);
      QueueValue(Op0);
      while (!CondWorkList.empty()) {
        Value *Cur = CondWorkList.pop_back_val();
        if (auto *Cmp = dyn_cast<ICmpInst>(Val: Cur)) {
          WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
              DTN: DT.getNode(BB: Successor),
              Pred: IsOr ? Cmp->getInverseCmpPredicate() : Cmp->getCmpPredicate(),
              Op0: Cmp->getOperand(i_nocapture: 0), Op1: Cmp->getOperand(i_nocapture: 1)));
          continue;
        }
        if (IsOr && match(V: Cur, P: m_LogicalOr(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
          QueueValue(Op1);
          QueueValue(Op0);
          continue;
        }
        if (IsAnd && match(V: Cur, P: m_LogicalAnd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
          QueueValue(Op1);
          QueueValue(Op0);
          continue;
        }
      }
    }
    return;
  }

  // Plain conditional branch on an icmp: the condition holds on the true
  // edge and its inverse holds on the false edge.
  auto *CmpI = dyn_cast<ICmpInst>(Val: Br->getCondition());
  if (!CmpI)
    return;
  if (canAddSuccessor(BB, Succ: Br->getSuccessor(i: 0)))
    WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
        DTN: DT.getNode(BB: Br->getSuccessor(i: 0)), Pred: CmpI->getCmpPredicate(),
        Op0: CmpI->getOperand(i_nocapture: 0), Op1: CmpI->getOperand(i_nocapture: 1)));
  if (canAddSuccessor(BB, Succ: Br->getSuccessor(i: 1)))
    WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
        DTN: DT.getNode(BB: Br->getSuccessor(i: 1)), Pred: CmpI->getInverseCmpPredicate(),
        Op0: CmpI->getOperand(i_nocapture: 0), Op1: CmpI->getOperand(i_nocapture: 1)));
}
1315
1316#ifndef NDEBUG
/// Debug-only helper: print `icmp <pred> <LHS>, <RHS>` to \p OS. The type is
/// printed only for the first operand, as icmp operands share a type.
static void dumpUnpackedICmp(raw_ostream &OS, ICmpInst::Predicate Pred,
                             Value *LHS, Value *RHS) {
  OS << "icmp " << Pred << ' ';
  LHS->printAsOperand(OS, /*PrintType=*/true);
  OS << ", ";
  RHS->printAsOperand(OS, /*PrintType=*/false);
}
1324#endif
1325
1326namespace {
1327/// Helper to keep track of a condition and if it should be treated as negated
1328/// for reproducer construction.
1329/// Pred == Predicate::BAD_ICMP_PREDICATE indicates that this entry is a
1330/// placeholder to keep the ReproducerCondStack in sync with DFSInStack.
struct ReproducerEntry {
  // Predicate of the recorded condition; BAD_ICMP_PREDICATE marks a
  // placeholder entry used only to keep the stacks aligned.
  ICmpInst::Predicate Pred;
  // Operands of the recorded condition.
  Value *LHS;
  Value *RHS;

  ReproducerEntry(ICmpInst::Predicate Pred, Value *LHS, Value *RHS)
      : Pred(Pred), LHS(LHS), RHS(RHS) {}
};
1339} // namespace
1340
1341/// Helper function to generate a reproducer function for simplifying \p Cond.
1342/// The reproducer function contains a series of @llvm.assume calls, one for
1343/// each condition in \p Stack. For each condition, the operand instruction are
1344/// cloned until we reach operands that have an entry in \p Value2Index. Those
1345/// will then be added as function arguments. \p DT is used to order cloned
1346/// instructions. The reproducer function will get added to \p M, if it is
1347/// non-null. Otherwise no reproducer function is generated.
static void generateReproducer(CmpInst *Cond, Module *M,
                               ArrayRef<ReproducerEntry> Stack,
                               ConstraintInfo &Info, DominatorTree &DT) {
  if (!M)
    return;

  LLVMContext &Ctx = Cond->getContext();

  LLVM_DEBUG(dbgs() << "Creating reproducer for " << *Cond << "\n");

  ValueToValueMapTy Old2New;
  SmallVector<Value *> Args;
  SmallPtrSet<Value *, 8> Seen;
  // Traverse Cond and its operands recursively until we reach a value that's in
  // Value2Index or not an instruction, or not a operation that
  // ConstraintElimination can decompose. Such values will be considered as
  // external inputs to the reproducer, they are collected and added as function
  // arguments later.
  auto CollectArguments = [&](ArrayRef<Value *> Ops, bool IsSigned) {
    auto &Value2Index = Info.getValue2Index(Signed: IsSigned);
    SmallVector<Value *, 4> WorkList(Ops);
    while (!WorkList.empty()) {
      Value *V = WorkList.pop_back_val();
      if (!Seen.insert(Ptr: V).second)
        continue;
      if (Old2New.find(Val: V) != Old2New.end())
        continue;
      if (isa<Constant>(Val: V))
        continue;

      auto *I = dyn_cast<Instruction>(Val: V);
      if (Value2Index.contains(Val: V) || !I ||
          !isa<CmpInst, BinaryOperator, GEPOperator, CastInst>(Val: V)) {
        // Map the external input to itself so later traversals stop here.
        Old2New[V] = V;
        Args.push_back(Elt: V);
        LLVM_DEBUG(dbgs() << "  found external input " << *V << "\n");
      } else {
        append_range(C&: WorkList, R: I->operands());
      }
    }
  };

  for (auto &Entry : Stack)
    if (Entry.Pred != ICmpInst::BAD_ICMP_PREDICATE)
      CollectArguments({Entry.LHS, Entry.RHS}, ICmpInst::isSigned(predicate: Entry.Pred));
  CollectArguments(Cond, ICmpInst::isSigned(predicate: Cond->getPredicate()));

  SmallVector<Type *> ParamTys;
  for (auto *P : Args)
    ParamTys.push_back(Elt: P->getType());

  FunctionType *FTy = FunctionType::get(Result: Cond->getType(), Params: ParamTys,
                                        /*isVarArg=*/false);
  Function *F = Function::Create(Ty: FTy, Linkage: Function::ExternalLinkage,
                                 N: Cond->getModule()->getName() +
                                     Cond->getFunction()->getName() + "repro",
                                 M);
  // Add arguments to the reproducer function for each external value collected.
  for (unsigned I = 0; I < Args.size(); ++I) {
    F->getArg(i: I)->setName(Args[I]->getName());
    Old2New[Args[I]] = F->getArg(i: I);
  }

  BasicBlock *Entry = BasicBlock::Create(Context&: Ctx, Name: "entry", Parent: F);
  IRBuilder<> Builder(Entry);
  // Create a placeholder return; its operand is replaced by the cloned
  // condition at the end.
  Builder.CreateRet(V: Builder.getTrue());
  Builder.SetInsertPoint(Entry->getTerminator());

  // Clone instructions in \p Ops and their operands recursively until reaching
  // an value in Value2Index (external input to the reproducer). Update Old2New
  // mapping for the original and cloned instructions. Sort instructions to
  // clone by dominance, then insert the cloned instructions in the function.
  auto CloneInstructions = [&](ArrayRef<Value *> Ops, bool IsSigned) {
    SmallVector<Value *, 4> WorkList(Ops);
    SmallVector<Instruction *> ToClone;
    auto &Value2Index = Info.getValue2Index(Signed: IsSigned);
    while (!WorkList.empty()) {
      Value *V = WorkList.pop_back_val();
      if (Old2New.find(Val: V) != Old2New.end())
        continue;

      auto *I = dyn_cast<Instruction>(Val: V);
      if (!Value2Index.contains(Val: V) && I) {
        // Placeholder entry so each instruction is visited only once.
        Old2New[V] = nullptr;
        ToClone.push_back(Elt: I);
        append_range(C&: WorkList, R: I->operands());
      }
    }

    // Insert definitions before uses by ordering the clones by dominance.
    sort(C&: ToClone,
         Comp: [&DT](Instruction *A, Instruction *B) { return DT.dominates(Def: A, User: B); });
    for (Instruction *I : ToClone) {
      Instruction *Cloned = I->clone();
      Old2New[I] = Cloned;
      Old2New[I]->setName(I->getName());
      Cloned->insertBefore(InsertPos: Builder.GetInsertPoint());
      Cloned->dropUnknownNonDebugMetadata();
      Cloned->setDebugLoc({});
    }
  };

  // Materialize the assumptions for the reproducer using the entries in Stack.
  // That is, first clone the operands of the condition recursively until we
  // reach an external input to the reproducer and add them to the reproducer
  // function. Then add an ICmp for the condition (with the inverse predicate if
  // the entry is negated) and an assert using the ICmp.
  for (auto &Entry : Stack) {
    if (Entry.Pred == ICmpInst::BAD_ICMP_PREDICATE)
      continue;

    LLVM_DEBUG(dbgs() << "  Materializing assumption ";
               dumpUnpackedICmp(dbgs(), Entry.Pred, Entry.LHS, Entry.RHS);
               dbgs() << "\n");
    CloneInstructions({Entry.LHS, Entry.RHS}, CmpInst::isSigned(predicate: Entry.Pred));

    auto *Cmp = Builder.CreateICmp(P: Entry.Pred, LHS: Entry.LHS, RHS: Entry.RHS);
    Builder.CreateAssumption(Cond: Cmp);
  }

  // Finally, clone the condition to reproduce and remap instruction operands in
  // the reproducer using Old2New.
  CloneInstructions(Cond, CmpInst::isSigned(predicate: Cond->getPredicate()));
  Entry->getTerminator()->setOperand(i: 0, Val: Cond);
  remapInstructionsInBlocks(Blocks: {Entry}, VMap&: Old2New);

  assert(!verifyFunction(*F, &dbgs()));
}
1475
1476static std::optional<bool> checkCondition(CmpInst::Predicate Pred, Value *A,
1477 Value *B, Instruction *CheckInst,
1478 ConstraintInfo &Info) {
1479 LLVM_DEBUG(dbgs() << "Checking " << *CheckInst << "\n");
1480
1481 auto R = Info.getConstraintForSolving(Pred, Op0: A, Op1: B);
1482 if (R.empty() || !R.isValid(Info)) {
1483 LLVM_DEBUG(dbgs() << " failed to decompose condition\n");
1484 return std::nullopt;
1485 }
1486
1487 auto &CSToUse = Info.getCS(Signed: R.IsSigned);
1488 if (auto ImpliedCondition = R.isImpliedBy(CS: CSToUse)) {
1489 if (!DebugCounter::shouldExecute(Counter&: EliminatedCounter))
1490 return std::nullopt;
1491
1492 LLVM_DEBUG({
1493 dbgs() << "Condition ";
1494 dumpUnpackedICmp(
1495 dbgs(), *ImpliedCondition ? Pred : CmpInst::getInversePredicate(Pred),
1496 A, B);
1497 dbgs() << " implied by dominating constraints\n";
1498 CSToUse.dump();
1499 });
1500 return ImpliedCondition;
1501 }
1502
1503 return std::nullopt;
1504}
1505
/// Try to simplify \p Cmp to a constant using the constraints in \p Info,
/// replacing only those uses that are dominated by the context (described by
/// the DFS numbers \p NumIn/\p NumOut and \p ContextInst). Returns true if
/// any use was replaced.
static bool checkAndReplaceCondition(
    ICmpInst *Cmp, ConstraintInfo &Info, unsigned NumIn, unsigned NumOut,
    Instruction *ContextInst, Module *ReproducerModule,
    ArrayRef<ReproducerEntry> ReproducerCondStack, DominatorTree &DT,
    SmallVectorImpl<Instruction *> &ToRemove) {
  auto ReplaceCmpWithConstant = [&](CmpInst *Cmp, bool IsTrue) {
    generateReproducer(Cond: Cmp, M: ReproducerModule, Stack: ReproducerCondStack, Info, DT);
    Constant *ConstantC = ConstantInt::getBool(
        Ty: CmpInst::makeCmpResultType(opnd_type: Cmp->getType()), V: IsTrue);
    bool Changed = false;
    // Only replace uses whose context is dominated by the fact's context:
    // inside the DFS in/out range and not before ContextInst in its block.
    Cmp->replaceUsesWithIf(New: ConstantC, ShouldReplace: [&DT, NumIn, NumOut, ContextInst,
                                         &Changed](Use &U) {
      auto *UserI = getContextInstForUse(U);
      auto *DTN = DT.getNode(BB: UserI->getParent());
      if (!DTN || DTN->getDFSNumIn() < NumIn || DTN->getDFSNumOut() > NumOut)
        return false;
      if (UserI->getParent() == ContextInst->getParent() &&
          UserI->comesBefore(Other: ContextInst))
        return false;

      // Conditions in an assume trivially simplify to true. Skip uses
      // in assume calls to not destroy the available information.
      auto *II = dyn_cast<IntrinsicInst>(Val: U.getUser());
      bool ShouldReplace = !II || II->getIntrinsicID() != Intrinsic::assume;
      Changed |= ShouldReplace;
      return ShouldReplace;
    });
    NumCondsRemoved++;

    // Update the debug value records that satisfy the same condition used
    // in replaceUsesWithIf.
    SmallVector<DbgVariableRecord *> DVRUsers;
    findDbgUsers(V: Cmp, DbgVariableRecords&: DVRUsers);

    for (auto *DVR : DVRUsers) {
      auto *DTN = DT.getNode(BB: DVR->getParent());
      if (!DTN || DTN->getDFSNumIn() < NumIn || DTN->getDFSNumOut() > NumOut)
        continue;

      auto *MarkedI = DVR->getInstruction();
      if (MarkedI->getParent() == ContextInst->getParent() &&
          MarkedI->comesBefore(Other: ContextInst))
        continue;

      DVR->replaceVariableLocationOp(OldValue: Cmp, NewValue: ConstantC);
    }

    if (Cmp->use_empty())
      ToRemove.push_back(Elt: Cmp);

    return Changed;
  };

  if (auto ImpliedCondition =
          checkCondition(Pred: Cmp->getPredicate(), A: Cmp->getOperand(i_nocapture: 0),
                         B: Cmp->getOperand(i_nocapture: 1), CheckInst: Cmp, Info))
    return ReplaceCmpWithConstant(Cmp, *ImpliedCondition);

  // When the predicate is samesign and unsigned, we can also make use of the
  // signed predicate information.
  if (Cmp->hasSameSign() && Cmp->isUnsigned())
    if (auto ImpliedCondition =
            checkCondition(Pred: Cmp->getSignedPredicate(), A: Cmp->getOperand(i_nocapture: 0),
                           B: Cmp->getOperand(i_nocapture: 1), CheckInst: Cmp, Info))
      return ReplaceCmpWithConstant(Cmp, *ImpliedCondition);

  return false;
}
1574
1575static bool checkAndReplaceMinMax(MinMaxIntrinsic *MinMax, ConstraintInfo &Info,
1576 SmallVectorImpl<Instruction *> &ToRemove) {
1577 auto ReplaceMinMaxWithOperand = [&](MinMaxIntrinsic *MinMax, bool UseLHS) {
1578 // TODO: generate reproducer for min/max.
1579 MinMax->replaceAllUsesWith(V: MinMax->getOperand(i_nocapture: UseLHS ? 0 : 1));
1580 ToRemove.push_back(Elt: MinMax);
1581 return true;
1582 };
1583
1584 ICmpInst::Predicate Pred =
1585 ICmpInst::getNonStrictPredicate(pred: MinMax->getPredicate());
1586 if (auto ImpliedCondition = checkCondition(
1587 Pred, A: MinMax->getOperand(i_nocapture: 0), B: MinMax->getOperand(i_nocapture: 1), CheckInst: MinMax, Info))
1588 return ReplaceMinMaxWithOperand(MinMax, *ImpliedCondition);
1589 if (auto ImpliedCondition = checkCondition(
1590 Pred, A: MinMax->getOperand(i_nocapture: 1), B: MinMax->getOperand(i_nocapture: 0), CheckInst: MinMax, Info))
1591 return ReplaceMinMaxWithOperand(MinMax, !*ImpliedCondition);
1592 return false;
1593}
1594
1595static bool checkAndReplaceCmp(CmpIntrinsic *I, ConstraintInfo &Info,
1596 SmallVectorImpl<Instruction *> &ToRemove) {
1597 Value *LHS = I->getOperand(i_nocapture: 0);
1598 Value *RHS = I->getOperand(i_nocapture: 1);
1599 if (checkCondition(Pred: I->getGTPredicate(), A: LHS, B: RHS, CheckInst: I, Info).value_or(u: false)) {
1600 I->replaceAllUsesWith(V: ConstantInt::get(Ty: I->getType(), V: 1));
1601 ToRemove.push_back(Elt: I);
1602 return true;
1603 }
1604 if (checkCondition(Pred: I->getLTPredicate(), A: LHS, B: RHS, CheckInst: I, Info).value_or(u: false)) {
1605 I->replaceAllUsesWith(V: ConstantInt::getSigned(Ty: I->getType(), V: -1));
1606 ToRemove.push_back(Elt: I);
1607 return true;
1608 }
1609 if (checkCondition(Pred: ICmpInst::ICMP_EQ, A: LHS, B: RHS, CheckInst: I, Info).value_or(u: false)) {
1610 I->replaceAllUsesWith(V: ConstantInt::get(Ty: I->getType(), V: 0));
1611 ToRemove.push_back(Elt: I);
1612 return true;
1613 }
1614 return false;
1615}
1616
/// Undo the effects of the top-of-stack entry \p E: drop the most recently
/// added constraint from the matching system, forget the variables that were
/// introduced for it, and pop the entry. Keeps ReproducerCondStack in sync
/// with DFSInStack when a reproducer module is in use.
static void
removeEntryFromStack(const StackEntry &E, ConstraintInfo &Info,
                     Module *ReproducerModule,
                     SmallVectorImpl<ReproducerEntry> &ReproducerCondStack,
                     SmallVectorImpl<StackEntry> &DFSInStack) {
  Info.popLastConstraint(Signed: E.IsSigned);
  // Remove variables in the system that went out of scope.
  auto &Mapping = Info.getValue2Index(Signed: E.IsSigned);
  for (Value *V : E.ValuesToRelease)
    Mapping.erase(Val: V);
  Info.popLastNVariables(Signed: E.IsSigned, N: E.ValuesToRelease.size());
  DFSInStack.pop_back();
  if (ReproducerModule)
    ReproducerCondStack.pop_back();
}
1632
1633/// Check if either the first condition of an AND or OR is implied by the
1634/// (negated in case of OR) second condition or vice versa.
static bool checkOrAndOpImpliedByOther(
    FactOrCheck &CB, ConstraintInfo &Info, Module *ReproducerModule,
    SmallVectorImpl<ReproducerEntry> &ReproducerCondStack,
    SmallVectorImpl<StackEntry> &DFSInStack,
    SmallVectorImpl<Instruction *> &ToRemove) {
  Instruction *JoinOp = CB.getContextInst();
  if (JoinOp->use_empty())
    return false;

  CmpInst *CmpToCheck = cast<CmpInst>(Val: CB.getInstructionToSimplify());
  unsigned OtherOpIdx = JoinOp->getOperand(i: 0) == CmpToCheck ? 1 : 0;

  // Don't try to simplify the first condition of a select by the second, as
  // this may make the select more poisonous than the original one.
  // TODO: check if the first operand may be poison.
  if (OtherOpIdx != 0 && isa<SelectInst>(Val: JoinOp))
    return false;

  // Temporarily add the facts below; the scope_exit restores the stacks to
  // their original size on every exit path.
  unsigned OldSize = DFSInStack.size();
  llvm::scope_exit InfoRestorer([&]() {
    // Remove entries again.
    while (OldSize < DFSInStack.size()) {
      StackEntry E = DFSInStack.back();
      removeEntryFromStack(E, Info, ReproducerModule, ReproducerCondStack,
                           DFSInStack);
    }
  });
  bool IsOr = match(V: JoinOp, P: m_LogicalOr());
  SmallVector<Value *, 4> Worklist({JoinOp->getOperand(i: OtherOpIdx)});
  // Do a traversal of the AND/OR tree to add facts from leaf compares.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *LHS, *RHS;
    CmpPredicate Pred;
    if (match(V: Val, P: m_ICmp(Pred, L: m_Value(V&: LHS), R: m_Value(V&: RHS)))) {
      // For OR, check if the negated condition implies CmpToCheck.
      if (IsOr)
        Pred = CmpInst::getInversePredicate(pred: Pred);
      // Optimistically add fact from the other compares in the AND/OR.
      Info.addFact(Pred, A: LHS, B: RHS, NumIn: CB.NumIn, NumOut: CB.NumOut, DFSInStack);
      continue;
    }
    if (IsOr ? match(V: Val, P: m_LogicalOr(L: m_Value(V&: LHS), R: m_Value(V&: RHS)))
             : match(V: Val, P: m_LogicalAnd(L: m_Value(V&: LHS), R: m_Value(V&: RHS)))) {
      Worklist.push_back(Elt: LHS);
      Worklist.push_back(Elt: RHS);
    }
  }
  // If no facts were added, there is nothing new to conclude.
  if (OldSize == DFSInStack.size())
    return false;

  // Check if the second condition can be simplified now.
  if (auto ImpliedCondition =
          checkCondition(Pred: CmpToCheck->getPredicate(), A: CmpToCheck->getOperand(i_nocapture: 0),
                         B: CmpToCheck->getOperand(i_nocapture: 1), CheckInst: CmpToCheck, Info)) {
    // If the implied value matches the join's identity (true for OR, false
    // for AND), the whole join folds to a constant; otherwise it folds to
    // the other operand.
    if (IsOr == *ImpliedCondition)
      JoinOp->replaceAllUsesWith(
          V: ConstantInt::getBool(Ty: JoinOp->getType(), V: *ImpliedCondition));
    else
      JoinOp->replaceAllUsesWith(V: JoinOp->getOperand(i: OtherOpIdx));
    ToRemove.push_back(Elt: JoinOp);
    return true;
  }

  return false;
}
1701
1702void ConstraintInfo::addFact(CmpInst::Predicate Pred, Value *A, Value *B,
1703 unsigned NumIn, unsigned NumOut,
1704 SmallVectorImpl<StackEntry> &DFSInStack) {
1705 addFactImpl(Pred, A, B, NumIn, NumOut, DFSInStack, ForceSignedSystem: false);
1706 // If the Pred is eq/ne, also add the fact to signed system.
1707 if (CmpInst::isEquality(pred: Pred))
1708 addFactImpl(Pred, A, B, NumIn, NumOut, DFSInStack, ForceSignedSystem: true);
1709}
1710
/// Convert Pred(A, B) into a constraint row and add it to the selected
/// system, pushing matching entries onto \p DFSInStack so the rows can be
/// popped again when they go out of dominance scope.
///
/// NOTE(review): the number of DFSInStack entries pushed here must stay in
/// sync with the number of rows added to the system — the NDEBUG asserts at
/// the end of eliminateConstraints rely on that invariant.
void ConstraintInfo::addFactImpl(CmpInst::Predicate Pred, Value *A, Value *B,
                                 unsigned NumIn, unsigned NumOut,
                                 SmallVectorImpl<StackEntry> &DFSInStack,
                                 bool ForceSignedSystem) {
  // If the constraint has a pre-condition, skip the constraint if it does not
  // hold.
  SmallVector<Value *> NewVariables;
  auto R = getConstraint(Pred, Op0: A, Op1: B, NewVariables, ForceSignedSystem);

  // TODO: Support non-equality for facts as well.
  if (!R.isValid(Info: *this) || R.isNe())
    return;

  LLVM_DEBUG(dbgs() << "Adding '"; dumpUnpackedICmp(dbgs(), Pred, A, B);
             dbgs() << "'\n");
  auto &CSToUse = getCS(Signed: R.IsSigned);
  if (R.Coefficients.empty())
    return;

  bool Added = CSToUse.addVariableRowFill(R: R.Coefficients);
  if (!Added)
    return;

  // If R has been added to the system, add the new variables and queue it for
  // removal once it goes out-of-scope.
  SmallVector<Value *, 2> ValuesToRelease;
  auto &Value2Index = getValue2Index(Signed: R.IsSigned);
  for (Value *V : NewVariables) {
    // Column 0 is the constant term, so variable indices start at 1.
    Value2Index.try_emplace(Key: V, Args: Value2Index.size() + 1);
    ValuesToRelease.push_back(Elt: V);
  }

  LLVM_DEBUG({
    dbgs() << "  constraint: ";
    dumpConstraint(R.Coefficients, getValue2Index(R.IsSigned));
    dbgs() << "\n";
  });

  DFSInStack.emplace_back(Args&: NumIn, Args&: NumOut, Args&: R.IsSigned,
                          Args: std::move(ValuesToRelease));

  if (!R.IsSigned) {
    // In the unsigned system every newly introduced variable is known
    // non-negative; add a "-V <= 0" row (plus a matching stack entry) per
    // new variable.
    for (Value *V : NewVariables) {
      ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0),
                          false, false, false);
      VarPos.Coefficients[Value2Index[V]] = -1;
      CSToUse.addVariableRow(R: VarPos.Coefficients);
      DFSInStack.emplace_back(Args&: NumIn, Args&: NumOut, Args&: R.IsSigned,
                              Args: SmallVector<Value *, 2>());
    }
  }

  if (R.isEq()) {
    // Also add the inverted constraint for equality constraints.
    for (auto &Coeff : R.Coefficients)
      Coeff *= -1;
    CSToUse.addVariableRowFill(R: R.Coefficients);

    DFSInStack.emplace_back(Args&: NumIn, Args&: NumOut, Args&: R.IsSigned,
                            Args: SmallVector<Value *, 2>());
  }
}
1773
/// Rewrite users of a sub-with-overflow intrinsic \p II known not to
/// overflow: extractvalue-0 users get a plain `sub A, B`, extractvalue-1
/// (the overflow bit) users get `false`. Dead extracts are queued in
/// \p ToRemove and \p II itself is erased once unused. Returns true if
/// anything changed.
static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B,
                                   SmallVectorImpl<Instruction *> &ToRemove) {
  bool Changed = false;
  IRBuilder<> Builder(II->getParent(), II->getIterator());
  // Lazily created so the sub is only materialized if some user needs it.
  Value *Sub = nullptr;
  // early_inc_range: users may be unlinked from II's use list while we walk.
  for (User *U : make_early_inc_range(Range: II->users())) {
    if (match(V: U, P: m_ExtractValue<0>(V: m_Value()))) {
      if (!Sub)
        Sub = Builder.CreateSub(LHS: A, RHS: B);
      U->replaceAllUsesWith(V: Sub);
      Changed = true;
    } else if (match(V: U, P: m_ExtractValue<1>(V: m_Value()))) {
      U->replaceAllUsesWith(V: Builder.getFalse());
      Changed = true;
    } else
      continue;

    if (U->use_empty()) {
      auto *I = cast<Instruction>(Val: U);
      ToRemove.push_back(Elt: I);
      // Detach the extract from II now so II can become dead below; actual
      // erasure of I is deferred via ToRemove.
      I->setOperand(i: 0, Val: PoisonValue::get(T: II->getType()));
      Changed = true;
    }
  }

  if (II->use_empty()) {
    II->eraseFromParent();
    Changed = true;
  }
  return Changed;
}
1805
1806static bool
1807tryToSimplifyOverflowMath(IntrinsicInst *II, ConstraintInfo &Info,
1808 SmallVectorImpl<Instruction *> &ToRemove) {
1809 auto DoesConditionHold = [](CmpInst::Predicate Pred, Value *A, Value *B,
1810 ConstraintInfo &Info) {
1811 auto R = Info.getConstraintForSolving(Pred, Op0: A, Op1: B);
1812 if (R.size() < 2 || !R.isValid(Info))
1813 return false;
1814
1815 auto &CSToUse = Info.getCS(Signed: R.IsSigned);
1816 return CSToUse.isConditionImplied(R: R.Coefficients);
1817 };
1818
1819 bool Changed = false;
1820 if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow) {
1821 // If A s>= B && B s>= 0, ssub.with.overflow(a, b) should not overflow and
1822 // can be simplified to a regular sub.
1823 Value *A = II->getArgOperand(i: 0);
1824 Value *B = II->getArgOperand(i: 1);
1825 if (!DoesConditionHold(CmpInst::ICMP_SGE, A, B, Info) ||
1826 !DoesConditionHold(CmpInst::ICMP_SGE, B,
1827 ConstantInt::get(Ty: A->getType(), V: 0), Info))
1828 return false;
1829 Changed = replaceSubOverflowUses(II, A, B, ToRemove);
1830 }
1831 return Changed;
1832}
1833
/// Driver for the pass: collect facts and checks for every block, order them
/// by dominance, then walk the ordered worklist maintaining a stack of
/// in-scope constraints, simplifying each check against the current system
/// and adding each fact to it. Returns true if the function was changed.
static bool eliminateConstraints(Function &F, DominatorTree &DT, LoopInfo &LI,
                                 ScalarEvolution &SE,
                                 OptimizationRemarkEmitter &ORE,
                                 TargetLibraryInfo &TLI) {
  bool Changed = false;
  // DFS in/out numbers are used below to decide dominance-based scoping.
  DT.updateDFSNumbers();
  SmallVector<Value *> FunctionArgs(llvm::make_pointer_range(Range: F.args()));
  ConstraintInfo Info(F.getDataLayout(), FunctionArgs);
  State S(DT, LI, SE, TLI);
  // Only built when reproducer dumping was requested on the command line.
  std::unique_ptr<Module> ReproducerModule(
      DumpReproducers ? new Module(F.getName(), F.getContext()) : nullptr);

  // First, collect conditions implied by branches and blocks with their
  // Dominator DFS in and out numbers.
  for (BasicBlock &BB : F) {
    if (!DT.getNode(BB: &BB))
      continue;
    S.addInfoFor(BB);
  }

  // Next, sort worklist by dominance, so that dominating conditions to check
  // and facts come before conditions and facts dominated by them. If a
  // condition to check and a fact have the same numbers, conditional facts come
  // first. Assume facts and checks are ordered according to their relative
  // order in the containing basic block. Also make sure conditions with
  // constant operands come before conditions without constant operands. This
  // increases the effectiveness of the current signed <-> unsigned fact
  // transfer logic.
  stable_sort(Range&: S.WorkList, C: [](const FactOrCheck &A, const FactOrCheck &B) {
    auto HasNoConstOp = [](const FactOrCheck &B) {
      Value *V0 = B.isConditionFact() ? B.Cond.Op0 : B.Inst->getOperand(i: 0);
      Value *V1 = B.isConditionFact() ? B.Cond.Op1 : B.Inst->getOperand(i: 1);
      return !isa<ConstantInt>(Val: V0) && !isa<ConstantInt>(Val: V1);
    };
    // If both entries have the same In numbers, conditional facts come first.
    // Otherwise use the relative order in the basic block.
    if (A.NumIn == B.NumIn) {
      if (A.isConditionFact() && B.isConditionFact()) {
        bool NoConstOpA = HasNoConstOp(A);
        bool NoConstOpB = HasNoConstOp(B);
        return NoConstOpA < NoConstOpB;
      }
      if (A.isConditionFact())
        return true;
      if (B.isConditionFact())
        return false;
      auto *InstA = A.getContextInst();
      auto *InstB = B.getContextInst();
      return InstA->comesBefore(Other: InstB);
    }
    return A.NumIn < B.NumIn;
  });

  // Instructions to erase after the walk; deferred so iteration stays safe.
  SmallVector<Instruction *> ToRemove;

  // Finally, process ordered worklist and eliminate implied conditions.
  SmallVector<StackEntry, 16> DFSInStack;
  SmallVector<ReproducerEntry> ReproducerCondStack;
  for (FactOrCheck &CB : S.WorkList) {
    // First, pop entries from the stack that are out-of-scope for CB. Remove
    // the corresponding entry from the constraint system.
    while (!DFSInStack.empty()) {
      auto &E = DFSInStack.back();
      LLVM_DEBUG(dbgs() << "Top of stack : " << E.NumIn << " " << E.NumOut
                        << "\n");
      LLVM_DEBUG(dbgs() << "CB: " << CB.NumIn << " " << CB.NumOut << "\n");
      assert(E.NumIn <= CB.NumIn);
      // E still dominates CB; everything below it does too, so stop popping.
      if (CB.NumOut <= E.NumOut)
        break;
      LLVM_DEBUG({
        dbgs() << "Removing ";
        dumpConstraint(Info.getCS(E.IsSigned).getLastConstraint(),
                       Info.getValue2Index(E.IsSigned));
        dbgs() << "\n";
      });
      removeEntryFromStack(E, Info, ReproducerModule: ReproducerModule.get(), ReproducerCondStack,
                           DFSInStack);
    }

    // For a block, check if any CmpInsts become known based on the current set
    // of constraints.
    if (CB.isCheck()) {
      Instruction *Inst = CB.getInstructionToSimplify();
      if (!Inst)
        continue;
      LLVM_DEBUG(dbgs() << "Processing condition to simplify: " << *Inst
                        << "\n");
      if (auto *II = dyn_cast<WithOverflowInst>(Val: Inst)) {
        Changed |= tryToSimplifyOverflowMath(II, Info, ToRemove);
      } else if (auto *Cmp = dyn_cast<ICmpInst>(Val: Inst)) {
        bool Simplified = checkAndReplaceCondition(
            Cmp, Info, NumIn: CB.NumIn, NumOut: CB.NumOut, ContextInst: CB.getContextInst(),
            ReproducerModule: ReproducerModule.get(), ReproducerCondStack, DT&: S.DT, ToRemove);
        // If the compare itself was not implied, try using the sibling
        // operand(s) of its parent AND/OR as temporary facts.
        if (!Simplified &&
            match(V: CB.getContextInst(), P: m_LogicalOp(L: m_Value(), R: m_Value()))) {
          Simplified = checkOrAndOpImpliedByOther(
              CB, Info, ReproducerModule: ReproducerModule.get(), ReproducerCondStack, DFSInStack,
              ToRemove);
        }
        Changed |= Simplified;
      } else if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Val: Inst)) {
        Changed |= checkAndReplaceMinMax(MinMax, Info, ToRemove);
      } else if (auto *CmpIntr = dyn_cast<CmpIntrinsic>(Val: Inst)) {
        Changed |= checkAndReplaceCmp(I: CmpIntr, Info, ToRemove);
      }
      continue;
    }

    // Helper that adds a fact to the system, caps system growth, keeps the
    // reproducer stack in sync, and transfers signed<->unsigned knowledge.
    auto AddFact = [&](CmpPredicate Pred, Value *A, Value *B) {
      LLVM_DEBUG(dbgs() << "Processing fact to add to the system: ";
                 dumpUnpackedICmp(dbgs(), Pred, A, B); dbgs() << "\n");
      if (Info.getCS(Signed: CmpInst::isSigned(predicate: Pred)).size() > MaxRows) {
        LLVM_DEBUG(
            dbgs()
            << "Skip adding constraint because system has too many rows.\n");
        return;
      }

      Info.addFact(Pred, A, B, NumIn: CB.NumIn, NumOut: CB.NumOut, DFSInStack);
      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size())
        ReproducerCondStack.emplace_back(Args&: Pred, Args&: A, Args&: B);

      if (ICmpInst::isRelational(P: Pred)) {
        // If samesign is present on the ICmp, simply flip the sign of the
        // predicate, transferring the information from the signed system to the
        // unsigned system, and viceversa.
        if (Pred.hasSameSign())
          Info.addFact(Pred: ICmpInst::getFlippedSignednessPredicate(Pred), A, B,
                       NumIn: CB.NumIn, NumOut: CB.NumOut, DFSInStack);
        else
          Info.transferToOtherSystem(Pred, A, B, NumIn: CB.NumIn, NumOut: CB.NumOut,
                                     DFSInStack);
      }

      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size()) {
        // Add dummy entries to ReproducerCondStack to keep it in sync with
        // DFSInStack.
        for (unsigned I = 0,
                      E = (DFSInStack.size() - ReproducerCondStack.size());
             I < E; ++I) {
          ReproducerCondStack.emplace_back(Args: ICmpInst::BAD_ICMP_PREDICATE,
                                           Args: nullptr, Args: nullptr);
        }
      }
    };

    CmpPredicate Pred;
    // Non-condition facts: derive implied ranges from specific instructions
    // (abs, min/max, saturating arithmetic, urem/udiv, memory accesses).
    if (!CB.isConditionFact()) {
      Value *X;
      if (match(V: CB.Inst, P: m_Intrinsic<Intrinsic::abs>(Op0: m_Value(V&: X)))) {
        // If is_int_min_poison is true then we may assume llvm.abs >= 0.
        if (cast<ConstantInt>(Val: CB.Inst->getOperand(i: 1))->isOne())
          AddFact(CmpInst::ICMP_SGE, CB.Inst,
                  ConstantInt::get(Ty: CB.Inst->getType(), V: 0));
        // abs(x) s>= x holds unconditionally.
        AddFact(CmpInst::ICMP_SGE, CB.Inst, X);
        continue;
      }

      if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Val: CB.Inst)) {
        // min/max relates its result to both operands (non-strictly).
        Pred = ICmpInst::getNonStrictPredicate(pred: MinMax->getPredicate());
        AddFact(Pred, MinMax, MinMax->getLHS());
        AddFact(Pred, MinMax, MinMax->getRHS());
        continue;
      }
      if (auto *USatI = dyn_cast<SaturatingInst>(Val: CB.Inst)) {
        switch (USatI->getIntrinsicID()) {
        default:
          llvm_unreachable("Unexpected intrinsic.");
        case Intrinsic::uadd_sat:
          // uadd.sat(a, b) u>= a and u>= b.
          AddFact(ICmpInst::ICMP_UGE, USatI, USatI->getLHS());
          AddFact(ICmpInst::ICMP_UGE, USatI, USatI->getRHS());
          break;
        case Intrinsic::usub_sat:
          // usub.sat(a, b) u<= a.
          AddFact(ICmpInst::ICMP_ULE, USatI, USatI->getLHS());
          break;
        }
        continue;
      }

      if (auto *BO = dyn_cast<BinaryOperator>(Val: CB.Inst)) {
        if (BO->getOpcode() == Instruction::URem) {
          // urem x, n: result < n (remainder is always less than divisor)
          AddFact(CmpInst::ICMP_ULT, BO, BO->getOperand(i_nocapture: 1));
          // urem x, n: result <= x (remainder is at most the dividend)
          AddFact(CmpInst::ICMP_ULE, BO, BO->getOperand(i_nocapture: 0));
          continue;
        }
        if (BO->getOpcode() == Instruction::UDiv) {
          // udiv x, n: result <= x (quotient is at most the dividend)
          AddFact(CmpInst::ICMP_ULE, BO, BO->getOperand(i_nocapture: 0));
          continue;
        }
      }

      auto &DL = F.getDataLayout();
      // Derive in-bounds facts about GEP indices from a load/store through
      // the GEP (the access must not be out of bounds).
      auto AddFactsAboutIndices = [&](Value *Ptr, Type *AccessType) {
        CmpPredicate Pred;
        Value *A, *B;
        if (getConstraintFromMemoryAccess(
                GEP&: *cast<GetElementPtrInst>(Val: Ptr),
                AccessSize: DL.getTypeStoreSize(Ty: AccessType).getFixedValue(), Pred, A, B, DL,
                TLI))
          AddFact(Pred, A, B);
      };

      if (auto *LI = dyn_cast<LoadInst>(Val: CB.Inst)) {
        AddFactsAboutIndices(LI->getPointerOperand(), LI->getAccessType());
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(Val: CB.Inst)) {
        AddFactsAboutIndices(SI->getPointerOperand(), SI->getAccessType());
        continue;
      }
    }

    Value *A = nullptr, *B = nullptr;
    if (CB.isConditionFact()) {
      Pred = CB.Cond.Pred;
      A = CB.Cond.Op0;
      B = CB.Cond.Op1;
      // Skip the fact if its recorded precondition is not known to hold.
      if (CB.DoesHold.Pred != CmpInst::BAD_ICMP_PREDICATE &&
          !Info.doesHold(Pred: CB.DoesHold.Pred, A: CB.DoesHold.Op0, B: CB.DoesHold.Op1)) {
        LLVM_DEBUG({
          dbgs() << "Not adding fact ";
          dumpUnpackedICmp(dbgs(), Pred, A, B);
          dbgs() << " because precondition ";
          dumpUnpackedICmp(dbgs(), CB.DoesHold.Pred, CB.DoesHold.Op0,
                           CB.DoesHold.Op1);
          dbgs() << " does not hold.\n";
        });
        continue;
      }
    } else {
      // The only remaining non-condition fact kind is an assume of an icmp.
      bool Matched = match(V: CB.Inst, P: m_Intrinsic<Intrinsic::assume>(
                                         Op0: m_ICmp(Pred, L: m_Value(V&: A), R: m_Value(V&: B))));
      (void)Matched;
      assert(Matched && "Must have an assume intrinsic with a icmp operand");
    }
    AddFact(Pred, A, B);
  }

  // Emit the collected reproducer module as an optimization remark, if any.
  if (ReproducerModule && !ReproducerModule->functions().empty()) {
    std::string S;
    raw_string_ostream StringS(S);
    ReproducerModule->print(OS&: StringS, AAW: nullptr);
    OptimizationRemark Rem(DEBUG_TYPE, "Reproducer", &F);
    Rem << ore::NV("module") << S;
    ORE.emit(OptDiag&: Rem);
  }

#ifndef NDEBUG
  // Sanity-check that constraint-system rows and stack entries stayed in
  // lock-step (see addFactImpl).
  unsigned SignedEntries =
      count_if(DFSInStack, [](const StackEntry &E) { return E.IsSigned; });
  assert(Info.getCS(false).size() - FunctionArgs.size() ==
             DFSInStack.size() - SignedEntries &&
         "updates to CS and DFSInStack are out of sync");
  assert(Info.getCS(true).size() == SignedEntries &&
         "updates to CS and DFSInStack are out of sync");
#endif

  for (Instruction *I : ToRemove)
    I->eraseFromParent();
  return Changed;
}
2098
2099PreservedAnalyses ConstraintEliminationPass::run(Function &F,
2100 FunctionAnalysisManager &AM) {
2101 auto &DT = AM.getResult<DominatorTreeAnalysis>(IR&: F);
2102 auto &LI = AM.getResult<LoopAnalysis>(IR&: F);
2103 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(IR&: F);
2104 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
2105 auto &TLI = AM.getResult<TargetLibraryAnalysis>(IR&: F);
2106 if (!eliminateConstraints(F, DT, LI, SE, ORE, TLI))
2107 return PreservedAnalyses::all();
2108
2109 PreservedAnalyses PA;
2110 PA.preserve<DominatorTreeAnalysis>();
2111 PA.preserve<LoopAnalysis>();
2112 PA.preserve<ScalarEvolutionAnalysis>();
2113 PA.preserveSet<CFGAnalyses>();
2114 return PA;
2115}
2116