1//===-- ConstraintElimination.cpp - Eliminate conds using constraints. ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Eliminate conditions based on constraints collected from dominating
10// conditions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Scalar/ConstraintElimination.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/ScopeExit.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/Statistic.h"
19#include "llvm/Analysis/ConstraintSystem.h"
20#include "llvm/Analysis/GlobalsModRef.h"
21#include "llvm/Analysis/LoopInfo.h"
22#include "llvm/Analysis/MemoryBuiltins.h"
23#include "llvm/Analysis/OptimizationRemarkEmitter.h"
24#include "llvm/Analysis/ScalarEvolution.h"
25#include "llvm/Analysis/ScalarEvolutionExpressions.h"
26#include "llvm/Analysis/TargetLibraryInfo.h"
27#include "llvm/Analysis/ValueTracking.h"
28#include "llvm/IR/DataLayout.h"
29#include "llvm/IR/DebugInfo.h"
30#include "llvm/IR/Dominators.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/IRBuilder.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Module.h"
36#include "llvm/IR/PatternMatch.h"
37#include "llvm/IR/Verifier.h"
38#include "llvm/Pass.h"
39#include "llvm/Support/CommandLine.h"
40#include "llvm/Support/Debug.h"
41#include "llvm/Support/DebugCounter.h"
42#include "llvm/Support/MathExtras.h"
43#include "llvm/Transforms/Utils/Cloning.h"
44#include "llvm/Transforms/Utils/ValueMapper.h"
45
46#include <optional>
47#include <string>
48
49using namespace llvm;
50using namespace PatternMatch;
51
52#define DEBUG_TYPE "constraint-elimination"
53
54STATISTIC(NumCondsRemoved, "Number of instructions removed");
55DEBUG_COUNTER(EliminatedCounter, "conds-eliminated",
56 "Controls which conditions are eliminated");
57
58static cl::opt<unsigned>
59 MaxRows("constraint-elimination-max-rows", cl::init(Val: 500), cl::Hidden,
60 cl::desc("Maximum number of rows to keep in constraint system"));
61
62static cl::opt<bool> DumpReproducers(
63 "constraint-elimination-dump-reproducers", cl::init(Val: false), cl::Hidden,
64 cl::desc("Dump IR to reproduce successful transformations."));
65
66static int64_t MaxConstraintValue = std::numeric_limits<int64_t>::max();
67static int64_t MinSignedConstraintValue = std::numeric_limits<int64_t>::min();
68
69static Instruction *getContextInstForUse(Use &U) {
70 Instruction *UserI = cast<Instruction>(Val: U.getUser());
71 if (auto *Phi = dyn_cast<PHINode>(Val: UserI))
72 UserI = Phi->getIncomingBlock(U)->getTerminator();
73 return UserI;
74}
75
76namespace {
77/// Struct to express a condition of the form %Op0 Pred %Op1.
78struct ConditionTy {
79 CmpPredicate Pred;
80 Value *Op0 = nullptr;
81 Value *Op1 = nullptr;
82
83 ConditionTy() = default;
84 ConditionTy(CmpPredicate Pred, Value *Op0, Value *Op1)
85 : Pred(Pred), Op0(Op0), Op1(Op1) {}
86};
87
88/// Represents either
89/// * a condition that holds on entry to a block (=condition fact)
90/// * an assume (=assume fact)
91/// * a use of a compare instruction to simplify.
92/// It also tracks the Dominator DFS in and out numbers for each entry.
93struct FactOrCheck {
94 enum class EntryTy {
95 ConditionFact, /// A condition that holds on entry to a block.
    InstFact,      /// A fact that holds after Inst executes (e.g. an assume or
                   /// a min/max intrinsic).
    InstCheck,     /// An instruction to simplify (e.g. an overflow math
                   /// intrinsic).
    UseCheck       /// A use of a compare instruction to simplify.
101 };
102
103 union {
104 Instruction *Inst;
105 Use *U;
106 ConditionTy Cond;
107 };
108
109 /// A pre-condition that must hold for the current fact to be added to the
110 /// system.
111 ConditionTy DoesHold;
112
113 unsigned NumIn;
114 unsigned NumOut;
115 EntryTy Ty;
116
117 FactOrCheck(EntryTy Ty, DomTreeNode *DTN, Instruction *Inst)
118 : Inst(Inst), NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
119 Ty(Ty) {}
120
121 FactOrCheck(DomTreeNode *DTN, Use *U)
122 : U(U), NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
123 Ty(EntryTy::UseCheck) {}
124
125 FactOrCheck(DomTreeNode *DTN, CmpPredicate Pred, Value *Op0, Value *Op1,
126 ConditionTy Precond = {})
127 : Cond(Pred, Op0, Op1), DoesHold(Precond), NumIn(DTN->getDFSNumIn()),
128 NumOut(DTN->getDFSNumOut()), Ty(EntryTy::ConditionFact) {}
129
130 static FactOrCheck getConditionFact(DomTreeNode *DTN, CmpPredicate Pred,
131 Value *Op0, Value *Op1,
132 ConditionTy Precond = {}) {
133 return FactOrCheck(DTN, Pred, Op0, Op1, Precond);
134 }
135
136 static FactOrCheck getInstFact(DomTreeNode *DTN, Instruction *Inst) {
137 return FactOrCheck(EntryTy::InstFact, DTN, Inst);
138 }
139
140 static FactOrCheck getCheck(DomTreeNode *DTN, Use *U) {
141 return FactOrCheck(DTN, U);
142 }
143
144 static FactOrCheck getCheck(DomTreeNode *DTN, CallInst *CI) {
145 return FactOrCheck(EntryTy::InstCheck, DTN, CI);
146 }
147
148 bool isCheck() const {
149 return Ty == EntryTy::InstCheck || Ty == EntryTy::UseCheck;
150 }
151
152 Instruction *getContextInst() const {
153 assert(!isConditionFact());
154 if (Ty == EntryTy::UseCheck)
155 return getContextInstForUse(U&: *U);
156 return Inst;
157 }
158
159 Instruction *getInstructionToSimplify() const {
160 assert(isCheck());
161 if (Ty == EntryTy::InstCheck)
162 return Inst;
163 // The use may have been simplified to a constant already.
164 return dyn_cast<Instruction>(Val&: *U);
165 }
166
167 bool isConditionFact() const { return Ty == EntryTy::ConditionFact; }
168};
169
170/// Keep state required to build worklist.
171struct State {
172 DominatorTree &DT;
173 LoopInfo &LI;
174 ScalarEvolution &SE;
175 TargetLibraryInfo &TLI;
176 SmallVector<FactOrCheck, 64> WorkList;
177
178 State(DominatorTree &DT, LoopInfo &LI, ScalarEvolution &SE,
179 TargetLibraryInfo &TLI)
180 : DT(DT), LI(LI), SE(SE), TLI(TLI) {}
181
182 /// Process block \p BB and add known facts to work-list.
183 void addInfoFor(BasicBlock &BB);
184
185 /// Try to add facts for loop inductions (AddRecs) in EQ/NE compares
186 /// controlling the loop header.
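  /// As an illustrative sketch (not a guarantee of which exact facts get
  /// added): for a loop `for (i = Start; i != N; ++i)` exiting on i == N,
  /// this queues facts such as `i u>= Start` and `i u< N`, the latter
  /// conditional on the precondition `Start u<= N`.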
187 void addInfoForInductions(BasicBlock &BB);
188
189 /// Returns true if we can add a known condition from BB to its successor
190 /// block Succ.
191 bool canAddSuccessor(BasicBlock &BB, BasicBlock *Succ) const {
192 return DT.dominates(BBE: BasicBlockEdge(&BB, Succ), BB: Succ);
193 }
194};
195
196class ConstraintInfo;
197
198struct StackEntry {
199 unsigned NumIn;
200 unsigned NumOut;
201 bool IsSigned = false;
202 /// Variables that can be removed from the system once the stack entry gets
203 /// removed.
204 SmallVector<Value *, 2> ValuesToRelease;
205
206 StackEntry(unsigned NumIn, unsigned NumOut, bool IsSigned,
207 SmallVector<Value *, 2> ValuesToRelease)
208 : NumIn(NumIn), NumOut(NumOut), IsSigned(IsSigned),
209 ValuesToRelease(std::move(ValuesToRelease)) {}
210};
211
212struct ConstraintTy {
213 SmallVector<int64_t, 8> Coefficients;
214 SmallVector<ConditionTy, 2> Preconditions;
215
216 bool IsSigned = false;
217
218 ConstraintTy() = default;
219
220 ConstraintTy(SmallVector<int64_t, 8> Coefficients, bool IsSigned, bool IsEq,
221 bool IsNe)
222 : Coefficients(std::move(Coefficients)), IsSigned(IsSigned), IsEq(IsEq),
223 IsNe(IsNe) {}
224
225 unsigned size() const { return Coefficients.size(); }
226
227 unsigned empty() const { return Coefficients.empty(); }
228
229 /// Returns true if all preconditions for this list of constraints are
230 /// satisfied given \p Info.
231 bool isValid(const ConstraintInfo &Info) const;
232
233 bool isEq() const { return IsEq; }
234
235 bool isNe() const { return IsNe; }
236
237 /// Check if the current constraint is implied by the given ConstraintSystem.
238 ///
  /// \return true if the constraint is proven to hold, false if it is proven
  /// not to hold, and std::nullopt if neither can be proven.
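  /// Illustrative example (hypothetical system): if the system already
  /// contains x <= 5, a constraint derived from `x u<= 10` is implied (true),
  /// one derived from `x u>= 6` is proven false, and `x u<= 3` yields
  /// std::nullopt.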
242 std::optional<bool> isImpliedBy(const ConstraintSystem &CS) const;
243
244private:
245 bool IsEq = false;
246 bool IsNe = false;
247};
248
249/// Wrapper encapsulating separate constraint systems and corresponding value
250/// mappings for both unsigned and signed information. Facts are added to and
251/// conditions are checked against the corresponding system depending on the
252/// signed-ness of their predicates. While the information is kept separate
253/// based on signed-ness, certain conditions can be transferred between the two
254/// systems.
255class ConstraintInfo {
256
257 ConstraintSystem UnsignedCS;
258 ConstraintSystem SignedCS;
259
260 const DataLayout &DL;
261
262public:
263 ConstraintInfo(const DataLayout &DL, ArrayRef<Value *> FunctionArgs)
264 : UnsignedCS(FunctionArgs), SignedCS(FunctionArgs), DL(DL) {
265 auto &Value2Index = getValue2Index(Signed: false);
266 // Add Arg > -1 constraints to unsigned system for all function arguments.
267 for (Value *Arg : FunctionArgs) {
268 ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0),
269 false, false, false);
270 VarPos.Coefficients[Value2Index[Arg]] = -1;
271 UnsignedCS.addVariableRow(R: VarPos.Coefficients);
272 }
273 }
274
275 DenseMap<Value *, unsigned> &getValue2Index(bool Signed) {
276 return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index();
277 }
278 const DenseMap<Value *, unsigned> &getValue2Index(bool Signed) const {
279 return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index();
280 }
281
282 ConstraintSystem &getCS(bool Signed) {
283 return Signed ? SignedCS : UnsignedCS;
284 }
285 const ConstraintSystem &getCS(bool Signed) const {
286 return Signed ? SignedCS : UnsignedCS;
287 }
288
289 void popLastConstraint(bool Signed) { getCS(Signed).popLastConstraint(); }
290 void popLastNVariables(bool Signed, unsigned N) {
291 getCS(Signed).popLastNVariables(N);
292 }
293
294 bool doesHold(CmpInst::Predicate Pred, Value *A, Value *B) const;
295
296 void addFact(CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn,
297 unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack);
298
299 /// Turn a comparison of the form \p Op0 \p Pred \p Op1 into a vector of
300 /// constraints, using indices from the corresponding constraint system.
301 /// New variables that need to be added to the system are collected in
302 /// \p NewVariables.
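  /// As an illustrative sketch of the encoding (variable indices are
  /// hypothetical): with x and y at indices 1 and 2, `x u<= y` becomes the
  /// row x - y <= 0, i.e. constant 0 with coefficients +1 for x and -1 for y,
  /// and `x u< y` becomes x - y <= -1.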
303 ConstraintTy getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
304 SmallVectorImpl<Value *> &NewVariables,
305 bool ForceSignedSystem = false) const;
306
307 /// Turns a comparison of the form \p Op0 \p Pred \p Op1 into a vector of
308 /// constraints using getConstraint. Returns an empty constraint if the result
309 /// cannot be used to query the existing constraint system, e.g. because it
310 /// would require adding new variables. Also tries to convert signed
311 /// predicates to unsigned ones if possible to allow using the unsigned system
312 /// which increases the effectiveness of the signed <-> unsigned transfer
313 /// logic.
314 ConstraintTy getConstraintForSolving(CmpInst::Predicate Pred, Value *Op0,
315 Value *Op1) const;
316
317 /// Try to add information from \p A \p Pred \p B to the unsigned/signed
318 /// system if \p Pred is signed/unsigned.
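  /// For example (illustrative): from `A u< B` together with the fact that B
  /// is known to be signed non-negative, both `A s>= 0` and `A s< B` can be
  /// added to the signed system.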
319 void transferToOtherSystem(CmpInst::Predicate Pred, Value *A, Value *B,
320 unsigned NumIn, unsigned NumOut,
321 SmallVectorImpl<StackEntry> &DFSInStack);
322
323private:
  /// Adds facts into the constraint system. \p ForceSignedSystem may only be
  /// set when \p Pred is eq/ne; when set, the fact is added to the signed
  /// system.
327 void addFactImpl(CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn,
328 unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack,
329 bool ForceSignedSystem);
330};
331
332/// Represents a (Coefficient * Variable) entry after IR decomposition.
333struct DecompEntry {
334 int64_t Coefficient;
335 Value *Variable;
336
337 DecompEntry(int64_t Coefficient, Value *Variable)
338 : Coefficient(Coefficient), Variable(Variable) {}
339};
340
341/// Represents an Offset + Coefficient1 * Variable1 + ... decomposition.
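/// Illustrative examples (value names are placeholders): `add nuw i64 %x, 4`
/// decomposes to Offset = 4, Vars = {(1, %x)}; `shl nuw i64 %x, 1` decomposes
/// to Offset = 0, Vars = {(2, %x)}.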
342struct Decomposition {
343 int64_t Offset = 0;
344 SmallVector<DecompEntry, 3> Vars;
345
346 Decomposition(int64_t Offset) : Offset(Offset) {}
347 Decomposition(Value *V) { Vars.emplace_back(Args: 1, Args&: V); }
348 Decomposition(int64_t Offset, ArrayRef<DecompEntry> Vars)
349 : Offset(Offset), Vars(Vars) {}
350
351 /// Add \p OtherOffset and return true if the operation overflows, i.e. the
352 /// new decomposition is invalid.
353 [[nodiscard]] bool add(int64_t OtherOffset) {
354 return AddOverflow(X: Offset, Y: OtherOffset, Result&: Offset);
355 }
356
357 /// Add \p Other and return true if the operation overflows, i.e. the new
358 /// decomposition is invalid.
359 [[nodiscard]] bool add(const Decomposition &Other) {
360 if (add(OtherOffset: Other.Offset))
361 return true;
362 append_range(C&: Vars, R: Other.Vars);
363 return false;
364 }
365
366 /// Subtract \p Other and return true if the operation overflows, i.e. the new
367 /// decomposition is invalid.
368 [[nodiscard]] bool sub(const Decomposition &Other) {
369 Decomposition Tmp = Other;
370 if (Tmp.mul(Factor: -1))
371 return true;
372 if (add(OtherOffset: Tmp.Offset))
373 return true;
374 append_range(C&: Vars, R&: Tmp.Vars);
375 return false;
376 }
377
378 /// Multiply all coefficients by \p Factor and return true if the operation
379 /// overflows, i.e. the new decomposition is invalid.
380 [[nodiscard]] bool mul(int64_t Factor) {
381 if (MulOverflow(X: Offset, Y: Factor, Result&: Offset))
382 return true;
383 for (auto &Var : Vars)
384 if (MulOverflow(X: Var.Coefficient, Y: Factor, Result&: Var.Coefficient))
385 return true;
386 return false;
387 }
388};
389
390// Variable and constant offsets for a chain of GEPs, with base pointer BasePtr.
391struct OffsetResult {
392 Value *BasePtr;
393 APInt ConstantOffset;
394 SmallMapVector<Value *, APInt, 4> VariableOffsets;
395 GEPNoWrapFlags NW;
396
397 OffsetResult() : BasePtr(nullptr), ConstantOffset(0, uint64_t(0)) {}
398
399 OffsetResult(GEPOperator &GEP, const DataLayout &DL)
400 : BasePtr(GEP.getPointerOperand()), NW(GEP.getNoWrapFlags()) {
401 ConstantOffset = APInt(DL.getIndexTypeSizeInBits(Ty: BasePtr->getType()), 0);
402 }
403};
404} // namespace
405
// Try to collect variable and constant offsets for \p GEP, partly traversing
// nested GEPs. Returns an OffsetResult with nullptr as BasePtr if collecting
// the offset fails.
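// Illustrative example (names are placeholders): for an outer
// `gep nuw i8, ptr %inner, i64 8` whose base %inner is
// `gep nuw i8, ptr %p, i64 %i`, the combined result is BasePtr = %p,
// ConstantOffset = 8, VariableOffsets = {%i -> 1}, with the no-wrap flags of
// both GEPs intersected.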
409static OffsetResult collectOffsets(GEPOperator &GEP, const DataLayout &DL) {
410 OffsetResult Result(GEP, DL);
411 unsigned BitWidth = Result.ConstantOffset.getBitWidth();
412 if (!GEP.collectOffset(DL, BitWidth, VariableOffsets&: Result.VariableOffsets,
413 ConstantOffset&: Result.ConstantOffset))
414 return {};
415
416 // If we have a nested GEP, check if we can combine the constant offset of the
417 // inner GEP with the outer GEP.
418 if (auto *InnerGEP = dyn_cast<GetElementPtrInst>(Val: Result.BasePtr)) {
419 SmallMapVector<Value *, APInt, 4> VariableOffsets2;
420 APInt ConstantOffset2(BitWidth, 0);
421 bool CanCollectInner = InnerGEP->collectOffset(
422 DL, BitWidth, VariableOffsets&: VariableOffsets2, ConstantOffset&: ConstantOffset2);
423 // TODO: Support cases with more than 1 variable offset.
424 if (!CanCollectInner || Result.VariableOffsets.size() > 1 ||
425 VariableOffsets2.size() > 1 ||
426 (Result.VariableOffsets.size() >= 1 && VariableOffsets2.size() >= 1)) {
427 // More than 1 variable index, use outer result.
428 return Result;
429 }
430 Result.BasePtr = InnerGEP->getPointerOperand();
431 Result.ConstantOffset += ConstantOffset2;
432 if (Result.VariableOffsets.size() == 0 && VariableOffsets2.size() == 1)
433 Result.VariableOffsets = VariableOffsets2;
434 Result.NW &= InnerGEP->getNoWrapFlags();
435 }
436 return Result;
437}
438
439static Decomposition decompose(Value *V,
440 SmallVectorImpl<ConditionTy> &Preconditions,
441 bool IsSigned, const DataLayout &DL);
442
443static bool canUseSExt(ConstantInt *CI) {
444 const APInt &Val = CI->getValue();
445 return Val.sgt(RHS: MinSignedConstraintValue) && Val.slt(RHS: MaxConstraintValue);
446}
447
448static Decomposition decomposeGEP(GEPOperator &GEP,
449 SmallVectorImpl<ConditionTy> &Preconditions,
450 bool IsSigned, const DataLayout &DL) {
451 // Do not reason about pointers where the index size is larger than 64 bits,
452 // as the coefficients used to encode constraints are 64 bit integers.
453 if (DL.getIndexTypeSizeInBits(Ty: GEP.getPointerOperand()->getType()) > 64)
454 return &GEP;
455
456 assert(!IsSigned && "The logic below only supports decomposition for "
457 "unsigned predicates at the moment.");
458 const auto &[BasePtr, ConstantOffset, VariableOffsets, NW] =
459 collectOffsets(GEP, DL);
460 // We support either plain gep nuw, or gep nusw with non-negative offset,
461 // which implies gep nuw.
462 if (!BasePtr || NW == GEPNoWrapFlags::none())
463 return &GEP;
464
465 Decomposition Result(ConstantOffset.getSExtValue(), DecompEntry(1, BasePtr));
466 for (auto [Index, Scale] : VariableOffsets) {
467 auto IdxResult = decompose(V: Index, Preconditions, IsSigned, DL);
468 if (IdxResult.mul(Factor: Scale.getSExtValue()))
469 return &GEP;
470 if (Result.add(Other: IdxResult))
471 return &GEP;
472
473 if (!NW.hasNoUnsignedWrap()) {
474 // Try to prove nuw from nusw and nneg.
475 assert(NW.hasNoUnsignedSignedWrap() && "Must have nusw flag");
476 if (!isKnownNonNegative(V: Index, SQ: DL))
477 Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Index,
478 Args: ConstantInt::get(Ty: Index->getType(), V: 0));
479 }
480 }
481 return Result;
482}
483
// Decomposes \p V into a constant offset plus a list of pairs { Coefficient,
// Variable }, each contributing Coefficient * Variable. The sum of the
// constant offset and the pairs equals \p V.
487static Decomposition decompose(Value *V,
488 SmallVectorImpl<ConditionTy> &Preconditions,
489 bool IsSigned, const DataLayout &DL) {
490
491 auto MergeResults = [&Preconditions, IsSigned,
492 &DL](Value *A, Value *B,
493 bool IsSignedB) -> std::optional<Decomposition> {
494 auto ResA = decompose(V: A, Preconditions, IsSigned, DL);
495 auto ResB = decompose(V: B, Preconditions, IsSigned: IsSignedB, DL);
496 if (ResA.add(Other: ResB))
497 return std::nullopt;
498 return ResA;
499 };
500
501 Type *Ty = V->getType()->getScalarType();
502 if (Ty->isPointerTy() && !IsSigned) {
503 if (auto *GEP = dyn_cast<GEPOperator>(Val: V))
504 return decomposeGEP(GEP&: *GEP, Preconditions, IsSigned, DL);
505 if (isa<ConstantPointerNull>(Val: V))
506 return int64_t(0);
507
508 return V;
509 }
510
  // Don't handle integers > 64 bit. Our coefficients are 64 bits wide, so
  // coefficient add/mul may wrap, while the operation in the full bit width
  // would not.
514 if (!Ty->isIntegerTy() || Ty->getIntegerBitWidth() > 64)
515 return V;
516
517 // Decompose \p V used with a signed predicate.
518 if (IsSigned) {
519 if (auto *CI = dyn_cast<ConstantInt>(Val: V)) {
520 if (canUseSExt(CI))
521 return CI->getSExtValue();
522 }
523 Value *Op0;
524 Value *Op1;
525
526 if (match(V, P: m_SExt(Op: m_Value(V&: Op0))))
527 V = Op0;
528 else if (match(V, P: m_NNegZExt(Op: m_Value(V&: Op0)))) {
529 V = Op0;
530 } else if (match(V, P: m_NSWTrunc(Op: m_Value(V&: Op0)))) {
531 if (Op0->getType()->getScalarSizeInBits() <= 64)
532 V = Op0;
533 }
534
535 if (match(V, P: m_NSWAdd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
536 if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
537 return *Decomp;
538 return V;
539 }
540
541 if (match(V, P: m_NSWSub(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
542 auto ResA = decompose(V: Op0, Preconditions, IsSigned, DL);
543 auto ResB = decompose(V: Op1, Preconditions, IsSigned, DL);
544 if (!ResA.sub(Other: ResB))
545 return ResA;
546 return V;
547 }
548
549 ConstantInt *CI;
550 if (match(V, P: m_NSWMul(L: m_Value(V&: Op0), R: m_ConstantInt(CI))) && canUseSExt(CI)) {
551 auto Result = decompose(V: Op0, Preconditions, IsSigned, DL);
552 if (!Result.mul(Factor: CI->getSExtValue()))
553 return Result;
554 return V;
555 }
556
557 // (shl nsw x, shift) is (mul nsw x, (1<<shift)), with the exception of
558 // shift == bw-1.
559 if (match(V, P: m_NSWShl(L: m_Value(V&: Op0), R: m_ConstantInt(CI)))) {
560 uint64_t Shift = CI->getValue().getLimitedValue();
561 if (Shift < Ty->getIntegerBitWidth() - 1) {
562 assert(Shift < 64 && "Would overflow");
563 auto Result = decompose(V: Op0, Preconditions, IsSigned, DL);
564 if (!Result.mul(Factor: int64_t(1) << Shift))
565 return Result;
566 return V;
567 }
568 }
569
570 return V;
571 }
572
573 if (auto *CI = dyn_cast<ConstantInt>(Val: V)) {
574 if (CI->uge(Num: MaxConstraintValue))
575 return V;
576 return int64_t(CI->getZExtValue());
577 }
578
579 Value *Op0;
580 if (match(V, P: m_ZExt(Op: m_Value(V&: Op0)))) {
581 V = Op0;
582 } else if (match(V, P: m_SExt(Op: m_Value(V&: Op0)))) {
583 V = Op0;
584 Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Op0,
585 Args: ConstantInt::get(Ty: Op0->getType(), V: 0));
586 } else if (auto *Trunc = dyn_cast<TruncInst>(Val: V)) {
587 if (Trunc->getSrcTy()->getScalarSizeInBits() <= 64) {
588 if (Trunc->hasNoUnsignedWrap() || Trunc->hasNoSignedWrap()) {
589 V = Trunc->getOperand(i_nocapture: 0);
590 if (!Trunc->hasNoUnsignedWrap())
591 Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: V,
592 Args: ConstantInt::get(Ty: V->getType(), V: 0));
593 }
594 }
595 }
596
597 Value *Op1;
598 ConstantInt *CI;
599 if (match(V, P: m_NUWAdd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
600 if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
601 return *Decomp;
602 return V;
603 }
604
605 if (match(V, P: m_Add(L: m_Value(V&: Op0), R: m_ConstantInt(CI))) && CI->isNegative() &&
606 canUseSExt(CI)) {
607 Preconditions.emplace_back(
608 Args: CmpInst::ICMP_UGE, Args&: Op0,
609 Args: ConstantInt::get(Ty: Op0->getType(), V: CI->getSExtValue() * -1));
610 if (auto Decomp = MergeResults(Op0, CI, true))
611 return *Decomp;
612 return V;
613 }
614
615 if (match(V, P: m_NSWAdd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
616 if (!isKnownNonNegative(V: Op0, SQ: DL))
617 Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Op0,
618 Args: ConstantInt::get(Ty: Op0->getType(), V: 0));
619 if (!isKnownNonNegative(V: Op1, SQ: DL))
620 Preconditions.emplace_back(Args: CmpInst::ICMP_SGE, Args&: Op1,
621 Args: ConstantInt::get(Ty: Op1->getType(), V: 0));
622
623 if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
624 return *Decomp;
625 return V;
626 }
627
628 // Decompose or as an add if there are no common bits between the operands.
629 if (match(V, P: m_DisjointOr(L: m_Value(V&: Op0), R: m_ConstantInt(CI)))) {
630 if (auto Decomp = MergeResults(Op0, CI, IsSigned))
631 return *Decomp;
632 return V;
633 }
634
635 if (match(V, P: m_NUWShl(L: m_Value(V&: Op1), R: m_ConstantInt(CI))) && canUseSExt(CI)) {
636 if (CI->getSExtValue() < 0 || CI->getSExtValue() >= 64)
637 return V;
638 auto Result = decompose(V: Op1, Preconditions, IsSigned, DL);
639 if (!Result.mul(Factor: int64_t{1} << CI->getSExtValue()))
640 return Result;
641 return V;
642 }
643
644 if (match(V, P: m_NUWMul(L: m_Value(V&: Op1), R: m_ConstantInt(CI))) && canUseSExt(CI) &&
645 (!CI->isNegative())) {
646 auto Result = decompose(V: Op1, Preconditions, IsSigned, DL);
647 if (!Result.mul(Factor: CI->getSExtValue()))
648 return Result;
649 return V;
650 }
651
652 if (match(V, P: m_NUWSub(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
653 auto ResA = decompose(V: Op0, Preconditions, IsSigned, DL);
654 auto ResB = decompose(V: Op1, Preconditions, IsSigned, DL);
655 if (!ResA.sub(Other: ResB))
656 return ResA;
657 return V;
658 }
659
660 return V;
661}
662
663ConstraintTy
664ConstraintInfo::getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
665 SmallVectorImpl<Value *> &NewVariables,
666 bool ForceSignedSystem) const {
667 assert(NewVariables.empty() && "NewVariables must be empty when passed in");
668 assert((!ForceSignedSystem || CmpInst::isEquality(Pred)) &&
669 "signed system can only be forced on eq/ne");
670
671 bool IsEq = false;
672 bool IsNe = false;
673
674 // Try to convert Pred to one of ULE/ULT/SLE/SLT.
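  // E.g. `a u>= b` is handled as the swapped form `b u<= a`, and `a != 0` as
  // `0 u< a`.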
675 switch (Pred) {
676 case CmpInst::ICMP_UGT:
677 case CmpInst::ICMP_UGE:
678 case CmpInst::ICMP_SGT:
679 case CmpInst::ICMP_SGE: {
680 Pred = CmpInst::getSwappedPredicate(pred: Pred);
681 std::swap(a&: Op0, b&: Op1);
682 break;
683 }
684 case CmpInst::ICMP_EQ:
685 if (!ForceSignedSystem && match(V: Op1, P: m_Zero())) {
686 Pred = CmpInst::ICMP_ULE;
687 } else {
688 IsEq = true;
689 Pred = CmpInst::ICMP_ULE;
690 }
691 break;
692 case CmpInst::ICMP_NE:
693 if (!ForceSignedSystem && match(V: Op1, P: m_Zero())) {
694 Pred = CmpInst::getSwappedPredicate(pred: CmpInst::ICMP_UGT);
695 std::swap(a&: Op0, b&: Op1);
696 } else {
697 IsNe = true;
698 Pred = CmpInst::ICMP_ULE;
699 }
700 break;
701 default:
702 break;
703 }
704
705 if (Pred != CmpInst::ICMP_ULE && Pred != CmpInst::ICMP_ULT &&
706 Pred != CmpInst::ICMP_SLE && Pred != CmpInst::ICMP_SLT)
707 return {};
708
709 SmallVector<ConditionTy, 4> Preconditions;
710 bool IsSigned = ForceSignedSystem || CmpInst::isSigned(predicate: Pred);
711 auto &Value2Index = getValue2Index(Signed: IsSigned);
712 auto ADec = decompose(V: Op0->stripPointerCastsSameRepresentation(),
713 Preconditions, IsSigned, DL);
714 auto BDec = decompose(V: Op1->stripPointerCastsSameRepresentation(),
715 Preconditions, IsSigned, DL);
716 int64_t Offset1 = ADec.Offset;
717 int64_t Offset2 = BDec.Offset;
718 Offset1 *= -1;
719
720 auto &VariablesA = ADec.Vars;
721 auto &VariablesB = BDec.Vars;
722
723 // First try to look up \p V in Value2Index and NewVariables. Otherwise add a
724 // new entry to NewVariables.
725 SmallDenseMap<Value *, unsigned> NewIndexMap;
726 auto GetOrAddIndex = [&Value2Index, &NewVariables,
727 &NewIndexMap](Value *V) -> unsigned {
728 auto V2I = Value2Index.find(Val: V);
729 if (V2I != Value2Index.end())
730 return V2I->second;
731 auto [It, Inserted] = NewIndexMap.try_emplace(
732 Key: V, Args: Value2Index.size() + NewVariables.size() + 1);
733 if (Inserted)
734 NewVariables.push_back(Elt: V);
735 return It->second;
736 };
737
738 // Make sure all variables have entries in Value2Index or NewVariables.
739 for (const auto &KV : concat<DecompEntry>(Ranges&: VariablesA, Ranges&: VariablesB))
740 GetOrAddIndex(KV.Variable);
741
742 // Build result constraint, by first adding all coefficients from A and then
743 // subtracting all coefficients from B.
744 ConstraintTy Res(
745 SmallVector<int64_t, 8>(Value2Index.size() + NewVariables.size() + 1, 0),
746 IsSigned, IsEq, IsNe);
747 auto &R = Res.Coefficients;
748 for (const auto &KV : VariablesA)
749 R[GetOrAddIndex(KV.Variable)] += KV.Coefficient;
750
751 for (const auto &KV : VariablesB) {
752 auto &Coeff = R[GetOrAddIndex(KV.Variable)];
753 if (SubOverflow(X: Coeff, Y: KV.Coefficient, Result&: Coeff))
754 return {};
755 }
756
757 int64_t OffsetSum;
758 if (AddOverflow(X: Offset1, Y: Offset2, Result&: OffsetSum))
759 return {};
760 if (Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT)
761 if (AddOverflow(X: OffsetSum, Y: int64_t(-1), Result&: OffsetSum))
762 return {};
763 R[0] = OffsetSum;
764 Res.Preconditions = std::move(Preconditions);
765
766 // Remove any (Coefficient, Variable) entry where the Coefficient is 0 for new
767 // variables.
768 while (!NewVariables.empty()) {
769 int64_t Last = R.back();
770 if (Last != 0)
771 break;
772 R.pop_back();
773 Value *RemovedV = NewVariables.pop_back_val();
774 NewIndexMap.erase(Val: RemovedV);
775 }
776
777 return Res;
778}
779
780ConstraintTy ConstraintInfo::getConstraintForSolving(CmpInst::Predicate Pred,
781 Value *Op0,
782 Value *Op1) const {
783 Constant *NullC = Constant::getNullValue(Ty: Op0->getType());
784 // Handle trivially true compares directly to avoid adding V UGE 0 constraints
785 // for all variables in the unsigned system.
786 if ((Pred == CmpInst::ICMP_ULE && Op0 == NullC) ||
787 (Pred == CmpInst::ICMP_UGE && Op1 == NullC)) {
788 auto &Value2Index = getValue2Index(Signed: false);
789 // Return constraint that's trivially true.
790 return ConstraintTy(SmallVector<int64_t, 8>(Value2Index.size(), 0), false,
791 false, false);
792 }
793
794 // If both operands are known to be non-negative, change signed predicates to
795 // unsigned ones. This increases the reasoning effectiveness in combination
796 // with the signed <-> unsigned transfer logic.
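  // E.g. `a s< b` with both a and b known non-negative is handled as `a u< b`.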
797 if (CmpInst::isSigned(predicate: Pred) &&
798 isKnownNonNegative(V: Op0, SQ: DL, /*Depth=*/MaxAnalysisRecursionDepth - 1) &&
799 isKnownNonNegative(V: Op1, SQ: DL, /*Depth=*/MaxAnalysisRecursionDepth - 1))
800 Pred = ICmpInst::getUnsignedPredicate(Pred);
801
802 SmallVector<Value *> NewVariables;
803 ConstraintTy R = getConstraint(Pred, Op0, Op1, NewVariables);
804 if (!NewVariables.empty())
805 return {};
806 return R;
807}
808
809bool ConstraintTy::isValid(const ConstraintInfo &Info) const {
810 return Coefficients.size() > 0 &&
811 all_of(Range: Preconditions, P: [&Info](const ConditionTy &C) {
812 return Info.doesHold(Pred: C.Pred, A: C.Op0, B: C.Op1);
813 });
814}
815
816std::optional<bool>
817ConstraintTy::isImpliedBy(const ConstraintSystem &CS) const {
818 bool IsConditionImplied = CS.isConditionImplied(R: Coefficients);
819
820 if (IsEq || IsNe) {
821 auto NegatedOrEqual = ConstraintSystem::negateOrEqual(R: Coefficients);
822 bool IsNegatedOrEqualImplied =
823 !NegatedOrEqual.empty() && CS.isConditionImplied(R: NegatedOrEqual);
824
825 // In order to check that `%a == %b` is true (equality), both conditions `%a
826 // >= %b` and `%a <= %b` must hold true. When checking for equality (`IsEq`
827 // is true), we return true if they both hold, false in the other cases.
828 if (IsConditionImplied && IsNegatedOrEqualImplied)
829 return IsEq;
830
831 auto Negated = ConstraintSystem::negate(R: Coefficients);
832 bool IsNegatedImplied = !Negated.empty() && CS.isConditionImplied(R: Negated);
833
834 auto StrictLessThan = ConstraintSystem::toStrictLessThan(R: Coefficients);
835 bool IsStrictLessThanImplied =
836 !StrictLessThan.empty() && CS.isConditionImplied(R: StrictLessThan);
837
838 // In order to check that `%a != %b` is true (non-equality), either
839 // condition `%a > %b` or `%a < %b` must hold true. When checking for
840 // non-equality (`IsNe` is true), we return true if one of the two holds,
841 // false in the other cases.
842 if (IsNegatedImplied || IsStrictLessThanImplied)
843 return IsNe;
844
845 return std::nullopt;
846 }
847
848 if (IsConditionImplied)
849 return true;
850
851 auto Negated = ConstraintSystem::negate(R: Coefficients);
852 auto IsNegatedImplied = !Negated.empty() && CS.isConditionImplied(R: Negated);
853 if (IsNegatedImplied)
854 return false;
855
  // Neither the condition nor its negation is implied; we did not prove
  // anything.
857 return std::nullopt;
858}
859
860bool ConstraintInfo::doesHold(CmpInst::Predicate Pred, Value *A,
861 Value *B) const {
862 auto R = getConstraintForSolving(Pred, Op0: A, Op1: B);
863 return R.isValid(Info: *this) &&
864 getCS(Signed: R.IsSigned).isConditionImplied(R: R.Coefficients);
865}
866
867void ConstraintInfo::transferToOtherSystem(
868 CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn,
869 unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack) {
870 auto IsKnownNonNegative = [this](Value *V) {
871 return doesHold(Pred: CmpInst::ICMP_SGE, A: V, B: ConstantInt::get(Ty: V->getType(), V: 0)) ||
872 isKnownNonNegative(V, SQ: DL, /*Depth=*/MaxAnalysisRecursionDepth - 1);
873 };
874 // Check if we can combine facts from the signed and unsigned systems to
875 // derive additional facts.
876 if (!A->getType()->isIntegerTy())
877 return;
878 // FIXME: This currently depends on the order we add facts. Ideally we
879 // would first add all known facts and only then try to add additional
880 // facts.
881 switch (Pred) {
882 default:
883 break;
884 case CmpInst::ICMP_ULT:
885 case CmpInst::ICMP_ULE:
886 // If B is a signed positive constant, then A >=s 0 and A <s (or <=s) B.
887 if (IsKnownNonNegative(B)) {
888 addFact(Pred: CmpInst::ICMP_SGE, A, B: ConstantInt::get(Ty: B->getType(), V: 0), NumIn,
889 NumOut, DFSInStack);
890 addFact(Pred: ICmpInst::getSignedPredicate(Pred), A, B, NumIn, NumOut,
891 DFSInStack);
892 }
893 break;
894 case CmpInst::ICMP_UGE:
895 case CmpInst::ICMP_UGT:
896 // If A is a signed positive constant, then B >=s 0 and A >s (or >=s) B.
897 if (IsKnownNonNegative(A)) {
898 addFact(Pred: CmpInst::ICMP_SGE, A: B, B: ConstantInt::get(Ty: B->getType(), V: 0), NumIn,
899 NumOut, DFSInStack);
900 addFact(Pred: ICmpInst::getSignedPredicate(Pred), A, B, NumIn, NumOut,
901 DFSInStack);
902 }
903 break;
904 case CmpInst::ICMP_SLT:
905 if (IsKnownNonNegative(A))
906 addFact(Pred: CmpInst::ICMP_ULT, A, B, NumIn, NumOut, DFSInStack);
907 break;
908 case CmpInst::ICMP_SGT: {
909 if (doesHold(Pred: CmpInst::ICMP_SGE, A: B, B: Constant::getAllOnesValue(Ty: B->getType())))
910 addFact(Pred: CmpInst::ICMP_UGE, A, B: ConstantInt::get(Ty: B->getType(), V: 0), NumIn,
911 NumOut, DFSInStack);
912 if (IsKnownNonNegative(B))
913 addFact(Pred: CmpInst::ICMP_UGT, A, B, NumIn, NumOut, DFSInStack);
914
915 break;
916 }
917 case CmpInst::ICMP_SGE:
918 if (IsKnownNonNegative(B))
919 addFact(Pred: CmpInst::ICMP_UGE, A, B, NumIn, NumOut, DFSInStack);
920 break;
921 }
922}
923
924#ifndef NDEBUG
925
926static void dumpConstraint(ArrayRef<int64_t> C,
927 const DenseMap<Value *, unsigned> &Value2Index) {
928 ConstraintSystem CS(Value2Index);
929 CS.addVariableRowFill(C);
930 CS.dump();
931}
932#endif
933
934void State::addInfoForInductions(BasicBlock &BB) {
935 auto *L = LI.getLoopFor(BB: &BB);
936 if (!L || L->getHeader() != &BB)
937 return;
938
939 Value *A;
940 Value *B;
941 CmpPredicate Pred;
942
943 if (!match(V: BB.getTerminator(),
944 P: m_Br(C: m_ICmp(Pred, L: m_Value(V&: A), R: m_Value(V&: B)), T: m_Value(), F: m_Value())))
945 return;
946 PHINode *PN = dyn_cast<PHINode>(Val: A);
947 if (!PN) {
948 Pred = CmpInst::getSwappedPredicate(pred: Pred);
949 std::swap(a&: A, b&: B);
950 PN = dyn_cast<PHINode>(Val: A);
951 }
952
953 if (!PN || PN->getParent() != &BB || PN->getNumIncomingValues() != 2 ||
954 !SE.isSCEVable(Ty: PN->getType()))
955 return;
956
957 BasicBlock *InLoopSucc = nullptr;
958 if (Pred == CmpInst::ICMP_NE)
959 InLoopSucc = cast<BranchInst>(Val: BB.getTerminator())->getSuccessor(i: 0);
960 else if (Pred == CmpInst::ICMP_EQ)
961 InLoopSucc = cast<BranchInst>(Val: BB.getTerminator())->getSuccessor(i: 1);
962 else
963 return;
964
965 if (!L->contains(BB: InLoopSucc) || !L->isLoopExiting(BB: &BB) || InLoopSucc == &BB)
966 return;
967
968 auto *AR = dyn_cast_or_null<SCEVAddRecExpr>(Val: SE.getSCEV(V: PN));
969 BasicBlock *LoopPred = L->getLoopPredecessor();
970 if (!AR || AR->getLoop() != L || !LoopPred)
971 return;
972
973 const SCEV *StartSCEV = AR->getStart();
974 Value *StartValue = nullptr;
975 if (auto *C = dyn_cast<SCEVConstant>(Val: StartSCEV)) {
976 StartValue = C->getValue();
977 } else {
978 StartValue = PN->getIncomingValueForBlock(BB: LoopPred);
979 assert(SE.getSCEV(StartValue) == StartSCEV && "inconsistent start value");
980 }
981
982 DomTreeNode *DTN = DT.getNode(BB: InLoopSucc);
983 auto IncUnsigned = SE.getMonotonicPredicateType(LHS: AR, Pred: CmpInst::ICMP_UGT);
984 auto IncSigned = SE.getMonotonicPredicateType(LHS: AR, Pred: CmpInst::ICMP_SGT);
985 bool MonotonicallyIncreasingUnsigned =
986 IncUnsigned == ScalarEvolution::MonotonicallyIncreasing;
987 bool MonotonicallyIncreasingSigned =
988 IncSigned == ScalarEvolution::MonotonicallyIncreasing;
989 // If SCEV guarantees that AR does not wrap, PN >= StartValue can be added
990 // unconditionally.
991 if (MonotonicallyIncreasingUnsigned)
992 WorkList.push_back(
993 Elt: FactOrCheck::getConditionFact(DTN, Pred: CmpInst::ICMP_UGE, Op0: PN, Op1: StartValue));
994 if (MonotonicallyIncreasingSigned)
995 WorkList.push_back(
996 Elt: FactOrCheck::getConditionFact(DTN, Pred: CmpInst::ICMP_SGE, Op0: PN, Op1: StartValue));
997
998 APInt StepOffset;
999 if (auto *C = dyn_cast<SCEVConstant>(Val: AR->getStepRecurrence(SE)))
1000 StepOffset = C->getAPInt();
1001 else
1002 return;
1003
1004 // Make sure the bound B is loop-invariant.
1005 if (!L->isLoopInvariant(V: B))
1006 return;
1007
1008 // Handle negative steps.
1009 if (StepOffset.isNegative()) {
1010 // TODO: Extend to allow steps > -1.
1011 if (!(-StepOffset).isOne())
1012 return;
1013
1014 // AR may wrap.
1015 // Add StartValue >= PN conditional on B <= StartValue which guarantees that
1016 // the loop exits before wrapping with a step of -1.
1017 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1018 DTN, Pred: CmpInst::ICMP_UGE, Op0: StartValue, Op1: PN,
1019 Precond: ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
1020 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1021 DTN, Pred: CmpInst::ICMP_SGE, Op0: StartValue, Op1: PN,
1022 Precond: ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
1023 // Add PN > B conditional on B <= StartValue which guarantees that the loop
1024 // exits when reaching B with a step of -1.
1025 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1026 DTN, Pred: CmpInst::ICMP_UGT, Op0: PN, Op1: B,
1027 Precond: ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
1028 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1029 DTN, Pred: CmpInst::ICMP_SGT, Op0: PN, Op1: B,
1030 Precond: ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
1031 return;
1032 }
1033
1034 // Make sure AR either steps by 1 or that the value we compare against is a
1035 // GEP based on the same start value and all offsets are a multiple of the
1036 // step size, to guarantee that the induction will reach the value.
1037 if (StepOffset.isZero() || StepOffset.isNegative())
1038 return;
1039
1040 if (!StepOffset.isOne()) {
1041 // Check whether B-Start is known to be a multiple of StepOffset.
1042 const SCEV *BMinusStart = SE.getMinusSCEV(LHS: SE.getSCEV(V: B), RHS: StartSCEV);
1043 if (isa<SCEVCouldNotCompute>(Val: BMinusStart) ||
1044 !SE.getConstantMultiple(S: BMinusStart).urem(RHS: StepOffset).isZero())
1045 return;
1046 }
1047
1048 // AR may wrap. Add PN >= StartValue conditional on StartValue <= B which
1049 // guarantees that the loop exits before wrapping in combination with the
1050 // restrictions on B and the step above.
1051 if (!MonotonicallyIncreasingUnsigned)
1052 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1053 DTN, Pred: CmpInst::ICMP_UGE, Op0: PN, Op1: StartValue,
1054 Precond: ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
1055 if (!MonotonicallyIncreasingSigned)
1056 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1057 DTN, Pred: CmpInst::ICMP_SGE, Op0: PN, Op1: StartValue,
1058 Precond: ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));
1059
1060 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1061 DTN, Pred: CmpInst::ICMP_ULT, Op0: PN, Op1: B,
1062 Precond: ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
1063 WorkList.push_back(Elt: FactOrCheck::getConditionFact(
1064 DTN, Pred: CmpInst::ICMP_SLT, Op0: PN, Op1: B,
1065 Precond: ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));
1066
  // Try to add the condition from the header to the dedicated exit blocks. When
  // exiting either with EQ or NE in the header, we know that the induction
  // value must be u<= B, as other exits may only exit earlier.
1070 assert(!StepOffset.isNegative() && "induction must be increasing");
1071 assert((Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) &&
1072 "unsupported predicate");
1073 ConditionTy Precond = {CmpInst::ICMP_ULE, StartValue, B};
1074 SmallVector<BasicBlock *> ExitBBs;
1075 L->getExitBlocks(ExitBlocks&: ExitBBs);
1076 for (BasicBlock *EB : ExitBBs) {
1077 // Bail out on non-dedicated exits.
1078 if (DT.dominates(A: &BB, B: EB)) {
1079 WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
1080 DTN: DT.getNode(BB: EB), Pred: CmpInst::ICMP_ULE, Op0: A, Op1: B, Precond));
1081 }
1082 }
1083}
1084
1085static bool getConstraintFromMemoryAccess(GetElementPtrInst &GEP,
1086 uint64_t AccessSize,
1087 CmpPredicate &Pred, Value *&A,
1088 Value *&B, const DataLayout &DL,
1089 const TargetLibraryInfo &TLI) {
1090 auto Offset = collectOffsets(GEP&: cast<GEPOperator>(Val&: GEP), DL);
1091 if (!Offset.NW.hasNoUnsignedWrap())
1092 return false;
1093
1094 if (Offset.VariableOffsets.size() != 1)
1095 return false;
1096
1097 uint64_t BitWidth = Offset.ConstantOffset.getBitWidth();
1098 auto &[Index, Scale] = Offset.VariableOffsets.front();
1099 // Bail out on non-canonical GEPs.
1100 if (Index->getType()->getScalarSizeInBits() != BitWidth)
1101 return false;
1102
1103 ObjectSizeOpts Opts;
1104 // Workaround for gep inbounds, ptr null, idx.
1105 Opts.NullIsUnknownSize = true;
  // Be conservative, since it is unclear whether an out-of-bounds access to the
  // padding is UB or not.
1108 Opts.RoundToAlign = true;
1109 std::optional<TypeSize> Size =
1110 getBaseObjectSize(Ptr: Offset.BasePtr, DL, TLI: &TLI, Opts);
1111 if (!Size || Size->isScalable())
1112 return false;
1113
1114 // Index * Scale + ConstOffset + AccessSize <= AllocSize
1115 // With nuw flag, we know that the index addition doesn't have unsigned wrap.
1116 // If (AllocSize - (ConstOffset + AccessSize)) wraps around, there is no valid
1117 // value for Index.
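  // Worked example (illustrative numbers): with AllocSize = 64, AccessSize = 4,
  // ConstOffset = 0 and Scale = 4, MaxIndex = (64 - 4 - 0) / 4 = 15, so the
  // derived fact is `Index u<= 15`.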
1118 APInt MaxIndex = (APInt(BitWidth, Size->getFixedValue() - AccessSize,
1119 /*isSigned=*/false, /*implicitTrunc=*/true) -
1120 Offset.ConstantOffset)
1121 .udiv(RHS: Scale);
1122 Pred = ICmpInst::ICMP_ULE;
1123 A = Index;
1124 B = ConstantInt::get(Ty: Index->getType(), V: MaxIndex);
1125 return true;
1126}
1127
1128void State::addInfoFor(BasicBlock &BB) {
1129 addInfoForInductions(BB);
1130 auto &DL = BB.getDataLayout();
1131
1132 // True as long as the current instruction is guaranteed to execute.
1133 bool GuaranteedToExecute = true;
1134 // Queue conditions and assumes.
1135 for (Instruction &I : BB) {
1136 if (auto *Cmp = dyn_cast<ICmpInst>(Val: &I)) {
1137 for (Use &U : Cmp->uses()) {
1138 auto *UserI = getContextInstForUse(U);
1139 auto *DTN = DT.getNode(BB: UserI->getParent());
1140 if (!DTN)
1141 continue;
1142 WorkList.push_back(Elt: FactOrCheck::getCheck(DTN, U: &U));
1143 }
1144 continue;
1145 }
1146
1147 auto AddFactFromMemoryAccess = [&](Value *Ptr, Type *AccessType) {
1148 auto *GEP = dyn_cast<GetElementPtrInst>(Val: Ptr);
1149 if (!GEP)
1150 return;
1151 TypeSize AccessSize = DL.getTypeStoreSize(Ty: AccessType);
1152 if (!AccessSize.isFixed())
1153 return;
1154 if (GuaranteedToExecute) {
1155 CmpPredicate Pred;
1156 Value *A, *B;
1157 if (getConstraintFromMemoryAccess(GEP&: *GEP, AccessSize: AccessSize.getFixedValue(),
1158 Pred, A, B, DL, TLI)) {
1159 // The memory access is guaranteed to execute when BB is entered,
1160 // hence the constraint holds on entry to BB.
1161 WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
1162 DTN: DT.getNode(BB: I.getParent()), Pred, Op0: A, Op1: B));
1163 }
1164 } else {
1165 WorkList.emplace_back(
1166 Args: FactOrCheck::getInstFact(DTN: DT.getNode(BB: I.getParent()), Inst: &I));
1167 }
1168 };
1169
1170 if (auto *LI = dyn_cast<LoadInst>(Val: &I)) {
1171 if (!LI->isVolatile())
1172 AddFactFromMemoryAccess(LI->getPointerOperand(), LI->getAccessType());
1173 }
1174 if (auto *SI = dyn_cast<StoreInst>(Val: &I)) {
1175 if (!SI->isVolatile())
1176 AddFactFromMemoryAccess(SI->getPointerOperand(), SI->getAccessType());
1177 }
1178
1179 auto *II = dyn_cast<IntrinsicInst>(Val: &I);
1180 Intrinsic::ID ID = II ? II->getIntrinsicID() : Intrinsic::not_intrinsic;
1181 switch (ID) {
1182 case Intrinsic::assume: {
1183 Value *A, *B;
1184 CmpPredicate Pred;
1185 if (!match(V: I.getOperand(i: 0), P: m_ICmp(Pred, L: m_Value(V&: A), R: m_Value(V&: B))))
1186 break;
1187 if (GuaranteedToExecute) {
1188 // The assume is guaranteed to execute when BB is entered, hence Cond
1189 // holds on entry to BB.
1190 WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
1191 DTN: DT.getNode(BB: I.getParent()), Pred, Op0: A, Op1: B));
1192 } else {
1193 WorkList.emplace_back(
1194 Args: FactOrCheck::getInstFact(DTN: DT.getNode(BB: I.getParent()), Inst: &I));
1195 }
1196 break;
1197 }
1198 // Enqueue ssub_with_overflow for simplification.
1199 case Intrinsic::ssub_with_overflow:
1200 case Intrinsic::ucmp:
1201 case Intrinsic::scmp:
1202 WorkList.push_back(
1203 Elt: FactOrCheck::getCheck(DTN: DT.getNode(BB: &BB), CI: cast<CallInst>(Val: &I)));
1204 break;
1205 // Enqueue the intrinsics to add extra info.
1206 case Intrinsic::umin:
1207 case Intrinsic::umax:
1208 case Intrinsic::smin:
1209 case Intrinsic::smax:
1210 // TODO: handle llvm.abs as well
1211 WorkList.push_back(
1212 Elt: FactOrCheck::getCheck(DTN: DT.getNode(BB: &BB), CI: cast<CallInst>(Val: &I)));
1213 [[fallthrough]];
1214 case Intrinsic::uadd_sat:
1215 case Intrinsic::usub_sat:
      // TODO: Check if it is possible to instead only add the min/max facts
      // when simplifying uses of the min/max intrinsics.
1218 if (!isGuaranteedNotToBePoison(V: &I))
1219 break;
1220 [[fallthrough]];
1221 case Intrinsic::abs:
1222 WorkList.push_back(Elt: FactOrCheck::getInstFact(DTN: DT.getNode(BB: &BB), Inst: &I));
1223 break;
1224 }
1225
1226 GuaranteedToExecute &= isGuaranteedToTransferExecutionToSuccessor(I: &I);
1227 }
1228
1229 if (auto *Switch = dyn_cast<SwitchInst>(Val: BB.getTerminator())) {
1230 for (auto &Case : Switch->cases()) {
1231 BasicBlock *Succ = Case.getCaseSuccessor();
1232 Value *V = Case.getCaseValue();
1233 if (!canAddSuccessor(BB, Succ))
1234 continue;
1235 WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
1236 DTN: DT.getNode(BB: Succ), Pred: CmpInst::ICMP_EQ, Op0: Switch->getCondition(), Op1: V));
1237 }
1238 return;
1239 }
1240
1241 auto *Br = dyn_cast<BranchInst>(Val: BB.getTerminator());
1242 if (!Br || !Br->isConditional())
1243 return;
1244
1245 Value *Cond = Br->getCondition();
1246
  // If the condition is a chain of ORs/ANDs and the successor only has the
  // current block as predecessor, queue conditions for the successor.
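  // For example (illustrative IR shorthand): for
  // `br i1 (and (icmp ult %a, %b), (icmp ult %b, %c)), ...`, both compares are
  // queued as facts for the true successor, provided the edge from this block
  // to it dominates that successor.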
1249 Value *Op0, *Op1;
1250 if (match(V: Cond, P: m_LogicalOr(L: m_Value(V&: Op0), R: m_Value(V&: Op1))) ||
1251 match(V: Cond, P: m_LogicalAnd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
1252 bool IsOr = match(V: Cond, P: m_LogicalOr());
1253 bool IsAnd = match(V: Cond, P: m_LogicalAnd());
1254 // If there's a select that matches both AND and OR, we need to commit to
1255 // one of the options. Arbitrarily pick OR.
1256 if (IsOr && IsAnd)
1257 IsAnd = false;
1258
1259 BasicBlock *Successor = Br->getSuccessor(i: IsOr ? 1 : 0);
1260 if (canAddSuccessor(BB, Succ: Successor)) {
1261 SmallVector<Value *> CondWorkList;
1262 SmallPtrSet<Value *, 8> SeenCond;
1263 auto QueueValue = [&CondWorkList, &SeenCond](Value *V) {
1264 if (SeenCond.insert(Ptr: V).second)
1265 CondWorkList.push_back(Elt: V);
1266 };
1267 QueueValue(Op1);
1268 QueueValue(Op0);
1269 while (!CondWorkList.empty()) {
1270 Value *Cur = CondWorkList.pop_back_val();
1271 if (auto *Cmp = dyn_cast<ICmpInst>(Val: Cur)) {
1272 WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
1273 DTN: DT.getNode(BB: Successor),
1274 Pred: IsOr ? Cmp->getInverseCmpPredicate() : Cmp->getCmpPredicate(),
1275 Op0: Cmp->getOperand(i_nocapture: 0), Op1: Cmp->getOperand(i_nocapture: 1)));
1276 continue;
1277 }
1278 if (IsOr && match(V: Cur, P: m_LogicalOr(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
1279 QueueValue(Op1);
1280 QueueValue(Op0);
1281 continue;
1282 }
1283 if (IsAnd && match(V: Cur, P: m_LogicalAnd(L: m_Value(V&: Op0), R: m_Value(V&: Op1)))) {
1284 QueueValue(Op1);
1285 QueueValue(Op0);
1286 continue;
1287 }
1288 }
1289 }
1290 return;
1291 }
1292
1293 auto *CmpI = dyn_cast<ICmpInst>(Val: Br->getCondition());
1294 if (!CmpI)
1295 return;
1296 if (canAddSuccessor(BB, Succ: Br->getSuccessor(i: 0)))
1297 WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
1298 DTN: DT.getNode(BB: Br->getSuccessor(i: 0)), Pred: CmpI->getCmpPredicate(),
1299 Op0: CmpI->getOperand(i_nocapture: 0), Op1: CmpI->getOperand(i_nocapture: 1)));
1300 if (canAddSuccessor(BB, Succ: Br->getSuccessor(i: 1)))
1301 WorkList.emplace_back(Args: FactOrCheck::getConditionFact(
1302 DTN: DT.getNode(BB: Br->getSuccessor(i: 1)), Pred: CmpI->getInverseCmpPredicate(),
1303 Op0: CmpI->getOperand(i_nocapture: 0), Op1: CmpI->getOperand(i_nocapture: 1)));
1304}
1305
1306#ifndef NDEBUG
1307static void dumpUnpackedICmp(raw_ostream &OS, ICmpInst::Predicate Pred,
1308 Value *LHS, Value *RHS) {
1309 OS << "icmp " << Pred << ' ';
1310 LHS->printAsOperand(OS, /*PrintType=*/true);
1311 OS << ", ";
1312 RHS->printAsOperand(OS, /*PrintType=*/false);
1313}
1314#endif
1315
1316namespace {
1317/// Helper to keep track of a condition and if it should be treated as negated
1318/// for reproducer construction.
1319/// Pred == Predicate::BAD_ICMP_PREDICATE indicates that this entry is a
1320/// placeholder to keep the ReproducerCondStack in sync with DFSInStack.
1321struct ReproducerEntry {
1322 ICmpInst::Predicate Pred;
1323 Value *LHS;
1324 Value *RHS;
1325
1326 ReproducerEntry(ICmpInst::Predicate Pred, Value *LHS, Value *RHS)
1327 : Pred(Pred), LHS(LHS), RHS(RHS) {}
1328};
1329} // namespace
1330
1331/// Helper function to generate a reproducer function for simplifying \p Cond.
1332/// The reproducer function contains a series of @llvm.assume calls, one for
/// each condition in \p Stack. For each condition, the operand instructions are
1334/// cloned until we reach operands that have an entry in \p Value2Index. Those
1335/// will then be added as function arguments. \p DT is used to order cloned
1336/// instructions. The reproducer function will get added to \p M, if it is
1337/// non-null. Otherwise no reproducer function is generated.
1338static void generateReproducer(CmpInst *Cond, Module *M,
1339 ArrayRef<ReproducerEntry> Stack,
1340 ConstraintInfo &Info, DominatorTree &DT) {
1341 if (!M)
1342 return;
1343
1344 LLVMContext &Ctx = Cond->getContext();
1345
1346 LLVM_DEBUG(dbgs() << "Creating reproducer for " << *Cond << "\n");
1347
1348 ValueToValueMapTy Old2New;
1349 SmallVector<Value *> Args;
1350 SmallPtrSet<Value *, 8> Seen;
1351 // Traverse Cond and its operands recursively until we reach a value that's in
  // Value2Index or not an instruction, or not an operation that
1353 // ConstraintElimination can decompose. Such values will be considered as
1354 // external inputs to the reproducer, they are collected and added as function
1355 // arguments later.
1356 auto CollectArguments = [&](ArrayRef<Value *> Ops, bool IsSigned) {
1357 auto &Value2Index = Info.getValue2Index(Signed: IsSigned);
1358 SmallVector<Value *, 4> WorkList(Ops);
1359 while (!WorkList.empty()) {
1360 Value *V = WorkList.pop_back_val();
1361 if (!Seen.insert(Ptr: V).second)
1362 continue;
1363 if (Old2New.find(Val: V) != Old2New.end())
1364 continue;
1365 if (isa<Constant>(Val: V))
1366 continue;
1367
1368 auto *I = dyn_cast<Instruction>(Val: V);
1369 if (Value2Index.contains(Val: V) || !I ||
1370 !isa<CmpInst, BinaryOperator, GEPOperator, CastInst>(Val: V)) {
1371 Old2New[V] = V;
1372 Args.push_back(Elt: V);
1373 LLVM_DEBUG(dbgs() << " found external input " << *V << "\n");
1374 } else {
1375 append_range(C&: WorkList, R: I->operands());
1376 }
1377 }
1378 };
1379
1380 for (auto &Entry : Stack)
1381 if (Entry.Pred != ICmpInst::BAD_ICMP_PREDICATE)
1382 CollectArguments({Entry.LHS, Entry.RHS}, ICmpInst::isSigned(predicate: Entry.Pred));
1383 CollectArguments(Cond, ICmpInst::isSigned(predicate: Cond->getPredicate()));
1384
1385 SmallVector<Type *> ParamTys;
1386 for (auto *P : Args)
1387 ParamTys.push_back(Elt: P->getType());
1388
1389 FunctionType *FTy = FunctionType::get(Result: Cond->getType(), Params: ParamTys,
1390 /*isVarArg=*/false);
1391 Function *F = Function::Create(Ty: FTy, Linkage: Function::ExternalLinkage,
1392 N: Cond->getModule()->getName() +
1393 Cond->getFunction()->getName() + "repro",
1394 M);
1395 // Add arguments to the reproducer function for each external value collected.
1396 for (unsigned I = 0; I < Args.size(); ++I) {
1397 F->getArg(i: I)->setName(Args[I]->getName());
1398 Old2New[Args[I]] = F->getArg(i: I);
1399 }
1400
1401 BasicBlock *Entry = BasicBlock::Create(Context&: Ctx, Name: "entry", Parent: F);
1402 IRBuilder<> Builder(Entry);
1403 Builder.CreateRet(V: Builder.getTrue());
1404 Builder.SetInsertPoint(Entry->getTerminator());
1405
1406 // Clone instructions in \p Ops and their operands recursively until reaching
  // a value in Value2Index (external input to the reproducer). Update Old2New
1408 // mapping for the original and cloned instructions. Sort instructions to
1409 // clone by dominance, then insert the cloned instructions in the function.
1410 auto CloneInstructions = [&](ArrayRef<Value *> Ops, bool IsSigned) {
1411 SmallVector<Value *, 4> WorkList(Ops);
1412 SmallVector<Instruction *> ToClone;
1413 auto &Value2Index = Info.getValue2Index(Signed: IsSigned);
1414 while (!WorkList.empty()) {
1415 Value *V = WorkList.pop_back_val();
1416 if (Old2New.find(Val: V) != Old2New.end())
1417 continue;
1418
1419 auto *I = dyn_cast<Instruction>(Val: V);
1420 if (!Value2Index.contains(Val: V) && I) {
1421 Old2New[V] = nullptr;
1422 ToClone.push_back(Elt: I);
1423 append_range(C&: WorkList, R: I->operands());
1424 }
1425 }
1426
1427 sort(C&: ToClone,
1428 Comp: [&DT](Instruction *A, Instruction *B) { return DT.dominates(Def: A, User: B); });
1429 for (Instruction *I : ToClone) {
1430 Instruction *Cloned = I->clone();
1431 Old2New[I] = Cloned;
1432 Old2New[I]->setName(I->getName());
1433 Cloned->insertBefore(InsertPos: Builder.GetInsertPoint());
1434 Cloned->dropUnknownNonDebugMetadata();
1435 Cloned->setDebugLoc({});
1436 }
1437 };
1438
1439 // Materialize the assumptions for the reproducer using the entries in Stack.
1440 // That is, first clone the operands of the condition recursively until we
1441 // reach an external input to the reproducer and add them to the reproducer
1442 // function. Then add an ICmp for the condition (with the inverse predicate if
1443 // the entry is negated) and an assert using the ICmp.
1444 for (auto &Entry : Stack) {
1445 if (Entry.Pred == ICmpInst::BAD_ICMP_PREDICATE)
1446 continue;
1447
1448 LLVM_DEBUG(dbgs() << " Materializing assumption ";
1449 dumpUnpackedICmp(dbgs(), Entry.Pred, Entry.LHS, Entry.RHS);
1450 dbgs() << "\n");
1451 CloneInstructions({Entry.LHS, Entry.RHS}, CmpInst::isSigned(predicate: Entry.Pred));
1452
1453 auto *Cmp = Builder.CreateICmp(P: Entry.Pred, LHS: Entry.LHS, RHS: Entry.RHS);
1454 Builder.CreateAssumption(Cond: Cmp);
1455 }
1456
1457 // Finally, clone the condition to reproduce and remap instruction operands in
1458 // the reproducer using Old2New.
1459 CloneInstructions(Cond, CmpInst::isSigned(predicate: Cond->getPredicate()));
1460 Entry->getTerminator()->setOperand(i: 0, Val: Cond);
1461 remapInstructionsInBlocks(Blocks: {Entry}, VMap&: Old2New);
1462
1463 assert(!verifyFunction(*F, &dbgs()));
1464}
1465
1466static std::optional<bool> checkCondition(CmpInst::Predicate Pred, Value *A,
1467 Value *B, Instruction *CheckInst,
1468 ConstraintInfo &Info) {
1469 LLVM_DEBUG(dbgs() << "Checking " << *CheckInst << "\n");
1470
1471 auto R = Info.getConstraintForSolving(Pred, Op0: A, Op1: B);
1472 if (R.empty() || !R.isValid(Info)) {
1473 LLVM_DEBUG(dbgs() << " failed to decompose condition\n");
1474 return std::nullopt;
1475 }
1476
1477 auto &CSToUse = Info.getCS(Signed: R.IsSigned);
1478 if (auto ImpliedCondition = R.isImpliedBy(CS: CSToUse)) {
1479 if (!DebugCounter::shouldExecute(Counter&: EliminatedCounter))
1480 return std::nullopt;
1481
1482 LLVM_DEBUG({
1483 dbgs() << "Condition ";
1484 dumpUnpackedICmp(
1485 dbgs(), *ImpliedCondition ? Pred : CmpInst::getInversePredicate(Pred),
1486 A, B);
1487 dbgs() << " implied by dominating constraints\n";
1488 CSToUse.dump();
1489 });
1490 return ImpliedCondition;
1491 }
1492
1493 return std::nullopt;
1494}
1495
static bool checkAndReplaceCondition(
    ICmpInst *Cmp, ConstraintInfo &Info, unsigned NumIn, unsigned NumOut,
    Instruction *ContextInst, Module *ReproducerModule,
    ArrayRef<ReproducerEntry> ReproducerCondStack, DominatorTree &DT,
    SmallVectorImpl<Instruction *> &ToRemove) {
  auto ReplaceCmpWithConstant = [&](CmpInst *Cmp, bool IsTrue) {
    generateReproducer(Cmp, ReproducerModule, ReproducerCondStack, Info, DT);
    Constant *ConstantC = ConstantInt::getBool(
        CmpInst::makeCmpResultType(Cmp->getType()), IsTrue);
    bool Changed = false;
    Cmp->replaceUsesWithIf(ConstantC, [&DT, NumIn, NumOut, ContextInst,
                                       &Changed](Use &U) {
      auto *UserI = getContextInstForUse(U);
      auto *DTN = DT.getNode(UserI->getParent());
      if (!DTN || DTN->getDFSNumIn() < NumIn || DTN->getDFSNumOut() > NumOut)
        return false;
      if (UserI->getParent() == ContextInst->getParent() &&
          UserI->comesBefore(ContextInst))
        return false;

      // Conditions in an assume trivially simplify to true. Skip uses
      // in assume calls to not destroy the available information.
      auto *II = dyn_cast<IntrinsicInst>(U.getUser());
      bool ShouldReplace = !II || II->getIntrinsicID() != Intrinsic::assume;
      Changed |= ShouldReplace;
      return ShouldReplace;
    });
    NumCondsRemoved++;

    // Update the debug value records that satisfy the same condition used
    // in replaceUsesWithIf.
    SmallVector<DbgVariableRecord *> DVRUsers;
    findDbgUsers(Cmp, DVRUsers);

    for (auto *DVR : DVRUsers) {
      auto *DTN = DT.getNode(DVR->getParent());
      if (!DTN || DTN->getDFSNumIn() < NumIn || DTN->getDFSNumOut() > NumOut)
        continue;

      auto *MarkedI = DVR->getInstruction();
      if (MarkedI->getParent() == ContextInst->getParent() &&
          MarkedI->comesBefore(ContextInst))
        continue;

      DVR->replaceVariableLocationOp(Cmp, ConstantC);
    }

    if (Cmp->use_empty())
      ToRemove.push_back(Cmp);

    return Changed;
  };

  if (auto ImpliedCondition =
          checkCondition(Cmp->getPredicate(), Cmp->getOperand(0),
                         Cmp->getOperand(1), Cmp, Info))
    return ReplaceCmpWithConstant(Cmp, *ImpliedCondition);

  // When the predicate is samesign and unsigned, we can also make use of the
  // signed predicate information.
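  // E.g. 'icmp samesign ult %a, %b' can also be decided using 'slt' facts
  // from the signed constraint system.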
  if (Cmp->hasSameSign() && Cmp->isUnsigned())
    if (auto ImpliedCondition =
            checkCondition(Cmp->getSignedPredicate(), Cmp->getOperand(0),
                           Cmp->getOperand(1), Cmp, Info))
      return ReplaceCmpWithConstant(Cmp, *ImpliedCondition);

  return false;
}

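/// Check if the result of \p MinMax is known to equal one of its operands
/// based on the current constraints and, if so, replace all uses with that
/// operand.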
static bool checkAndReplaceMinMax(MinMaxIntrinsic *MinMax, ConstraintInfo &Info,
                                  SmallVectorImpl<Instruction *> &ToRemove) {
  auto ReplaceMinMaxWithOperand = [&](MinMaxIntrinsic *MinMax, bool UseLHS) {
    // TODO: generate reproducer for min/max.
    MinMax->replaceAllUsesWith(MinMax->getOperand(UseLHS ? 0 : 1));
    ToRemove.push_back(MinMax);
    return true;
  };

  ICmpInst::Predicate Pred =
      ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
  if (auto ImpliedCondition = checkCondition(
          Pred, MinMax->getOperand(0), MinMax->getOperand(1), MinMax, Info))
    return ReplaceMinMaxWithOperand(MinMax, *ImpliedCondition);
  if (auto ImpliedCondition = checkCondition(
          Pred, MinMax->getOperand(1), MinMax->getOperand(0), MinMax, Info))
    return ReplaceMinMaxWithOperand(MinMax, !*ImpliedCondition);
  return false;
}

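/// Fold a ucmp/scmp intrinsic \p I to 1, -1 or 0 if the relation between its
/// operands is implied by the current constraints.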
static bool checkAndReplaceCmp(CmpIntrinsic *I, ConstraintInfo &Info,
                               SmallVectorImpl<Instruction *> &ToRemove) {
  Value *LHS = I->getOperand(0);
  Value *RHS = I->getOperand(1);
  if (checkCondition(I->getGTPredicate(), LHS, RHS, I, Info).value_or(false)) {
    I->replaceAllUsesWith(ConstantInt::get(I->getType(), 1));
    ToRemove.push_back(I);
    return true;
  }
  if (checkCondition(I->getLTPredicate(), LHS, RHS, I, Info).value_or(false)) {
    I->replaceAllUsesWith(ConstantInt::getSigned(I->getType(), -1));
    ToRemove.push_back(I);
    return true;
  }
  if (checkCondition(ICmpInst::ICMP_EQ, LHS, RHS, I, Info).value_or(false)) {
    I->replaceAllUsesWith(ConstantInt::get(I->getType(), 0));
    ToRemove.push_back(I);
    return true;
  }
  return false;
}

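/// Remove the constraint represented by stack entry \p E from the constraint
/// system, together with the variables that go out of scope with it.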
static void
removeEntryFromStack(const StackEntry &E, ConstraintInfo &Info,
                     Module *ReproducerModule,
                     SmallVectorImpl<ReproducerEntry> &ReproducerCondStack,
                     SmallVectorImpl<StackEntry> &DFSInStack) {
  Info.popLastConstraint(E.IsSigned);
  // Remove variables in the system that went out of scope.
  auto &Mapping = Info.getValue2Index(E.IsSigned);
  for (Value *V : E.ValuesToRelease)
    Mapping.erase(V);
  Info.popLastNVariables(E.IsSigned, E.ValuesToRelease.size());
  DFSInStack.pop_back();
  if (ReproducerModule)
    ReproducerCondStack.pop_back();
}

/// Check if either the first condition of an AND or OR is implied by the
/// (negated in case of OR) second condition or vice versa.
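/// For example, in 'and (icmp ult %x, 5), (icmp ult %x, 10)' the first compare
/// implies the second, so the 'and' can be replaced by the first compare, and
/// in 'or (icmp ult %x, 10), (icmp uge %x, 5)' the negated first compare
/// implies the second, so the 'or' folds to true.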
static bool checkOrAndOpImpliedByOther(
    FactOrCheck &CB, ConstraintInfo &Info, Module *ReproducerModule,
    SmallVectorImpl<ReproducerEntry> &ReproducerCondStack,
    SmallVectorImpl<StackEntry> &DFSInStack,
    SmallVectorImpl<Instruction *> &ToRemove) {
  Instruction *JoinOp = CB.getContextInst();
  if (JoinOp->use_empty())
    return false;

  CmpInst *CmpToCheck = cast<CmpInst>(CB.getInstructionToSimplify());
  unsigned OtherOpIdx = JoinOp->getOperand(0) == CmpToCheck ? 1 : 0;

  // Don't try to simplify the first condition of a select by the second, as
  // this may make the select more poisonous than the original one.
  // TODO: check if the first operand may be poison.
  if (OtherOpIdx != 0 && isa<SelectInst>(JoinOp))
    return false;

  unsigned OldSize = DFSInStack.size();
  llvm::scope_exit InfoRestorer([&]() {
    // Remove entries again.
    while (OldSize < DFSInStack.size()) {
      StackEntry E = DFSInStack.back();
      removeEntryFromStack(E, Info, ReproducerModule, ReproducerCondStack,
                           DFSInStack);
    }
  });
  bool IsOr = match(JoinOp, m_LogicalOr());
  SmallVector<Value *, 4> Worklist({JoinOp->getOperand(OtherOpIdx)});
  // Do a traversal of the AND/OR tree to add facts from leaf compares.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *LHS, *RHS;
    CmpPredicate Pred;
    if (match(Val, m_ICmp(Pred, m_Value(LHS), m_Value(RHS)))) {
      // For OR, check if the negated condition implies CmpToCheck.
      if (IsOr)
        Pred = CmpInst::getInversePredicate(Pred);
      // Optimistically add fact from the other compares in the AND/OR.
      Info.addFact(Pred, LHS, RHS, CB.NumIn, CB.NumOut, DFSInStack);
      continue;
    }
    if (IsOr ? match(Val, m_LogicalOr(m_Value(LHS), m_Value(RHS)))
             : match(Val, m_LogicalAnd(m_Value(LHS), m_Value(RHS)))) {
      Worklist.push_back(LHS);
      Worklist.push_back(RHS);
    }
  }
  if (OldSize == DFSInStack.size())
    return false;

  // Check if the second condition can be simplified now.
  if (auto ImpliedCondition =
          checkCondition(CmpToCheck->getPredicate(), CmpToCheck->getOperand(0),
                         CmpToCheck->getOperand(1), CmpToCheck, Info)) {
    if (IsOr == *ImpliedCondition)
      JoinOp->replaceAllUsesWith(
          ConstantInt::getBool(JoinOp->getType(), *ImpliedCondition));
    else
      JoinOp->replaceAllUsesWith(JoinOp->getOperand(OtherOpIdx));
    ToRemove.push_back(JoinOp);
    return true;
  }

  return false;
}

void ConstraintInfo::addFact(CmpInst::Predicate Pred, Value *A, Value *B,
                             unsigned NumIn, unsigned NumOut,
                             SmallVectorImpl<StackEntry> &DFSInStack) {
  addFactImpl(Pred, A, B, NumIn, NumOut, DFSInStack,
              /*ForceSignedSystem=*/false);
  // If the Pred is eq/ne, also add the fact to the signed system.
  if (CmpInst::isEquality(Pred))
    addFactImpl(Pred, A, B, NumIn, NumOut, DFSInStack,
                /*ForceSignedSystem=*/true);
}

void ConstraintInfo::addFactImpl(CmpInst::Predicate Pred, Value *A, Value *B,
                                 unsigned NumIn, unsigned NumOut,
                                 SmallVectorImpl<StackEntry> &DFSInStack,
                                 bool ForceSignedSystem) {
  // If the constraint has a pre-condition, skip the constraint if it does not
  // hold.
  SmallVector<Value *> NewVariables;
  auto R = getConstraint(Pred, A, B, NewVariables, ForceSignedSystem);

  // TODO: Support non-equality for facts as well.
  if (!R.isValid(*this) || R.isNe())
    return;

  LLVM_DEBUG(dbgs() << "Adding '"; dumpUnpackedICmp(dbgs(), Pred, A, B);
             dbgs() << "'\n");
  auto &CSToUse = getCS(R.IsSigned);
  if (R.Coefficients.empty())
    return;

  bool Added = CSToUse.addVariableRowFill(R.Coefficients);
  if (!Added)
    return;

  // If R has been added to the system, add the new variables and queue it for
  // removal once it goes out-of-scope.
  SmallVector<Value *, 2> ValuesToRelease;
  auto &Value2Index = getValue2Index(R.IsSigned);
  for (Value *V : NewVariables) {
    Value2Index.try_emplace(V, Value2Index.size() + 1);
    ValuesToRelease.push_back(V);
  }

  LLVM_DEBUG({
    dbgs() << "  constraint: ";
    dumpConstraint(R.Coefficients, getValue2Index(R.IsSigned));
    dbgs() << "\n";
  });

  DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned,
                          std::move(ValuesToRelease));

  if (!R.IsSigned) {
    for (Value *V : NewVariables) {
      ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0),
                          false, false, false);
      VarPos.Coefficients[Value2Index[V]] = -1;
      CSToUse.addVariableRow(VarPos.Coefficients);
      DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned,
                              SmallVector<Value *, 2>());
    }
  }

  if (R.isEq()) {
    // Also add the inverted constraint for equality constraints.
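    // Together with the row added above, this encodes A == B as the
    // conjunction of A <= B and B <= A.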
    for (auto &Coeff : R.Coefficients)
      Coeff *= -1;
    CSToUse.addVariableRowFill(R.Coefficients);

    DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned,
                            SmallVector<Value *, 2>());
  }
}

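/// Replace the extractvalue uses of the with.overflow intrinsic \p II with a
/// plain 'sub' of \p A and \p B for the result value and 'false' for the
/// overflow bit. Returns true if any use was replaced.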
static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B,
                                   SmallVectorImpl<Instruction *> &ToRemove) {
  bool Changed = false;
  IRBuilder<> Builder(II->getParent(), II->getIterator());
  Value *Sub = nullptr;
  for (User *U : make_early_inc_range(II->users())) {
    if (match(U, m_ExtractValue<0>(m_Value()))) {
      if (!Sub)
        Sub = Builder.CreateSub(A, B);
      U->replaceAllUsesWith(Sub);
      Changed = true;
    } else if (match(U, m_ExtractValue<1>(m_Value()))) {
      U->replaceAllUsesWith(Builder.getFalse());
      Changed = true;
    } else
      continue;

    if (U->use_empty()) {
      auto *I = cast<Instruction>(U);
      ToRemove.push_back(I);
      I->setOperand(0, PoisonValue::get(II->getType()));
      Changed = true;
    }
  }

  if (II->use_empty()) {
    II->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

static bool
tryToSimplifyOverflowMath(IntrinsicInst *II, ConstraintInfo &Info,
                          SmallVectorImpl<Instruction *> &ToRemove) {
  auto DoesConditionHold = [](CmpInst::Predicate Pred, Value *A, Value *B,
                              ConstraintInfo &Info) {
    auto R = Info.getConstraintForSolving(Pred, A, B);
    if (R.size() < 2 || !R.isValid(Info))
      return false;

    auto &CSToUse = Info.getCS(R.IsSigned);
    return CSToUse.isConditionImplied(R.Coefficients);
  };

  bool Changed = false;
  if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow) {
    // If A s>= B && B s>= 0, ssub.with.overflow(a, b) should not overflow and
    // can be simplified to a regular sub.
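    // E.g. if both %a s>= %b and %b s>= 0 are known, the result of
    //   %s = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
    // can be computed as 'sub i32 %a, %b' with an overflow bit of false
    // (types and value names for illustration only).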
    Value *A = II->getArgOperand(0);
    Value *B = II->getArgOperand(1);
    if (!DoesConditionHold(CmpInst::ICMP_SGE, A, B, Info) ||
        !DoesConditionHold(CmpInst::ICMP_SGE, B,
                           ConstantInt::get(A->getType(), 0), Info))
      return false;
    Changed = replaceSubOverflowUses(II, A, B, ToRemove);
  }
  return Changed;
}

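/// Main driver of the pass: collect facts and conditions to check for \p F,
/// process them in dominance order, and simplify conditions that are implied
/// by the constraints collected from dominating facts. Returns true if \p F
/// was changed.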
static bool eliminateConstraints(Function &F, DominatorTree &DT, LoopInfo &LI,
                                 ScalarEvolution &SE,
                                 OptimizationRemarkEmitter &ORE,
                                 TargetLibraryInfo &TLI) {
  bool Changed = false;
  DT.updateDFSNumbers();
  SmallVector<Value *> FunctionArgs(llvm::make_pointer_range(F.args()));
  ConstraintInfo Info(F.getDataLayout(), FunctionArgs);
  State S(DT, LI, SE, TLI);
  std::unique_ptr<Module> ReproducerModule(
      DumpReproducers ? new Module(F.getName(), F.getContext()) : nullptr);

  // First, collect conditions implied by branches and blocks with their
  // Dominator DFS in and out numbers.
  for (BasicBlock &BB : F) {
    if (!DT.getNode(&BB))
      continue;
    S.addInfoFor(BB);
  }

  // Next, sort worklist by dominance, so that dominating conditions to check
  // and facts come before conditions and facts dominated by them. If a
  // condition to check and a fact have the same numbers, conditional facts come
  // first. Assume facts and checks are ordered according to their relative
  // order in the containing basic block. Also make sure conditions with
  // constant operands come before conditions without constant operands. This
  // increases the effectiveness of the current signed <-> unsigned fact
  // transfer logic.
  stable_sort(S.WorkList, [](const FactOrCheck &A, const FactOrCheck &B) {
    auto HasNoConstOp = [](const FactOrCheck &B) {
      Value *V0 = B.isConditionFact() ? B.Cond.Op0 : B.Inst->getOperand(0);
      Value *V1 = B.isConditionFact() ? B.Cond.Op1 : B.Inst->getOperand(1);
      return !isa<ConstantInt>(V0) && !isa<ConstantInt>(V1);
    };
    // If both entries have the same In numbers, conditional facts come first.
    // Otherwise use the relative order in the basic block.
    if (A.NumIn == B.NumIn) {
      if (A.isConditionFact() && B.isConditionFact()) {
        bool NoConstOpA = HasNoConstOp(A);
        bool NoConstOpB = HasNoConstOp(B);
        return NoConstOpA < NoConstOpB;
      }
      if (A.isConditionFact())
        return true;
      if (B.isConditionFact())
        return false;
      auto *InstA = A.getContextInst();
      auto *InstB = B.getContextInst();
      return InstA->comesBefore(InstB);
    }
    return A.NumIn < B.NumIn;
  });

  SmallVector<Instruction *> ToRemove;

  // Finally, process ordered worklist and eliminate implied conditions.
  SmallVector<StackEntry, 16> DFSInStack;
  SmallVector<ReproducerEntry> ReproducerCondStack;
  for (FactOrCheck &CB : S.WorkList) {
    // First, pop entries from the stack that are out-of-scope for CB. Remove
    // the corresponding entry from the constraint system.
    while (!DFSInStack.empty()) {
      auto &E = DFSInStack.back();
      LLVM_DEBUG(dbgs() << "Top of stack : " << E.NumIn << " " << E.NumOut
                        << "\n");
      LLVM_DEBUG(dbgs() << "CB: " << CB.NumIn << " " << CB.NumOut << "\n");
      assert(E.NumIn <= CB.NumIn);
      if (CB.NumOut <= E.NumOut)
        break;
      LLVM_DEBUG({
        dbgs() << "Removing ";
        dumpConstraint(Info.getCS(E.IsSigned).getLastConstraint(),
                       Info.getValue2Index(E.IsSigned));
        dbgs() << "\n";
      });
      removeEntryFromStack(E, Info, ReproducerModule.get(), ReproducerCondStack,
                           DFSInStack);
    }

    // For a block, check if any CmpInsts become known based on the current set
    // of constraints.
    if (CB.isCheck()) {
      Instruction *Inst = CB.getInstructionToSimplify();
      if (!Inst)
        continue;
      LLVM_DEBUG(dbgs() << "Processing condition to simplify: " << *Inst
                        << "\n");
      if (auto *II = dyn_cast<WithOverflowInst>(Inst)) {
        Changed |= tryToSimplifyOverflowMath(II, Info, ToRemove);
      } else if (auto *Cmp = dyn_cast<ICmpInst>(Inst)) {
        bool Simplified = checkAndReplaceCondition(
            Cmp, Info, CB.NumIn, CB.NumOut, CB.getContextInst(),
            ReproducerModule.get(), ReproducerCondStack, S.DT, ToRemove);
        if (!Simplified &&
            match(CB.getContextInst(), m_LogicalOp(m_Value(), m_Value()))) {
          Simplified = checkOrAndOpImpliedByOther(
              CB, Info, ReproducerModule.get(), ReproducerCondStack, DFSInStack,
              ToRemove);
        }
        Changed |= Simplified;
      } else if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Inst)) {
        Changed |= checkAndReplaceMinMax(MinMax, Info, ToRemove);
      } else if (auto *CmpIntr = dyn_cast<CmpIntrinsic>(Inst)) {
        Changed |= checkAndReplaceCmp(CmpIntr, Info, ToRemove);
      }
      continue;
    }

    auto AddFact = [&](CmpPredicate Pred, Value *A, Value *B) {
      LLVM_DEBUG(dbgs() << "Processing fact to add to the system: ";
                 dumpUnpackedICmp(dbgs(), Pred, A, B); dbgs() << "\n");
      if (Info.getCS(CmpInst::isSigned(Pred)).size() > MaxRows) {
        LLVM_DEBUG(
            dbgs()
            << "Skip adding constraint because system has too many rows.\n");
        return;
      }

      Info.addFact(Pred, A, B, CB.NumIn, CB.NumOut, DFSInStack);
      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size())
        ReproducerCondStack.emplace_back(Pred, A, B);

      if (ICmpInst::isRelational(Pred)) {
        // If samesign is present on the ICmp, simply flip the sign of the
        // predicate, transferring the information from the signed system to
        // the unsigned system, and vice versa.
        if (Pred.hasSameSign())
          Info.addFact(ICmpInst::getFlippedSignednessPredicate(Pred), A, B,
                       CB.NumIn, CB.NumOut, DFSInStack);
        else
          Info.transferToOtherSystem(Pred, A, B, CB.NumIn, CB.NumOut,
                                     DFSInStack);
      }

      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size()) {
        // Add dummy entries to ReproducerCondStack to keep it in sync with
        // DFSInStack.
        for (unsigned I = 0,
                      E = (DFSInStack.size() - ReproducerCondStack.size());
             I < E; ++I) {
          ReproducerCondStack.emplace_back(ICmpInst::BAD_ICMP_PREDICATE,
                                           nullptr, nullptr);
        }
      }
    };

    CmpPredicate Pred;
    if (!CB.isConditionFact()) {
      Value *X;
      if (match(CB.Inst, m_Intrinsic<Intrinsic::abs>(m_Value(X)))) {
        // If is_int_min_poison is true then we may assume llvm.abs >= 0.
        if (cast<ConstantInt>(CB.Inst->getOperand(1))->isOne())
          AddFact(CmpInst::ICMP_SGE, CB.Inst,
                  ConstantInt::get(CB.Inst->getType(), 0));
        AddFact(CmpInst::ICMP_SGE, CB.Inst, X);
        continue;
      }

      if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(CB.Inst)) {
        Pred = ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
        AddFact(Pred, MinMax, MinMax->getLHS());
        AddFact(Pred, MinMax, MinMax->getRHS());
        continue;
      }
      if (auto *USatI = dyn_cast<SaturatingInst>(CB.Inst)) {
        switch (USatI->getIntrinsicID()) {
        default:
          llvm_unreachable("Unexpected intrinsic.");
        case Intrinsic::uadd_sat:
          AddFact(ICmpInst::ICMP_UGE, USatI, USatI->getLHS());
          AddFact(ICmpInst::ICMP_UGE, USatI, USatI->getRHS());
          break;
        case Intrinsic::usub_sat:
          AddFact(ICmpInst::ICMP_ULE, USatI, USatI->getLHS());
          break;
        }
        continue;
      }

      auto &DL = F.getDataLayout();
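      // If the pointer operand of a load or store is a GEP, try to derive a
      // fact about its indices from the memory access (see
      // getConstraintFromMemoryAccess).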
      auto AddFactsAboutIndices = [&](Value *Ptr, Type *AccessType) {
        CmpPredicate Pred;
        Value *A, *B;
        if (getConstraintFromMemoryAccess(
                *cast<GetElementPtrInst>(Ptr),
                DL.getTypeStoreSize(AccessType).getFixedValue(), Pred, A, B, DL,
                TLI))
          AddFact(Pred, A, B);
      };

      if (auto *LI = dyn_cast<LoadInst>(CB.Inst)) {
        AddFactsAboutIndices(LI->getPointerOperand(), LI->getAccessType());
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(CB.Inst)) {
        AddFactsAboutIndices(SI->getPointerOperand(), SI->getAccessType());
        continue;
      }
    }

    Value *A = nullptr, *B = nullptr;
    if (CB.isConditionFact()) {
      Pred = CB.Cond.Pred;
      A = CB.Cond.Op0;
      B = CB.Cond.Op1;
      if (CB.DoesHold.Pred != CmpInst::BAD_ICMP_PREDICATE &&
          !Info.doesHold(CB.DoesHold.Pred, CB.DoesHold.Op0, CB.DoesHold.Op1)) {
        LLVM_DEBUG({
          dbgs() << "Not adding fact ";
          dumpUnpackedICmp(dbgs(), Pred, A, B);
          dbgs() << " because precondition ";
          dumpUnpackedICmp(dbgs(), CB.DoesHold.Pred, CB.DoesHold.Op0,
                           CB.DoesHold.Op1);
          dbgs() << " does not hold.\n";
        });
        continue;
      }
    } else {
      bool Matched = match(CB.Inst, m_Intrinsic<Intrinsic::assume>(
                                        m_ICmp(Pred, m_Value(A), m_Value(B))));
      (void)Matched;
      assert(Matched && "Must have an assume intrinsic with an icmp operand");
    }
    AddFact(Pred, A, B);
  }

  if (ReproducerModule && !ReproducerModule->functions().empty()) {
    std::string S;
    raw_string_ostream StringS(S);
    ReproducerModule->print(StringS, nullptr);
    OptimizationRemark Rem(DEBUG_TYPE, "Reproducer", &F);
    Rem << ore::NV("module") << S;
    ORE.emit(Rem);
  }

#ifndef NDEBUG
  unsigned SignedEntries =
      count_if(DFSInStack, [](const StackEntry &E) { return E.IsSigned; });
  assert(Info.getCS(false).size() - FunctionArgs.size() ==
             DFSInStack.size() - SignedEntries &&
         "updates to CS and DFSInStack are out of sync");
  assert(Info.getCS(true).size() == SignedEntries &&
         "updates to CS and DFSInStack are out of sync");
#endif

  for (Instruction *I : ToRemove)
    I->eraseFromParent();
  return Changed;
}

PreservedAnalyses ConstraintEliminationPass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  if (!eliminateConstraints(F, DT, LI, SE, ORE, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}