//===- InductiveRangeCheckElimination.cpp - -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
// three disjoint ranges. It does that in a way such that the loop running over
// the middle range provably does not need range checks. As an example, it will
// convert
//
//   len = < known positive >
//   for (i = 0; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
// to
//
//   len = < known positive >
//   limit = smin(n, len)
//   // no first segment
//   for (i = 0; i < limit; i++) {
//     if (0 <= i && i < len) { // this check is fully redundant
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//   for (i = limit; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/InductiveRangeCheckElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopConstrainer.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
                                        cl::init(64));

static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
                                       cl::init(false));

static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
                                      cl::init(false));

static cl::opt<bool> SkipProfitabilityChecks("irce-skip-profitability-checks",
                                             cl::Hidden, cl::init(false));

static cl::opt<unsigned> MinEliminatedChecks("irce-min-eliminated-checks",
                                             cl::Hidden, cl::init(10));

static cl::opt<bool> AllowUnsignedLatchCondition("irce-allow-unsigned-latch",
                                                 cl::Hidden, cl::init(true));

static cl::opt<bool> AllowNarrowLatchCondition(
    "irce-allow-narrow-latch", cl::Hidden, cl::init(true),
    cl::desc("If set to true, IRCE may eliminate wide range checks in loops "
             "with narrow latch condition."));

static cl::opt<unsigned> MaxTypeSizeForOverflowCheck(
    "irce-max-type-size-for-overflow-check", cl::Hidden, cl::init(32),
    cl::desc(
        "Maximum size of a range check type for which a runtime overflow "
        "check of its limit computation can be produced"));

static cl::opt<bool>
    PrintScaledBoundaryRangeChecks("irce-print-scaled-boundary-range-checks",
                                   cl::Hidden, cl::init(false));

#define DEBUG_TYPE "irce"

namespace {

/// An inductive range check is a conditional branch in a loop with a condition
/// that is provably true for some contiguous range of values taken by the
/// containing loop's induction variable.
///
class InductiveRangeCheck {

  const SCEV *Begin = nullptr;
  const SCEV *Step = nullptr;
  const SCEV *End = nullptr;
  Use *CheckUse = nullptr;

  static bool parseRangeCheckICmp(Loop *L, ICmpInst *ICI, ScalarEvolution &SE,
                                  const SCEVAddRecExpr *&Index,
                                  const SCEV *&End);

  static void
  extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
                             SmallVectorImpl<InductiveRangeCheck> &Checks,
                             SmallPtrSetImpl<Value *> &Visited);

  static bool parseIvAgaisntLimit(Loop *L, Value *LHS, Value *RHS,
                                  ICmpInst::Predicate Pred, ScalarEvolution &SE,
                                  const SCEVAddRecExpr *&Index,
                                  const SCEV *&End);

  static bool reassociateSubLHS(Loop *L, Value *VariantLHS, Value *InvariantRHS,
                                ICmpInst::Predicate Pred, ScalarEvolution &SE,
                                const SCEVAddRecExpr *&Index, const SCEV *&End);

public:
  const SCEV *getBegin() const { return Begin; }
  const SCEV *getStep() const { return Step; }
  const SCEV *getEnd() const { return End; }

  void print(raw_ostream &OS) const {
    OS << "InductiveRangeCheck:\n";
    OS << " Begin: ";
    Begin->print(OS);
    OS << " Step: ";
    Step->print(OS);
    OS << " End: ";
    End->print(OS);
    OS << "\n CheckUse: ";
    getCheckUse()->getUser()->print(OS);
    OS << " Operand: " << getCheckUse()->getOperandNo() << "\n";
  }

  LLVM_DUMP_METHOD
  void dump() {
    print(dbgs());
  }

  Use *getCheckUse() const { return CheckUse; }

  /// Represents a signed integer range [Range.getBegin(), Range.getEnd()). If
  /// R.getEnd() <= R.getBegin(), then R denotes the empty range.

  class Range {
    const SCEV *Begin;
    const SCEV *End;

  public:
    Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
      assert(Begin->getType() == End->getType() && "ill-typed range!");
    }

    Type *getType() const { return Begin->getType(); }
    const SCEV *getBegin() const { return Begin; }
    const SCEV *getEnd() const { return End; }
    bool isEmpty(ScalarEvolution &SE, bool IsSigned) const {
      if (Begin == End)
        return true;
      if (IsSigned)
        return SE.isKnownPredicate(ICmpInst::ICMP_SGE, Begin, End);
      else
        return SE.isKnownPredicate(ICmpInst::ICMP_UGE, Begin, End);
    }
  };

  /// This is the value the condition of the branch needs to evaluate to for the
  /// branch to take the hot successor.
  bool getPassingDirection() { return true; }

  /// Computes a range for the induction variable (IndVar) in which the range
  /// check is redundant and can be constant-folded away. The induction
  /// variable is not required to be the canonical {0,+,1} induction variable.
  std::optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
                                                 const SCEVAddRecExpr *IndVar,
                                                 bool IsLatchSigned) const;

  /// Parse out a set of inductive range checks from \p BI and append them to
  /// \p Checks.
  ///
  /// NB! There may be conditions feeding into \p BI that aren't inductive range
  /// checks, and hence don't end up in \p Checks.
  static void extractRangeChecksFromBranch(
      BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo *BPI,
      std::optional<uint64_t> EstimatedTripCount,
      SmallVectorImpl<InductiveRangeCheck> &Checks, bool &Changed);
};

class InductiveRangeCheckElimination {
  ScalarEvolution &SE;
  BranchProbabilityInfo *BPI;
  DominatorTree &DT;
  LoopInfo &LI;

  using GetBFIFunc = llvm::function_ref<llvm::BlockFrequencyInfo &()>;
  GetBFIFunc GetBFI;

  // Returns the estimated number of iterations based on block frequency info
  // if available, or on branch probability info. Nullopt is returned if the
  // number of iterations cannot be estimated.
  std::optional<uint64_t> estimatedTripCount(const Loop &L);

public:
  InductiveRangeCheckElimination(ScalarEvolution &SE,
                                 BranchProbabilityInfo *BPI, DominatorTree &DT,
                                 LoopInfo &LI, GetBFIFunc GetBFI = nullptr)
      : SE(SE), BPI(BPI), DT(DT), LI(LI), GetBFI(GetBFI) {}

  bool run(Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop);
};

} // end anonymous namespace

/// Parse a single ICmp instruction, `ICI`, into a range check. If `ICI` cannot
/// be interpreted as a range check, return false. Otherwise set `Index` to the
/// SCEV being range checked, and set `End` to the upper or lower limit against
/// which `Index` is being range checked.
bool InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                              ScalarEvolution &SE,
                                              const SCEVAddRecExpr *&Index,
                                              const SCEV *&End) {
  auto IsLoopInvariant = [&SE, L](Value *V) {
    return SE.isLoopInvariant(SE.getSCEV(V), L);
  };

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  if (!LHS->getType()->isIntegerTy())
    return false;

  // Canonicalize to the `Index Pred Invariant` comparison.
  if (IsLoopInvariant(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  } else if (!IsLoopInvariant(RHS))
    // Both LHS and RHS are loop variant.
    return false;

  if (parseIvAgaisntLimit(L, LHS, RHS, Pred, SE, Index, End))
    return true;

  if (reassociateSubLHS(L, LHS, RHS, Pred, SE, Index, End))
    return true;

  // TODO: support ReassociateAddLHS
  return false;
}

// Try to parse a range check in the form of "IV vs Limit".
bool InductiveRangeCheck::parseIvAgaisntLimit(Loop *L, Value *LHS, Value *RHS,
                                              ICmpInst::Predicate Pred,
                                              ScalarEvolution &SE,
                                              const SCEVAddRecExpr *&Index,
                                              const SCEV *&End) {

  auto SIntMaxSCEV = [&](Type *T) {
    unsigned BitWidth = cast<IntegerType>(T)->getBitWidth();
    return SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  };

  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(LHS));
  if (!AddRec)
    return false;

  // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
  // We can potentially do much better here.
  // If we wanted to adjust the upper bound for an unsigned range check the way
  // we do for a signed one, we would need to pick the unsigned max instead.
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    if (match(RHS, m_ConstantInt<0>())) {
      Index = AddRec;
      End = SIntMaxSCEV(Index->getType());
      return true;
    }
    return false;

  case ICmpInst::ICMP_SGT:
    if (match(RHS, m_ConstantInt<-1>())) {
      Index = AddRec;
      End = SIntMaxSCEV(Index->getType());
      return true;
    }
    return false;

  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT:
    Index = AddRec;
    End = SE.getSCEV(RHS);
    return true;

  case ICmpInst::ICMP_SLE:
  case ICmpInst::ICMP_ULE:
    const SCEV *One = SE.getOne(RHS->getType());
    const SCEV *RHSS = SE.getSCEV(RHS);
    bool Signed = Pred == ICmpInst::ICMP_SLE;
    if (SE.willNotOverflow(Instruction::BinaryOps::Add, Signed, RHSS, One)) {
      Index = AddRec;
      End = SE.getAddExpr(RHSS, One);
      return true;
    }
    return false;
  }

  llvm_unreachable("default clause returns!");
}

// Try to parse a range check in the form of "IV - Offset vs Limit" or
// "Offset - IV vs Limit".
bool InductiveRangeCheck::reassociateSubLHS(
    Loop *L, Value *VariantLHS, Value *InvariantRHS, ICmpInst::Predicate Pred,
    ScalarEvolution &SE, const SCEVAddRecExpr *&Index, const SCEV *&End) {
  Value *LHS, *RHS;
  if (!match(VariantLHS, m_Sub(m_Value(LHS), m_Value(RHS))))
    return false;

  const SCEV *IV = SE.getSCEV(LHS);
  const SCEV *Offset = SE.getSCEV(RHS);
  const SCEV *Limit = SE.getSCEV(InvariantRHS);

  bool OffsetSubtracted = false;
  if (SE.isLoopInvariant(IV, L))
    // "Offset - IV vs Limit"
    std::swap(IV, Offset);
  else if (SE.isLoopInvariant(Offset, L))
    // "IV - Offset vs Limit"
    OffsetSubtracted = true;
  else
    return false;

  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AddRec)
    return false;

  // In order to turn "IV - Offset < Limit" into "IV < Limit + Offset", we need
  // to be able to freely move values from the left side of the inequality to
  // the right side (just as in normal linear arithmetic). Overflows make
  // things much more complicated, so we want to avoid them.
  //
  // Let's prove that the initial subtraction doesn't overflow with all IV's
  // values from the safe range constructed for that check.
  //
  // [Case 1] IV - Offset < Limit
  // It doesn't overflow if:
  //   SINT_MIN <= IV - Offset <= SINT_MAX
  // In terms of scaled SINT we need to prove:
  //   SINT_MIN + Offset <= IV <= SINT_MAX + Offset
  // Safe range will be constructed:
  //   0 <= IV < Limit + Offset
  // It means that 'IV - Offset' doesn't underflow, because:
  //   SINT_MIN + Offset < 0 <= IV
  // and doesn't overflow:
  //   IV < Limit + Offset <= SINT_MAX + Offset
  //
  // [Case 2] Offset - IV > Limit
  // It doesn't overflow if:
  //   SINT_MIN <= Offset - IV <= SINT_MAX
  // In terms of scaled SINT we need to prove:
  //   -SINT_MIN >= IV - Offset >= -SINT_MAX
  //   Offset - SINT_MIN >= IV >= Offset - SINT_MAX
  // Safe range will be constructed:
  //   0 <= IV < Offset - Limit
  // It means that 'Offset - IV' doesn't underflow, because
  //   Offset - SINT_MAX < 0 <= IV
  // and doesn't overflow:
  //   IV < Offset - Limit <= Offset - SINT_MIN
  //
  // For the computed upper boundary of the IV's range (Offset +/- Limit) we
  // don't know exactly whether it overflows or not. So if we can't prove this
  // fact at compile time, we scale boundary computations to a wider type with
  // the intention to add a runtime overflow check.

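  // Illustrative sketch of the scaling step (values invented for this comment,
  // not taken from the code or any test): with i8 operands, "IV - 100 s< 60"
  // reassociates to "IV s< 160", and 160 is not representable in i8. Since we
  // cannot always prove at compile time that Offset +/- Limit stays in range,
  // the boundary is computed in a type twice as wide (i16 here), and the
  // overflow check against the narrow type is folded into the safe iteration
  // space later, in computeSafeIterationSpace.
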
  auto getExprScaledIfOverflow = [&](Instruction::BinaryOps BinOp,
                                     const SCEV *LHS,
                                     const SCEV *RHS) -> const SCEV * {
    const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
                                              SCEV::NoWrapFlags, unsigned);
    switch (BinOp) {
    default:
      llvm_unreachable("Unsupported binary op");
    case Instruction::Add:
      Operation = &ScalarEvolution::getAddExpr;
      break;
    case Instruction::Sub:
      Operation = &ScalarEvolution::getMinusSCEV;
      break;
    }

    if (SE.willNotOverflow(BinOp, ICmpInst::isSigned(Pred), LHS, RHS,
                           cast<Instruction>(VariantLHS)))
      return (SE.*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0);

    // We couldn't prove that the expression does not overflow.
    // Then scale it to a wider type to check overflow at runtime.
    auto *Ty = cast<IntegerType>(LHS->getType());
    if (Ty->getBitWidth() > MaxTypeSizeForOverflowCheck)
      return nullptr;

    auto WideTy = IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);
    return (SE.*Operation)(SE.getSignExtendExpr(LHS, WideTy),
                           SE.getSignExtendExpr(RHS, WideTy), SCEV::FlagAnyWrap,
                           0);
  };

  if (OffsetSubtracted)
    // "IV - Offset < Limit" -> "IV" < Offset + Limit
    Limit = getExprScaledIfOverflow(Instruction::BinaryOps::Add, Offset, Limit);
  else {
    // "Offset - IV > Limit" -> "IV" < Offset - Limit
    Limit = getExprScaledIfOverflow(Instruction::BinaryOps::Sub, Offset, Limit);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
    // "Expr <= Limit" -> "Expr < Limit + 1"
    if (Pred == ICmpInst::ICMP_SLE && Limit)
      Limit = getExprScaledIfOverflow(Instruction::BinaryOps::Add, Limit,
                                      SE.getOne(Limit->getType()));
    if (Limit) {
      Index = AddRec;
      End = Limit;
      return true;
    }
  }
  return false;
}

void InductiveRangeCheck::extractRangeChecksFromCond(
    Loop *L, ScalarEvolution &SE, Use &ConditionUse,
    SmallVectorImpl<InductiveRangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  Value *Condition = ConditionUse.get();
  if (!Visited.insert(Condition).second)
    return;

  // TODO: Do the same for OR, XOR, NOT etc?
  if (match(Condition, m_LogicalAnd(m_Value(), m_Value()))) {
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(0),
                               Checks, Visited);
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(1),
                               Checks, Visited);
    return;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(Condition);
  if (!ICI)
    return;

  const SCEV *End = nullptr;
  const SCEVAddRecExpr *IndexAddRec = nullptr;
  if (!parseRangeCheckICmp(L, ICI, SE, IndexAddRec, End))
    return;

  assert(IndexAddRec && "IndexAddRec was not computed");
  assert(End && "End was not computed");

  if ((IndexAddRec->getLoop() != L) || !IndexAddRec->isAffine())
    return;

  InductiveRangeCheck IRC;
  IRC.End = End;
  IRC.Begin = IndexAddRec->getStart();
  IRC.Step = IndexAddRec->getStepRecurrence(SE);
  IRC.CheckUse = &ConditionUse;
  Checks.push_back(IRC);
}

void InductiveRangeCheck::extractRangeChecksFromBranch(
    BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo *BPI,
    std::optional<uint64_t> EstimatedTripCount,
    SmallVectorImpl<InductiveRangeCheck> &Checks, bool &Changed) {
  if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
    return;

  unsigned IndexLoopSucc = L->contains(BI->getSuccessor(0)) ? 0 : 1;
  assert(L->contains(BI->getSuccessor(IndexLoopSucc)) &&
         "No edges coming to loop?");

  if (!SkipProfitabilityChecks && BPI) {
    auto SuccessProbability =
        BPI->getEdgeProbability(BI->getParent(), IndexLoopSucc);
    if (EstimatedTripCount) {
      auto EstimatedEliminatedChecks =
          SuccessProbability.scale(*EstimatedTripCount);
      if (EstimatedEliminatedChecks < MinEliminatedChecks) {
        LLVM_DEBUG(dbgs() << "irce: could not prove profitability for branch "
                          << *BI << ": "
                          << "estimated eliminated checks too low "
                          << EstimatedEliminatedChecks << "\n";);
        return;
      }
    } else {
      BranchProbability LikelyTaken(15, 16);
      if (SuccessProbability < LikelyTaken) {
        LLVM_DEBUG(dbgs() << "irce: could not prove profitability for branch "
                          << *BI << ": "
                          << "could not estimate trip count "
                          << "and branch success probability too low "
                          << SuccessProbability << "\n";);
        return;
      }
    }
  }

  // IRCE expects the branch's true edge to go into the loop. Invert the branch
  // in the opposite case.
  if (IndexLoopSucc != 0) {
    IRBuilder<> Builder(BI);
    InvertBranch(BI, Builder);
    if (BPI)
      BPI->swapSuccEdgesProbabilities(BI->getParent());
    Changed = true;
  }

  SmallPtrSet<Value *, 8> Visited;
  InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0),
                                                  Checks, Visited);
}

/// If the type of \p S matches with \p Ty, return \p S. Otherwise, return
/// signed or unsigned extension of \p S to type \p Ty.
static const SCEV *NoopOrExtend(const SCEV *S, Type *Ty, ScalarEvolution &SE,
                                bool Signed) {
  return Signed ? SE.getNoopOrSignExtend(S, Ty) : SE.getNoopOrZeroExtend(S, Ty);
}

// Compute a safe set of limits for the main loop to run in -- effectively the
// intersection of `Range' and the iteration space of the original loop.
// Return std::nullopt if unable to compute the set of subranges.
static std::optional<LoopConstrainer::SubRanges>
calculateSubRanges(ScalarEvolution &SE, const Loop &L,
                   InductiveRangeCheck::Range &Range,
                   const LoopStructure &MainLoopStructure) {
  auto *RTy = cast<IntegerType>(Range.getType());
  // We only support wide range checks and narrow latches.
  if (!AllowNarrowLatchCondition && RTy != MainLoopStructure.ExitCountTy)
    return std::nullopt;
  if (RTy->getBitWidth() < MainLoopStructure.ExitCountTy->getBitWidth())
    return std::nullopt;

  LoopConstrainer::SubRanges Result;

  bool IsSignedPredicate = MainLoopStructure.IsSignedPredicate;
  // I think we can be more aggressive here and make this nuw / nsw if the
  // addition that feeds into the icmp for the latch's terminating branch is
  // nuw / nsw. In any case, a wrapping 2's complement addition is safe.
  const SCEV *Start = NoopOrExtend(SE.getSCEV(MainLoopStructure.IndVarStart),
                                   RTy, SE, IsSignedPredicate);
  const SCEV *End = NoopOrExtend(SE.getSCEV(MainLoopStructure.LoopExitAt), RTy,
                                 SE, IsSignedPredicate);

  bool Increasing = MainLoopStructure.IndVarIncreasing;

  // We compute `Smallest` and `Greatest` such that [Smallest, Greatest), or
  // [Smallest, GreatestSeen] is the range of values the induction variable
  // takes.

  const SCEV *Smallest = nullptr, *Greatest = nullptr, *GreatestSeen = nullptr;

  const SCEV *One = SE.getOne(RTy);
  if (Increasing) {
    Smallest = Start;
    Greatest = End;
    // No overflow, because the range [Smallest, GreatestSeen] is not empty.
    GreatestSeen = SE.getMinusSCEV(End, One);
  } else {
    // These two computations may sign-overflow. Here is why that is okay:
    //
    // We know that the induction variable does not sign-overflow on any
    // iteration except the last one, and it starts at `Start` and ends at
    // `End`, decrementing by one every time.
    //
    //  * if `Smallest` sign-overflows we know `End` is `INT_SMAX`. Since the
    //    induction variable is decreasing we know that the smallest value
    //    the loop body is actually executed with is `INT_SMIN` == `Smallest`.
    //
    //  * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`. In
    //    that case, `Clamp` will always return `Smallest` and
    //    [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
    //    will be an empty range. Returning an empty range is always safe.

    Smallest = SE.getAddExpr(End, One);
    Greatest = SE.getAddExpr(Start, One);
    GreatestSeen = Start;
  }

  auto Clamp = [&SE, Smallest, Greatest, IsSignedPredicate](const SCEV *S) {
    return IsSignedPredicate
               ? SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S))
               : SE.getUMaxExpr(Smallest, SE.getUMinExpr(Greatest, S));
  };

  // In some cases we can prove that we don't need a pre or post loop.
  ICmpInst::Predicate PredLE =
      IsSignedPredicate ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate PredLT =
      IsSignedPredicate ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;

  bool ProvablyNoPreloop =
      SE.isKnownPredicate(PredLE, Range.getBegin(), Smallest);
  if (!ProvablyNoPreloop)
    Result.LowLimit = Clamp(Range.getBegin());

  bool ProvablyNoPostLoop =
      SE.isKnownPredicate(PredLT, GreatestSeen, Range.getEnd());
  if (!ProvablyNoPostLoop)
    Result.HighLimit = Clamp(Range.getEnd());

  return Result;
}

/// Computes and returns a range of values for the induction variable (IndVar)
/// in which the range check can be safely elided. If it cannot compute such a
/// range, returns std::nullopt.
std::optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE,
                                               const SCEVAddRecExpr *IndVar,
                                               bool IsLatchSigned) const {
  // We can handle mismatching types of the latch check and the range check, as
  // long as the latch check is the narrower of the two.
  auto *IVType = dyn_cast<IntegerType>(IndVar->getType());
  auto *RCType = dyn_cast<IntegerType>(getBegin()->getType());
  auto *EndType = dyn_cast<IntegerType>(getEnd()->getType());
  // Do not work with pointer types.
  if (!IVType || !RCType)
    return std::nullopt;
  if (IVType->getBitWidth() > RCType->getBitWidth())
    return std::nullopt;

  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable, that may or may not exist as a real llvm::Value in the loop) and
  // this inductive range check is a range check on the "C + D * I" ("C" is
  // getBegin() and "D" is getStep()). We rewrite the value being range
  // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - N * A".
  //
  // The actual inequalities we solve are of the form
  //
  //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
  //
  // Here L stands for the upper limit of the safe iteration space.
  // The inequality is satisfied by (0 - M) <= IndVar < (L - M). To avoid
  // overflows when calculating (0 - M) and (L - M) we, depending on the type
  // of the IV's iteration space, limit the calculations by the borders of the
  // iteration space. For example, if IndVar is unsigned, (0 - M) overflows for
  // any M > 0. If we figured out that "anything greater than (-M) is safe", we
  // strengthen this to "everything greater than 0 is safe", assuming that
  // values between -M and 0 just do not exist in the unsigned iteration space,
  // and we don't want to deal with overflown values.

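  // As an illustration (numbers invented for this comment): for the canonical
  // IndVar {0,+,1} and a range check on {C,+,1} against limit L, we get
  // M = C - 0 = C, so the raw solution is -C <= IndVar < L - C; the clamping
  // machinery below then keeps both ends inside the IV's signed or unsigned
  // value range.
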
  if (!IndVar->isAffine())
    return std::nullopt;

  const SCEV *A = NoopOrExtend(IndVar->getStart(), RCType, SE, IsLatchSigned);
  const SCEVConstant *B = dyn_cast<SCEVConstant>(
      NoopOrExtend(IndVar->getStepRecurrence(SE), RCType, SE, IsLatchSigned));
  if (!B)
    return std::nullopt;
  assert(!B->isZero() && "Recurrence with zero step?");

  const SCEV *C = getBegin();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getStep());
  if (D != B)
    return std::nullopt;

  assert(!D->getValue()->isZero() && "Recurrence with zero step?");
  unsigned BitWidth = RCType->getBitWidth();
  const SCEV *SIntMax = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  const SCEV *SIntMin = SE.getConstant(APInt::getSignedMinValue(BitWidth));

  // Subtract Y from X so that it does not go through the border of the IV
  // iteration space. Mathematically, it is equivalent to:
  //
  //    ClampedSubtract(X, Y) = min(max(X - Y, INT_MIN), INT_MAX).  [1]
  //
  // In [1], 'X - Y' is a mathematical subtraction (result is not bounded to
  // any width of bit grid). But after we take min/max, the result is
  // guaranteed to be within [INT_MIN, INT_MAX].
  //
  // In [1], INT_MAX and INT_MIN are the signed or unsigned max/min values
  // respectively, depending on the type of the latch condition that defines
  // the IV iteration space.
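  //
  // For example (an illustration for this comment only): with a signed 8-bit
  // latch, ClampedSubtract(100, -50) must not produce 150; rule 3 of the
  // signed branch below subtracts smax(-50, 100 - 127) = -27 instead, giving
  // 127, i.e. the result saturates at SINT_MAX rather than wrapping.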
  auto ClampedSubtract = [&](const SCEV *X, const SCEV *Y) {
    // FIXME: The current implementation assumes that X is in [0, SINT_MAX].
    // This is required to ensure that SINT_MAX - X does not overflow signed
    // and that X - Y does not overflow unsigned if Y is negative. Can we lift
    // this restriction and make it work for negative X as well?
    if (IsLatchSigned) {
      // X is a number from the signed range, Y is interpreted as signed.
      // Even if Y is SINT_MAX, (X - Y) does not reach SINT_MIN. So the only
      // thing we should care about is that we didn't cross SINT_MAX.
      // So, if Y is positive, we subtract Y safely.
      //   Rule 1: Y > 0 ---> Y.
      // If 0 <= -Y <= (SINT_MAX - X), we subtract Y safely.
      //   Rule 2: Y >=s (X - SINT_MAX) ---> Y.
      // If 0 <= (SINT_MAX - X) < -Y, we can only subtract (X - SINT_MAX).
      //   Rule 3: Y <s (X - SINT_MAX) ---> (X - SINT_MAX).
      // It gives us smax(Y, X - SINT_MAX) to subtract in all cases.
      const SCEV *XMinusSIntMax = SE.getMinusSCEV(X, SIntMax);
      return SE.getMinusSCEV(X, SE.getSMaxExpr(Y, XMinusSIntMax),
                             SCEV::FlagNSW);
    } else
      // X is a number from the unsigned range, Y is interpreted as signed.
      // Even if Y is SINT_MIN, (X - Y) does not reach UINT_MAX. So the only
      // thing we should care about is that we didn't cross zero.
      // So, if Y is negative, we subtract Y safely.
      //   Rule 1: Y <s 0 ---> Y.
      // If 0 <= Y <= X, we subtract Y safely.
      //   Rule 2: Y <=s X ---> Y.
      // If 0 <= X < Y, we should stop at 0 and can only subtract X.
      //   Rule 3: Y >s X ---> X.
      // It gives us smin(X, Y) to subtract in all cases.
      return SE.getMinusSCEV(X, SE.getSMinExpr(X, Y), SCEV::FlagNUW);
  };
  const SCEV *M = SE.getMinusSCEV(C, A);
  const SCEV *Zero = SE.getZero(M->getType());

  // This function returns a SCEV equal to 1 if X is non-negative, 0 otherwise.
  auto SCEVCheckNonNegative = [&](const SCEV *X) {
    const Loop *L = IndVar->getLoop();
    const SCEV *Zero = SE.getZero(X->getType());
    const SCEV *One = SE.getOne(X->getType());
    // Can we trivially prove that X is a non-negative or negative value?
    if (isKnownNonNegativeInLoop(X, L, SE))
      return One;
    else if (isKnownNegativeInLoop(X, L, SE))
      return Zero;
    // If not, we will have to figure it out during the execution.
    // Function smax(smin(X, 0), -1) + 1 equals to 1 if X >= 0 and 0 if X < 0.
    const SCEV *NegOne = SE.getNegativeSCEV(One);
    return SE.getAddExpr(SE.getSMaxExpr(SE.getSMinExpr(X, Zero), NegOne), One);
  };

  // This function returns a SCEV equal to 1 if X will not overflow in terms of
  // the range check type, 0 otherwise.
  auto SCEVCheckWillNotOverflow = [&](const SCEV *X) {
    // X doesn't overflow if SINT_MAX >= X.
    // Then if (SINT_MAX - X) >= 0, X doesn't overflow.
    const SCEV *SIntMaxExt = SE.getSignExtendExpr(SIntMax, X->getType());
    const SCEV *OverflowCheck =
        SCEVCheckNonNegative(SE.getMinusSCEV(SIntMaxExt, X));

    // X doesn't underflow if X >= SINT_MIN.
    // Then if (X - SINT_MIN) >= 0, X doesn't underflow.
    const SCEV *SIntMinExt = SE.getSignExtendExpr(SIntMin, X->getType());
    const SCEV *UnderflowCheck =
        SCEVCheckNonNegative(SE.getMinusSCEV(X, SIntMinExt));

    return SE.getMulExpr(OverflowCheck, UnderflowCheck);
  };

  // FIXME: The current implementation of ClampedSubtract implicitly assumes
  // that X is non-negative (in the sense of a signed value). We need to
  // re-implement this function in a way that it will correctly handle negative
  // X as well. We use it twice: for X = 0 everything is fine, but for
  // X = getEnd() we can end up with a negative X and produce wrong results. So
  // currently we ensure that if getEnd() is negative then both ends of the
  // safe range are zero.
  // Note that this may pessimize elimination of unsigned range checks against
  // negative values.
  const SCEV *REnd = getEnd();
  const SCEV *EndWillNotOverflow = SE.getOne(RCType);

  auto PrintRangeCheck = [&](raw_ostream &OS) {
    auto L = IndVar->getLoop();
    OS << "irce: in function ";
    OS << L->getHeader()->getParent()->getName();
    OS << ", in ";
    L->print(OS);
    OS << "there is range check with scaled boundary:\n";
    print(OS);
  };

  if (EndType->getBitWidth() > RCType->getBitWidth()) {
    assert(EndType->getBitWidth() == RCType->getBitWidth() * 2);
    if (PrintScaledBoundaryRangeChecks)
      PrintRangeCheck(errs());
    // End is computed in an extended type but will be truncated to the
    // narrower range check type. Therefore we need a check that the result
    // will not overflow in terms of the narrow type.
    EndWillNotOverflow =
        SE.getTruncateExpr(SCEVCheckWillNotOverflow(REnd), RCType);
    REnd = SE.getTruncateExpr(REnd, RCType);
  }

  const SCEV *RuntimeChecks =
      SE.getMulExpr(SCEVCheckNonNegative(REnd), EndWillNotOverflow);
  const SCEV *Begin = SE.getMulExpr(ClampedSubtract(Zero, M), RuntimeChecks);
  const SCEV *End = SE.getMulExpr(ClampedSubtract(REnd, M), RuntimeChecks);

  return InductiveRangeCheck::Range(Begin, End);
}

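/// Intersect the signed ranges \p R1 and \p R2, returning std::nullopt if the
/// intersection is empty (an absent \p R1 means "no constraint yet" and yields
/// \p R2). IntersectUnsignedRange below is the unsigned counterpart; which of
/// the two is used depends on the signedness of the latch predicate.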
static std::optional<InductiveRangeCheck::Range>
IntersectSignedRange(ScalarEvolution &SE,
                     const std::optional<InductiveRangeCheck::Range> &R1,
                     const InductiveRangeCheck::Range &R2) {
  if (R2.isEmpty(SE, /* IsSigned */ true))
    return std::nullopt;
  if (!R1)
    return R2;
  auto &R1Value = *R1;
  // We never return empty ranges from this function, and R1 is supposed to be
  // a result of intersection. Thus, R1 is never empty.
  assert(!R1Value.isEmpty(SE, /* IsSigned */ true) &&
         "We should never have empty R1!");

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return std::nullopt;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  // If the resulting range is empty, just return std::nullopt.
  auto Ret = InductiveRangeCheck::Range(NewBegin, NewEnd);
  if (Ret.isEmpty(SE, /* IsSigned */ true))
    return std::nullopt;
  return Ret;
}

static std::optional<InductiveRangeCheck::Range>
IntersectUnsignedRange(ScalarEvolution &SE,
                       const std::optional<InductiveRangeCheck::Range> &R1,
                       const InductiveRangeCheck::Range &R2) {
  if (R2.isEmpty(SE, /* IsSigned */ false))
    return std::nullopt;
  if (!R1)
    return R2;
  auto &R1Value = *R1;
  // We never return empty ranges from this function, and R1 is supposed to be
  // a result of intersection. Thus, R1 is never empty.
  assert(!R1Value.isEmpty(SE, /* IsSigned */ false) &&
         "We should never have empty R1!");

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return std::nullopt;

  const SCEV *NewBegin = SE.getUMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getUMinExpr(R1Value.getEnd(), R2.getEnd());

  // If the resulting range is empty, just return std::nullopt.
  auto Ret = InductiveRangeCheck::Range(NewBegin, NewEnd);
  if (Ret.isEmpty(SE, /* IsSigned */ false))
    return std::nullopt;
  return Ret;
}

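// New pass manager driver: canonicalize each top-level loop (loop-simplify
// form and LCSSA), then run InductiveRangeCheckElimination over a worklist of
// loops, appending any loops the transform creates along the way.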
PreservedAnalyses IRCEPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
  // There are no loops in the function. Return before computing other
  // expensive analyses.
  if (LI.empty())
    return PreservedAnalyses::all();
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &BPI = AM.getResult<BranchProbabilityAnalysis>(F);

  // Get the BFI analysis result on demand. Note that modification of the CFG
  // invalidates this analysis, and we must handle that.
  auto getBFI = [&F, &AM]() -> BlockFrequencyInfo & {
    return AM.getResult<BlockFrequencyAnalysis>(F);
  };
  InductiveRangeCheckElimination IRCE(SE, &BPI, DT, LI, {getBFI});

  bool Changed = false;
  {
    bool CFGChanged = false;
    for (const auto &L : LI) {
      CFGChanged |= simplifyLoop(L, &DT, &LI, &SE, nullptr, nullptr,
                                 /*PreserveLCSSA=*/false);
      Changed |= formLCSSARecursively(*L, DT, &LI, &SE);
    }
    Changed |= CFGChanged;

    if (CFGChanged && !SkipProfitabilityChecks) {
      PreservedAnalyses PA = PreservedAnalyses::all();
      PA.abandon<BlockFrequencyAnalysis>();
      AM.invalidate(F, PA);
    }
  }

  SmallPriorityWorklist<Loop *, 4> Worklist;
  appendLoopsToWorklist(LI, Worklist);
  auto LPMAddNewLoop = [&Worklist](Loop *NL, bool IsSubloop) {
    if (!IsSubloop)
      appendLoopsToWorklist(*NL, Worklist);
  };

  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (IRCE.run(L, LPMAddNewLoop)) {
      Changed = true;
      if (!SkipProfitabilityChecks) {
        PreservedAnalyses PA = PreservedAnalyses::all();
        PA.abandon<BlockFrequencyAnalysis>();
        AM.invalidate(F, PA);
      }
    }
  }

  if (!Changed)
    return PreservedAnalyses::all();
  return getLoopPassPreservedAnalyses();
}

std::optional<uint64_t>
InductiveRangeCheckElimination::estimatedTripCount(const Loop &L) {
  if (GetBFI) {
    BlockFrequencyInfo &BFI = GetBFI();
    uint64_t hFreq = BFI.getBlockFreq(L.getHeader()).getFrequency();
    uint64_t phFreq = BFI.getBlockFreq(L.getLoopPreheader()).getFrequency();
    if (phFreq == 0 || hFreq == 0)
      return std::nullopt;
    return {hFreq / phFreq};
  }

  if (!BPI)
    return std::nullopt;

  auto *Latch = L.getLoopLatch();
  if (!Latch)
    return std::nullopt;
  auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr)
    return std::nullopt;

  auto LatchBrExitIdx = LatchBr->getSuccessor(0) == L.getHeader() ? 1 : 0;
  BranchProbability ExitProbability =
      BPI->getEdgeProbability(Latch, LatchBrExitIdx);
  if (ExitProbability.isUnknown() || ExitProbability.isZero())
    return std::nullopt;

  return {ExitProbability.scaleByInverse(1)};
}

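// Per-loop driver: collect the loop's inductive range checks, intersect their
// safe iteration spaces, constrain the loop via LoopConstrainer, and finally
// fold the now-redundant checks to constants. Returns true if any IR was
// changed, including branch inversions made while scanning for checks.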
bool InductiveRangeCheckElimination::run(
    Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop) {
  if (L->getBlocks().size() >= LoopSizeCutoff) {
    LLVM_DEBUG(dbgs() << "irce: giving up constraining loop, too large\n");
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    LLVM_DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  auto EstimatedTripCount = estimatedTripCount(*L);
  if (!SkipProfitabilityChecks && EstimatedTripCount &&
      *EstimatedTripCount < MinEliminatedChecks) {
    LLVM_DEBUG(dbgs() << "irce: could not prove profitability: "
                      << "the estimated number of iterations is "
                      << *EstimatedTripCount << "\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  bool Changed = false;

  for (auto *BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(
          TBI, L, SE, BPI, EstimatedTripCount, RangeChecks, Changed);

  if (RangeChecks.empty())
    return Changed;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  LLVM_DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  std::optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, *L, AllowUnsignedLatchCondition,
                                        FailureReason);
  if (!MaybeLoopStructure) {
    LLVM_DEBUG(dbgs() << "irce: could not parse loop structure: "
                      << FailureReason << "\n";);
    return Changed;
  }
  LoopStructure LS = *MaybeLoopStructure;
  const SCEVAddRecExpr *IndVar = cast<SCEVAddRecExpr>(
      SE.getMinusSCEV(SE.getSCEV(LS.IndVarBase), SE.getSCEV(LS.IndVarStep)));
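  // Explanatory note: LS.IndVarBase is the induction variable value tested by
  // the latch condition (normally the post-increment value), so subtracting
  // one step recovers the recurrence for the IV value at the start of each
  // iteration, which is what the recognized range checks are expressed
  // against.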

  std::optional<InductiveRangeCheck::Range> SafeIterRange;

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;
  // Based on the type of the latch predicate, we interpret the IV iteration
  // range as a signed or unsigned range. We use different min/max functions
  // (signed or unsigned) when intersecting this range with safe iteration
  // ranges implied by range checks.
  auto IntersectRange =
      LS.IsSignedPredicate ? IntersectSignedRange : IntersectUnsignedRange;

  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar,
                                                LS.IsSignedPredicate);
    if (Result) {
      auto MaybeSafeIterRange = IntersectRange(SE, SafeIterRange, *Result);
      if (MaybeSafeIterRange) {
        assert(!MaybeSafeIterRange->isEmpty(SE, LS.IsSignedPredicate) &&
               "We should never return empty ranges!");
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = *MaybeSafeIterRange;
      }
    }
  }

  if (!SafeIterRange)
    return Changed;

  std::optional<LoopConstrainer::SubRanges> MaybeSR =
      calculateSubRanges(SE, *L, *SafeIterRange, LS);
  if (!MaybeSR) {
    LLVM_DEBUG(dbgs() << "irce: could not compute subranges\n");
    return false;
  }

  LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT,
                     SafeIterRange->getBegin()->getType(), *MaybeSR);

  if (LC.run()) {
    Changed = true;

    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    LLVM_DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.

    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}