//===- LoopPeel.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loop Peeling Utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace llvm::SCEVPatternMatch;

#define DEBUG_TYPE "loop-peel"

STATISTIC(NumPeeled, "Number of loops peeled");
STATISTIC(NumPeeledEnd, "Number of loops peeled from end");

namespace llvm {
static cl::opt<unsigned> UnrollPeelCount(
    "unroll-peel-count", cl::Hidden,
    cl::desc("Set the unroll peeling count, for testing purposes"));

static cl::opt<bool>
    UnrollAllowPeeling("unroll-allow-peeling", cl::init(true), cl::Hidden,
                       cl::desc("Allows loops to be peeled when the dynamic "
                                "trip count is known to be low."));

static cl::opt<bool>
    UnrollAllowLoopNestsPeeling("unroll-allow-loop-nests-peeling",
                                cl::init(false), cl::Hidden,
                                cl::desc("Allows loop nests to be peeled."));

static cl::opt<unsigned> UnrollPeelMaxCount(
    "unroll-peel-max-count", cl::init(7), cl::Hidden,
    cl::desc("Max average trip count which will cause loop peeling."));

static cl::opt<unsigned> UnrollForcePeelCount(
    "unroll-force-peel-count", cl::init(0), cl::Hidden,
    cl::desc("Force a peel count regardless of profiling information."));

static cl::opt<bool> DisableAdvancedPeeling(
    "disable-advanced-peeling", cl::init(false), cl::Hidden,
    cl::desc(
        "Disable advanced peeling. Issues for convergent targets (D134803)."));

static cl::opt<bool> EnablePeelingForIV(
    "enable-peeling-for-iv", cl::init(false), cl::Hidden,
    cl::desc("Enable peeling to convert Phi nodes into IVs"));

static const char *PeeledCountMetaData = "llvm.loop.peeled.count";

extern cl::opt<bool> ProfcheckDisableMetadataFixes;
} // namespace llvm

// Check whether we are capable of peeling this loop.
bool llvm::canPeel(const Loop *L) {
  // Make sure the loop is in simplified form.
  if (!L->isLoopSimplifyForm())
    return false;
  if (!DisableAdvancedPeeling)
    return true;

  SmallVector<BasicBlock *, 4> Exits;
  L->getUniqueNonLatchExitBlocks(Exits);
  // The latch must either be the only exiting block, or all non-latch exit
  // blocks must have either a deopt or unreachable terminator or compose a
  // chain of blocks where the last one is deopt- or unreachable-terminated.
  // Both deopt and unreachable terminators are a strong indication that they
  // are not taken. Note that this is a profitability check, not a legality
  // check. Also note that LoopPeeling currently can only update the branch
  // weights of latch blocks, and branch weights to blocks with deopt or
  // unreachable do not need updating.
  return llvm::all_of(Exits, IsBlockFollowedByDeoptOrUnreachable);
}

namespace {

// As a loop is peeled, it may be the case that Phi nodes become
// loop-invariant (i.e., known because there is only one choice).
// For example, consider the following function:
//   void g(int);
//   void binary() {
//     int x = 0;
//     int y = 0;
//     int a = 0;
//     for (int i = 0; i < 100000; ++i) {
//       g(x);
//       x = y;
//       g(a);
//       y = a + 1;
//       a = 5;
//     }
//   }
// Peeling 3 iterations is beneficial because the values for x, y and a
// become known. The IR for this loop looks something like the following:
//
//   %i = phi i32 [ 0, %entry ], [ %inc, %if.end ]
//   %a = phi i32 [ 0, %entry ], [ 5, %if.end ]
//   %y = phi i32 [ 0, %entry ], [ %add, %if.end ]
//   %x = phi i32 [ 0, %entry ], [ %y, %if.end ]
//   ...
//   tail call void @_Z1gi(i32 signext %x)
//   tail call void @_Z1gi(i32 signext %a)
//   %add = add nuw nsw i32 %a, 1
//   %inc = add nuw nsw i32 %i, 1
//   %exitcond = icmp eq i32 %inc, 100000
//   br i1 %exitcond, label %for.cond.cleanup, label %for.body
//
// The arguments for the calls to g will become known after 3 iterations
// of the loop, because the phi node values become known after 3 iterations
// of the loop (i.e., they are known on the 4th iteration, so peel 3
// iterations). The first iteration has g(0), g(0); the second has g(0), g(5);
// the third has g(1), g(5) and the fourth (and all subsequent) have g(6),
// g(5). Now consider the phi nodes:
//   %a is a phi with constants so it is determined after iteration 1.
//   %y is a phi based on a constant and %a so it is determined on
//     the iteration after %a is determined, so iteration 2.
//   %x is a phi based on a constant and %y so it is determined on
//     the iteration after %y, so iteration 3.
//   %i is based on itself (and is an induction variable) so it is
//     never determined.
// This means that peeling off 3 iterations will result in being able to
// remove the phi nodes for %a, %y, and %x. The arguments for the
// corresponding calls to g are determined and the code for computing
// x, y, and a can be removed.
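// Tabulating the header phi values at the start of each iteration makes this
// concrete (from iteration 4 onwards the values repeat):
//   iteration:  1  2  3  4...
//   %a:         0  5  5  5
//   %y:         0  1  6  6
//   %x:         0  0  1  6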
//
// Similarly, there are cases where peeling makes Phi nodes loop-inductions
// (i.e., the value is increased or decreased by a fixed amount on every
// iteration). For example, consider the following function:
//
//   #define N 100
//   void f(int a[], int b[]) {
//     int im = N - 1;
//     for (int i = 0; i < N; i++) {
//       a[i] = b[i] + b[im];
//       im = i;
//     }
//   }
//
// The IR of the loop will look something like the following:
//
//   %i = phi i32 [ 0, %entry ], [ %i.next, %for.body ]
//   %im = phi i32 [ 99, %entry ], [ %i, %for.body ]
//   ...
//   %i.next = add nuw nsw i32 %i, 1
//   ...
//
// In this case, %im becomes a loop-induction variable by peeling 1 iteration,
// because %i is a loop-induction one. The peeling count can be determined by
// the same algorithm as in the loop-invariant case. Such peeling is
// profitable for loop-vectorization.
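// Concretely, %im takes the values 99, 0, 1, 2, ..., so after peeling one
// iteration %im always equals %i - 1 in the remaining loop, i.e. it is an
// affine IV with step 1.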
//
// The PhiAnalyzer class calculates how many times a loop should be
// peeled based on the above analysis of the phi nodes in the loop, while
// respecting the maximum specified.
class PhiAnalyzer {
public:
  PhiAnalyzer(const Loop &L, unsigned MaxIterations, bool PeelForIV);

  // Calculate the sufficient minimum number of iterations of the loop to peel
  // such that phi instructions become determined (subject to allowable
  // limits).
  std::optional<unsigned> calculateIterationsToPeel();

protected:
  enum class PeelCounterType {
    Invariant,
    Induction,
  };

  using PeelCounterValue = std::pair<unsigned, PeelCounterType>;
  using PeelCounter = std::optional<PeelCounterValue>;
  const PeelCounter Unknown = std::nullopt;

  // Add 1, respecting Unknown, and return Unknown if the result is over
  // MaxIterations.
  PeelCounter addOne(PeelCounter PC) const {
    if (PC == Unknown)
      return Unknown;
    auto [Val, Ty] = *PC;
    return (Val + 1 <= MaxIterations) ? PeelCounter({Val + 1, Ty}) : Unknown;
  }

  // Return a value representing zero for the given counter type.
  PeelCounter makeZero(PeelCounterType Ty) const {
    return PeelCounter({0, Ty});
  }

  // Calculate the number of iterations after which the given value becomes an
  // invariant or an induction.
  PeelCounter calculate(const Value &);

  // Auxiliary function to calculate the number of iterations for a comparison
  // instruction or a binary operator.
  PeelCounter mergeTwoCounters(const Instruction &CmpOrBinaryOp,
                               const PeelCounterValue &LHS,
                               const PeelCounterValue &RHS) const;

  // Returns true if \p Phi is an induction in the target loop. This is a
  // lightweight check that detects an IV in some, but not all, cases.
  bool isInductionPHI(const PHINode *Phi) const;

  const Loop &L;
  const unsigned MaxIterations;
  const bool PeelForIV;

  // Map of Values to the number of iterations to invariance or induction.
  SmallDenseMap<const Value *, PeelCounter> IterationsToInvarianceOrInduction;
};

PhiAnalyzer::PhiAnalyzer(const Loop &L, unsigned MaxIterations, bool PeelForIV)
    : L(L), MaxIterations(MaxIterations), PeelForIV(PeelForIV) {
  assert(canPeel(&L) && "loop is not suitable for peeling");
  assert(MaxIterations > 0 && "no peeling is allowed?");
}

/// Test whether \p Phi is an induction variable. Although this can be
/// determined using SCEV analysis, it is expensive to compute here. Instead,
/// we perform cheaper checks that may not detect complex cases but are
/// sufficient for some situations.
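/// For example, given
///   %i = phi i32 [ 0, %entry ], [ %i.next, %latch ]
///   %i.next = add nuw nsw i32 %i, 1
/// the use-def chain from %i.next reaches %i again through a single add with
/// a constant operand, so %i is recognized as an IV.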
bool PhiAnalyzer::isInductionPHI(const PHINode *Phi) const {
  // Currently we only support a loop that has a single latch.
  BasicBlock *Latch = L.getLoopLatch();
  if (Latch == nullptr)
    return false;

  Value *Cur = Phi->getIncomingValueForBlock(Latch);
  SmallPtrSet<Value *, 4> Visited;
  bool VisitBinOp = false;

  // Starting from the incoming value of the Phi, we follow the use-def chain.
  // We consider Phi to be an IV if we can reach it again by traversing only
  // add, sub, or cast instructions.
  while (true) {
    if (Cur == Phi)
      break;

    // Avoid an infinite loop.
    if (!Visited.insert(Cur).second)
      return false;

    auto *I = dyn_cast<Instruction>(Cur);
    if (!I || !L.contains(I))
      return false;

    if (auto *Cast = dyn_cast<CastInst>(I)) {
      Cur = Cast->getOperand(0);
    } else if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
      if (BinOp->getOpcode() != Instruction::Add &&
          BinOp->getOpcode() != Instruction::Sub)
        return false;
      if (!isa<ConstantInt>(BinOp->getOperand(1)))
        return false;

      VisitBinOp = true;
      Cur = BinOp->getOperand(0);
    } else {
      return false;
    }
  }

  // Ignore cases where no binary operations are visited.
  return VisitBinOp;
}

/// When either \p LHS or \p RHS is an IV, the result of \p CmpOrBinaryOp is
/// considered an IV only if it is an addition or a subtraction. Otherwise the
/// result can be a value that is neither a loop-invariant nor an IV.
///
/// If both \p LHS and \p RHS are loop-invariants, then the result of
/// \p CmpOrBinaryOp is also a loop-invariant.
PhiAnalyzer::PeelCounter
PhiAnalyzer::mergeTwoCounters(const Instruction &CmpOrBinaryOp,
                              const PeelCounterValue &LHS,
                              const PeelCounterValue &RHS) const {
  auto &[LVal, LTy] = LHS;
  auto &[RVal, RTy] = RHS;
  unsigned NewVal = std::max(LVal, RVal);

  if (LTy == PeelCounterType::Induction || RTy == PeelCounterType::Induction) {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(&CmpOrBinaryOp)) {
      if (BinOp->getOpcode() == Instruction::Add ||
          BinOp->getOpcode() == Instruction::Sub)
        return PeelCounter({NewVal, PeelCounterType::Induction});
    }
    return Unknown;
  }
  return PeelCounter({NewVal, PeelCounterType::Invariant});
}

// This function calculates the number of iterations after which the value
// becomes an invariant. The pre-calculated values are memoized in a map.
// N.B. This number will be Unknown or <= MaxIterations.
// The function is calculated according to the following definition:
// Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge]:
//   F(%x) = G(%y) + 1 (N.B. [MaxIterations | Unknown] + 1 => Unknown)
//   G(%y) = 0 if %y is a loop invariant
//   G(%y) = G(%BackEdgeValue) if %y is a phi in the header block
//   G(%y) = TODO: if %y is an expression based on phis and loop invariants.
//     The example looks like:
//       %x = phi(0, %a)  <-- becomes invariant starting from the 3rd iteration
//       %y = phi(0, 5)
//       %a = %y + 1
//   G(%y) = Unknown otherwise (including phi not in header block)
PhiAnalyzer::PeelCounter PhiAnalyzer::calculate(const Value &V) {
  // If we already know the answer, take it from the map.
  // Otherwise, place Unknown into the map to avoid infinite recursion. Such
  // cycles can never stop on an invariant.
  auto [I, Inserted] =
      IterationsToInvarianceOrInduction.try_emplace(&V, Unknown);
  if (!Inserted)
    return I->second;

  if (L.isLoopInvariant(&V))
    // Loop invariant so known at start.
    return (IterationsToInvarianceOrInduction[&V] =
                makeZero(PeelCounterType::Invariant));
  if (const PHINode *Phi = dyn_cast<PHINode>(&V)) {
    if (Phi->getParent() != L.getHeader()) {
      // Phi is not in the header block so Unknown.
      assert(IterationsToInvarianceOrInduction[&V] == Unknown &&
             "unexpected value saved");
      return Unknown;
    }

    // If Phi is an induction, register it as a starting point.
    if (PeelForIV && isInductionPHI(Phi))
      return (IterationsToInvarianceOrInduction[&V] =
                  makeZero(PeelCounterType::Induction));

    // We need to analyze the input from the back edge and add 1.
    Value *Input = Phi->getIncomingValueForBlock(L.getLoopLatch());
    PeelCounter Iterations = calculate(*Input);
    assert(IterationsToInvarianceOrInduction[Input] == Iterations &&
           "unexpected value saved");
    return (IterationsToInvarianceOrInduction[Phi] = addOne(Iterations));
  }
  if (const Instruction *I = dyn_cast<Instruction>(&V)) {
    if (isa<CmpInst>(I) || I->isBinaryOp()) {
      // Binary instructions get the max of the operands.
      PeelCounter LHS = calculate(*I->getOperand(0));
      if (LHS == Unknown)
        return Unknown;
      PeelCounter RHS = calculate(*I->getOperand(1));
      if (RHS == Unknown)
        return Unknown;
      return (IterationsToInvarianceOrInduction[I] =
                  mergeTwoCounters(*I, *LHS, *RHS));
    }
    if (I->isCast())
      // Cast instructions get the value of the operand.
      return (IterationsToInvarianceOrInduction[I] =
                  calculate(*I->getOperand(0)));
  }
  // TODO: handle more expressions.

  // Everything else is Unknown.
  assert(IterationsToInvarianceOrInduction[&V] == Unknown &&
         "unexpected value saved");
  return Unknown;
}

std::optional<unsigned> PhiAnalyzer::calculateIterationsToPeel() {
  unsigned Iterations = 0;
  for (auto &PHI : L.getHeader()->phis()) {
    PeelCounter ToInvarianceOrInduction = calculate(PHI);
    if (ToInvarianceOrInduction != Unknown) {
      unsigned Val = ToInvarianceOrInduction->first;
      assert(Val <= MaxIterations && "bad result in phi analysis");
      Iterations = std::max(Iterations, Val);
      if (Iterations == MaxIterations)
        break;
    }
  }
  assert((Iterations <= MaxIterations) && "bad result in phi analysis");
  return Iterations ? std::optional<unsigned>(Iterations) : std::nullopt;
}

} // unnamed namespace

// Try to find any invariant memory reads that will become dereferenceable in
// the remainder loop after peeling. The load must also be used (transitively)
// by an exit condition. Returns the number of iterations to peel off (at the
// moment either 0 or 1).
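// For example, a load from a loop-invariant pointer that dominates the latch
// (guarded only by exits leading to unreachable) must execute on the first
// iteration, which proves the pointer dereferenceable for the remaining
// iterations; peeling that iteration then lets later passes hoist the load
// and potentially simplify the exit condition of the remainder loop.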
static unsigned peelToTurnInvariantLoadsDereferenceable(Loop &L,
                                                        DominatorTree &DT,
                                                        AssumptionCache *AC) {
  // Skip loops with a single exiting block, because there should be no benefit
  // for the heuristic below.
  if (L.getExitingBlock())
    return 0;

  // All non-latch exit blocks must have an UnreachableInst terminator.
  // Otherwise the heuristic below may not be profitable.
  SmallVector<BasicBlock *, 4> Exits;
  L.getUniqueNonLatchExitBlocks(Exits);
  if (any_of(Exits, [](const BasicBlock *BB) {
        return !isa<UnreachableInst>(BB->getTerminator());
      }))
    return 0;

  // Now look for invariant loads that dominate the latch and are not known to
  // be dereferenceable. If there are such loads and no writes, they will
  // become dereferenceable in the loop if the first iteration is peeled off.
  // Also collect the set of instructions controlled by such loads. Only peel
  // if an exit condition uses (transitively) such a load.
  BasicBlock *Header = L.getHeader();
  BasicBlock *Latch = L.getLoopLatch();
  SmallPtrSet<Value *, 8> LoadUsers;
  const DataLayout &DL = L.getHeader()->getDataLayout();
  for (BasicBlock *BB : L.blocks()) {
    for (Instruction &I : *BB) {
      // Calls that only access inaccessible memory can never alias with loads.
      if (I.mayWriteToMemory() &&
          !(isa<CallBase>(I) &&
            cast<CallBase>(I).onlyAccessesInaccessibleMemory()))
        return 0;

      if (LoadUsers.contains(&I))
        LoadUsers.insert_range(I.users());
      // Do not look for reads in the header; they can already be hoisted
      // without peeling.
      if (BB == Header)
        continue;
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        Value *Ptr = LI->getPointerOperand();
        if (DT.dominates(BB, Latch) && L.isLoopInvariant(Ptr) &&
            !isDereferenceablePointer(Ptr, LI->getType(), DL, LI, AC, &DT))
          LoadUsers.insert_range(I.users());
      }
    }
  }
  SmallVector<BasicBlock *> ExitingBlocks;
  L.getExitingBlocks(ExitingBlocks);
  if (any_of(ExitingBlocks, [&LoadUsers](BasicBlock *Exiting) {
        return LoadUsers.contains(Exiting->getTerminator());
      }))
    return 1;
  return 0;
}

bool llvm::canPeelLastIteration(const Loop &L, ScalarEvolution &SE) {
  const SCEV *BTC = SE.getBackedgeTakenCount(&L);
  if (isa<SCEVCouldNotCompute>(BTC))
    return false;

  // Check if the exit condition of the loop can be adjusted by the peeling
  // codegen. For now,
  //  * the loop must exit via the latch, and
  //  * the exit condition must be a NE/EQ compare of an induction with step
  //    of 1 that is only used by the exiting branch.
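  // For example, a latch of the following shape can be adjusted, provided %n
  // is loop-invariant and %iv.next is an AddRec with step 1:
  //   %iv.next = add i32 %iv, 1
  //   %c = icmp ne i32 %iv.next, %n
  //   br i1 %c, label %header, label %exit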
  BasicBlock *Latch = L.getLoopLatch();
  Value *Inc;
  Value *Bound;
  CmpPredicate Pred;
  BasicBlock *Succ1;
  BasicBlock *Succ2;
  return Latch && Latch == L.getExitingBlock() &&
         match(Latch->getTerminator(),
               m_Br(m_OneUse(m_ICmp(Pred, m_Value(Inc), m_Value(Bound))),
                    m_BasicBlock(Succ1), m_BasicBlock(Succ2))) &&
         ((Pred == CmpInst::ICMP_EQ && Succ2 == L.getHeader()) ||
          (Pred == CmpInst::ICMP_NE && Succ1 == L.getHeader())) &&
         Bound->getType()->isIntegerTy() &&
         SE.isLoopInvariant(SE.getSCEV(Bound), &L) &&
         match(SE.getSCEV(Inc),
               m_scev_AffineAddRec(m_SCEV(), m_scev_One(), m_SpecificLoop(&L)));
}

/// Returns true if the last iteration can be peeled off, the condition (Pred
/// LeftAR, RightSCEV) is known at the last iteration, and the inverse
/// condition is known at the second-to-last iteration.
static bool shouldPeelLastIteration(Loop &L, CmpPredicate Pred,
                                    const SCEVAddRecExpr *LeftAR,
                                    const SCEV *RightSCEV, ScalarEvolution &SE,
                                    const TargetTransformInfo &TTI) {
  if (!canPeelLastIteration(L, SE))
    return false;

  const SCEV *BTC = SE.getBackedgeTakenCount(&L);
  SCEVExpander Expander(SE, "loop-peel");
  if (!SE.isKnownNonZero(BTC) &&
      Expander.isHighCostExpansion(BTC, &L, SCEVCheapExpansionBudget, &TTI,
                                   L.getLoopPredecessor()->getTerminator()))
    return false;

  auto Guards = ScalarEvolution::LoopGuards::collect(&L, SE);
  BTC = SE.applyLoopGuards(BTC, Guards);
  RightSCEV = SE.applyLoopGuards(RightSCEV, Guards);
  const SCEV *ValAtLastIter = LeftAR->evaluateAtIteration(BTC, SE);
  const SCEV *ValAtSecondToLastIter = LeftAR->evaluateAtIteration(
      SE.getMinusSCEV(BTC, SE.getOne(BTC->getType())), SE);

  return SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred),
                             ValAtLastIter, RightSCEV) &&
         SE.isKnownPredicate(Pred, ValAtSecondToLastIter, RightSCEV);
}

// Return the number of iterations to peel off from the beginning and the end
// of the loop, respectively, that make conditions in the body true/false. For
// example, if we peel 2 iterations off the loop below, the condition i < 2
// can be evaluated at compile time.
//
//   for (i = 0; i < n; i++)
//     if (i < 2)
//       ..
//     else
//       ..
//   }
static std::pair<unsigned, unsigned>
countToEliminateCompares(Loop &L, unsigned MaxPeelCount, ScalarEvolution &SE,
                         const TargetTransformInfo &TTI) {
  assert(L.isLoopSimplifyForm() && "Loop needs to be in loop simplify form");
  unsigned DesiredPeelCount = 0;
  unsigned DesiredPeelCountLast = 0;

  // Do not peel the entire loop.
  const SCEV *BE = SE.getConstantMaxBackedgeTakenCount(&L);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(BE))
    MaxPeelCount =
        std::min((unsigned)SC->getAPInt().getLimitedValue() - 1, MaxPeelCount);

  // Increase PeelCount while the (IterVal Pred BoundSCEV) condition is
  // satisfied; return true if the inverse condition becomes known before
  // reaching the MaxPeelCount limit.
  auto PeelWhilePredicateIsKnown =
      [&](unsigned &PeelCount, const SCEV *&IterVal, const SCEV *BoundSCEV,
          const SCEV *Step, ICmpInst::Predicate Pred) {
        while (PeelCount < MaxPeelCount &&
               SE.isKnownPredicate(Pred, IterVal, BoundSCEV)) {
          IterVal = SE.getAddExpr(IterVal, Step);
          ++PeelCount;
        }
        return SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), IterVal,
                                   BoundSCEV);
      };

  const unsigned MaxDepth = 4;
  std::function<void(Value *, unsigned)> ComputePeelCount =
      [&](Value *Condition, unsigned Depth) -> void {
    if (!Condition->getType()->isIntegerTy() || Depth >= MaxDepth)
      return;

    Value *LeftVal, *RightVal;
    if (match(Condition, m_And(m_Value(LeftVal), m_Value(RightVal))) ||
        match(Condition, m_Or(m_Value(LeftVal), m_Value(RightVal)))) {
      ComputePeelCount(LeftVal, Depth + 1);
      ComputePeelCount(RightVal, Depth + 1);
      return;
    }

    CmpPredicate Pred;
    if (!match(Condition, m_ICmp(Pred, m_Value(LeftVal), m_Value(RightVal))))
      return;

    const SCEV *LeftSCEV = SE.getSCEV(LeftVal);
    const SCEV *RightSCEV = SE.getSCEV(RightVal);

    // Do not consider predicates that are known to be true or false
    // independently of the loop iteration.
    if (SE.evaluatePredicate(Pred, LeftSCEV, RightSCEV))
      return;

    // Check if we have a condition with one AddRec and one non-AddRec
    // expression. Normalize LeftSCEV to be the AddRec.
    if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
      if (isa<SCEVAddRecExpr>(RightSCEV)) {
        std::swap(LeftSCEV, RightSCEV);
        Pred = ICmpInst::getSwappedPredicate(Pred);
      } else
        return;
    }

    const SCEVAddRecExpr *LeftAR = cast<SCEVAddRecExpr>(LeftSCEV);

    // Avoid huge SCEV computations in the loop below; make sure we only
    // consider AddRecs of the loop we are trying to peel.
    if (!LeftAR->isAffine() || LeftAR->getLoop() != &L)
      return;
    if (!(ICmpInst::isEquality(Pred) && LeftAR->hasNoSelfWrap()) &&
        !SE.getMonotonicPredicateType(LeftAR, Pred))
      return;

    // Check if extending the current DesiredPeelCount lets us evaluate Pred
    // or !Pred in the loop body statically.
    unsigned NewPeelCount = DesiredPeelCount;

    const SCEV *IterVal = LeftAR->evaluateAtIteration(
        SE.getConstant(LeftSCEV->getType(), NewPeelCount), SE);

    // If the original condition is not known, get the negated predicate
    // (which holds on the else branch) and check if it is known. This allows
    // us to peel off iterations that make the original condition false.
    if (!SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      Pred = ICmpInst::getInversePredicate(Pred);

    const SCEV *Step = LeftAR->getStepRecurrence(SE);
    if (!PeelWhilePredicateIsKnown(NewPeelCount, IterVal, RightSCEV, Step,
                                   Pred)) {
      if (shouldPeelLastIteration(L, Pred, LeftAR, RightSCEV, SE, TTI))
        DesiredPeelCountLast = 1;
      return;
    }

    // However, for equality comparisons, that isn't always sufficient to
    // eliminate the comparison in the loop body; we may need to peel one more
    // iteration. See if that makes !Pred become unknown again.
    const SCEV *NextIterVal = SE.getAddExpr(IterVal, Step);
    if (ICmpInst::isEquality(Pred) &&
        !SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), NextIterVal,
                             RightSCEV) &&
        !SE.isKnownPredicate(Pred, IterVal, RightSCEV) &&
        SE.isKnownPredicate(Pred, NextIterVal, RightSCEV)) {
      if (NewPeelCount >= MaxPeelCount)
        return; // Need to peel one more iteration, but can't. Give up.
      ++NewPeelCount; // Great!
    }

    DesiredPeelCount = std::max(DesiredPeelCount, NewPeelCount);
    DesiredPeelCountLast = std::max(DesiredPeelCountLast, NewPeelCount);
  };

  auto ComputePeelCountMinMax = [&](MinMaxIntrinsic *MinMax) {
    if (!MinMax->getType()->isIntegerTy())
      return;
    Value *LHS = MinMax->getLHS(), *RHS = MinMax->getRHS();
    const SCEV *BoundSCEV, *IterSCEV;
    if (L.isLoopInvariant(LHS)) {
      BoundSCEV = SE.getSCEV(LHS);
      IterSCEV = SE.getSCEV(RHS);
    } else if (L.isLoopInvariant(RHS)) {
      BoundSCEV = SE.getSCEV(RHS);
      IterSCEV = SE.getSCEV(LHS);
    } else
      return;
    const auto *AddRec = dyn_cast<SCEVAddRecExpr>(IterSCEV);
    // For simplicity, we support only affine recurrences.
    if (!AddRec || !AddRec->isAffine() || AddRec->getLoop() != &L)
      return;
    const SCEV *Step = AddRec->getStepRecurrence(SE);
    bool IsSigned = MinMax->isSigned();
    // To minimize the number of peeled iterations, we use strict relational
    // predicates here.
    ICmpInst::Predicate Pred;
    if (SE.isKnownPositive(Step))
      Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    else if (SE.isKnownNegative(Step))
      Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    else
      return;
    // Check that AddRec is not wrapping.
    if (!(IsSigned ? AddRec->hasNoSignedWrap() : AddRec->hasNoUnsignedWrap()))
      return;
    unsigned NewPeelCount = DesiredPeelCount;
    const SCEV *IterVal = AddRec->evaluateAtIteration(
        SE.getConstant(AddRec->getType(), NewPeelCount), SE);
    if (!PeelWhilePredicateIsKnown(NewPeelCount, IterVal, BoundSCEV, Step,
                                   Pred)) {
      if (shouldPeelLastIteration(L, Pred, AddRec, BoundSCEV, SE, TTI))
        DesiredPeelCountLast = 1;
      return;
    }
    DesiredPeelCount = NewPeelCount;
  };

  for (BasicBlock *BB : L.blocks()) {
    for (Instruction &I : *BB) {
      if (SelectInst *SI = dyn_cast<SelectInst>(&I))
        ComputePeelCount(SI->getCondition(), 0);
      if (MinMaxIntrinsic *MinMax = dyn_cast<MinMaxIntrinsic>(&I))
        ComputePeelCountMinMax(MinMax);
    }

    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;

    // Ignore the loop exit condition.
    if (L.getLoopLatch() == BB)
      continue;

    ComputePeelCount(BI->getCondition(), 0);
  }

  return {DesiredPeelCount, DesiredPeelCountLast};
}

/// This "heuristic" exactly matches implicit behavior which used to exist
/// inside getLoopEstimatedTripCount. It was added here to keep an
/// improvement inside that API from causing peeling to become more aggressive.
/// This should probably be removed.
static bool violatesLegacyMultiExitLoopCheck(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return true;

  BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch))
    return true;

  assert((LatchBR->getSuccessor(0) == L->getHeader() ||
          LatchBR->getSuccessor(1) == L->getHeader()) &&
         "At least one edge out of the latch must go to the header");

  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getUniqueNonLatchExitBlocks(ExitBlocks);
  return any_of(ExitBlocks, [](const BasicBlock *EB) {
    return !EB->getTerminatingDeoptimizeCall();
  });
}

// Return the number of iterations we want to peel off.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
                            TargetTransformInfo::PeelingPreferences &PP,
                            unsigned TripCount, DominatorTree &DT,
                            ScalarEvolution &SE, const TargetTransformInfo &TTI,
                            AssumptionCache *AC, unsigned Threshold) {
  assert(LoopSize > 0 && "Zero loop size is not allowed!");
  // Save the PP.PeelCount value set by the target in
  // TTI.getPeelingPreferences or by the flag -unroll-peel-count.
  unsigned TargetPeelCount = PP.PeelCount;
  PP.PeelCount = 0;
  PP.PeelLast = false;
  if (!canPeel(L))
    return;

  // Only try to peel innermost loops by default.
  // The constraint can be relaxed by the target in TTI.getPeelingPreferences
  // or by the flag -unroll-allow-loop-nests-peeling.
  if (!PP.AllowLoopNestsPeeling && !L->isInnermost())
    return;

  // If the user provided a peel count, use that.
  bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
  if (UserPeelCount) {
    LLVM_DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
                      << " iterations.\n");
    PP.PeelCount = UnrollForcePeelCount;
    PP.PeelProfiledIterations = true;
    return;
  }

  // Skip peeling if it's disabled.
  if (!PP.AllowPeeling)
    return;

  // Check that we can peel at least one iteration.
  if (2 * LoopSize > Threshold)
    return;

  unsigned AlreadyPeeled = 0;
  if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
    AlreadyPeeled = *Peeled;
  // Stop if we already peeled off the maximum number of iterations.
  if (AlreadyPeeled >= UnrollPeelMaxCount)
    return;

  // Pay respect to limitations implied by loop size and the max peel count.
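  // Peeling K iterations leaves K + 1 copies of the loop body, so K can be
  // at most Threshold / LoopSize - 1 if (K + 1) * LoopSize is to stay within
  // Threshold.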
  unsigned MaxPeelCount = UnrollPeelMaxCount;
  MaxPeelCount = std::min(MaxPeelCount, Threshold / LoopSize - 1);

  // Start the max computation with the PP.PeelCount value set by the target
  // in TTI.getPeelingPreferences or by the flag -unroll-peel-count.
  unsigned DesiredPeelCount = TargetPeelCount;

  // Here we try to get rid of Phis which become invariants or inductions
  // after 1, 2, ..., N iterations of the loop. For this we compute the number
  // of iterations after which every Phi is guaranteed to become an invariant
  // or an induction, and try to peel the maximum number of iterations among
  // these values, thus turning all those Phis into invariants or inductions.
  if (MaxPeelCount > DesiredPeelCount) {
    // Check how many iterations are useful for resolving Phis.
    auto NumPeels = PhiAnalyzer(*L, MaxPeelCount, EnablePeelingForIV)
                        .calculateIterationsToPeel();
    if (NumPeels)
      DesiredPeelCount = std::max(DesiredPeelCount, *NumPeels);
  }

  const auto &[CountToEliminateCmps, CountToEliminateCmpsLast] =
      countToEliminateCompares(*L, MaxPeelCount, SE, TTI);
  DesiredPeelCount = std::max(DesiredPeelCount, CountToEliminateCmps);

  if (DesiredPeelCount == 0)
    DesiredPeelCount = peelToTurnInvariantLoadsDereferenceable(*L, DT, AC);

  if (DesiredPeelCount > 0) {
    DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
    // Consider the max peel count limitation.
    assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
    if (DesiredPeelCount + AlreadyPeeled <= UnrollPeelMaxCount) {
      LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCount
                        << " iteration(s) to turn"
                        << " some Phis into invariants or inductions.\n");
      PP.PeelCount = DesiredPeelCount;
      PP.PeelProfiledIterations = false;
      PP.PeelLast = false;
      return;
    }
  }

  if (CountToEliminateCmpsLast > 0) {
    unsigned DesiredPeelCountLast =
        std::min(CountToEliminateCmpsLast, MaxPeelCount);
    // Consider the max peel count limitation.
    assert(DesiredPeelCountLast > 0 && "Wrong loop size estimation?");
    if (DesiredPeelCountLast + AlreadyPeeled <= UnrollPeelMaxCount) {
      LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCountLast
                        << " iteration(s) from the end to eliminate"
                        << " compares.\n");
      PP.PeelCount = DesiredPeelCountLast;
      PP.PeelProfiledIterations = false;
      PP.PeelLast = true;
      return;
    }
  }

  // Bail if we know the statically calculated trip count.
  // In this case we rather prefer partial unrolling.
  if (TripCount)
    return;

  // Do not apply profile-based peeling if it is disabled.
  if (!PP.PeelProfiledIterations)
    return;
  // If we don't know the trip count, but have reason to believe the average
  // trip count is low, peeling should be beneficial, since we will usually
  // hit the peeled section.
  // We only do this in the presence of profile information, since otherwise
  // our estimates of the trip count are not reliable enough.
  if (L->getHeader()->getParent()->hasProfileData()) {
    if (violatesLegacyMultiExitLoopCheck(L))
      return;
    std::optional<unsigned> EstimatedTripCount = getLoopEstimatedTripCount(L);
    if (!EstimatedTripCount)
      return;

    LLVM_DEBUG(dbgs() << "Profile-based estimated trip count is "
                      << *EstimatedTripCount << "\n");

    if (*EstimatedTripCount + AlreadyPeeled <= MaxPeelCount) {
      unsigned PeelCount = *EstimatedTripCount;
      LLVM_DEBUG(dbgs() << "Peeling first " << PeelCount << " iterations.\n");
      PP.PeelCount = PeelCount;
      return;
    }
    LLVM_DEBUG(dbgs() << "Already peel count: " << AlreadyPeeled << "\n");
    LLVM_DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
    LLVM_DEBUG(dbgs() << "Loop cost: " << LoopSize << "\n");
    LLVM_DEBUG(dbgs() << "Max peel cost: " << Threshold << "\n");
    LLVM_DEBUG(dbgs() << "Max peel count by cost: "
                      << (Threshold / LoopSize - 1) << "\n");
  }
}

/// Clones the body of the loop L, putting it between \p InsertTop and \p
/// InsertBot.
/// \param IterNumber The serial number of the iteration currently being
/// peeled off.
/// \param PeelLast Peel off the last iterations from \p L.
/// \param ExitEdges The exit edges of the original loop.
/// \param[out] NewBlocks A list of the blocks in the newly created clone.
/// \param[out] VMap The value map between the loop and the new clone.
/// \param LoopBlocks A helper for DFS-traversal of the loop.
/// \param LVMap A value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
static void cloneLoopBlocks(
    Loop *L, unsigned IterNumber, bool PeelLast, BasicBlock *InsertTop,
    BasicBlock *InsertBot, BasicBlock *OrigPreHeader,
    SmallVectorImpl<std::pair<BasicBlock *, BasicBlock *>> &ExitEdges,
    SmallVectorImpl<BasicBlock *> &NewBlocks, LoopBlocksDFS &LoopBlocks,
    ValueToValueMapTy &VMap, ValueToValueMapTy &LVMap, DominatorTree *DT,
    LoopInfo *LI, ArrayRef<MDNode *> LoopLocalNoAliasDeclScopes,
    ScalarEvolution &SE) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *PreHeader = L->getLoopPreheader();

  Function *F = Header->getParent();
  LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
  Loop *ParentLoop = L->getParentLoop();

  // For each block in the original loop, create a new copy,
  // and update the value map with the newly created values.
  for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
    BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
    NewBlocks.push_back(NewBB);

    // If an original block is an immediate child of the loop L, its copy
    // is a child of a ParentLoop after peeling. If a block is a child of
    // a nested loop, it is handled in the cloneLoop() call below.
    if (ParentLoop && LI->getLoopFor(*BB) == L)
      ParentLoop->addBasicBlockToLoop(NewBB, *LI);

    VMap[*BB] = NewBB;

    // If the dominator tree is available, insert nodes to represent the
    // cloned blocks.
    if (DT) {
      if (Header == *BB)
        DT->addNewBlock(NewBB, InsertTop);
      else {
        DomTreeNode *IDom = DT->getNode(*BB)->getIDom();
        // VMap must contain an entry for IDom, as the iteration order is RPO.
        DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDom->getBlock()]));
      }
    }
  }

  {
    // Identify what other metadata depends on the cloned version. After
    // cloning, replace the metadata with the corrected version for both
    // memory instructions and noalias intrinsics.
    std::string Ext = (Twine("Peel") + Twine(IterNumber)).str();
    cloneAndAdaptNoAliasScopes(LoopLocalNoAliasDeclScopes, NewBlocks,
                               Header->getContext(), Ext);
  }

  // Recursively create the new Loop objects for nested loops, if any,
  // to preserve LoopInfo.
  for (Loop *ChildLoop : *L) {
    cloneLoop(ChildLoop, ParentLoop, VMap, LI, nullptr);
  }

  // Hook up the control flow for the newly inserted blocks.
  // The new header is hooked up directly to the "top", which is either
  // the original loop preheader (for the first iteration) or the previous
  // iteration's exiting block (for every other iteration).
  InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));

  // Similarly, for the latch:
  // The original exiting edge is still hooked up to the loop exit.
  BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]);
  if (PeelLast) {
    // This is the last iteration and we definitely will go to the exit. Just
    // set both successors to InsertBot and let the branch be simplified later.
    assert(IterNumber == 0 && "Only peeling a single iteration implemented.");
    auto *LatchTerm = cast<BranchInst>(NewLatch->getTerminator());
    LatchTerm->setSuccessor(0, InsertBot);
    LatchTerm->setSuccessor(1, InsertBot);
  } else {
    auto *LatchTerm = cast<Instruction>(NewLatch->getTerminator());
    // The backedge now goes to the "bottom", which is either the loop's real
    // header (for the last peeled iteration) or the copied header of the next
    // iteration (for every other iteration).
    for (unsigned idx = 0, e = LatchTerm->getNumSuccessors(); idx < e; ++idx) {
      if (LatchTerm->getSuccessor(idx) == Header) {
        LatchTerm->setSuccessor(idx, InsertBot);
        break;
      }
    }
  }
  if (DT)
    DT->changeImmediateDominator(InsertBot, NewLatch);

  // The new copy of the loop body starts with a bunch of PHI nodes
  // that pick an incoming value from either the preheader, or the previous
  // loop iteration. Since this copy is no longer part of the loop, we
  // resolve this statically:
  if (PeelLast) {
    // For the last iteration, we introduce new phis for each header phi in
    // InsertTop, using the incoming value from the preheader for the original
    // preheader (when skipping the main loop) and the incoming value from the
    // latch for the latch (when continuing from the main loop).
    IRBuilder<> B(InsertTop, InsertTop->getFirstNonPHIIt());
    for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
      PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
      PHINode *PN = B.CreatePHI(NewPHI->getType(), 2);
      NewPHI->eraseFromParent();
      if (OrigPreHeader)
        PN->addIncoming(
            cast<PHINode>(&*I)->getIncomingValueForBlock(PreHeader),
            OrigPreHeader);

      PN->addIncoming(cast<PHINode>(&*I)->getIncomingValueForBlock(Latch),
                      Latch);
      VMap[&*I] = PN;
    }
  } else {
    // For the first iteration, we use the value from the preheader directly.
    // For any other iteration, we replace the phi with the value generated by
    // the immediately preceding clone of the loop body (which represents
    // the previous iteration).
    for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
      PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
      if (IterNumber == 0) {
        VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
      } else {
        Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
        Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
        if (LatchInst && L->contains(LatchInst))
          VMap[&*I] = LVMap[LatchInst];
        else
          VMap[&*I] = LatchVal;
      }
      NewPHI->eraseFromParent();
    }
  }

  // Fix up the outgoing values - we need to add a value for the iteration
  // we've just created. Note that this must happen *after* the incoming
  // values are adjusted, since the value going out of the latch may also be
  // a value coming into the header.
  for (auto Edge : ExitEdges)
    for (PHINode &PHI : Edge.second->phis()) {
      Value *LatchVal = PHI.getIncomingValueForBlock(Edge.first);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      if (LatchInst && L->contains(LatchInst))
        LatchVal = VMap[LatchVal];
      PHI.addIncoming(LatchVal, cast<BasicBlock>(VMap[Edge.first]));
      SE.forgetLcssaPhiWithNewPredecessor(L, &PHI);
    }

  // LastValueMap is updated with the values for the current loop
  // which are used the next time this function is called.
  for (auto KV : VMap)
    LVMap[KV.first] = KV.second;
}

TargetTransformInfo::PeelingPreferences
llvm::gatherPeelingPreferences(Loop *L, ScalarEvolution &SE,
                               const TargetTransformInfo &TTI,
                               std::optional<bool> UserAllowPeeling,
                               std::optional<bool> UserAllowProfileBasedPeeling,
                               bool UnrollingSpecficValues) {
  TargetTransformInfo::PeelingPreferences PP;

  // Set the default values.
  PP.PeelCount = 0;
  PP.AllowPeeling = true;
  PP.AllowLoopNestsPeeling = false;
  PP.PeelLast = false;
  PP.PeelProfiledIterations = true;

  // Get the target-specific values.
  TTI.getPeelingPreferences(L, SE, PP);

  // User-specified values using cl::opt.
  if (UnrollingSpecficValues) {
    if (UnrollPeelCount.getNumOccurrences() > 0)
      PP.PeelCount = UnrollPeelCount;
    if (UnrollAllowPeeling.getNumOccurrences() > 0)
      PP.AllowPeeling = UnrollAllowPeeling;
    if (UnrollAllowLoopNestsPeeling.getNumOccurrences() > 0)
      PP.AllowLoopNestsPeeling = UnrollAllowLoopNestsPeeling;
  }

  // User-specified values provided by argument.
  if (UserAllowPeeling)
    PP.AllowPeeling = *UserAllowPeeling;
  if (UserAllowProfileBasedPeeling)
    PP.PeelProfiledIterations = *UserAllowProfileBasedPeeling;

  return PP;
}

/// Peel off the first \p PeelCount iterations of loop \p L.
///
/// Note that this does not peel them off as a single straight-line block.
/// Rather, each iteration is peeled off separately, and needs to check the
/// exit condition.
/// For loops that dynamically execute \p PeelCount iterations or less
/// this provides a benefit, since the peeled off iterations, which account
/// for the bulk of dynamic execution, can be further simplified by scalar
/// optimizations.
void llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI,
                    ScalarEvolution *SE, DominatorTree &DT, AssumptionCache *AC,
                    bool PreserveLCSSA, ValueToValueMapTy &LVMap) {
  assert(PeelCount > 0 && "Attempt to peel out zero iterations?");
  assert(canPeel(L) && "Attempt to peel a loop which is not peelable?");
  assert((!PeelLast || (canPeelLastIteration(*L, *SE) && PeelCount == 1)) &&
         "when peeling the last iteration, the loop must be supported and can "
         "only peel a single iteration");

  LoopBlocksDFS LoopBlocks(L);
  LoopBlocks.perform(LI);

  BasicBlock *Header = L->getHeader();
  BasicBlock *PreHeader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> ExitEdges;
  L->getExitEdges(ExitEdges);

  // Remember the dominators of blocks we might reach through exits to change
  // them later. The immediate dominator of such a block might change, because
  // we add more routes which can lead to the exit: we can reach it from the
  // peeled iterations too.
  DenseMap<BasicBlock *, BasicBlock *> NonLoopBlocksIDom;
  for (auto *BB : L->blocks()) {
    auto *BBDomNode = DT.getNode(BB);
    SmallVector<BasicBlock *, 16> ChildrenToUpdate;
    for (auto *ChildDomNode : BBDomNode->children()) {
      auto *ChildBB = ChildDomNode->getBlock();
      if (!L->contains(ChildBB))
        ChildrenToUpdate.push_back(ChildBB);
    }
    // The new idom of the block will be the nearest common dominator
    // of all copies of the previous idom. This is equivalent to the
    // nearest common dominator of the previous idom and the first latch,
    // which dominates all copies of the previous idom.
    BasicBlock *NewIDom = DT.findNearestCommonDominator(BB, Latch);
    for (auto *ChildBB : ChildrenToUpdate)
      NonLoopBlocksIDom[ChildBB] = NewIDom;
  }

  Function *F = Header->getParent();

  // Set up all the necessary basic blocks.
  BasicBlock *InsertTop;
  BasicBlock *InsertBot;
  BasicBlock *NewPreHeader = nullptr;
  DenseMap<Instruction *, Value *> ExitValues;
  if (PeelLast) {
    // It is convenient to split the single exit block from the latch into 3
    // parts - two blocks to anchor the peeled copy of the loop body, and a
    // new final exit block.

    // Peeling the last iteration transforms
    //
    // PreHeader:
    // ...
    // Header:
    //   LoopBody
    //   If (cond) goto Header
    // Exit:
    //
    // into
    //
    // Header:
    //   LoopBody
    //   If (cond) goto Header
    // InsertTop:
    //   LoopBody
    //   If (!cond) goto InsertBot
    // InsertBot:
    // Exit:
    // ...
    BasicBlock *Exit = L->getExitBlock();
    for (PHINode &P : Exit->phis())
      ExitValues[&P] = P.getIncomingValueForBlock(Latch);

    const SCEV *BTC = SE->getBackedgeTakenCount(L);

    InsertTop = SplitEdge(Latch, Exit, &DT, LI);
    InsertBot = SplitBlock(InsertTop, InsertTop->getTerminator(), &DT, LI);

    InsertTop->setName(Exit->getName() + ".peel.begin");
    InsertBot->setName(Exit->getName() + ".peel.next");
    NewPreHeader = nullptr;

    // If the original loop may only execute a single iteration we need to
    // insert a trip count check and skip the original loop with the last
    // iteration peeled off if necessary. Either way, we must update branch
    // weights to maintain the loop body frequency.
    if (SE->isKnownNonZero(BTC)) {
      // We have just proven that, when reached, the original loop always
      // executes at least two iterations. Thus, we unconditionally execute
      // both the remaining loop's initial iteration and the peeled iteration.
      // But that increases the latter's frequency above its frequency in the
      // original loop. To maintain the total frequency, we compensate by
      // decreasing the remaining loop body's frequency to indicate one less
      // iteration.
      //
      // We use this formula to convert probability to/from frequency:
      //   Sum(i=0..inf)(P^i) = 1/(1-P) = Freq.
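      // For example, a backedge probability P = 3/4 gives
      // Freq = 1 / (1 - 3/4) = 4 expected iterations; peeling one off leaves
      // NewFreq = 3, i.e. a new backedge probability of 1 - 1/3 = 2/3.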
      if (BranchProbability P = getLoopProbability(L); !P.isUnknown()) {
        // Trying to subtract one from an infinite loop is pointless, and our
        // formulas then produce division by zero, so skip that case.
        if (BranchProbability ExitP = P.getCompl(); !ExitP.isZero()) {
          double Freq = 1 / ExitP.toDouble();
          // No branch weights can produce a frequency of less than one given
          // the initial iteration, and our formulas produce a negative
          // probability if we try.
          assert(Freq >= 1.0 && "expected freq >= 1 due to initial iteration");
          double NewFreq = std::max(Freq - 1, 1.0);
          setLoopProbability(
              L, BranchProbability::getBranchProbability(1 - 1 / NewFreq));
        }
      }
    } else {
      NewPreHeader = SplitEdge(PreHeader, Header, &DT, LI);
      SCEVExpander Expander(*SE, "loop-peel");

      BranchInst *PreHeaderBR = cast<BranchInst>(PreHeader->getTerminator());
      Value *BTCValue =
          Expander.expandCodeFor(BTC, BTC->getType(), PreHeaderBR);
      IRBuilder<> B(PreHeaderBR);
      Value *Cond =
          B.CreateICmpNE(BTCValue, ConstantInt::get(BTCValue->getType(), 0));
      auto *BI = B.CreateCondBr(Cond, NewPreHeader, InsertTop);
      SmallVector<uint32_t> Weights;
      auto *OrigLatchBr = Latch->getTerminator();
      auto HasBranchWeights = !ProfcheckDisableMetadataFixes &&
                              extractBranchWeights(*OrigLatchBr, Weights);
      if (HasBranchWeights) {
        // The probability that the new guard skips the loop to execute just
        // one iteration is the original loop's probability of exiting at the
        // latch after any iteration. That should maintain the original loop
        // body frequency. Upon arriving at the loop, due to the guard, the
        // probability of reaching iteration i of the new loop is the
        // probability of reaching iteration i+1 of the original loop. The
        // probability of reaching the peeled iteration is 1, which is the
        // probability of reaching iteration 0 of the original loop.
        if (L->getExitBlock() == OrigLatchBr->getSuccessor(0))
          std::swap(Weights[0], Weights[1]);
        setBranchWeights(*BI, Weights, /*IsExpected=*/false);
      }
      PreHeaderBR->eraseFromParent();

      // PreHeader now dominates InsertTop.
      DT.changeImmediateDominator(InsertTop, PreHeader);
    }
  } else {
    // It is convenient to split the preheader into 3 parts - two blocks to
    // anchor the peeled copy of the loop body, and a new preheader for the
    // "real" loop.

    // Peeling the first iteration transforms
    //
    // PreHeader:
    // ...
    // Header:
    //   LoopBody
    //   If (cond) goto Header
    // Exit:
    //
    // into
    //
    // InsertTop:
    //   LoopBody
    //   If (!cond) goto Exit
    // InsertBot:
    // NewPreHeader:
    // ...
    // Header:
    //   LoopBody
    //   If (cond) goto Header
    // Exit:
    //
    // Each following iteration will split the current bottom anchor in two,
    // and put the new copy of the loop body between these two blocks. That
    // is, after peeling another iteration from the example above, we'll
    // split InsertBot, and get:
    //
    // InsertTop:
    //   LoopBody
    //   If (!cond) goto Exit
    // InsertBot:
    //   LoopBody
    //   If (!cond) goto Exit
    // InsertBot.next:
    // NewPreHeader:
    // ...
    // Header:
    //   LoopBody
    //   If (cond) goto Header
    // Exit:
    InsertTop = SplitEdge(PreHeader, Header, &DT, LI);
    InsertBot = SplitBlock(InsertTop, InsertTop->getTerminator(), &DT, LI);
    NewPreHeader = SplitBlock(InsertBot, InsertBot->getTerminator(), &DT, LI);

    InsertTop->setName(Header->getName() + ".peel.begin");
    InsertBot->setName(Header->getName() + ".peel.next");
    NewPreHeader->setName(PreHeader->getName() + ".peel.newph");
  }

  Instruction *LatchTerm = Latch->getTerminator();

  // Identify what noalias metadata is inside the loop: if it is inside the
  // loop, the associated metadata must be cloned for each iteration.
  SmallVector<MDNode *, 6> LoopLocalNoAliasDeclScopes;
  identifyNoAliasScopesToClone(L->getBlocks(), LoopLocalNoAliasDeclScopes);

  // For each peeled-off iteration, make a copy of the loop.
  ValueToValueMapTy VMap;
  for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
    SmallVector<BasicBlock *, 8> NewBlocks;

    cloneLoopBlocks(L, Iter, PeelLast, InsertTop, InsertBot,
                    NewPreHeader ? PreHeader : nullptr, ExitEdges, NewBlocks,
                    LoopBlocks, VMap, LVMap, &DT, LI,
                    LoopLocalNoAliasDeclScopes, *SE);

    // Remap to use values from the current iteration instead of the
    // previous one.
    remapInstructionsInBlocks(NewBlocks, VMap);

    if (Iter == 0) {
      if (PeelLast) {
        // Adjust the exit condition so the loop exits one iteration early.
        // For now we simply subtract one from the second operand of the
        // exit condition. This relies on the peel count computation to
        // check that this is actually legal. In particular, it ensures that
        // the first operand of the compare is an AddRec with step 1 and we
        // execute more than one iteration.
        auto *Cmp =
            cast<ICmpInst>(L->getLoopLatch()->getTerminator()->getOperand(0));
        IRBuilder B(Cmp);
        Cmp->setOperand(
            1, B.CreateSub(Cmp->getOperand(1),
                           ConstantInt::get(Cmp->getOperand(1)->getType(), 1)));
      } else {
        // Update the IDoms of the blocks reachable through exits.
        for (auto BBIDom : NonLoopBlocksIDom)
          DT.changeImmediateDominator(BBIDom.first,
                                      cast<BasicBlock>(LVMap[BBIDom.second]));
      }
    }

#ifdef EXPENSIVE_CHECKS
    assert(DT.verify(DominatorTree::VerificationLevel::Fast));
#endif

    // Remove Loop metadata from the latch branch instruction
    // because it is not the Loop's latch branch anymore.
    auto *LatchTermCopy = cast<Instruction>(VMap[LatchTerm]);
    LatchTermCopy->setMetadata(LLVMContext::MD_loop, nullptr);

    InsertTop = InsertBot;
    InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), &DT, LI);
    InsertBot->setName(Header->getName() + ".peel.next");

    F->splice(InsertTop->getIterator(), F, NewBlocks[0]->getIterator(),
              F->end());
  }

  if (PeelLast) {
    // Now adjust the users of the original exit values by replacing them with
    // the exit value from the peeled iteration, and remove them.
    for (const auto &[P, E] : ExitValues) {
      Instruction *ExitInst = dyn_cast<Instruction>(E);
      if (ExitInst && L->contains(ExitInst))
        P->replaceAllUsesWith(&*VMap[ExitInst]);
      else
        P->replaceAllUsesWith(E);
      P->eraseFromParent();
    }
    formLCSSA(*L, DT, LI, SE);
  } else {
    // Now adjust the phi nodes in the loop header to get their initial values
    // from the last peeled-off iteration instead of the preheader.
    for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
      PHINode *PHI = cast<PHINode>(I);
      Value *NewVal = PHI->getIncomingValueForBlock(Latch);
      Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
      if (LatchInst && L->contains(LatchInst))
        NewVal = LVMap[LatchInst];

      PHI->setIncomingValueForBlock(NewPreHeader, NewVal);
    }
  }

  // Update metadata for the count of peeled-off iterations.
  unsigned AlreadyPeeled = 0;
  if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
    AlreadyPeeled = *Peeled;
  unsigned TotalPeeled = AlreadyPeeled + PeelCount;
  addStringMetadataToLoop(L, PeeledCountMetaData, TotalPeeled);

  // Update metadata for the estimated trip count. The original branch weight
  // metadata is already correct for both the remaining loop and the peeled
  // loop iterations, so do not adjust it.
  //
  // For example, consider what happens when peeling 2 iterations from a loop
  // with an estimated trip count of 10 and inserting them before the remaining
  // loop. Each of the peeled iterations and each iteration in the remaining
  // loop still has the same probability of exiting the *entire original* loop
  // as it did when in the original loop, and thus it should still have the
  // same branch weights. The peeled iterations' non-zero probabilities of
  // exiting already appropriately reduce the probability of reaching the
  // remaining iterations just as they did in the original loop. Trying to
  // also adjust the remaining loop's branch weights to reflect its new trip
  // count of 8 would erroneously further reduce its block frequencies.
  // However, in case an analysis later needs to determine the trip count of
  // the remaining loop while examining it in isolation without considering
  // the probability of actually reaching it, we store the new trip count as
  // separate metadata.
  if (auto EstimatedTripCount = getLoopEstimatedTripCount(L)) {
    unsigned EstimatedTripCountNew = *EstimatedTripCount;
    if (EstimatedTripCountNew < TotalPeeled)
      EstimatedTripCountNew = 0;
    else
      EstimatedTripCountNew -= TotalPeeled;
    setLoopEstimatedTripCount(L, EstimatedTripCountNew);
  }

  if (Loop *ParentLoop = L->getParentLoop())
    L = ParentLoop;

  // We modified the loop; update SE.
  SE->forgetTopmostLoop(L);
  SE->forgetBlockAndLoopDispositions();

#ifdef EXPENSIVE_CHECKS
  // Finally, the DomTree must be correct.
  assert(DT.verify(DominatorTree::VerificationLevel::Fast));
#endif

  // FIXME: Incrementally update loop-simplify.
  simplifyLoop(L, &DT, LI, SE, AC, nullptr, PreserveLCSSA);

  NumPeeled++;
  NumPeeledEnd += PeelLast;
}