//===----------------- LoopRotationUtils.cpp -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopRotationUtils.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

STATISTIC(NumNotRotatedDueToHeaderSize,
          "Number of loops not rotated due to the header size");
STATISTIC(NumInstrsHoisted,
          "Number of instructions hoisted into loop preheader");
STATISTIC(NumInstrsDuplicated,
          "Number of instructions cloned into loop preheader");

// Probability that a rotated loop has zero trip count / is never entered.
static constexpr uint32_t ZeroTripCountWeights[] = {1, 127};
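// With weights {1, 127}, the guard of a rotated loop is assumed to skip the
// loop body roughly once in 128 executions (about 0.8%).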

namespace {
/// A simple loop rotation transformation.
class LoopRotate {
  const unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
  DominatorTree *DT;
  ScalarEvolution *SE;
  MemorySSAUpdater *MSSAU;
  const SimplifyQuery &SQ;
  bool RotationOnly;
  bool IsUtilMode;
  bool PrepareForLTO;
  bool CheckExitCount;

public:
  LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
             const TargetTransformInfo *TTI, AssumptionCache *AC,
             DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
             const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode,
             bool PrepareForLTO, bool CheckExitCount)
      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
        MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
        IsUtilMode(IsUtilMode), PrepareForLTO(PrepareForLTO),
        CheckExitCount(CheckExitCount) {}
  bool processLoop(Loop *L);

private:
  bool rotateLoop(Loop *L, bool SimplifiedLatch);
  bool simplifyLoopLatch(Loop *L);
};
} // end anonymous namespace

/// Insert (K, V) pair into the ValueToValueMap, and verify the key did not
/// previously exist in the map, and the value was inserted.
static void InsertNewValueIntoMap(ValueToValueMapTy &VM, Value *K, Value *V) {
  bool Inserted = VM.insert({K, V}).second;
  assert(Inserted);
  (void)Inserted;
}

/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instructions that were outside of the loop, we have to insert PHI
/// nodes to merge the two values. Do this now.
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
                                            BasicBlock *OrigPreheader,
                                            ValueToValueMapTy &ValueMap,
                                            ScalarEvolution *SE,
                                            SmallVectorImpl<PHINode *> *InsertedPHIs) {
  // Remove PHI node entries that are no longer live.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(OrigPreheader);

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
  SSAUpdater SSA(InsertedPHIs);
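  // Any PHI nodes SSAUpdater creates are collected in InsertedPHIs so the
  // caller can attach debug values to them once the rewriting is done.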
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = &*I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    // Force re-computation of OrigHeaderVal, as some users now need to use the
    // new PHI node.
    if (SE)
      SE->forgetValue(OrigHeaderVal);
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Use &U : llvm::make_early_inc_range(OrigHeaderVal->uses())) {
      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }

    // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
    // intrinsics.
    SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
    llvm::findDbgValues(OrigHeaderVal, DbgVariableRecords);

    for (DbgVariableRecord *DVR : DbgVariableRecords) {
      // The original users in the OrigHeader are already using the original
      // definitions.
      BasicBlock *UserBB = DVR->getMarker()->getParent();
      if (UserBB == OrigHeader)
        continue;

      // Users in the OrigPreHeader need to use the value to which the
      // original definitions are mapped and anything else can be handled by
      // the SSAUpdater. To avoid adding PHINodes, check if the value is
      // available in UserBB, if not substitute poison.
      Value *NewVal;
      if (UserBB == OrigPreheader)
        NewVal = OrigPreHeaderVal;
      else if (SSA.HasValueForBlock(UserBB))
        NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
      else
        NewVal = PoisonValue::get(OrigHeaderVal->getType());
      DVR->replaceVariableLocationOp(OrigHeaderVal, NewVal);
    }
  }
}

// Assuming both header and latch are exiting, check if rotating is profitable:
// either a header phi becomes dead, or rotating makes the latch exit count
// computable (enabling downstream optimizations like unrolling/vectorization).
static bool profitableToRotateLoopExitingLatch(Loop *L, ScalarEvolution *SE) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  CondBrInst *BI = dyn_cast<CondBrInst>(Header->getTerminator());
  assert(BI && "Rotation candidate's header must end in a conditional branch");
  BasicBlock *HeaderExit = BI->getSuccessor(0);
  if (L->contains(HeaderExit))
    HeaderExit = BI->getSuccessor(1);

  for (auto &Phi : Header->phis()) {
    // Look for uses of this phi in the loop/via exits other than the header
    // exit. If every use is in the header exit block, the phi becomes dead in
    // the loop once it is rotated.
    if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
          return cast<Instruction>(U)->getParent() != HeaderExit;
        }))
      continue;
    return true;
  }

  // Check if rotating would make the latch exit count computable, enabling
  // optimizations like runtime unrolling and vectorization.
  if (SE && isa<SCEVCouldNotCompute>(SE->getExitCount(L, Latch)) &&
      !isa<SCEVCouldNotCompute>(SE->getExitCount(L, Header)))
    return true;

  return false;
}

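/// Split the branch weights of the original loop-header branch between the
/// guard branch left in the (possibly still conditional) preheader and the
/// exiting branch inside the rotated loop, keeping the implied loop-exit
/// count unchanged.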
static void updateBranchWeights(CondBrInst &PreHeaderBI, CondBrInst &LoopBI,
                                bool HasConditionalPreHeader,
                                bool SuccsSwapped) {
  MDNode *WeightMD = getBranchWeightMDNode(PreHeaderBI);
  if (WeightMD == nullptr)
    return;

  // LoopBI should currently be a clone of PreHeaderBI with the same
  // metadata. But we double check to make sure we don't have a degenerate case
  // where instsimplify changed the instructions.
  if (WeightMD != getBranchWeightMDNode(LoopBI))
    return;

  SmallVector<uint32_t, 2> Weights;
  extractFromBranchWeightMD32(WeightMD, Weights);
  if (Weights.size() != 2)
    return;
  uint32_t OrigLoopExitWeight = Weights[0];
  uint32_t OrigLoopBackedgeWeight = Weights[1];

  if (SuccsSwapped)
    std::swap(OrigLoopExitWeight, OrigLoopBackedgeWeight);

  // Update branch weights. Consider the following edge-counts:
  //
  //    |  |--------             |
  //    V  V                     |              V
  //   Br i1 ...                 |           Br i1 ...
  //   |       |                 |           |       |
  //  x|      y|                 | becomes:  |     y0|   |-----
  //   V       V                 |           |       V   V    |
  // Exit    Loop                |           |      Loop      |
  //           |                 |           |       |        |
  //           -----             |           |    Br i1 ...   |
  //                             |           |    |      |    |
  //                             |         x0|  x1|    y1|    |
  //                             |           V    V      -----
  //                             |          Exit
  //
  // The following must hold:
  //  - x == x0 + x1        # counts to "exit" must stay the same.
  //  - y0 == x - x0 == x1  # how often loop was entered at all.
  //  - y1 == y - y0        # How often loop was repeated (after first iter.).
  //
  // We cannot generally deduce how often we had a zero-trip count loop so we
  // have to make a guess for how to distribute x among the new x0 and x1.

  uint32_t ExitWeight0;    // aka x0
  uint32_t ExitWeight1;    // aka x1
  uint32_t EnterWeight;    // aka y0
  uint32_t LoopBackWeight; // aka y1
  if (OrigLoopExitWeight > 0 && OrigLoopBackedgeWeight > 0) {
    ExitWeight0 = 0;
    if (HasConditionalPreHeader) {
      // Here we cannot know how many 0-trip count loops we have, so we guess:
      if (OrigLoopBackedgeWeight >= OrigLoopExitWeight) {
        // If the loop count is bigger than the exit count then we set
        // probabilities as if 0-trip count nearly never happens.
        ExitWeight0 = ZeroTripCountWeights[0];
        // Scale up counts if necessary so we can match `ZeroTripCountWeights`
        // for the `ExitWeight0`:`ExitWeight1` (aka `x0`:`x1`) ratio.
        while (OrigLoopExitWeight < ZeroTripCountWeights[1] + ExitWeight0) {
          // ... but don't overflow.
          uint32_t const HighBit = uint32_t{1} << (sizeof(uint32_t) * 8 - 1);
          if ((OrigLoopBackedgeWeight & HighBit) != 0 ||
              (OrigLoopExitWeight & HighBit) != 0)
            break;
          OrigLoopBackedgeWeight <<= 1;
          OrigLoopExitWeight <<= 1;
        }
      } else {
        // If there's a higher exit-count than backedge-count then we set
        // probabilities as if there are only 0-trip and 1-trip cases.
        ExitWeight0 = OrigLoopExitWeight - OrigLoopBackedgeWeight;
      }
    } else {
      // In theory, if the loop body must execute at least once, the backedge
      // count cannot be less than the exit count. However, branch weights
      // collected by sampling-based PGO may be inaccurate due to sampling, so
      // clamp the backedge weight to the exit weight here to avoid an unsigned
      // underflow in the weight update below.
      if (OrigLoopExitWeight > OrigLoopBackedgeWeight)
        OrigLoopBackedgeWeight = OrigLoopExitWeight;
    }
    assert(OrigLoopExitWeight >= ExitWeight0 && "Bad branch weight");
    ExitWeight1 = OrigLoopExitWeight - ExitWeight0;
    EnterWeight = ExitWeight1;
    assert(OrigLoopBackedgeWeight >= EnterWeight && "Bad branch weight");
    LoopBackWeight = OrigLoopBackedgeWeight - EnterWeight;
  } else if (OrigLoopExitWeight == 0) {
    if (OrigLoopBackedgeWeight == 0) {
      // degenerate case... keep everything zero...
      ExitWeight0 = 0;
      ExitWeight1 = 0;
      EnterWeight = 0;
      LoopBackWeight = 0;
    } else {
      // Special-case "LoopExitWeight == 0", which behaves like an endless loop
      // where we don't want loop-entry (y0) to be the same as loop-exit (x1).
      ExitWeight0 = 0;
      ExitWeight1 = 0;
      EnterWeight = 1;
      LoopBackWeight = OrigLoopBackedgeWeight;
    }
  } else {
    // loop is never entered.
    assert(OrigLoopBackedgeWeight == 0 && "remaining case is backedge zero");
    ExitWeight0 = 1;
    ExitWeight1 = 1;
    EnterWeight = 0;
    LoopBackWeight = 0;
  }

  const uint32_t LoopBIWeights[] = {
      SuccsSwapped ? LoopBackWeight : ExitWeight1,
      SuccsSwapped ? ExitWeight1 : LoopBackWeight,
  };
  setBranchWeights(LoopBI, LoopBIWeights, /*IsExpected=*/false);
  if (HasConditionalPreHeader) {
    const uint32_t PreHeaderBIWeights[] = {
        SuccsSwapped ? EnterWeight : ExitWeight0,
        SuccsSwapped ? ExitWeight0 : EnterWeight,
    };
    setBranchWeights(PreHeaderBI, PreHeaderBIWeights, /*IsExpected=*/false);
  }
}

/// Rotate loop LP. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can
/// only happen once across multiple invocations of the LoopRotate pass.
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  bool Rotated = false;
  BasicBlock *OrigHeader = L->getHeader();
  BasicBlock *OrigLatch = L->getLoopLatch();

  CondBrInst *BI = dyn_cast<CondBrInst>(OrigHeader->getTerminator());
  if (!BI)
    return Rotated;

  // If the loop header is not one of the loop exiting blocks then
  // either this loop is already rotated or it is not
  // suitable for loop rotation transformations.
  if (!L->isLoopExiting(OrigHeader))
    return Rotated;

  // If the loop latch already contains a branch that leaves the loop then the
  // loop is already rotated.
  if (!OrigLatch)
    return Rotated;

  // If the latch already exits the loop, only rotate when the latch was just
  // simplified, when we are in util mode, or when rotation looks profitable
  // (e.g. it makes the latch exit count computable).
  if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && !IsUtilMode &&
      !profitableToRotateLoopExitingLatch(L, CheckExitCount ? SE : nullptr))
    return Rotated;

  // Check size of original header and reject loop if it is very big or we
  // can't duplicate blocks inside it.
  {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, AC, EphValues);

    CodeMetrics Metrics;
    Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues, PrepareForLTO);
    if (Metrics.notDuplicatable) {
      LLVM_DEBUG(
          dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                 << " instructions: ";
          L->dump());
      return Rotated;
    }
    if (Metrics.Convergence != ConvergenceKind::None) {
      LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
                           "instructions: ";
                 L->dump());
      return Rotated;
    }
    if (!Metrics.NumInsts.isValid()) {
      LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains instructions"
                           " with invalid cost: ";
                 L->dump());
      return Rotated;
    }
    if (Metrics.NumInsts > MaxHeaderSize) {
      LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains "
                        << Metrics.NumInsts
                        << " instructions, which is more than the threshold ("
                        << MaxHeaderSize << " instructions): ";
                 L->dump());
      ++NumNotRotatedDueToHeaderSize;
      return Rotated;
    }

    // When preparing for LTO, avoid rotating loops with calls that could be
    // inlined during the LTO stage.
    if (PrepareForLTO && Metrics.NumInlineCandidates > 0)
      return Rotated;
  }

  // Now, this loop is suitable for rotation.
  BasicBlock *OrigPreheader = L->getLoopPreheader();

  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!OrigPreheader || !L->hasDedicatedExits())
    return Rotated;

  // Anything ScalarEvolution may know about this loop or the PHI nodes
  // in its header will soon be invalidated. We should also invalidate
  // all outer loops because insertion and deletion of blocks that happens
  // during the rotation may violate invariants related to their
  // backedge-taken counts.
  if (SE) {
    SE->forgetTopmostLoop(L);
    // We may hoist some instructions out of the loop. If they were cached as
    // "loop variant" or "loop computable", these cached results must be
    // dropped. We also may fold basic blocks, so cached block dispositions
    // need to be dropped as well.
    SE->forgetBlockAndLoopDispositions();
  }

  LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Find the new loop header. NewHeader is the header's only successor that is
  // inside the loop; the header's other successor is outside the loop.
  // Otherwise the loop is not suitable for rotation.
  BasicBlock *Exit = BI->getSuccessor(0);
  BasicBlock *NewHeader = BI->getSuccessor(1);
  bool BISuccsSwapped = L->contains(Exit);
  if (BISuccsSwapped)
    std::swap(Exit, NewHeader);
  assert(NewHeader && "Unable to determine new loop header");
  assert(L->contains(NewHeader) && !L->contains(Exit) &&
         "Unable to determine loop header and exit blocks");

  // This code assumes that the new header has exactly one predecessor.
  // Remove any single-entry PHI nodes in it.
  assert(NewHeader->getSinglePredecessor() &&
         "New header doesn't have one pred!");
  FoldSingleEntryPHINodes(NewHeader);

  // Begin by walking OrigHeader and populating ValueMap with an entry for
  // each Instruction.
  BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
  ValueToValueMapTy ValueMap, ValueMapMSSA;
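  // ValueMap records what each original header instruction becomes in the
  // preheader (a hoisted instruction, a clone, or a simplified value), while
  // ValueMapMSSA only records clones that were actually inserted, which is
  // what the MemorySSA update below needs.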

  // For PHI nodes, the value available in OldPreHeader is just the
  // incoming value from OldPreHeader.
  for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
    InsertNewValueIntoMap(ValueMap, PN,
                          PN->getIncomingValueForBlock(OrigPreheader));

  // For the rest of the instructions, either hoist to the OrigPreheader if
  // possible or create a clone in the OldPreHeader if not.
  Instruction *LoopEntryBranch = OrigPreheader->getTerminator();

  // Record all debug records preceding LoopEntryBranch to avoid
  // duplication.
  using DbgHash =
      std::pair<std::pair<hash_code, DILocalVariable *>, DIExpression *>;
  auto makeHash = [](const DbgVariableRecord *D) -> DbgHash {
    auto VarLocOps = D->location_ops();
    return {{hash_combine_range(VarLocOps), D->getVariable()},
            D->getExpression()};
  };

  SmallDenseSet<DbgHash, 8> DbgRecords;
  // Build DbgVariableRecord hashes for DbgVariableRecords attached to the
  // terminator.
  for (const DbgVariableRecord &DVR :
       filterDbgVars(OrigPreheader->getTerminator()->getDbgRecordRange()))
    DbgRecords.insert(makeHash(&DVR));

  // Remember the local noalias scope declarations in the header. After the
  // rotation, they must be duplicated and the scope must be cloned. This
  // avoids unwanted interaction across iterations.
  SmallVector<NoAliasScopeDeclInst *, 6> NoAliasDeclInstructions;
  for (Instruction &I : *OrigHeader)
    if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
      NoAliasDeclInstructions.push_back(Decl);

  Module *M = OrigHeader->getModule();

  // Track the next DbgRecord to clone. If we have a sequence where an
  // instruction is hoisted instead of being cloned:
  //    DbgRecord blah
  //    %foo = add i32 0, 0
  //    DbgRecord xyzzy
  //    %bar = call i32 @foobar()
  // where %foo is hoisted, then the DbgRecord "blah" will be seen twice, once
  // attached to %foo, then when %foo is hoisted it will "fall down" onto the
  // function call:
  //    DbgRecord blah
  //    DbgRecord xyzzy
  //    %bar = call i32 @foobar()
  // causing it to appear attached to the call too.
  //
  // To avoid this, cloneDebugInfoFrom takes an optional "start cloning from
  // here" position to account for this behaviour. We point it at any
  // DbgRecords on the next instruction, here labelled xyzzy, before we hoist
  // %foo. Later, we only clone DbgRecords from that position (xyzzy) onwards,
  // which avoids cloning DbgRecord "blah" multiple times. (Stored as a range
  // because it gives us a natural way of testing whether there were
  // DbgRecords on the next instruction before we hoisted things.)
  iterator_range<DbgRecord::self_iterator> NextDbgInsts =
      (I != E) ? I->getDbgRecordRange() : DbgMarker::getEmptyDbgRecordRange();

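  // Walk the remaining (non-PHI) instructions of the original header, hoisting
  // each one into the preheader when that is safe and cloning it there
  // otherwise.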
  while (I != E) {
    Instruction *Inst = &*I++;

    // If the instruction's operands are invariant and it doesn't read or write
    // memory, then it is safe to hoist. Doing this doesn't change the order of
    // execution in the preheader, but does prevent the instruction from
    // executing in each iteration of the loop. This means it is safe to hoist
    // something that might trap, but isn't safe to hoist something that reads
    // memory (without proving that the loop doesn't write).
    if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
        !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
        !isa<AllocaInst>(Inst) &&
        // It is not safe to hoist the value of these instructions in
        // coroutines, as the addresses of otherwise eligible variables (e.g.
        // thread-local variables and errno) may change if the coroutine is
        // resumed in a different thread. Therefore, we disable this
        // optimization for correctness. However, this may block other correct
        // optimizations.
        // FIXME: This should be reverted once we have a better model for
        // memory access in coroutines.
        !Inst->getFunction()->isPresplitCoroutine()) {

      if (!NextDbgInsts.empty()) {
        auto DbgValueRange =
            LoopEntryBranch->cloneDebugInfoFrom(Inst, NextDbgInsts.begin());
        RemapDbgRecordRange(M, DbgValueRange, ValueMap,
                            RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
        // Erase anything we've seen before.
        for (DbgVariableRecord &DVR :
             make_early_inc_range(filterDbgVars(DbgValueRange)))
          if (DbgRecords.count(makeHash(&DVR)))
            DVR.eraseFromParent();
      }

      NextDbgInsts = I->getDbgRecordRange();

      Inst->moveBefore(LoopEntryBranch->getIterator());

      ++NumInstrsHoisted;
      continue;
    }

    // Otherwise, create a duplicate of the instruction.
    Instruction *C = Inst->clone();
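    // mapAtomInstance gives the cloned instruction's debug location its own
    // source-atom instance (Key Instructions debug info), so the clone is not
    // conflated with the original when atoms are remapped.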
    if (const DebugLoc &DL = C->getDebugLoc())
      mapAtomInstance(DL, ValueMap);

    C->insertBefore(LoopEntryBranch->getIterator());

    ++NumInstrsDuplicated;

    if (!NextDbgInsts.empty()) {
      auto Range = C->cloneDebugInfoFrom(Inst, NextDbgInsts.begin());
      RemapDbgRecordRange(M, Range, ValueMap,
                          RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
      NextDbgInsts = DbgMarker::getEmptyDbgRecordRange();
      // Erase anything we've seen before.
      for (DbgVariableRecord &DVR : make_early_inc_range(filterDbgVars(Range)))
        if (DbgRecords.count(makeHash(&DVR)))
          DVR.eraseFromParent();
    }

    // Eagerly remap the operands of the instruction.
    RemapInstruction(C, ValueMap,
                     RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // With the operands remapped, see if the instruction constant folds or is
    // otherwise simplifiable. This commonly occurs because the entry from PHI
    // nodes allows icmps and other instructions to fold.
    Value *V = simplifyInstruction(C, SQ);
    if (V && LI->replacementPreservesLCSSAForm(C, V)) {
      // If so, then delete the temporary instruction and stick the folded
      // value in the map.
      InsertNewValueIntoMap(ValueMap, Inst, V);
      if (!C->mayHaveSideEffects()) {
        C->eraseFromParent();
        C = nullptr;
      }
    } else {
      InsertNewValueIntoMap(ValueMap, Inst, C);
    }
    if (C) {
      // Otherwise, stick the new instruction into the new block!
      C->setName(Inst->getName());

      if (auto *II = dyn_cast<AssumeInst>(C))
        AC->registerAssumption(II);
      // MemorySSA cares whether the cloned instruction was inserted or not,
      // and not whether it can be remapped to a simplified value.
      if (MSSAU)
        InsertNewValueIntoMap(ValueMapMSSA, Inst, C);
    }
  }

  if (!NoAliasDeclInstructions.empty()) {
    // There are noalias scope declarations:
    // (general):
    // Original:    OrigPre              { OrigHeader NewHeader ... Latch }
    // after:      (OrigPre+OrigHeader') { NewHeader ... Latch OrigHeader }
    //
    // with D: llvm.experimental.noalias.scope.decl,
    //      U: !noalias or !alias.scope depending on D
    //  ... { D U1 U2 }   can transform into:
    // (0) : ... { D U1 U2 }        // no relevant rotation for this part
    // (1) : ... D' { U1 U2 D }     // D is part of OrigHeader
    // (2) : ... D' U1' { U2 D U1 } // D, U1 are part of OrigHeader
    //
    // We now want to transform:
    // (1) -> : ... D' { D U1 U2 D'' }
    // (2) -> : ... D' U1' { D U2 D'' U1'' }
    // D: original llvm.experimental.noalias.scope.decl
    // D', U1': duplicate with replaced scopes
    // D'', U1'': different duplicate with replaced scopes
    // This ensures a safe fallback to 'may_alias' introduced by the rotate, as
    // U1'' and U1' scopes will not be compatible w.r.t. the local restrict.

    // Clone the llvm.experimental.noalias.scope.decl again for the NewHeader.
    BasicBlock::iterator NewHeaderInsertionPoint =
        NewHeader->getFirstNonPHIIt();
    for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions) {
      LLVM_DEBUG(dbgs() << "  Cloning llvm.experimental.noalias.scope.decl:"
                        << *NAD << "\n");
      Instruction *NewNAD = NAD->clone();
      NewNAD->insertBefore(*NewHeader, NewHeaderInsertionPoint);
    }

    // Scopes must now be duplicated, once for OrigHeader and once for
    // OrigPreHeader'.
    {
      auto &Context = NewHeader->getContext();

      SmallVector<MDNode *, 8> NoAliasDeclScopes;
      for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions)
        NoAliasDeclScopes.push_back(NAD->getScopeList());

      LLVM_DEBUG(dbgs() << "  Updating OrigHeader scopes\n");
      cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, {OrigHeader}, Context,
                                 "h.rot");
      LLVM_DEBUG(OrigHeader->dump());

      // Keep the compile time impact low by only adapting the inserted block
      // of instructions in the OrigPreHeader. This might result in slightly
      // more aliasing between these instructions and those that were already
      // present, but it will be much faster when the original PreHeader is
      // large.
      LLVM_DEBUG(dbgs() << "  Updating part of OrigPreheader scopes\n");
      auto *FirstDecl =
          cast<Instruction>(ValueMap[*NoAliasDeclInstructions.begin()]);
      auto *LastInst = &OrigPreheader->back();
      cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, FirstDecl, LastInst,
                                 Context, "pre.rot");
      LLVM_DEBUG(OrigPreheader->dump());

      LLVM_DEBUG(dbgs() << "  Updated NewHeader:\n");
      LLVM_DEBUG(NewHeader->dump());
    }
  }

  // Along with all the other instructions, we just cloned OrigHeader's
  // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
  // successors by duplicating their incoming values for OrigHeader.
  for (BasicBlock *SuccBB : successors(OrigHeader))
    for (BasicBlock::iterator BI = SuccBB->begin();
         PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
      PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

  // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
  // OrigPreHeader's old terminator (the original branch into the loop), and
  // remove the corresponding incoming values from the PHI nodes in OrigHeader.
  LoopEntryBranch->eraseFromParent();
  OrigPreheader->flushTerminatorDbgRecords();

  // Update MemorySSA before the rewrite call below changes the 1:1
  // instruction:cloned_instruction_or_value mapping.
  if (MSSAU) {
    InsertNewValueIntoMap(ValueMapMSSA, OrigHeader, OrigPreheader);
    MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader,
                                        ValueMapMSSA);
  }

  SmallVector<PHINode *, 2> InsertedPHIs;
  // If there were any uses of instructions in the duplicated block outside the
  // loop, update them, inserting PHI nodes as required
  RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap, SE,
                                  &InsertedPHIs);

  // Attach debug records to the new phis if that phi uses a value that
  // previously had debug metadata attached. This keeps the debug info
  // up-to-date in the loop body.
  if (!InsertedPHIs.empty())
    insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);

  // NewHeader is now the header of the loop.
  L->moveToHeader(NewHeader);
  assert(L->getHeader() == NewHeader && "Latch block is our new header");

  // Inform DT about changes to the CFG.
  if (DT) {
    // The OrigPreheader now branches to NewHeader and Exit; inform the DT
    // about these new edges and about the deleted edge to OrigHeader.
    SmallVector<DominatorTree::UpdateType, 3> Updates = {
        {DominatorTree::Insert, OrigPreheader, Exit},
        {DominatorTree::Insert, OrigPreheader, NewHeader},
        {DominatorTree::Delete, OrigPreheader, OrigHeader}};

    if (MSSAU) {
      MSSAU->applyUpdates(Updates, *DT, /*UpdateDTFirst=*/true);
      if (VerifyMemorySSA)
        MSSAU->getMemorySSA()->verifyMemorySSA();
    } else {
      DT->applyUpdates(Updates);
    }
  }

  // At this point, we've finished our major CFG changes. As part of cloning
  // the loop into the preheader we've simplified instructions and the
  // duplicated conditional branch may now be branching on a constant. If it is
  // branching on a constant and if that constant means that we enter the loop,
  // then we fold away the cond branch to an uncond branch. This simplifies the
  // loop in cases important for nested loops, and it also means we don't have
  // to split as many edges.
  CondBrInst *PHBI = cast<CondBrInst>(OrigPreheader->getTerminator());
  const Value *Cond = PHBI->getCondition();
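  // The preheader branch only remains conditional if its condition did not
  // fold to a constant, or if the constant selects the exit rather than the
  // new header.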
  const bool HasConditionalPreHeader =
      !isa<ConstantInt>(Cond) ||
      PHBI->getSuccessor(cast<ConstantInt>(Cond)->isZero()) != NewHeader;

  updateBranchWeights(*PHBI, *BI, HasConditionalPreHeader, BISuccsSwapped);

  if (HasConditionalPreHeader) {
    // The conditional branch can't be folded, handle the general case.
    // Split edges as necessary to preserve LoopSimplify form.

    // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
    // thus is not a preheader anymore.
    // Split the edge to form a real preheader.
    BasicBlock *NewPH = SplitCriticalEdge(
        OrigPreheader, NewHeader,
        CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
    NewPH->setName(NewHeader->getName() + ".lr.ph");

    // Preserve canonical loop form, which means that 'Exit' should have only
    // one predecessor. Note that Exit could be an exit block for multiple
    // nested loops, causing both of the edges to now be critical and need to
    // be split.
    SmallVector<BasicBlock *, 4> ExitPreds(predecessors(Exit));
    bool SplitLatchEdge = false;
    for (BasicBlock *ExitPred : ExitPreds) {
      // We only need to split loop exit edges.
      Loop *PredLoop = LI->getLoopFor(ExitPred);
      if (!PredLoop || PredLoop->contains(Exit) ||
          isa<IndirectBrInst>(ExitPred->getTerminator()))
        continue;
      SplitLatchEdge |= L->getLoopLatch() == ExitPred;
      BasicBlock *ExitSplit = SplitCriticalEdge(
          ExitPred, Exit,
          CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
      ExitSplit->moveBefore(Exit);
    }
    assert(SplitLatchEdge &&
           "Despite splitting all preds, failed to split latch exit?");
    (void)SplitLatchEdge;
  } else {
    // We can fold the conditional branch in the preheader; this makes things
    // simpler. The first step is to remove the extra edge to the Exit block.
    Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
    UncondBrInst *NewBI = UncondBrInst::Create(NewHeader, PHBI->getIterator());
    NewBI->setDebugLoc(PHBI->getDebugLoc());
    PHBI->eraseFromParent();

    // With our CFG finalized, update DomTree if it is available.
    if (DT)
      DT->deleteEdge(OrigPreheader, Exit);

    // Update MSSA too, if available.
    if (MSSAU)
      MSSAU->removeEdge(OrigPreheader, Exit);
  }

  assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
  assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that the CFG and DomTree are in a consistent state again, try to merge
  // the OrigHeader block into OrigLatch. This will succeed if they are
  // connected by an unconditional branch. This is just a cleanup so the
  // emitted code isn't too gross in this common case.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  BasicBlock *PredBB = OrigHeader->getUniquePredecessor();
  bool DidMerge = MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);
  if (DidMerge)
    RemoveRedundantDbgInstrs(PredBB);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());

  return true;
}

/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

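  // Without a unique exiting block the loop has multiple exits; speculated
  // values may then be live outside the loop, which is checked per use below.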
  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(&*I))
      return false;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
      [[fallthrough]];
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd =
          !isa<Constant>(I->getOperand(0))
              ? I->getOperand(0)
              : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
      if (!IVOpnd)
        return false;

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  UncondBrInst *Jmp = dyn_cast<UncondBrInst>(Latch->getTerminator());
  if (!Jmp)
    return false;

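  // The block we fold the latch into must be the latch's unique predecessor
  // and must itself be an exiting block of the loop.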
  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  if (!isa<UncondBrInst, CondBrInst>(LastExit->getTerminator()))
    return false;

  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
    return false;

  LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
                    << LastExit->getName() << "\n");

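  // Fold the latch into LastExit. PredecessorWithTwoSuccessors allows the
  // merge even though LastExit ends in a conditional branch (its other
  // successor is the loop exit).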
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  MergeBlockIntoPredecessor(Latch, &DTU, LI, MSSAU, nullptr,
                            /*PredecessorWithTwoSuccessors=*/true);

  if (SE) {
    // Merging blocks may remove blocks referenced in the block disposition
    // cache. Clear the cache.
    SE->forgetBlockAndLoopDispositions();
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return true;
}

/// Rotate \c L, and return true if any modification was made.
bool LoopRotate::processLoop(Loop *L) {
  // Save the loop metadata.
  MDNode *LoopMD = L->getLoopID();

  bool SimplifiedLatch = false;

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  if (!RotationOnly)
    SimplifiedLatch = simplifyLoopLatch(L);

  bool MadeChange = rotateLoop(L, SimplifiedLatch);
  assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
         "Loop latch should be exiting after loop-rotate.");

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange || SimplifiedLatch;
}

/// The utility to convert a loop into a loop with bottom test.
bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
                        AssumptionCache *AC, DominatorTree *DT,
                        ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
                        const SimplifyQuery &SQ, bool RotationOnly = true,
                        unsigned Threshold = unsigned(-1),
                        bool IsUtilMode = true, bool PrepareForLTO,
                        bool CheckExitCount) {
  LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
                IsUtilMode, PrepareForLTO, CheckExitCount);
  return LR.processLoop(L);
}