1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs loop invariant code motion, attempting to remove as much
10// code from the body of a loop as possible. It does this by either hoisting
11// code into the preheader block, or by sinking code to the exit blocks if it is
12// safe. This pass also promotes must-aliased memory locations in the loop to
13// live in registers, thus hoisting and sinking "invariant" loads and stores.
14//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations besides LICM that
// increase live ranges.
21//
22// This pass uses alias analysis for two purposes:
23//
24// 1. Moving loop invariant loads and calls out of loops. If we can determine
25// that a load or call inside of a loop never aliases anything stored to,
26// we can hoist it or sink it like any other instruction.
27// 2. Scalar Promotion of Memory - If there is a store instruction inside of
28// the loop, we try to move the store to happen AFTER the loop instead of
29// inside of the loop. This can only happen if a few conditions are true:
30// A. The pointer stored through is loop invariant
31// B. There are no stores or loads in the loop which _may_ alias the
32// pointer. There are no calls in the loop which mod/ref the pointer.
// If these conditions are true, we can promote the loads and stores of the
// pointer in the loop to use a temporary alloca'd variable. We then use
// the SSAUpdater to construct the appropriate SSA form for the value.
36//
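//
// As a small illustrative sketch (not taken from any particular test case),
// hoisting turns a loop like
//
//   for (i = 0; i < n; ++i)
//     a[i] = x * y;          // x * y is loop invariant
//
// into
//
//   tmp = x * y;             // computed once in the preheader
//   for (i = 0; i < n; ++i)
//     a[i] = tmp;
//
// and scalar promotion turns repeated loads/stores of a loop-invariant,
// must-aliased location *p into operations on a register whose final value is
// stored back to *p after the loop.
//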
37//===----------------------------------------------------------------------===//
38
39#include "llvm/Transforms/Scalar/LICM.h"
40#include "llvm/ADT/PriorityWorklist.h"
41#include "llvm/ADT/SetOperations.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/Analysis/AliasAnalysis.h"
44#include "llvm/Analysis/AliasSetTracker.h"
45#include "llvm/Analysis/AssumptionCache.h"
46#include "llvm/Analysis/CaptureTracking.h"
47#include "llvm/Analysis/DomTreeUpdater.h"
48#include "llvm/Analysis/GuardUtils.h"
49#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
50#include "llvm/Analysis/Loads.h"
51#include "llvm/Analysis/LoopInfo.h"
52#include "llvm/Analysis/LoopIterator.h"
53#include "llvm/Analysis/LoopNestAnalysis.h"
54#include "llvm/Analysis/LoopPass.h"
55#include "llvm/Analysis/MemorySSA.h"
56#include "llvm/Analysis/MemorySSAUpdater.h"
57#include "llvm/Analysis/MustExecute.h"
58#include "llvm/Analysis/OptimizationRemarkEmitter.h"
59#include "llvm/Analysis/ScalarEvolution.h"
60#include "llvm/Analysis/TargetLibraryInfo.h"
61#include "llvm/Analysis/TargetTransformInfo.h"
62#include "llvm/Analysis/ValueTracking.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
66#include "llvm/IR/DebugInfoMetadata.h"
67#include "llvm/IR/DerivedTypes.h"
68#include "llvm/IR/Dominators.h"
69#include "llvm/IR/IRBuilder.h"
70#include "llvm/IR/Instructions.h"
71#include "llvm/IR/IntrinsicInst.h"
72#include "llvm/IR/LLVMContext.h"
73#include "llvm/IR/Metadata.h"
74#include "llvm/IR/PatternMatch.h"
75#include "llvm/IR/PredIteratorCache.h"
76#include "llvm/InitializePasses.h"
77#include "llvm/Support/CommandLine.h"
78#include "llvm/Support/Debug.h"
79#include "llvm/Support/raw_ostream.h"
80#include "llvm/Transforms/Scalar.h"
81#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
82#include "llvm/Transforms/Utils/BasicBlockUtils.h"
83#include "llvm/Transforms/Utils/Local.h"
84#include "llvm/Transforms/Utils/LoopUtils.h"
85#include "llvm/Transforms/Utils/SSAUpdater.h"
86#include <algorithm>
87#include <utility>
88using namespace llvm;
89
90namespace llvm {
91class LPMUpdater;
92} // namespace llvm
93
94#define DEBUG_TYPE "licm"
95
96STATISTIC(NumCreatedBlocks, "Number of blocks created");
97STATISTIC(NumClonedBranches, "Number of branches cloned");
98STATISTIC(NumSunk, "Number of instructions sunk out of loop");
99STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
100STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
101STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
102STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
103STATISTIC(NumLoadPromoted, "Number of load-only promotions");
104STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
105STATISTIC(NumMinMaxHoisted,
106 "Number of min/max expressions hoisted out of the loop");
107STATISTIC(NumGEPsHoisted,
108 "Number of geps reassociated and hoisted out of the loop");
109STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
110 "and hoisted out of the loop");
111STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
112 "reassociated and hoisted out of the loop");
113STATISTIC(NumIntAssociationsHoisted,
114 "Number of invariant int expressions "
115 "reassociated and hoisted out of the loop");
116STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
117 "reassociated and hoisted out of the loop");
118
119/// Memory promotion is enabled by default.
120static cl::opt<bool>
121 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(Val: false),
122 cl::desc("Disable memory promotion in LICM pass"));
123
124static cl::opt<bool> ControlFlowHoisting(
125 "licm-control-flow-hoisting", cl::Hidden, cl::init(Val: false),
126 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
127
128static cl::opt<bool>
129 SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(Val: false),
130 cl::desc("Force thread model single in LICM pass"));
131
132static cl::opt<uint32_t> MaxNumUsesTraversed(
133 "licm-max-num-uses-traversed", cl::Hidden, cl::init(Val: 8),
134 cl::desc("Max num uses visited for identifying load "
135 "invariance in loop using invariant start (default = 8)"));
136
137static cl::opt<unsigned> FPAssociationUpperLimit(
138 "licm-max-num-fp-reassociations", cl::init(Val: 5U), cl::Hidden,
139 cl::desc(
140 "Set upper limit for the number of transformations performed "
141 "during a single round of hoisting the reassociated expressions."));
142
143static cl::opt<unsigned> IntAssociationUpperLimit(
144 "licm-max-num-int-reassociations", cl::init(Val: 5U), cl::Hidden,
145 cl::desc(
146 "Set upper limit for the number of transformations performed "
147 "during a single round of hoisting the reassociated expressions."));
148
149// Experimental option to allow imprecision in LICM in pathological cases, in
150// exchange for faster compile. This is to be removed if MemorySSA starts to
151// address the same issue. LICM calls MemorySSAWalker's
152// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
153// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
154// which may not be precise, since optimizeUses is capped. The result is
155// correct, but we may not get as "far up" as possible to get which access is
156// clobbering the one queried.
157cl::opt<unsigned> llvm::SetLicmMssaOptCap(
158 "licm-mssa-optimization-cap", cl::init(Val: 100), cl::Hidden,
159 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
160 "for faster compile. Caps the MemorySSA clobbering calls."));
161
162// Experimentally, memory promotion carries less importance than sinking and
163// hoisting. Limit when we do promotion when using MemorySSA, in order to save
164// compile time.
165cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
166 "licm-mssa-max-acc-promotion", cl::init(Val: 250), cl::Hidden,
167 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
168 "effect. When MSSA in LICM is enabled, then this is the maximum "
169 "number of accesses allowed to be present in a loop in order to "
170 "enable memory promotion."));
171
172namespace llvm {
173extern cl::opt<bool> ProfcheckDisableMetadataFixes;
174} // end namespace llvm
175
176static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
177static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
178 const LoopSafetyInfo *SafetyInfo,
179 TargetTransformInfo *TTI,
180 bool &FoldableInLoop, bool LoopNestMode);
181static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
182 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
183 MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
184 OptimizationRemarkEmitter *ORE);
185static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
186 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
187 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE);
188static bool isSafeToExecuteUnconditionally(
189 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
190 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
191 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
192 AssumptionCache *AC, bool AllowSpeculation);
193static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA,
194 AAResults *AA, Loop *CurLoop,
195 SinkAndHoistLICMFlags &Flags);
196static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
197 Loop *CurLoop, Instruction &I,
198 SinkAndHoistLICMFlags &Flags,
199 bool InvariantGroup);
200static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
201 MemoryUse &MU);
/// Aggregates various functions for hoisting computations out of the loop.
203static bool hoistArithmetics(Instruction &I, Loop &L,
204 ICFLoopSafetyInfo &SafetyInfo,
205 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
206 DominatorTree *DT);
207static Instruction *cloneInstructionInExitBlock(
208 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
209 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);
210
211static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
212 MemorySSAUpdater &MSSAU);
213
214static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
215 ICFLoopSafetyInfo &SafetyInfo,
216 MemorySSAUpdater &MSSAU, ScalarEvolution *SE);
217
218static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
219 function_ref<void(Instruction *)> Fn);
220using PointersAndHasReadsOutsideSet =
221 std::pair<SmallSetVector<Value *, 8>, bool>;
222static SmallVector<PointersAndHasReadsOutsideSet, 0>
223collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);
224
225namespace {
226struct LoopInvariantCodeMotion {
227 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
228 AssumptionCache *AC, TargetLibraryInfo *TLI,
229 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
230 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
231
232 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
233 unsigned LicmMssaNoAccForPromotionCap,
234 bool LicmAllowSpeculation)
235 : LicmMssaOptCap(LicmMssaOptCap),
236 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
237 LicmAllowSpeculation(LicmAllowSpeculation) {}
238
239private:
240 unsigned LicmMssaOptCap;
241 unsigned LicmMssaNoAccForPromotionCap;
242 bool LicmAllowSpeculation;
243};
244
245struct LegacyLICMPass : public LoopPass {
246 static char ID; // Pass identification, replacement for typeid
247 LegacyLICMPass(
248 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
249 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
250 bool LicmAllowSpeculation = true)
251 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
252 LicmAllowSpeculation) {
253 initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
254 }
255
256 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
257 if (skipLoop(L))
258 return false;
259
260 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
261 << L->getHeader()->getNameOrAsOperand() << "\n");
262
263 Function *F = L->getHeader()->getParent();
264
265 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
266 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
267 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
268 // pass. Function analyses need to be preserved across loop transformations
269 // but ORE cannot be preserved (see comment before the pass definition).
270 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
271 return LICM.runOnLoop(
272 L, AA: &getAnalysis<AAResultsWrapperPass>().getAAResults(),
273 LI: &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
274 DT: &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
275 AC: &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F&: *F),
276 TLI: &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F: *F),
277 TTI: &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F: *F),
278 SE: SE ? &SE->getSE() : nullptr, MSSA, ORE: &ORE);
279 }
280
281 /// This transformation requires natural loop information & requires that
282 /// loop preheaders be inserted into the CFG...
283 ///
284 void getAnalysisUsage(AnalysisUsage &AU) const override {
285 AU.addPreserved<DominatorTreeWrapperPass>();
286 AU.addPreserved<LoopInfoWrapperPass>();
287 AU.addRequired<TargetLibraryInfoWrapperPass>();
288 AU.addRequired<MemorySSAWrapperPass>();
289 AU.addPreserved<MemorySSAWrapperPass>();
290 AU.addRequired<TargetTransformInfoWrapperPass>();
291 AU.addRequired<AssumptionCacheTracker>();
292 getLoopAnalysisUsage(AU);
293 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
294 AU.addPreserved<LazyBlockFrequencyInfoPass>();
295 AU.addPreserved<LazyBranchProbabilityInfoPass>();
296 }
297
298private:
299 LoopInvariantCodeMotion LICM;
300};
301} // namespace
302
303PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
304 LoopStandardAnalysisResults &AR, LPMUpdater &) {
305 if (!AR.MSSA)
306 reportFatalUsageError(reason: "LICM requires MemorySSA (loop-mssa)");
307
308 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
309 // pass. Function analyses need to be preserved across loop transformations
310 // but ORE cannot be preserved (see comment before the pass definition).
311 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
312
313 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
314 Opts.AllowSpeculation);
315 if (!LICM.runOnLoop(L: &L, AA: &AR.AA, LI: &AR.LI, DT: &AR.DT, AC: &AR.AC, TLI: &AR.TLI, TTI: &AR.TTI,
316 SE: &AR.SE, MSSA: AR.MSSA, ORE: &ORE))
317 return PreservedAnalyses::all();
318
319 auto PA = getLoopPassPreservedAnalyses();
320 PA.preserve<MemorySSAAnalysis>();
321
322 return PA;
323}
324
325void LICMPass::printPipeline(
326 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
327 static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
328 OS, MapClassName2PassName);
329
330 OS << '<';
331 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
332 OS << '>';
333}
334
335PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
336 LoopStandardAnalysisResults &AR,
337 LPMUpdater &) {
338 if (!AR.MSSA)
339 reportFatalUsageError(reason: "LNICM requires MemorySSA (loop-mssa)");
340
341 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
342 // pass. Function analyses need to be preserved across loop transformations
343 // but ORE cannot be preserved (see comment before the pass definition).
344 OptimizationRemarkEmitter ORE(LN.getParent());
345
346 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
347 Opts.AllowSpeculation);
348
349 Loop &OutermostLoop = LN.getOutermostLoop();
350 bool Changed = LICM.runOnLoop(L: &OutermostLoop, AA: &AR.AA, LI: &AR.LI, DT: &AR.DT, AC: &AR.AC,
351 TLI: &AR.TLI, TTI: &AR.TTI, SE: &AR.SE, MSSA: AR.MSSA, ORE: &ORE, LoopNestMode: true);
352
353 if (!Changed)
354 return PreservedAnalyses::all();
355
356 auto PA = getLoopPassPreservedAnalyses();
357
358 PA.preserve<DominatorTreeAnalysis>();
359 PA.preserve<LoopAnalysis>();
360 PA.preserve<MemorySSAAnalysis>();
361
362 return PA;
363}
364
365void LNICMPass::printPipeline(
366 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
367 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
368 OS, MapClassName2PassName);
369
370 OS << '<';
371 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
372 OS << '>';
373}
374
375char LegacyLICMPass::ID = 0;
376INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
377 false, false)
378INITIALIZE_PASS_DEPENDENCY(LoopPass)
379INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
380INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
381INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
382INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
383INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
384 false)
385
386Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
387
388llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop &L,
389 MemorySSA &MSSA)
390 : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
391 IsSink, L, MSSA) {}
392
393llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
394 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
395 Loop &L, MemorySSA &MSSA)
396 : LicmMssaOptCap(LicmMssaOptCap),
397 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
398 IsSink(IsSink) {
399 unsigned AccessCapCount = 0;
400 for (auto *BB : L.getBlocks())
401 if (const auto *Accesses = MSSA.getBlockAccesses(BB))
402 for (const auto &MA : *Accesses) {
403 (void)MA;
404 ++AccessCapCount;
405 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
406 NoOfMemAccTooLarge = true;
407 return;
408 }
409 }
410}
411
/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
415bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
416 DominatorTree *DT, AssumptionCache *AC,
417 TargetLibraryInfo *TLI,
418 TargetTransformInfo *TTI,
419 ScalarEvolution *SE, MemorySSA *MSSA,
420 OptimizationRemarkEmitter *ORE,
421 bool LoopNestMode) {
422 bool Changed = false;
423
424 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
425
426 // If this loop has metadata indicating that LICM is not to be performed then
427 // just exit.
428 if (hasDisableLICMTransformsHint(L)) {
429 return false;
430 }
431
432 // Don't sink stores from loops with coroutine suspend instructions.
433 // LICM would sink instructions into the default destination of
434 // the coroutine switch. The default destination of the switch is to
435 // handle the case where the coroutine is suspended, by which point the
436 // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This would unfortunately hurt the performance of coroutines;
  // however, there is currently no general solution for this. Similar issues
  // could also potentially happen in other passes where instructions are being
  // moved across that edge.
441 bool HasCoroSuspendInst = llvm::any_of(Range: L->getBlocks(), P: [](BasicBlock *BB) {
442 using namespace PatternMatch;
443 return any_of(Range: make_pointer_range(Range&: *BB),
444 P: match_fn(P: m_Intrinsic<Intrinsic::coro_suspend>()));
445 });
446
447 MemorySSAUpdater MSSAU(MSSA);
448 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
449 /*IsSink=*/true, *L, *MSSA);
450
451 // Get the preheader block to move instructions into...
452 BasicBlock *Preheader = L->getLoopPreheader();
453
454 // Compute loop safety information.
455 ICFLoopSafetyInfo SafetyInfo;
456 SafetyInfo.computeLoopSafetyInfo(CurLoop: L);
457
  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
462 //
463 // Traverse the body of the loop in depth first order on the dominator tree so
464 // that we are guaranteed to see definitions before we see uses. This allows
465 // us to sink instructions in one pass, without iteration. After sinking
466 // instructions, we perform another pass to hoist them out of the loop.
467 if (L->hasDedicatedExits())
468 Changed |=
469 LoopNestMode
470 ? sinkRegionForLoopNest(DT->getNode(BB: L->getHeader()), AA, LI, DT,
471 TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
472 : sinkRegion(DT->getNode(BB: L->getHeader()), AA, LI, DT, TLI, TTI, CurLoop: L,
473 MSSAU, &SafetyInfo, Flags, ORE);
474 Flags.setIsSink(false);
475 if (Preheader)
476 Changed |= hoistRegion(DT->getNode(BB: L->getHeader()), AA, LI, DT, AC, TLI, L,
477 MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
478 AllowSpeculation: LicmAllowSpeculation);
479
480 // Now that all loop invariants have been removed from the loop, promote any
481 // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
487 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
488 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
489 // Figure out the loop exits and their insertion points
490 SmallVector<BasicBlock *, 8> ExitBlocks;
491 L->getUniqueExitBlocks(ExitBlocks);
492
493 // We can't insert into a catchswitch.
494 bool HasCatchSwitch = llvm::any_of(Range&: ExitBlocks, P: [](BasicBlock *Exit) {
495 return isa<CatchSwitchInst>(Val: Exit->getTerminator());
496 });
497
498 if (!HasCatchSwitch) {
499 SmallVector<BasicBlock::iterator, 8> InsertPts;
500 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
501 InsertPts.reserve(N: ExitBlocks.size());
502 MSSAInsertPts.reserve(N: ExitBlocks.size());
503 for (BasicBlock *ExitBlock : ExitBlocks) {
504 InsertPts.push_back(Elt: ExitBlock->getFirstInsertionPt());
505 MSSAInsertPts.push_back(Elt: nullptr);
506 }
507
508 PredIteratorCache PIC;
509
510 // Promoting one set of accesses may make the pointers for another set
511 // loop invariant, so run this in a loop.
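      // Illustrative example (a sketch): if the loop loads "p = *pp" and then
      // accesses "*p", promoting the must-aliased location *pp to a register
      // makes the value of p available in the preheader, i.e. loop invariant,
      // so the accesses through p can become promotion candidates on the next
      // iteration of this do-while.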
512 bool Promoted = false;
513 bool LocalPromoted;
514 do {
515 LocalPromoted = false;
516 for (auto [PointerMustAliases, HasReadsOutsideSet] :
517 collectPromotionCandidates(MSSA, AA, L)) {
518 LocalPromoted |= promoteLoopAccessesToScalars(
519 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
520 DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
521 AllowSpeculation: LicmAllowSpeculation, HasReadsOutsideSet);
522 }
523 Promoted |= LocalPromoted;
524 } while (LocalPromoted);
525
526 // Once we have promoted values across the loop body we have to
527 // recursively reform LCSSA as any nested loop may now have values defined
528 // within the loop used in the outer loop.
529 // FIXME: This is really heavy handed. It would be a bit better to use an
530 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
531 // it as it went.
532 if (Promoted)
533 formLCSSARecursively(L&: *L, DT: *DT, LI, SE);
534
535 Changed |= Promoted;
536 }
537 }
538
539 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
540 // specifically moving instructions across the loop boundary and so it is
541 // especially in need of basic functional correctness checking here.
542 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
543 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
544 "Parent loop not left in LCSSA form after LICM!");
545
546 if (VerifyMemorySSA)
547 MSSA->verifyMemorySSA();
548
549 if (Changed && SE)
550 SE->forgetLoopDispositions();
551 return Changed;
552}
553
554/// Walk the specified region of the CFG (defined by all blocks dominated by
555/// the specified block, and that are in the current loop) in reverse depth
556/// first order w.r.t the DominatorTree. This allows us to visit uses before
557/// definitions, allowing us to sink a loop body in one pass without iteration.
558///
559bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
560 DominatorTree *DT, TargetLibraryInfo *TLI,
561 TargetTransformInfo *TTI, Loop *CurLoop,
562 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
563 SinkAndHoistLICMFlags &Flags,
564 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
565
566 // Verify inputs.
567 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
568 CurLoop != nullptr && SafetyInfo != nullptr &&
569 "Unexpected input to sinkRegion.");
570
571 // We want to visit children before parents. We will enqueue all the parents
572 // before their children in the worklist and process the worklist in reverse
573 // order.
574 SmallVector<BasicBlock *, 16> Worklist =
575 collectChildrenInLoop(DT, N, CurLoop);
576
577 bool Changed = false;
578 for (BasicBlock *BB : reverse(C&: Worklist)) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
580 if (inSubLoop(BB, CurLoop, LI))
581 continue;
582
583 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
584 Instruction &I = *--II;
585
586 // The instruction is not used in the loop if it is dead. In this case,
587 // we just delete it instead of sinking it.
588 if (isInstructionTriviallyDead(I: &I, TLI)) {
589 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
590 salvageKnowledge(I: &I);
591 salvageDebugInfo(I);
592 ++II;
593 eraseInstruction(I, SafetyInfo&: *SafetyInfo, MSSAU);
594 Changed = true;
595 continue;
596 }
597
598 // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all of the users of the instruction are
600 // outside of the loop. In this case, it doesn't even matter if the
601 // operands of the instruction are loop invariant.
602 //
603 bool FoldableInLoop = false;
604 bool LoopNestMode = OutermostLoop != nullptr;
605 if (!I.mayHaveSideEffects() &&
606 isNotUsedOrFoldableInLoop(I, CurLoop: LoopNestMode ? OutermostLoop : CurLoop,
607 SafetyInfo, TTI, FoldableInLoop,
608 LoopNestMode) &&
609 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, TargetExecutesOncePerLoop: true, LICMFlags&: Flags, ORE)) {
610 if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
611 if (!FoldableInLoop) {
612 ++II;
613 salvageDebugInfo(I);
614 eraseInstruction(I, SafetyInfo&: *SafetyInfo, MSSAU);
615 }
616 Changed = true;
617 }
618 }
619 }
620 }
621 if (VerifyMemorySSA)
622 MSSAU.getMemorySSA()->verifyMemorySSA();
623 return Changed;
624}
625
626bool llvm::sinkRegionForLoopNest(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
627 DominatorTree *DT, TargetLibraryInfo *TLI,
628 TargetTransformInfo *TTI, Loop *CurLoop,
629 MemorySSAUpdater &MSSAU,
630 ICFLoopSafetyInfo *SafetyInfo,
631 SinkAndHoistLICMFlags &Flags,
632 OptimizationRemarkEmitter *ORE) {
633
634 bool Changed = false;
635 SmallPriorityWorklist<Loop *, 4> Worklist;
636 Worklist.insert(X: CurLoop);
637 appendLoopsToWorklist(*CurLoop, Worklist);
638 while (!Worklist.empty()) {
639 Loop *L = Worklist.pop_back_val();
640 Changed |= sinkRegion(N: DT->getNode(BB: L->getHeader()), AA, LI, DT, TLI, TTI, CurLoop: L,
641 MSSAU, SafetyInfo, Flags, ORE, OutermostLoop: CurLoop);
642 }
643 return Changed;
644}
645
646namespace {
// This is a helper class for hoistRegion that lets it hoist control flow in
// order to hoist phis. The way this works is that we initially start hoisting
// to the loop preheader, and when we see a loop-invariant branch we make a
// note of it. When we then come to hoist an instruction that is conditional on
// such a branch, we duplicate the branch and the relevant control flow, then
// hoist the instruction into the block corresponding to its original block in
// the duplicated control flow.
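//
// As an illustrative sketch (not lifted from a real test): given a
// loop-invariant condition %c and invariant values %a and %b,
//
//   loop.bb:   br i1 %c, label %then, label %else
//   then:      br label %merge
//   else:      br label %merge
//   merge:     %phi = phi [ %a, %then ], [ %b, %else ]
//
// the branch on %c and the then/else/merge blocks are cloned in front of the
// loop, %phi is hoisted into the cloned merge block, and that block becomes
// the new preheader.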
654class ControlFlowHoister {
655private:
656 // Information about the loop we are hoisting from
657 LoopInfo *LI;
658 DominatorTree *DT;
659 Loop *CurLoop;
660 MemorySSAUpdater &MSSAU;
661
662 // A map of blocks in the loop to the block their instructions will be hoisted
663 // to.
664 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
665
666 // The branches that we can hoist, mapped to the block that marks a
667 // convergence point of their control flow.
668 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
669
670public:
671 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
672 MemorySSAUpdater &MSSAU)
673 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
674
675 void registerPossiblyHoistableBranch(BranchInst *BI) {
676 // We can only hoist conditional branches with loop invariant operands.
677 if (!ControlFlowHoisting || !BI->isConditional() ||
678 !CurLoop->hasLoopInvariantOperands(I: BI))
679 return;
680
681 // The branch destinations need to be in the loop, and we don't gain
682 // anything by duplicating conditional branches with duplicate successors,
683 // as it's essentially the same as an unconditional branch.
684 BasicBlock *TrueDest = BI->getSuccessor(i: 0);
685 BasicBlock *FalseDest = BI->getSuccessor(i: 1);
686 if (!CurLoop->contains(BB: TrueDest) || !CurLoop->contains(BB: FalseDest) ||
687 TrueDest == FalseDest)
688 return;
689
    // We can hoist BI if one branch destination is the successor of the other,
    // or if both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allow branches where both ends
    // eventually converge to a single block.
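    // Illustrative shapes (sketch): "triangle", where one destination is the
    // other's successor,
    //   BI -> TrueDest -> FalseDest          (CommonSucc == FalseDest)
    // and "diamond", where both destinations share a successor,
    //   BI -> TrueDest -> CommonSucc,  BI -> FalseDest -> CommonSucc.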
695 SmallPtrSet<BasicBlock *, 4> TrueDestSucc(llvm::from_range,
696 successors(BB: TrueDest));
697 SmallPtrSet<BasicBlock *, 4> FalseDestSucc(llvm::from_range,
698 successors(BB: FalseDest));
699 BasicBlock *CommonSucc = nullptr;
700 if (TrueDestSucc.count(Ptr: FalseDest)) {
701 CommonSucc = FalseDest;
702 } else if (FalseDestSucc.count(Ptr: TrueDest)) {
703 CommonSucc = TrueDest;
704 } else {
705 set_intersect(S1&: TrueDestSucc, S2: FalseDestSucc);
706 // If there's one common successor use that.
707 if (TrueDestSucc.size() == 1)
708 CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
712 else if (!TrueDestSucc.empty()) {
713 Function *F = TrueDest->getParent();
714 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(Ptr: &BB); };
715 auto It = llvm::find_if(Range&: *F, P: IsSucc);
716 assert(It != F->end() && "Could not find successor in function");
717 CommonSucc = &*It;
718 }
719 }
720 // The common successor has to be dominated by the branch, as otherwise
721 // there will be some other path to the successor that will not be
722 // controlled by this branch so any phi we hoist would be controlled by the
723 // wrong condition. This also takes care of avoiding hoisting of loop back
724 // edges.
725 // TODO: In some cases this could be relaxed if the successor is dominated
726 // by another block that's been hoisted and we can guarantee that the
727 // control flow has been replicated exactly.
728 if (CommonSucc && DT->dominates(Def: BI, BB: CommonSucc))
729 HoistableBranches[BI] = CommonSucc;
730 }
731
732 bool canHoistPHI(PHINode *PN) {
733 // The phi must have loop invariant operands.
734 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(I: PN))
735 return false;
736 // We can hoist phis if the block they are in is the target of hoistable
737 // branches which cover all of the predecessors of the block.
738 BasicBlock *BB = PN->getParent();
739 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks(llvm::from_range,
740 predecessors(BB));
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
746 if (PredecessorBlocks.size() != pred_size(BB))
747 return false;
748 for (auto &Pair : HoistableBranches) {
749 if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
752 if (Pair.first->getSuccessor(i: 0) == BB) {
753 PredecessorBlocks.erase(Ptr: Pair.first->getParent());
754 PredecessorBlocks.erase(Ptr: Pair.first->getSuccessor(i: 1));
755 } else if (Pair.first->getSuccessor(i: 1) == BB) {
756 PredecessorBlocks.erase(Ptr: Pair.first->getParent());
757 PredecessorBlocks.erase(Ptr: Pair.first->getSuccessor(i: 0));
758 } else {
759 PredecessorBlocks.erase(Ptr: Pair.first->getSuccessor(i: 0));
760 PredecessorBlocks.erase(Ptr: Pair.first->getSuccessor(i: 1));
761 }
762 }
763 }
764 // PredecessorBlocks will now be empty if for every predecessor of BB we
765 // found a hoistable branch source.
766 return PredecessorBlocks.empty();
767 }
768
769 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
770 if (!ControlFlowHoisting)
771 return CurLoop->getLoopPreheader();
772 // If BB has already been hoisted, return that
773 if (auto It = HoistDestinationMap.find(Val: BB); It != HoistDestinationMap.end())
774 return It->second;
775
776 // Check if this block is conditional based on a pending branch
777 auto HasBBAsSuccessor =
778 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
779 return BB != Pair.second && (Pair.first->getSuccessor(i: 0) == BB ||
780 Pair.first->getSuccessor(i: 1) == BB);
781 };
782 auto It = llvm::find_if(Range&: HoistableBranches, P: HasBBAsSuccessor);
783
784 // If not involved in a pending branch, hoist to preheader
785 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
786 if (It == HoistableBranches.end()) {
787 LLVM_DEBUG(dbgs() << "LICM using "
788 << InitialPreheader->getNameOrAsOperand()
789 << " as hoist destination for "
790 << BB->getNameOrAsOperand() << "\n");
791 HoistDestinationMap[BB] = InitialPreheader;
792 return InitialPreheader;
793 }
794 BranchInst *BI = It->first;
795 assert(std::none_of(std::next(It), HoistableBranches.end(),
796 HasBBAsSuccessor) &&
797 "BB is expected to be the target of at most one branch");
798
799 LLVMContext &C = BB->getContext();
800 BasicBlock *TrueDest = BI->getSuccessor(i: 0);
801 BasicBlock *FalseDest = BI->getSuccessor(i: 1);
802 BasicBlock *CommonSucc = HoistableBranches[BI];
803 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BB: BI->getParent());
804
805 // Create hoisted versions of blocks that currently don't have them
806 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
807 auto [It, Inserted] = HoistDestinationMap.try_emplace(Key: Orig);
808 if (!Inserted)
809 return It->second;
810 BasicBlock *New =
811 BasicBlock::Create(Context&: C, Name: Orig->getName() + ".licm", Parent: Orig->getParent());
812 It->second = New;
813 DT->addNewBlock(BB: New, DomBB: HoistTarget);
814 if (CurLoop->getParentLoop())
815 CurLoop->getParentLoop()->addBasicBlockToLoop(NewBB: New, LI&: *LI);
816 ++NumCreatedBlocks;
817 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
818 << " as hoist destination for " << Orig->getName()
819 << "\n");
820 return New;
821 };
822 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
823 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
824 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
825
826 // Link up these blocks with branches.
827 if (!HoistCommonSucc->getTerminator()) {
828 // The new common successor we've generated will branch to whatever that
829 // hoist target branched to.
830 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
831 assert(TargetSucc && "Expected hoist target to have a single successor");
832 HoistCommonSucc->moveBefore(MovePos: TargetSucc);
833 BranchInst::Create(IfTrue: TargetSucc, InsertBefore: HoistCommonSucc);
834 }
835 if (!HoistTrueDest->getTerminator()) {
836 HoistTrueDest->moveBefore(MovePos: HoistCommonSucc);
837 BranchInst::Create(IfTrue: HoistCommonSucc, InsertBefore: HoistTrueDest);
838 }
839 if (!HoistFalseDest->getTerminator()) {
840 HoistFalseDest->moveBefore(MovePos: HoistCommonSucc);
841 BranchInst::Create(IfTrue: HoistCommonSucc, InsertBefore: HoistFalseDest);
842 }
843
844 // If BI is being cloned to what was originally the preheader then
845 // HoistCommonSucc will now be the new preheader.
846 if (HoistTarget == InitialPreheader) {
847 // Phis in the loop header now need to use the new preheader.
848 InitialPreheader->replaceSuccessorsPhiUsesWith(New: HoistCommonSucc);
849 MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
850 Old: HoistTarget->getSingleSuccessor(), New: HoistCommonSucc, Preds: {HoistTarget});
851 // The new preheader dominates the loop header.
852 DomTreeNode *PreheaderNode = DT->getNode(BB: HoistCommonSucc);
853 DomTreeNode *HeaderNode = DT->getNode(BB: CurLoop->getHeader());
854 DT->changeImmediateDominator(N: HeaderNode, NewIDom: PreheaderNode);
855 // The preheader hoist destination is now the new preheader, with the
856 // exception of the hoist destination of this branch.
857 for (auto &Pair : HoistDestinationMap)
858 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
859 Pair.second = HoistCommonSucc;
860 }
861
862 // Now finally clone BI.
863 auto *NewBI =
864 BranchInst::Create(IfTrue: HoistTrueDest, IfFalse: HoistFalseDest, Cond: BI->getCondition(),
865 InsertBefore: HoistTarget->getTerminator()->getIterator());
866 HoistTarget->getTerminator()->eraseFromParent();
867 // md_prof should also come from the original branch - since the
868 // condition was hoisted, the branch probabilities shouldn't change.
869 if (!ProfcheckDisableMetadataFixes)
870 NewBI->copyMetadata(SrcInst: *BI, WL: {LLVMContext::MD_prof});
871 // FIXME: Issue #152767: debug info should also be the same as the
872 // original branch, **if** the user explicitly indicated that.
873 NewBI->setDebugLoc(HoistTarget->getTerminator()->getDebugLoc());
874
875 ++NumClonedBranches;
876
877 assert(CurLoop->getLoopPreheader() &&
878 "Hoisting blocks should not have destroyed preheader");
879 return HoistDestinationMap[BB];
880 }
881};
882} // namespace
883
884/// Walk the specified region of the CFG (defined by all blocks dominated by
885/// the specified block, and that are in the current loop) in depth first
886/// order w.r.t the DominatorTree. This allows us to visit definitions before
887/// uses, allowing us to hoist a loop body in one pass without iteration.
888///
889bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
890 DominatorTree *DT, AssumptionCache *AC,
891 TargetLibraryInfo *TLI, Loop *CurLoop,
892 MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
893 ICFLoopSafetyInfo *SafetyInfo,
894 SinkAndHoistLICMFlags &Flags,
895 OptimizationRemarkEmitter *ORE, bool LoopNestMode,
896 bool AllowSpeculation) {
897 // Verify inputs.
898 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
899 CurLoop != nullptr && SafetyInfo != nullptr &&
900 "Unexpected input to hoistRegion.");
901
902 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
903
904 // Keep track of instructions that have been hoisted, as they may need to be
905 // re-hoisted if they end up not dominating all of their uses.
906 SmallVector<Instruction *, 16> HoistedInstructions;
907
908 // For PHI hoisting to work we need to hoist blocks before their successors.
909 // We can do this by iterating through the blocks in the loop in reverse
910 // post-order.
911 LoopBlocksRPO Worklist(CurLoop);
912 Worklist.perform(LI);
913 bool Changed = false;
914 BasicBlock *Preheader = CurLoop->getLoopPreheader();
915 for (BasicBlock *BB : Worklist) {
916 // Only need to process the contents of this block if it is not part of a
917 // subloop (which would already have been processed).
918 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
919 continue;
920
921 for (Instruction &I : llvm::make_early_inc_range(Range&: *BB)) {
922 // Try hoisting the instruction out to the preheader. We can only do
923 // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure the instruction only gets hoisted into colder blocks.
926 // TODO: It may be safe to hoist if we are hoisting to a conditional block
927 // and we have accurately duplicated the control flow from the loop header
928 // to that block.
929 if (CurLoop->hasLoopInvariantOperands(I: &I) &&
930 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, TargetExecutesOncePerLoop: true, LICMFlags&: Flags, ORE) &&
931 isSafeToExecuteUnconditionally(Inst&: I, DT, TLI, CurLoop, SafetyInfo, ORE,
932 CtxI: Preheader->getTerminator(), AC,
933 AllowSpeculation)) {
934 hoist(I, DT, CurLoop, Dest: CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
935 MSSAU, SE, ORE);
936 HoistedInstructions.push_back(Elt: &I);
937 Changed = true;
938 continue;
939 }
940
      // Attempt to move floating-point division out of the loop by
      // converting it to a reciprocal multiplication.
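      // For example, "x / d" with a loop-invariant divisor d becomes
      // "x * (1.0 / d)", and the reciprocal "1.0 / d" is then hoisted out of
      // the loop. This is only done when the allow-reciprocal fast-math flag
      // is present on the division.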
943 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
944 CurLoop->isLoopInvariant(V: I.getOperand(i: 1))) {
945 auto Divisor = I.getOperand(i: 1);
946 auto One = llvm::ConstantFP::get(Ty: Divisor->getType(), V: 1.0);
947 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(V1: One, V2: Divisor);
948 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
949 SafetyInfo->insertInstructionTo(Inst: ReciprocalDivisor, BB: I.getParent());
950 ReciprocalDivisor->insertBefore(InsertPos: I.getIterator());
951 ReciprocalDivisor->setDebugLoc(I.getDebugLoc());
952
953 auto Product =
954 BinaryOperator::CreateFMul(V1: I.getOperand(i: 0), V2: ReciprocalDivisor);
955 Product->setFastMathFlags(I.getFastMathFlags());
956 SafetyInfo->insertInstructionTo(Inst: Product, BB: I.getParent());
957 Product->insertAfter(InsertPos: I.getIterator());
958 Product->setDebugLoc(I.getDebugLoc());
959 I.replaceAllUsesWith(V: Product);
960 eraseInstruction(I, SafetyInfo&: *SafetyInfo, MSSAU);
961
962 hoist(I&: *ReciprocalDivisor, DT, CurLoop, Dest: CFH.getOrCreateHoistedBlock(BB),
963 SafetyInfo, MSSAU, SE, ORE);
964 HoistedInstructions.push_back(Elt: ReciprocalDivisor);
965 Changed = true;
966 continue;
967 }
968
969 auto IsInvariantStart = [&](Instruction &I) {
970 using namespace PatternMatch;
971 return I.use_empty() &&
972 match(V: &I, P: m_Intrinsic<Intrinsic::invariant_start>());
973 };
974 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
975 return SafetyInfo->isGuaranteedToExecute(Inst: I, DT, CurLoop) &&
976 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
977 };
978 if ((IsInvariantStart(I) || isGuard(U: &I)) &&
979 CurLoop->hasLoopInvariantOperands(I: &I) &&
980 MustExecuteWithoutWritesBefore(I)) {
981 hoist(I, DT, CurLoop, Dest: CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
982 MSSAU, SE, ORE);
983 HoistedInstructions.push_back(Elt: &I);
984 Changed = true;
985 continue;
986 }
987
988 if (PHINode *PN = dyn_cast<PHINode>(Val: &I)) {
989 if (CFH.canHoistPHI(PN)) {
990 // Redirect incoming blocks first to ensure that we create hoisted
991 // versions of those blocks before we hoist the phi.
992 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
993 PN->setIncomingBlock(
994 i, BB: CFH.getOrCreateHoistedBlock(BB: PN->getIncomingBlock(i)));
995 hoist(I&: *PN, DT, CurLoop, Dest: CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
996 MSSAU, SE, ORE);
997 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
998 Changed = true;
999 continue;
1000 }
1001 }
1002
      // Try to reassociate instructions so that part of the computation can be
      // done outside the loop.
1005 if (hoistArithmetics(I, L&: *CurLoop, SafetyInfo&: *SafetyInfo, MSSAU, AC, DT)) {
1006 Changed = true;
1007 continue;
1008 }
1009
1010 // Remember possibly hoistable branches so we can actually hoist them
1011 // later if needed.
1012 if (BranchInst *BI = dyn_cast<BranchInst>(Val: &I))
1013 CFH.registerPossiblyHoistableBranch(BI);
1014 }
1015 }
1016
1017 // If we hoisted instructions to a conditional block they may not dominate
1018 // their uses that weren't hoisted (such as phis where some operands are not
1019 // loop invariant). If so make them unconditional by moving them to their
1020 // immediate dominator. We iterate through the instructions in reverse order
1021 // which ensures that when we rehoist an instruction we rehoist its operands,
1022 // and also keep track of where in the block we are rehoisting to make sure
1023 // that we rehoist instructions before the instructions that use them.
1024 Instruction *HoistPoint = nullptr;
1025 if (ControlFlowHoisting) {
1026 for (Instruction *I : reverse(C&: HoistedInstructions)) {
1027 if (!llvm::all_of(Range: I->uses(),
1028 P: [&](Use &U) { return DT->dominates(Def: I, U); })) {
1029 BasicBlock *Dominator =
1030 DT->getNode(BB: I->getParent())->getIDom()->getBlock();
1031 if (!HoistPoint || !DT->dominates(A: HoistPoint->getParent(), B: Dominator)) {
1032 if (HoistPoint)
1033 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1034 "New hoist point expected to dominate old hoist point");
1035 HoistPoint = Dominator->getTerminator();
1036 }
1037 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1038 << HoistPoint->getParent()->getNameOrAsOperand()
1039 << ": " << *I << "\n");
1040 moveInstructionBefore(I&: *I, Dest: HoistPoint->getIterator(), SafetyInfo&: *SafetyInfo, MSSAU,
1041 SE);
1042 HoistPoint = I;
1043 Changed = true;
1044 }
1045 }
1046 }
1047 if (VerifyMemorySSA)
1048 MSSAU.getMemorySSA()->verifyMemorySSA();
1049
1050 // Now that we've finished hoisting make sure that LI and DT are still
1051 // valid.
1052#ifdef EXPENSIVE_CHECKS
1053 if (Changed) {
1054 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1055 "Dominator tree verification failed");
1056 LI->verify(*DT);
1057 }
1058#endif
1059
1060 return Changed;
1061}
1062
// Return true if LI is invariant within the scope of the loop. LI is invariant
// if CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and the
// invariant.start has no uses.
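// For illustration (a sketch, not from an actual test): a load of 4 bytes from
// %p inside the loop can be treated as invariant if a call such as
//   %inv = call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
// properly dominates the loop header, its size covers the loaded size, and
// %inv itself has no uses.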
1067static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1068 Loop *CurLoop) {
1069 Value *Addr = LI->getPointerOperand();
1070 const DataLayout &DL = LI->getDataLayout();
1071 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(Ty: LI->getType());
1072
1073 // It is not currently possible for clang to generate an invariant.start
1074 // intrinsic with scalable vector types because we don't support thread local
1075 // sizeless types and we don't permit sizeless types in structs or classes.
1076 // Furthermore, even if support is added for this in future the intrinsic
1077 // itself is defined to have a size of -1 for variable sized objects. This
1078 // makes it impossible to verify if the intrinsic envelops our region of
1079 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1080 // types would have a -1 parameter, but the former is clearly double the size
1081 // of the latter.
1082 if (LocSizeInBits.isScalable())
1083 return false;
1084
1085 // If we've ended up at a global/constant, bail. We shouldn't be looking at
1086 // uselists for non-local Values in a loop pass.
1087 if (isa<Constant>(Val: Addr))
1088 return false;
1089
1090 unsigned UsesVisited = 0;
1091 // Traverse all uses of the load operand value, to see if invariant.start is
1092 // one of the uses, and whether it dominates the load instruction.
1093 for (auto *U : Addr->users()) {
1094 // Avoid traversing for Load operand with high number of users.
1095 if (++UsesVisited > MaxNumUsesTraversed)
1096 return false;
1097 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
1100 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1101 !II->use_empty())
1102 continue;
1103 ConstantInt *InvariantSize = cast<ConstantInt>(Val: II->getArgOperand(i: 0));
1104 // The intrinsic supports having a -1 argument for variable sized objects
1105 // so we should check for that here.
1106 if (InvariantSize->isNegative())
1107 continue;
1108 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1109 // Confirm the invariant.start location size contains the load operand size
1110 // in bits. Also, the invariant.start should dominate the load, and we
1111 // should not hoist the load out of a loop that contains this dominating
1112 // invariant.start.
1113 if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
1114 DT->properlyDominates(A: II->getParent(), B: CurLoop->getHeader()))
1115 return true;
1116 }
1117
1118 return false;
1119}
1120
1121/// Return true if-and-only-if we know how to (mechanically) both hoist and
1122/// sink a given instruction out of a loop. Does not address legality
1123/// concerns such as aliasing or speculation safety.
1124static bool isHoistableAndSinkableInst(Instruction &I) {
1125 // Only these instructions are hoistable/sinkable.
1126 return (isa<LoadInst>(Val: I) || isa<StoreInst>(Val: I) || isa<CallInst>(Val: I) ||
1127 isa<FenceInst>(Val: I) || isa<CastInst>(Val: I) || isa<UnaryOperator>(Val: I) ||
1128 isa<BinaryOperator>(Val: I) || isa<SelectInst>(Val: I) ||
1129 isa<GetElementPtrInst>(Val: I) || isa<CmpInst>(Val: I) ||
1130 isa<InsertElementInst>(Val: I) || isa<ExtractElementInst>(Val: I) ||
1131 isa<ShuffleVectorInst>(Val: I) || isa<ExtractValueInst>(Val: I) ||
1132 isa<InsertValueInst>(Val: I) || isa<FreezeInst>(Val: I));
1133}
1134
1135/// Return true if I is the only Instruction with a MemoryAccess in L.
1136static bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1137 const MemorySSAUpdater &MSSAU) {
1138 for (auto *BB : L->getBlocks())
1139 if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
1140 int NotAPhi = 0;
1141 for (const auto &Acc : *Accs) {
1142 if (isa<MemoryPhi>(Val: &Acc))
1143 continue;
1144 const auto *MUD = cast<MemoryUseOrDef>(Val: &Acc);
1145 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1146 return false;
1147 }
1148 }
1149 return true;
1150}
1151
1152static MemoryAccess *getClobberingMemoryAccess(MemorySSA &MSSA,
1153 BatchAAResults &BAA,
1154 SinkAndHoistLICMFlags &Flags,
1155 MemoryUseOrDef *MA) {
1156 // See declaration of SetLicmMssaOptCap for usage details.
1157 if (Flags.tooManyClobberingCalls())
1158 return MA->getDefiningAccess();
1159
1160 MemoryAccess *Source =
1161 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MA, AA&: BAA);
1162 Flags.incrementClobberingCalls();
1163 return Source;
1164}
1165
1166bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1167 Loop *CurLoop, MemorySSAUpdater &MSSAU,
1168 bool TargetExecutesOncePerLoop,
1169 SinkAndHoistLICMFlags &Flags,
1170 OptimizationRemarkEmitter *ORE) {
1171 // If we don't understand the instruction, bail early.
1172 if (!isHoistableAndSinkableInst(I))
1173 return false;
1174
1175 MemorySSA *MSSA = MSSAU.getMemorySSA();
1176 // Loads have extra constraints we have to verify before we can hoist them.
1177 if (LoadInst *LI = dyn_cast<LoadInst>(Val: &I)) {
1178 if (!LI->isUnordered())
1179 return false; // Don't sink/hoist volatile or ordered atomic loads!
1180
1181 // Loads from constant memory are always safe to move, even if they end up
1182 // in the same alias set as something that ends up being modified.
1183 if (!isModSet(MRI: AA->getModRefInfoMask(P: LI->getOperand(i_nocapture: 0))))
1184 return true;
1185 if (LI->hasMetadata(KindID: LLVMContext::MD_invariant_load))
1186 return true;
1187
1188 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1189 return false; // Don't risk duplicating unordered loads
1190
1191 // This checks for an invariant.start dominating the load.
1192 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1193 return true;
1194
1195 auto MU = cast<MemoryUse>(Val: MSSA->getMemoryAccess(I: LI));
1196
1197 bool InvariantGroup = LI->hasMetadata(KindID: LLVMContext::MD_invariant_group);
1198
1199 bool Invalidated = pointerInvalidatedByLoop(
1200 MSSA, MU, CurLoop, I, Flags, InvariantGroup);
1201 // Check loop-invariant address because this may also be a sinkable load
1202 // whose address is not necessarily loop-invariant.
1203 if (ORE && Invalidated && CurLoop->isLoopInvariant(V: LI->getPointerOperand()))
1204 ORE->emit(RemarkBuilder: [&]() {
1205 return OptimizationRemarkMissed(
1206 DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1207 << "failed to move load with loop-invariant address "
1208 "because the loop may invalidate its value";
1209 });
1210
1211 return !Invalidated;
1212 } else if (CallInst *CI = dyn_cast<CallInst>(Val: &I)) {
1213 // Don't sink calls which can throw.
1214 if (CI->mayThrow())
1215 return false;
1216
    // The convergent attribute is used on operations that involve inter-thread
    // communication whose results are implicitly affected by the enclosing
    // control flow. It is not safe to hoist or sink such operations across
    // control flow.
1221 if (CI->isConvergent())
1222 return false;
1223
1224 // FIXME: Current LLVM IR semantics don't work well with coroutines and
1225 // thread local globals. We currently treat getting the address of a thread
1226 // local global as not accessing memory, even though it may not be a
1227 // constant throughout a function with coroutines. Remove this check after
1228 // we better model semantics of thread local globals.
1229 if (CI->getFunction()->isPresplitCoroutine())
1230 return false;
1231
1232 using namespace PatternMatch;
1233 if (match(V: CI, P: m_Intrinsic<Intrinsic::assume>()))
1234 // Assumes don't actually alias anything or throw
1235 return true;
1236
1237 // Handle simple cases by querying alias analysis.
1238 MemoryEffects Behavior = AA->getMemoryEffects(Call: CI);
1239
1240 if (Behavior.doesNotAccessMemory())
1241 return true;
1242 if (Behavior.onlyReadsMemory()) {
1243 // Might have stale MemoryDef for call that was later inferred to be
1244 // read-only.
1245 auto *MU = dyn_cast<MemoryUse>(Val: MSSA->getMemoryAccess(I: CI));
1246 if (!MU)
1247 return false;
1248
1249 // If we can prove there are no writes to the memory read by the call, we
1250 // can hoist or sink.
1251 return !pointerInvalidatedByLoop(
1252 MSSA, MU, CurLoop, I, Flags, /*InvariantGroup=*/false);
1253 }
1254
1255 if (Behavior.onlyWritesMemory()) {
1256 // can hoist or sink if there are no conflicting read/writes to the
1257 // memory location written to by the call.
1258 return noConflictingReadWrites(I: CI, MSSA, AA, CurLoop, Flags);
1259 }
1260
1261 return false;
1262 } else if (auto *FI = dyn_cast<FenceInst>(Val: &I)) {
1263 // Fences alias (most) everything to provide ordering. For the moment,
1264 // just give up if there are any other memory operations in the loop.
1265 return isOnlyMemoryAccess(I: FI, L: CurLoop, MSSAU);
1266 } else if (auto *SI = dyn_cast<StoreInst>(Val: &I)) {
1267 if (!SI->isUnordered())
1268 return false; // Don't sink/hoist volatile or ordered atomic store!
1269
    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load/store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
1275 if (isOnlyMemoryAccess(I: SI, L: CurLoop, MSSAU))
1276 return true;
1277 return noConflictingReadWrites(I: SI, MSSA, AA, CurLoop, Flags);
1278 }
1279
1280 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1281
  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
1284 return true;
1285}
1286
/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
1289/// This is true when all incoming values are that instruction.
1290/// This pattern occurs most often with LCSSA PHI nodes.
1291///
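/// For example (illustrative), an LCSSA phi in a loop exit block such as
///   %i.lcssa = phi i32 [ %i, %loop.latch ]
/// has %i as its only incoming value, so once %i is sunk into that exit block
/// the phi can simply be replaced by it.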
1292static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1293 for (const Value *IncValue : PN.incoming_values())
1294 if (IncValue != &I)
1295 return false;
1296
1297 return true;
1298}
1299
1300/// Return true if the instruction is foldable in the loop.
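/// For example, a "free" GEP whose only in-loop users are loads and stores in
/// its own block is expected to fold into their addressing modes, so sinking
/// it would not save any work (this mirrors the check below).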
1301static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1302 const TargetTransformInfo *TTI) {
1303 if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: &I)) {
1304 InstructionCost CostI =
1305 TTI->getInstructionCost(U: &I, CostKind: TargetTransformInfo::TCK_SizeAndLatency);
1306 if (CostI != TargetTransformInfo::TCC_Free)
1307 return false;
1308 // For a GEP, we cannot simply use getInstructionCost because currently
1309 // it optimistically assumes that a GEP will fold into addressing mode
1310 // regardless of its users.
1311 const BasicBlock *BB = GEP->getParent();
1312 for (const User *U : GEP->users()) {
1313 const Instruction *UI = cast<Instruction>(Val: U);
1314 if (CurLoop->contains(Inst: UI) &&
1315 (BB != UI->getParent() ||
1316 (!isa<StoreInst>(Val: UI) && !isa<LoadInst>(Val: UI))))
1317 return false;
1318 }
1319 return true;
1320 }
1321
1322 return false;
1323}
1324
1325/// Return true if the only users of this instruction are outside of
1326/// the loop. If this is true, we can sink the instruction to the exit
1327/// blocks of the loop.
1328///
1329/// We also return true if the instruction could be folded away in lowering.
1330/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1331static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1332 const LoopSafetyInfo *SafetyInfo,
1333 TargetTransformInfo *TTI,
1334 bool &FoldableInLoop, bool LoopNestMode) {
1335 const auto &BlockColors = SafetyInfo->getBlockColors();
1336 bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
1337 for (const User *U : I.users()) {
1338 const Instruction *UI = cast<Instruction>(Val: U);
1339 if (const PHINode *PN = dyn_cast<PHINode>(Val: UI)) {
1340 const BasicBlock *BB = PN->getParent();
1341 // We cannot sink uses in catchswitches.
1342 if (isa<CatchSwitchInst>(Val: BB->getTerminator()))
1343 return false;
1344
1345 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1346 // phi use is too muddled.
1347 if (isa<CallInst>(Val: I))
1348 if (!BlockColors.empty() &&
1349 BlockColors.find(Val: const_cast<BasicBlock *>(BB))->second.size() != 1)
1350 return false;
1351
1352 if (LoopNestMode) {
1353 while (isa<PHINode>(Val: UI) && UI->hasOneUser() &&
1354 UI->getNumOperands() == 1) {
1355 if (!CurLoop->contains(Inst: UI))
1356 break;
1357 UI = cast<Instruction>(Val: UI->user_back());
1358 }
1359 }
1360 }
1361
1362 if (CurLoop->contains(Inst: UI)) {
1363 if (IsFoldable) {
1364 FoldableInLoop = true;
1365 continue;
1366 }
1367 return false;
1368 }
1369 }
1370 return true;
1371}
1372
1373static Instruction *cloneInstructionInExitBlock(
1374 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1375 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
1376 Instruction *New;
1377 if (auto *CI = dyn_cast<CallInst>(Val: &I)) {
1378 const auto &BlockColors = SafetyInfo->getBlockColors();
1379
1380 // Sinking call-sites need to be handled differently from other
1381 // instructions. The cloned call-site needs a funclet bundle operand
1382 // appropriate for its location in the CFG.
1383 SmallVector<OperandBundleDef, 1> OpBundles;
1384 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1385 BundleIdx != BundleEnd; ++BundleIdx) {
1386 OperandBundleUse Bundle = CI->getOperandBundleAt(Index: BundleIdx);
1387 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1388 continue;
1389
1390 OpBundles.emplace_back(Args&: Bundle);
1391 }
1392
1393 if (!BlockColors.empty()) {
1394 const ColorVector &CV = BlockColors.find(Val: &ExitBlock)->second;
1395 assert(CV.size() == 1 && "non-unique color for exit block!");
1396 BasicBlock *BBColor = CV.front();
1397 BasicBlock::iterator EHPad = BBColor->getFirstNonPHIIt();
1398 if (EHPad->isEHPad())
1399 OpBundles.emplace_back(Args: "funclet", Args: &*EHPad);
1400 }
1401
1402 New = CallInst::Create(CI, Bundles: OpBundles);
1403 New->copyMetadata(SrcInst: *CI);
1404 } else {
1405 New = I.clone();
1406 }
1407
1408 New->insertInto(ParentBB: &ExitBlock, It: ExitBlock.getFirstInsertionPt());
1409 if (!I.getName().empty())
1410 New->setName(I.getName() + ".le");
1411
1412 if (MSSAU.getMemorySSA()->getMemoryAccess(I: &I)) {
1413 // Create a new MemoryAccess and let MemorySSA set its defining access.
1414 // After running some passes, MemorySSA might be outdated, and the
1415 // instruction `I` may have become a non-memory touching instruction.
1416 MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
1417 I: New, Definition: nullptr, BB: New->getParent(), Point: MemorySSA::Beginning,
1418 /*CreationMustSucceed=*/false);
1419 if (NewMemAcc) {
1420 if (auto *MemDef = dyn_cast<MemoryDef>(Val: NewMemAcc))
1421 MSSAU.insertDef(Def: MemDef, /*RenameUses=*/true);
1422 else {
1423 auto *MemUse = cast<MemoryUse>(Val: NewMemAcc);
1424 MSSAU.insertUse(Use: MemUse, /*RenameUses=*/true);
1425 }
1426 }
1427 }
1428
1429 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1430 // this is particularly cheap because we can rip off the PHI node that we're
1431 // replacing for the number and blocks of the predecessors.
1432 // OPT: If this shows up in a profile, we can instead finish sinking all
1433 // invariant instructions, and then walk their operands to re-establish
1434 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1435 // sinking bottom-up.
1436 for (Use &Op : New->operands())
1437 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(V: Op.get(), ExitBB: PN.getParent())) {
1438 auto *OInst = cast<Instruction>(Val: Op.get());
1439 PHINode *OpPN =
1440 PHINode::Create(Ty: OInst->getType(), NumReservedValues: PN.getNumIncomingValues(),
1441 NameStr: OInst->getName() + ".lcssa");
1442 OpPN->insertBefore(InsertPos: ExitBlock.begin());
1443 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1444 OpPN->addIncoming(V: OInst, BB: PN.getIncomingBlock(i));
1445 Op = OpPN;
1446 }
1447 return New;
1448}
1449
1450static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1451 MemorySSAUpdater &MSSAU) {
1452 MSSAU.removeMemoryAccess(I: &I);
1453 SafetyInfo.removeInstruction(Inst: &I);
1454 I.eraseFromParent();
1455}
1456
1457static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
1458 ICFLoopSafetyInfo &SafetyInfo,
1459 MemorySSAUpdater &MSSAU,
1460 ScalarEvolution *SE) {
1461 SafetyInfo.removeInstruction(Inst: &I);
1462 SafetyInfo.insertInstructionTo(Inst: &I, BB: Dest->getParent());
1463 I.moveBefore(BB&: *Dest->getParent(), I: Dest);
1464 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1465 Val: MSSAU.getMemorySSA()->getMemoryAccess(I: &I)))
1466 MSSAU.moveToPlace(What: OldMemAcc, BB: Dest->getParent(),
1467 Where: MemorySSA::BeforeTerminator);
1468 if (SE)
1469 SE->forgetBlockAndLoopDispositions(V: &I);
1470}
1471
1472static Instruction *sinkThroughTriviallyReplaceablePHI(
1473 PHINode *TPN, Instruction *I, LoopInfo *LI,
1474 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1475 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1476 MemorySSAUpdater &MSSAU) {
1477 assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1478 "Expect only trivially replaceable PHI");
1479 BasicBlock *ExitBlock = TPN->getParent();
1480 auto [It, Inserted] = SunkCopies.try_emplace(Key: ExitBlock);
1481 if (Inserted)
1482 It->second = cloneInstructionInExitBlock(I&: *I, ExitBlock&: *ExitBlock, PN&: *TPN, LI,
1483 SafetyInfo, MSSAU);
1484 return It->second;
1485}
1486
1487static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1488 BasicBlock *BB = PN->getParent();
1489 if (!BB->canSplitPredecessors())
1490 return false;
1491 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1492 // it requires updating BlockColors for all offspring blocks accordingly. By
1493 // skipping this corner case, we can keep the BlockColors update after splitting
1494 // a predecessor fairly simple.
1495 if (!SafetyInfo->getBlockColors().empty() &&
1496 BB->getFirstNonPHIIt()->isEHPad())
1497 return false;
1498 for (BasicBlock *BBPred : predecessors(BB)) {
1499 if (isa<IndirectBrInst>(Val: BBPred->getTerminator()))
1500 return false;
1501 }
1502 return true;
1503}
1504
1505static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1506 LoopInfo *LI, const Loop *CurLoop,
1507 LoopSafetyInfo *SafetyInfo,
1508 MemorySSAUpdater *MSSAU) {
1509#ifndef NDEBUG
1510 SmallVector<BasicBlock *, 32> ExitBlocks;
1511 CurLoop->getUniqueExitBlocks(ExitBlocks);
1512 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1513#endif
1514 BasicBlock *ExitBB = PN->getParent();
1515 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1516
1517 // Split predecessors of the loop exit so that instructions in the loop are
1518 // exposed to exit blocks through trivially replaceable PHIs, while keeping the
1519 // loop in the canonical form where each predecessor of each exit block is
1520 // contained within the loop. For example, this will convert the loop below
1521 // from
1522 //
1523 // LB1:
1524 // %v1 =
1525 // br %LE, %LB2
1526 // LB2:
1527 // %v2 =
1528 // br %LE, %LB1
1529 // LE:
1530 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1531 //
1532 // to
1533 //
1534 // LB1:
1535 // %v1 =
1536 // br %LE.split, %LB2
1537 // LB2:
1538 // %v2 =
1539 // br %LE.split2, %LB1
1540 // LE.split:
1541 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1542 // br %LE
1543 // LE.split2:
1544 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1545 // br %LE
1546 // LE:
1547 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1548 //
1549 const auto &BlockColors = SafetyInfo->getBlockColors();
1550 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(BB: ExitBB), pred_end(BB: ExitBB));
1551 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1552 while (!PredBBs.empty()) {
1553 BasicBlock *PredBB = *PredBBs.begin();
1554 assert(CurLoop->contains(PredBB) &&
1555 "Expect all predecessors are in the loop");
1556 if (PN->getBasicBlockIndex(BB: PredBB) >= 0) {
1557 BasicBlock *NewPred = SplitBlockPredecessors(
1558 BB: ExitBB, Preds: PredBB, Suffix: ".split.loop.exit", DTU: &DTU, LI, MSSAU, PreserveLCSSA: true);
1559 // Since we do not allow splitting EH-block with BlockColors in
1560 // canSplitPredecessors(), we can simply assign predecessor's color to
1561 // the new block.
1562 if (!BlockColors.empty())
1563 // Grab a reference to the ColorVector to be inserted before getting the
1564 // reference to the vector we are copying because inserting the new
1565 // element in BlockColors might cause the map to be reallocated.
1566 SafetyInfo->copyColors(New: NewPred, Old: PredBB);
1567 }
1568 PredBBs.remove(X: PredBB);
1569 }
1570}
1571
1572/// When an instruction is found to only be used outside of the loop, this
1573/// function moves it to the exit blocks and patches up SSA form as needed.
1574/// This method is guaranteed to remove the original instruction from its
1575/// position, and may either delete it or move it to outside of the loop.
1576///
1577static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1578 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1579 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE) {
1580 bool Changed = false;
1581 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1582
1583 // Iterate over users to be ready for actual sinking. Replace uses reached via
1584 // unreachable blocks with poison and make all user PHIs trivially replaceable.
1585 SmallPtrSet<Instruction *, 8> VisitedUsers;
1586 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1587 auto *User = cast<Instruction>(Val: *UI);
1588 Use &U = UI.getUse();
1589 ++UI;
1590
1591 if (VisitedUsers.count(Ptr: User) || CurLoop->contains(Inst: User))
1592 continue;
1593
1594 if (!DT->isReachableFromEntry(A: User->getParent())) {
1595 U = PoisonValue::get(T: I.getType());
1596 Changed = true;
1597 continue;
1598 }
1599
1600 // The user must be a PHI node.
1601 PHINode *PN = cast<PHINode>(Val: User);
1602
1603 // Surprisingly, instructions can be used outside of loops without any
1604 // exits. This can only happen in PHI nodes if the incoming block is
1605 // unreachable.
1606 BasicBlock *BB = PN->getIncomingBlock(U);
1607 if (!DT->isReachableFromEntry(A: BB)) {
1608 U = PoisonValue::get(T: I.getType());
1609 Changed = true;
1610 continue;
1611 }
1612
1613 VisitedUsers.insert(Ptr: PN);
1614 if (isTriviallyReplaceablePHI(PN: *PN, I))
1615 continue;
1616
1617 if (!canSplitPredecessors(PN, SafetyInfo))
1618 return Changed;
1619
1620 // Split predecessors of the PHI so that we can make users trivially
1621 // replaceable.
1622 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU: &MSSAU);
1623
1624 // Should rebuild the iterators, as they may be invalidated by
1625 // splitPredecessorsOfLoopExit().
1626 UI = I.user_begin();
1627 UE = I.user_end();
1628 }
1629
1630 if (VisitedUsers.empty())
1631 return Changed;
1632
1633 ORE->emit(RemarkBuilder: [&]() {
1634 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1635 << "sinking " << ore::NV("Inst", &I);
1636 });
1637 if (isa<LoadInst>(Val: I))
1638 ++NumMovedLoads;
1639 else if (isa<CallInst>(Val: I))
1640 ++NumMovedCalls;
1641 ++NumSunk;
1642
1643#ifndef NDEBUG
1644 SmallVector<BasicBlock *, 32> ExitBlocks;
1645 CurLoop->getUniqueExitBlocks(ExitBlocks);
1646 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1647#endif
1648
1649 // Clones of this instruction. Don't create more than one per exit block!
1650 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1651
1652 // If this instruction is only used outside of the loop, then all users are
1653 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1654 // the instruction.
1655 // First check if I is worth sinking for all uses. Sink only when it is
1656 // worthwhile across all uses.
1657 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1658 for (auto *UI : Users) {
1659 auto *User = cast<Instruction>(Val: UI);
1660
1661 if (CurLoop->contains(Inst: User))
1662 continue;
1663
1664 PHINode *PN = cast<PHINode>(Val: User);
1665 assert(ExitBlockSet.count(PN->getParent()) &&
1666 "The LCSSA PHI is not in an exit block!");
1667
1668 // The PHI must be trivially replaceable.
1669 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1670 TPN: PN, I: &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1671 // As we sink the instruction out of the BB, drop its debug location.
1672 New->dropLocation();
1673 PN->replaceAllUsesWith(V: New);
1674 eraseInstruction(I&: *PN, SafetyInfo&: *SafetyInfo, MSSAU);
1675 Changed = true;
1676 }
1677 return Changed;
1678}
1679
1680/// When an instruction is found to use only loop-invariant operands and it
1681/// is safe to hoist, this function is called to do the dirty work.
1682///
1683static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1684 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1685 MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
1686 OptimizationRemarkEmitter *ORE) {
1687 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1688 << I << "\n");
1689 ORE->emit(RemarkBuilder: [&]() {
1690 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1691 << ore::NV("Inst", &I);
1692 });
1693
1694 // Metadata can be dependent on conditions we are hoisting above.
1695 // Conservatively strip all metadata on the instruction unless we were
1696 // guaranteed to execute I if we entered the loop, in which case the metadata
1697 // is valid in the loop preheader.
1698 // Similarly, if I is a call and it is not guaranteed to execute in the loop,
1699 // then moving to the preheader means we should strip attributes on the call
1700 // that can cause UB since we may be hoisting above conditions that allowed
1701 // inferring those attributes. They may not be valid at the preheader.
1702 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(Val: I)) &&
1703 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1704 // time in isGuaranteedToExecute if we don't actually have anything to
1705 // drop. It is a compile time optimization, not required for correctness.
1706 !SafetyInfo->isGuaranteedToExecute(Inst: I, DT, CurLoop)) {
1707 I.dropUBImplyingAttrsAndMetadata();
1708 }
1709
1710 if (isa<PHINode>(Val: I))
1711 // Move the new node to the end of the phi list in the destination block.
1712 moveInstructionBefore(I, Dest: Dest->getFirstNonPHIIt(), SafetyInfo&: *SafetyInfo, MSSAU, SE);
1713 else
1714 // Move the new node to the destination block, before its terminator.
1715 moveInstructionBefore(I, Dest: Dest->getTerminator()->getIterator(), SafetyInfo&: *SafetyInfo,
1716 MSSAU, SE);
1717
1718 I.updateLocationAfterHoist();
1719
1720 if (isa<LoadInst>(Val: I))
1721 ++NumMovedLoads;
1722 else if (isa<CallInst>(Val: I))
1723 ++NumMovedCalls;
1724 ++NumHoisted;
1725}
1726
1727/// Only sink or hoist an instruction if it is not a trapping instruction,
1728/// if it is known not to trap when moved to the preheader,
1729/// or if it is a trapping instruction that is guaranteed to execute.
1730static bool isSafeToExecuteUnconditionally(
1731 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1732 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1733 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1734 AssumptionCache *AC, bool AllowSpeculation) {
1735 if (AllowSpeculation &&
1736 isSafeToSpeculativelyExecute(I: &Inst, CtxI, AC, DT, TLI))
1737 return true;
1738
1739 bool GuaranteedToExecute =
1740 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1741
1742 if (!GuaranteedToExecute) {
1743 auto *LI = dyn_cast<LoadInst>(Val: &Inst);
1744 if (LI && CurLoop->isLoopInvariant(V: LI->getPointerOperand()))
1745 ORE->emit(RemarkBuilder: [&]() {
1746 return OptimizationRemarkMissed(
1747 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1748 << "failed to hoist load with loop-invariant address "
1749 "because load is conditionally executed";
1750 });
1751 }
1752
1753 return GuaranteedToExecute;
1754}
1755
1756namespace {
1757class LoopPromoter : public LoadAndStorePromoter {
1758 Value *SomePtr; // Designated pointer to store to.
1759 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1760 SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1761 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1762 PredIteratorCache &PredCache;
1763 MemorySSAUpdater &MSSAU;
1764 LoopInfo &LI;
1765 DebugLoc DL;
1766 Align Alignment;
1767 bool UnorderedAtomic;
1768 AAMDNodes AATags;
1769 ICFLoopSafetyInfo &SafetyInfo;
1770 bool CanInsertStoresInExitBlocks;
1771 ArrayRef<const Instruction *> Uses;
1772
1773 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1774 // (if legal) if doing so would add an out-of-loop use to an instruction
1775 // defined in-loop.
1776 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1777 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, ExitBB: BB))
1778 return V;
1779
1780 Instruction *I = cast<Instruction>(Val: V);
1781 // We need to create an LCSSA PHI node for the incoming value and
1782 // store that.
1783 PHINode *PN = PHINode::Create(Ty: I->getType(), NumReservedValues: PredCache.size(BB),
1784 NameStr: I->getName() + ".lcssa");
1785 PN->insertBefore(InsertPos: BB->begin());
1786 for (BasicBlock *Pred : PredCache.get(BB))
1787 PN->addIncoming(V: I, BB: Pred);
1788 return PN;
1789 }
1790
1791public:
1792 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1793 SmallVectorImpl<BasicBlock *> &LEB,
1794 SmallVectorImpl<BasicBlock::iterator> &LIP,
1795 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1796 MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1797 Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1798 ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1799 : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1800 LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1801 LI(li), DL(std::move(dl)), Alignment(Alignment),
1802 UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1803 SafetyInfo(SafetyInfo),
1804 CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1805
1806 void insertStoresInLoopExitBlocks() {
1807 // Insert stores in the loop exit blocks. Each exit block gets a
1808 // store of the live-out value that feeds it. Since we've already told
1809 // the SSA updater about the defs in the loop and the preheader
1810 // definition, it is all set and we can start using it.
1811 DIAssignID *NewID = nullptr;
1812 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1813 BasicBlock *ExitBlock = LoopExitBlocks[i];
1814 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(BB: ExitBlock);
1815 LiveInValue = maybeInsertLCSSAPHI(V: LiveInValue, BB: ExitBlock);
1816 Value *Ptr = maybeInsertLCSSAPHI(V: SomePtr, BB: ExitBlock);
1817 BasicBlock::iterator InsertPos = LoopInsertPts[i];
1818 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1819 if (UnorderedAtomic)
1820 NewSI->setOrdering(AtomicOrdering::Unordered);
1821 NewSI->setAlignment(Alignment);
1822 NewSI->setDebugLoc(DL);
1823 // Attach DIAssignID metadata to the new store, generating it on the
1824 // first loop iteration.
1825 if (i == 0) {
1826 // NewSI will have its DIAssignID set here if there are any stores in
1827 // Uses with a DIAssignID attachment. This merged ID will then be
1828 // attached to the other inserted stores (in the branch below).
1829 NewSI->mergeDIAssignID(SourceInstructions: Uses);
1830 NewID = cast_or_null<DIAssignID>(
1831 Val: NewSI->getMetadata(KindID: LLVMContext::MD_DIAssignID));
1832 } else {
1833 // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1834 // above.
1835 NewSI->setMetadata(KindID: LLVMContext::MD_DIAssignID, Node: NewID);
1836 }
1837
1838 if (AATags)
1839 NewSI->setAAMetadata(AATags);
1840
1841 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1842 MemoryAccess *NewMemAcc;
1843 if (!MSSAInsertPoint) {
1844 NewMemAcc = MSSAU.createMemoryAccessInBB(
1845 I: NewSI, Definition: nullptr, BB: NewSI->getParent(), Point: MemorySSA::Beginning);
1846 } else {
1847 NewMemAcc =
1848 MSSAU.createMemoryAccessAfter(I: NewSI, Definition: nullptr, InsertPt: MSSAInsertPoint);
1849 }
1850 MSSAInsertPts[i] = NewMemAcc;
1851 MSSAU.insertDef(Def: cast<MemoryDef>(Val: NewMemAcc), RenameUses: true);
1852 // FIXME: true for safety, false may still be correct.
1853 }
1854 }
1855
1856 void doExtraRewritesBeforeFinalDeletion() override {
1857 if (CanInsertStoresInExitBlocks)
1858 insertStoresInLoopExitBlocks();
1859 }
1860
1861 void instructionDeleted(Instruction *I) const override {
1862 SafetyInfo.removeInstruction(Inst: I);
1863 MSSAU.removeMemoryAccess(I);
1864 }
1865
1866 bool shouldDelete(Instruction *I) const override {
1867 if (isa<StoreInst>(Val: I))
1868 return CanInsertStoresInExitBlocks;
1869 return true;
1870 }
1871};
1872
1873bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1874 DominatorTree *DT) {
1875 // We can perform the captured-before check against any instruction in the
1876 // loop header, as the loop header is reachable from any instruction inside
1877 // the loop.
1878 // TODO: ReturnCaptures=true shouldn't be necessary here.
1879 return capturesNothing(CC: PointerMayBeCapturedBefore(
1880 V, /*ReturnCaptures=*/true, I: L->getHeader()->getTerminator(), DT,
1881 /*IncludeI=*/false, Mask: CaptureComponents::Provenance));
1882}
1883
1884/// Return true if we can prove that a caller cannot inspect the object if an
1885/// unwind occurs inside the loop.
1886bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1887 DominatorTree *DT) {
1888 bool RequiresNoCaptureBeforeUnwind;
1889 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1890 return false;
1891
1892 return !RequiresNoCaptureBeforeUnwind ||
1893 isNotCapturedBeforeOrInLoop(V: Object, L, DT);
1894}
1895
1896bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
1897 TargetTransformInfo *TTI) {
1898 // The object must be function-local to start with, and then not captured
1899 // before/in the loop.
1900 return (isIdentifiedFunctionLocal(V: Object) &&
1901 isNotCapturedBeforeOrInLoop(V: Object, L, DT)) ||
1902 (TTI->isSingleThreaded() || SingleThread);
1903}
1904
1905} // namespace
1906
1907/// Try to promote memory values to scalars by sinking stores out of the
1908/// loop and moving loads to before the loop. We do this by looping over
1909/// the stores in the loop, looking for stores to must-aliased pointers which
1910/// are loop invariant.
1911///
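/// As a sketch of the transform (illustrative pseudocode, assuming the store
/// is guaranteed to execute):
///   for (...) { *p = *p + 1; }
/// becomes
///   tmp = *p; for (...) { tmp = tmp + 1; } *p = tmp;
/// with the load hoisted to the preheader and a store sunk to each exit block.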
1912bool llvm::promoteLoopAccessesToScalars(
1913 const SmallSetVector<Value *, 8> &PointerMustAliases,
1914 SmallVectorImpl<BasicBlock *> &ExitBlocks,
1915 SmallVectorImpl<BasicBlock::iterator> &InsertPts,
1916 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1917 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
1918 const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1919 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1920 OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1921 bool HasReadsOutsideSet) {
1922 // Verify inputs.
1923 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1924 SafetyInfo != nullptr &&
1925 "Unexpected Input to promoteLoopAccessesToScalars");
1926
1927 LLVM_DEBUG({
1928 dbgs() << "Trying to promote set of must-aliased pointers:\n";
1929 for (Value *Ptr : PointerMustAliases)
1930 dbgs() << " " << *Ptr << "\n";
1931 });
1932 ++NumPromotionCandidates;
1933
1934 Value *SomePtr = *PointerMustAliases.begin();
1935 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1936
1937 // It is not safe to promote a load/store from the loop if the load/store is
1938 // conditional. For example, turning:
1939 //
1940 // for () { if (c) *P += 1; }
1941 //
1942 // into:
1943 //
1944 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1945 //
1946 // is not safe, because *P may only be valid to access if 'c' is true.
1947 //
1948 // The safety property divides into two parts:
1949 // p1) The memory may not be dereferenceable on entry to the loop. In this
1950 // case, we can't insert the required load in the preheader.
1951 // p2) The memory model does not allow us to insert a store along any dynamic
1952 // path which did not originally have one.
1953 //
1954 // If at least one store is guaranteed to execute, both properties are
1955 // satisfied, and promotion is legal.
1956 //
1957 // This, however, is not a necessary condition. Even if no store/load is
1958 // guaranteed to execute, we can still establish these properties.
1959 // We can establish (p1) by proving that hoisting the load into the preheader
1960 // is safe (i.e. proving dereferenceability on all paths through the loop). We
1961 // can use any access within the alias set to prove dereferenceability,
1962 // since they're all must alias.
1963 //
1964 // There are two ways to establish (p2):
1965 // a) Prove the location is thread-local. In this case the memory model
1966 // requirement does not apply, and stores are safe to insert.
1967 // b) Prove a store dominates every exit block. In this case, if an exit
1968 // block is reached, the original dynamic path would have taken us through
1969 // the store, so inserting a store into the exit block is safe. Note that this
1970 // is different from the store being guaranteed to execute. For instance,
1971 // if an exception is thrown on the first iteration of the loop, the original
1972 // store is never executed, but the exit blocks are not executed either.
1973
1974 bool DereferenceableInPH = false;
1975 bool StoreIsGuaranteedToExecute = false;
1976 bool LoadIsGuaranteedToExecute = false;
1977 bool FoundLoadToPromote = false;
1978
1979 // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
1980 enum {
1981 StoreSafe,
1982 StoreUnsafe,
1983 StoreSafetyUnknown,
1984 } StoreSafety = StoreSafetyUnknown;
1985
1986 SmallVector<Instruction *, 64> LoopUses;
1987
1988 // We start with an alignment of one and try to find instructions that allow
1989 // us to prove better alignment.
1990 Align Alignment;
1991 // Keep track of which types of access we see
1992 bool SawUnorderedAtomic = false;
1993 bool SawNotAtomic = false;
1994 AAMDNodes AATags;
1995
1996 const DataLayout &MDL = Preheader->getDataLayout();
1997
1998 // If there are reads outside the promoted set, then promoting stores is
1999 // definitely not safe.
2000 if (HasReadsOutsideSet)
2001 StoreSafety = StoreUnsafe;
2002
2003 if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2004 // If a loop can throw, we have to insert a store along each unwind edge.
2005 // That said, we can't actually make the unwind edge explicit. Therefore,
2006 // we have to prove that the store is dead along the unwind edge. We do
2007 // this by proving that the caller can't have a reference to the object
2008 // after return and thus can't possibly load from the object.
2009 Value *Object = getUnderlyingObject(V: SomePtr);
2010 if (!isNotVisibleOnUnwindInLoop(Object, L: CurLoop, DT))
2011 StoreSafety = StoreUnsafe;
2012 }
2013
2014 // Check that all accesses to pointers in the alias set use the same type.
2015 // We cannot (yet) promote a memory location that is loaded and stored in
2016 // different sizes. While we are at it, collect alignment and AA info.
2017 Type *AccessTy = nullptr;
2018 for (Value *ASIV : PointerMustAliases) {
2019 for (Use &U : ASIV->uses()) {
2020 // Ignore instructions that are outside the loop.
2021 Instruction *UI = dyn_cast<Instruction>(Val: U.getUser());
2022 if (!UI || !CurLoop->contains(Inst: UI))
2023 continue;
2024
2025 // If there is a non-load/store instruction in the loop, we can't promote
2026 // it.
2027 if (LoadInst *Load = dyn_cast<LoadInst>(Val: UI)) {
2028 if (!Load->isUnordered())
2029 return false;
2030
2031 SawUnorderedAtomic |= Load->isAtomic();
2032 SawNotAtomic |= !Load->isAtomic();
2033 FoundLoadToPromote = true;
2034
2035 Align InstAlignment = Load->getAlign();
2036
2037 if (!LoadIsGuaranteedToExecute)
2038 LoadIsGuaranteedToExecute =
2039 SafetyInfo->isGuaranteedToExecute(Inst: *UI, DT, CurLoop);
2040
2041 // Note that proving a load safe to speculate requires proving
2042 // sufficient alignment at the target location. Proving it guaranteed
2043 // to execute does as well. Thus we can increase our guaranteed
2044 // alignment as well.
2045 if (!DereferenceableInPH || (InstAlignment > Alignment))
2046 if (isSafeToExecuteUnconditionally(
2047 Inst&: *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2048 CtxI: Preheader->getTerminator(), AC, AllowSpeculation)) {
2049 DereferenceableInPH = true;
2050 Alignment = std::max(a: Alignment, b: InstAlignment);
2051 }
2052 } else if (const StoreInst *Store = dyn_cast<StoreInst>(Val: UI)) {
2053 // Stores *of* the pointer are not interesting, only stores *to* the
2054 // pointer.
2055 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2056 continue;
2057 if (!Store->isUnordered())
2058 return false;
2059
2060 SawUnorderedAtomic |= Store->isAtomic();
2061 SawNotAtomic |= !Store->isAtomic();
2062
2063 // If the store is guaranteed to execute, both properties are satisfied.
2064 // We may want to check if a store is guaranteed to execute even if we
2065 // already know that promotion is safe, since it may have higher
2066 // alignment than any other guaranteed stores, in which case we can
2067 // raise the alignment on the promoted store.
2068 Align InstAlignment = Store->getAlign();
2069 bool GuaranteedToExecute =
2070 SafetyInfo->isGuaranteedToExecute(Inst: *UI, DT, CurLoop);
2071 StoreIsGuaranteedToExecute |= GuaranteedToExecute;
2072 if (GuaranteedToExecute) {
2073 DereferenceableInPH = true;
2074 if (StoreSafety == StoreSafetyUnknown)
2075 StoreSafety = StoreSafe;
2076 Alignment = std::max(a: Alignment, b: InstAlignment);
2077 }
2078
2079 // If a store dominates all exit blocks, it is safe to sink.
2080 // As explained above, if an exit block was executed, a dominating
2081 // store must have been executed at least once, so we are not
2082 // introducing stores on paths that did not have them.
2083 // Note that this only looks at explicit exit blocks. If we ever
2084 // start sinking stores into unwind edges (see above), this will break.
2085 if (StoreSafety == StoreSafetyUnknown &&
2086 llvm::all_of(Range&: ExitBlocks, P: [&](BasicBlock *Exit) {
2087 return DT->dominates(A: Store->getParent(), B: Exit);
2088 }))
2089 StoreSafety = StoreSafe;
2090
2091 // If the store is not guaranteed to execute, we may still get
2092 // deref info through it.
2093 if (!DereferenceableInPH) {
2094 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2095 V: Store->getPointerOperand(), Ty: Store->getValueOperand()->getType(),
2096 Alignment: Store->getAlign(), DL: MDL, CtxI: Preheader->getTerminator(), AC, DT, TLI);
2097 }
2098 } else
2099 continue; // Not a load or store.
2100
2101 if (!AccessTy)
2102 AccessTy = getLoadStoreType(I: UI);
2103 else if (AccessTy != getLoadStoreType(I: UI))
2104 return false;
2105
2106 // Merge the AA tags.
2107 if (LoopUses.empty()) {
2108 // On the first load/store, just take its AA tags.
2109 AATags = UI->getAAMetadata();
2110 } else if (AATags) {
2111 AATags = AATags.merge(Other: UI->getAAMetadata());
2112 }
2113
2114 LoopUses.push_back(Elt: UI);
2115 }
2116 }
2117
2118 // If we found both an unordered atomic instruction and a non-atomic memory
2119 // access, bail. We can't blindly promote non-atomic to atomic since we
2120 // might not be able to lower the result. We can't downgrade since that
2121 // would violate the memory model. Also, align 0 is an error for atomics.
2122 if (SawUnorderedAtomic && SawNotAtomic)
2123 return false;
2124
2125 // If we're inserting an atomic load in the preheader, we must be able to
2126 // lower it. We're only guaranteed to be able to lower naturally aligned
2127 // atomics.
2128 if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(Ty: AccessTy))
2129 return false;
2130
2131 // If we couldn't prove we can hoist the load, bail.
2132 if (!DereferenceableInPH) {
2133 LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2134 return false;
2135 }
2136
2137 // We know we can hoist the load, but don't have a guaranteed store.
2138 // Check whether the location is writable and thread-local. If it is, then we
2139 // can insert stores along paths which originally didn't have them without
2140 // violating the memory model.
2141 if (StoreSafety == StoreSafetyUnknown) {
2142 Value *Object = getUnderlyingObject(V: SomePtr);
2143 bool ExplicitlyDereferenceableOnly;
2144 if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2145 (!ExplicitlyDereferenceableOnly ||
2146 isDereferenceablePointer(V: SomePtr, Ty: AccessTy, DL: MDL)) &&
2147 isThreadLocalObject(Object, L: CurLoop, DT, TTI))
2148 StoreSafety = StoreSafe;
2149 }
2150
2151 // If we've still failed to prove we can sink the store, hoist the load
2152 // only, if possible.
2153 if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2154 // If we cannot hoist the load either, give up.
2155 return false;
2156
2157 // Let's do the promotion!
2158 if (StoreSafety == StoreSafe) {
2159 LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2160 << '\n');
2161 ++NumLoadStorePromoted;
2162 } else {
2163 LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2164 << '\n');
2165 ++NumLoadPromoted;
2166 }
2167
2168 ORE->emit(RemarkBuilder: [&]() {
2169 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2170 LoopUses[0])
2171 << "Moving accesses to memory location out of the loop";
2172 });
2173
2174 // Look at all the loop uses, and try to merge their locations.
2175 std::vector<DebugLoc> LoopUsesLocs;
2176 for (auto U : LoopUses)
2177 LoopUsesLocs.push_back(x: U->getDebugLoc());
2178 auto DL = DebugLoc::getMergedLocations(Locs: LoopUsesLocs);
2179
2180 // We use the SSAUpdater interface to insert phi nodes as required.
2181 SmallVector<PHINode *, 16> NewPHIs;
2182 SSAUpdater SSA(&NewPHIs);
2183 LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2184 MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2185 SawUnorderedAtomic,
2186 StoreIsGuaranteedToExecute ? AATags : AAMDNodes(),
2187 *SafetyInfo, StoreSafety == StoreSafe);
2188
2189 // Set up the preheader to have a definition of the value. It is the live-out
2190 // value from the preheader that uses in the loop will use.
2191 LoadInst *PreheaderLoad = nullptr;
2192 if (FoundLoadToPromote || !StoreIsGuaranteedToExecute) {
2193 PreheaderLoad =
2194 new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2195 Preheader->getTerminator()->getIterator());
2196 if (SawUnorderedAtomic)
2197 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2198 PreheaderLoad->setAlignment(Alignment);
2199 PreheaderLoad->setDebugLoc(DebugLoc::getDropped());
2200 if (AATags && LoadIsGuaranteedToExecute)
2201 PreheaderLoad->setAAMetadata(AATags);
2202
2203 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2204 I: PreheaderLoad, Definition: nullptr, BB: PreheaderLoad->getParent(), Point: MemorySSA::End);
2205 MemoryUse *NewMemUse = cast<MemoryUse>(Val: PreheaderLoadMemoryAccess);
2206 MSSAU.insertUse(Use: NewMemUse, /*RenameUses=*/true);
2207 SSA.AddAvailableValue(BB: Preheader, V: PreheaderLoad);
2208 } else {
2209 SSA.AddAvailableValue(BB: Preheader, V: PoisonValue::get(T: AccessTy));
2210 }
2211
2212 if (VerifyMemorySSA)
2213 MSSAU.getMemorySSA()->verifyMemorySSA();
2214 // Rewrite all the loads in the loop and remember all the definitions from
2215 // stores in the loop.
2216 Promoter.run(Insts: LoopUses);
2217
2218 if (VerifyMemorySSA)
2219 MSSAU.getMemorySSA()->verifyMemorySSA();
2220 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2221 if (PreheaderLoad && PreheaderLoad->use_empty())
2222 eraseInstruction(I&: *PreheaderLoad, SafetyInfo&: *SafetyInfo, MSSAU);
2223
2224 return true;
2225}
2226
2227static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2228 function_ref<void(Instruction *)> Fn) {
2229 for (const BasicBlock *BB : L->blocks())
2230 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2231 for (const auto &Access : *Accesses)
2232 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(Val: &Access))
2233 Fn(MUD->getMemoryInst());
2234}
2235
2236// The bool indicates whether there might be reads outside the set, in which
2237// case only loads may be promoted.
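// Each returned entry is a set of must-aliased, loop-invariant pointers that
// are stored to in the loop, together with the flag described above. Sets that
// alias a write outside the candidate accesses are discarded entirely.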
2238static SmallVector<PointersAndHasReadsOutsideSet, 0>
2239collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2240 BatchAAResults BatchAA(*AA);
2241 AliasSetTracker AST(BatchAA);
2242
2243 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2244 if (const auto *SI = dyn_cast<StoreInst>(Val: I)) {
2245 const Value *PtrOp = SI->getPointerOperand();
2246 return !isa<ConstantData>(Val: PtrOp) && L->isLoopInvariant(V: PtrOp);
2247 }
2248 if (const auto *LI = dyn_cast<LoadInst>(Val: I)) {
2249 const Value *PtrOp = LI->getPointerOperand();
2250 return !isa<ConstantData>(Val: PtrOp) && L->isLoopInvariant(V: PtrOp);
2251 }
2252 return false;
2253 };
2254
2255 // Populate AST with potentially promotable accesses.
2256 SmallPtrSet<Value *, 16> AttemptingPromotion;
2257 foreachMemoryAccess(MSSA, L, Fn: [&](Instruction *I) {
2258 if (IsPotentiallyPromotable(I)) {
2259 AttemptingPromotion.insert(Ptr: I);
2260 AST.add(I);
2261 }
2262 });
2263
2264 // We're only interested in must-alias sets that contain a mod.
2265 SmallVector<PointerIntPair<const AliasSet *, 1, bool>, 8> Sets;
2266 for (AliasSet &AS : AST)
2267 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2268 Sets.push_back(Elt: {&AS, false});
2269
2270 if (Sets.empty())
2271 return {}; // Nothing to promote...
2272
2273 // Discard any sets for which there is an aliasing non-promotable access.
2274 foreachMemoryAccess(MSSA, L, Fn: [&](Instruction *I) {
2275 if (AttemptingPromotion.contains(Ptr: I))
2276 return;
2277
2278 llvm::erase_if(C&: Sets, P: [&](PointerIntPair<const AliasSet *, 1, bool> &Pair) {
2279 ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(Inst: I, AA&: BatchAA);
2280 // Cannot promote if there are writes outside the set.
2281 if (isModSet(MRI: MR))
2282 return true;
2283 if (isRefSet(MRI: MR)) {
2284 // Remember reads outside the set.
2285 Pair.setInt(true);
2286 // If this is a mod-only set and there are reads outside the set,
2287 // we will not be able to promote, so bail out early.
2288 return !Pair.getPointer()->isRef();
2289 }
2290 return false;
2291 });
2292 });
2293
2294 SmallVector<std::pair<SmallSetVector<Value *, 8>, bool>, 0> Result;
2295 for (auto [Set, HasReadsOutsideSet] : Sets) {
2296 SmallSetVector<Value *, 8> PointerMustAliases;
2297 for (const auto &MemLoc : *Set)
2298 PointerMustAliases.insert(X: const_cast<Value *>(MemLoc.Ptr));
2299 Result.emplace_back(Args: std::move(PointerMustAliases), Args&: HasReadsOutsideSet);
2300 }
2301
2302 return Result;
2303}
2304
2305// For a given store instruction or writeonly call instruction, this function
2306// checks that there are no reads or writes that conflict with the memory
2307// access performed by the instruction.
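// For example (illustrative), a store to a loop-invariant pointer can only be
// hoisted or sunk if no other access in the loop may read or write that
// location; otherwise moving the store could change the values observed by
// those accesses.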
2308static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA,
2309 AAResults *AA, Loop *CurLoop,
2310 SinkAndHoistLICMFlags &Flags) {
2311 assert(isa<CallInst>(*I) || isa<StoreInst>(*I));
2312 // If there are more accesses than the Promotion cap, then give up as we're
2313 // not walking a list that long.
2314 if (Flags.tooManyMemoryAccesses())
2315 return false;
2316
2317 auto *IMD = MSSA->getMemoryAccess(I);
2318 BatchAAResults BAA(*AA);
2319 auto *Source = getClobberingMemoryAccess(MSSA&: *MSSA, BAA, Flags, MA: IMD);
2320 // Make sure there are no clobbers inside the loop.
2321 if (!MSSA->isLiveOnEntryDef(MA: Source) && CurLoop->contains(BB: Source->getBlock()))
2322 return false;
2323
2324 // If there are interfering Uses (i.e. their defining access is in the
2325 // loop), or ordered loads (stored as Defs!), don't move this store.
2326 // Could do better here, but this is conservatively correct.
2327 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
2328 // moving accesses. Can also extend to dominating uses.
2329 for (auto *BB : CurLoop->getBlocks()) {
2330 auto *Accesses = MSSA->getBlockAccesses(BB);
2331 if (!Accesses)
2332 continue;
2333 for (const auto &MA : *Accesses)
2334 if (const auto *MU = dyn_cast<MemoryUse>(Val: &MA)) {
2335 auto *MD = getClobberingMemoryAccess(MSSA&: *MSSA, BAA, Flags,
2336 MA: const_cast<MemoryUse *>(MU));
2337 if (!MSSA->isLiveOnEntryDef(MA: MD) && CurLoop->contains(BB: MD->getBlock()))
2338 return false;
2339 // Disable hoisting past potentially interfering loads. Optimized
2340 // Uses may point to an access outside the loop, as getClobbering
2341 // checks the previous iteration when walking the backedge.
2342 // FIXME: More precise: no Uses that alias I.
2343 if (!Flags.getIsSink() && !MSSA->dominates(A: IMD, B: MU))
2344 return false;
2345 } else if (const auto *MD = dyn_cast<MemoryDef>(Val: &MA)) {
2346 if (auto *LI = dyn_cast<LoadInst>(Val: MD->getMemoryInst())) {
2347 (void)LI; // Silence warning.
2348 assert(!LI->isUnordered() && "Unexpected unordered load");
2349 return false;
2350 }
2351 // Any call, while it may not be clobbering I, it may be a use.
2352 if (auto *CI = dyn_cast<CallInst>(Val: MD->getMemoryInst())) {
2353 // Check if the call may read from the memory location written
2354 // to by I. Check CI's attributes and arguments; the number of
2355 // such checks performed is limited above by NoOfMemAccTooLarge.
2356 if (auto *SI = dyn_cast<StoreInst>(Val: I)) {
2357 ModRefInfo MRI = BAA.getModRefInfo(I: CI, OptLoc: MemoryLocation::get(SI));
2358 if (isModOrRefSet(MRI))
2359 return false;
2360 } else {
2361 auto *SCI = cast<CallInst>(Val: I);
2362 // If the instruction we want to hoist is also a call
2363 // instruction, then we need not check mod/ref info with itself.
2364 if (SCI == CI)
2365 continue;
2366 ModRefInfo MRI = BAA.getModRefInfo(I: CI, Call2: SCI);
2367 if (isModOrRefSet(MRI))
2368 return false;
2369 }
2370 }
2371 }
2372 }
2373 return true;
2374}
2375
2376static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
2377 Loop *CurLoop, Instruction &I,
2378 SinkAndHoistLICMFlags &Flags,
2379 bool InvariantGroup) {
2380 // For hoisting, use the walker to determine safety
2381 if (!Flags.getIsSink()) {
2382 // If hoisting an invariant group load, we only need to check that there
2383 // is no store to the loaded pointer between the start of the loop
2384 // and the load (since all loaded values must be the same).
2385 
2386 // This holds in either of two cases:
2387 // 1) the clobbering memory access is outside the loop, or
2388 // 2) the earliest clobbering access is the MemoryPhi at the loop header
2389 // (i.e. the loaded memory is only merged at the loop header).
2390
2391 BatchAAResults BAA(MSSA->getAA());
2392 MemoryAccess *Source = getClobberingMemoryAccess(MSSA&: *MSSA, BAA, Flags, MA: MU);
2393 return !MSSA->isLiveOnEntryDef(MA: Source) &&
2394 CurLoop->contains(BB: Source->getBlock()) &&
2395 !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Val: Source));
2396 }
2397
2398 // For sinking, we'd need to check all Defs below this use. The getClobbering
2399 // call will look on the backedge of the loop, but will check aliasing with
2400 // the instructions on the previous iteration.
2401 // For example:
2402 // for (i ... )
2403 // load a[i]   ; MemoryUse(LiveOnEntry)
2404 // store a[i]  ; 1 = MemoryDef(2), where 2 = MemoryPhi for the loop.
2405 // i++;
2406 // The load sees no clobbering inside the loop, as the backedge alias check
2407 // does phi translation, and will check aliasing against store a[i-1].
2408 // However sinking the load outside the loop, below the store is incorrect.
2409
2410 // For now, only sink if there are no Defs in the loop, and the existing ones
2411 // precede the use and are in the same block.
2412 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2413 // needs PostDominatorTreeAnalysis.
2414 // FIXME: More precise: no Defs that alias this Use.
2415 if (Flags.tooManyMemoryAccesses())
2416 return true;
2417 for (auto *BB : CurLoop->getBlocks())
2418 if (pointerInvalidatedByBlock(BB&: *BB, MSSA&: *MSSA, MU&: *MU))
2419 return true;
2420 // When sinking, the source block may not be part of the loop so check it.
2421 if (!CurLoop->contains(Inst: &I))
2422 return pointerInvalidatedByBlock(BB&: *I.getParent(), MSSA&: *MSSA, MU&: *MU);
2423
2424 return false;
2425}
2426
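/// Return true if \p BB contains a MemoryDef that could invalidate \p MU,
/// i.e. any Def that is not in \p MU's block or does not locally dominate
/// \p MU. This is the conservative per-block check used when sinking.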
2427bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU) {
2428 if (const auto *Accesses = MSSA.getBlockDefs(BB: &BB))
2429 for (const auto &MA : *Accesses)
2430 if (const auto *MD = dyn_cast<MemoryDef>(Val: &MA))
2431 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(A: MD, B: &MU))
2432 return true;
2433 return false;
2434}
2435
2436/// Try to simplify things like (icmp A < INV_1 AND icmp A < INV_2) into
2437/// (icmp A < min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants
2438/// whose minimum can be computed outside of the loop, and A is not loop-invariant.
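/// For example (an illustrative sketch, not from a specific test):
///   %c1  = icmp slt i32 %iv, %inv1
///   %c2  = icmp slt i32 %iv, %inv2
///   %and = and i1 %c1, %c2
/// becomes a single compare of %iv against smin(%inv1, %inv2), where the smin
/// is computed in the preheader.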
2439static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2440 MemorySSAUpdater &MSSAU) {
2441 bool Inverse = false;
2442 using namespace PatternMatch;
2443 Value *Cond1, *Cond2;
2444 if (match(V: &I, P: m_LogicalOr(L: m_Value(V&: Cond1), R: m_Value(V&: Cond2)))) {
2445 Inverse = true;
2446 } else if (match(V: &I, P: m_LogicalAnd(L: m_Value(V&: Cond1), R: m_Value(V&: Cond2)))) {
2447 // Do nothing
2448 } else
2449 return false;
2450
2451 auto MatchICmpAgainstInvariant = [&](Value *C, CmpPredicate &P, Value *&LHS,
2452 Value *&RHS) {
2453 if (!match(V: C, P: m_OneUse(SubPattern: m_ICmp(Pred&: P, L: m_Value(V&: LHS), R: m_Value(V&: RHS)))))
2454 return false;
2455 if (!LHS->getType()->isIntegerTy())
2456 return false;
2457 if (!ICmpInst::isRelational(P))
2458 return false;
2459 if (L.isLoopInvariant(V: LHS)) {
2460 std::swap(a&: LHS, b&: RHS);
2461 P = ICmpInst::getSwappedPredicate(pred: P);
2462 }
2463 if (L.isLoopInvariant(V: LHS) || !L.isLoopInvariant(V: RHS))
2464 return false;
2465 if (Inverse)
2466 P = ICmpInst::getInversePredicate(pred: P);
2467 return true;
2468 };
2469 CmpPredicate P1, P2;
2470 Value *LHS1, *LHS2, *RHS1, *RHS2;
2471 if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2472 !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2473 return false;
2474 auto MatchingPred = CmpPredicate::getMatching(A: P1, B: P2);
2475 if (!MatchingPred || LHS1 != LHS2)
2476 return false;
2477
2478 // Everything is fine, we can do the transform.
2479 bool UseMin = ICmpInst::isLT(P: *MatchingPred) || ICmpInst::isLE(P: *MatchingPred);
2480 assert(
2481 (UseMin || ICmpInst::isGT(*MatchingPred) ||
2482 ICmpInst::isGE(*MatchingPred)) &&
2483 "Relational predicate is either less (or equal) or greater (or equal)!");
2484 Intrinsic::ID id = ICmpInst::isSigned(predicate: *MatchingPred)
2485 ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2486 : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2487 auto *Preheader = L.getLoopPreheader();
2488 assert(Preheader && "Loop is not in simplify form?");
2489 IRBuilder<> Builder(Preheader->getTerminator());
2490 // We are about to create a new guaranteed use for RHS2, which might not have
2491 // existed before (if it was the non-taken input of a logical and/or
2492 // instruction). If it could be poison, we need to freeze it. Note that LHS1
2493 // and RHS1 already had guaranteed uses, so they don't need this.
2494 if (isa<SelectInst>(Val: I))
2495 RHS2 = Builder.CreateFreeze(V: RHS2, Name: RHS2->getName() + ".fr");
2496 Value *NewRHS = Builder.CreateBinaryIntrinsic(
2497 ID: id, LHS: RHS1, RHS: RHS2, FMFSource: nullptr,
2498 Name: StringRef("invariant.") +
2499 (ICmpInst::isSigned(predicate: *MatchingPred) ? "s" : "u") +
2500 (UseMin ? "min" : "max"));
2501 Builder.SetInsertPoint(&I);
2502 ICmpInst::Predicate P = *MatchingPred;
2503 if (Inverse)
2504 P = ICmpInst::getInversePredicate(pred: P);
2505 Value *NewCond = Builder.CreateICmp(P, LHS: LHS1, RHS: NewRHS);
2506 NewCond->takeName(V: &I);
2507 I.replaceAllUsesWith(V: NewCond);
2508 eraseInstruction(I, SafetyInfo, MSSAU);
2509 Instruction &CondI1 = *cast<Instruction>(Val: Cond1);
2510 Instruction &CondI2 = *cast<Instruction>(Val: Cond2);
2511 salvageDebugInfo(I&: CondI1);
2512 salvageDebugInfo(I&: CondI2);
2513 eraseInstruction(I&: CondI1, SafetyInfo, MSSAU);
2514 eraseInstruction(I&: CondI2, SafetyInfo, MSSAU);
2515 return true;
2516}
2517
2518/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2519/// this allows hoisting the inner GEP.
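/// For example (illustrative):
///   %ptr = getelementptr i8, ptr %base, i64 %loop.variant.idx
///   %res = getelementptr i8, ptr %ptr, i64 %invariant.idx
/// is rewritten so the invariant index is applied to %base first; that GEP is
/// created in the preheader and only the variant index remains in the loop.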
2520static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2521 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2522 DominatorTree *DT) {
2523 auto *GEP = dyn_cast<GetElementPtrInst>(Val: &I);
2524 if (!GEP)
2525 return false;
2526
2527 // Do not try to hoist a constant GEP out of the loop via reassociation.
2528 // Constant GEPs can often be folded into addressing modes, and reassociating
2529 // them may inhibit CSE of a common base.
2530 if (GEP->hasAllConstantIndices())
2531 return false;
2532
2533 auto *Src = dyn_cast<GetElementPtrInst>(Val: GEP->getPointerOperand());
2534 if (!Src || !Src->hasOneUse() || !L.contains(Inst: Src))
2535 return false;
2536
2537 Value *SrcPtr = Src->getPointerOperand();
2538 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2539 if (!L.isLoopInvariant(V: SrcPtr) || !all_of(Range: GEP->indices(), P: LoopInvariant))
2540 return false;
2541
2542 // This can only happen if !AllowSpeculation, otherwise this would already be
2543 // handled.
2544 // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2545 // The flag exists to prevent metadata dropping, which is not relevant here.
2546 if (all_of(Range: Src->indices(), P: LoopInvariant))
2547 return false;
2548
2549 // The swapped GEPs are inbounds if both original GEPs are inbounds
2550 // and the sign of the offsets is the same. For simplicity, only
2551 // handle both offsets being non-negative.
2552 const DataLayout &DL = GEP->getDataLayout();
2553 auto NonNegative = [&](Value *V) {
2554 return isKnownNonNegative(V, SQ: SimplifyQuery(DL, DT, AC, GEP));
2555 };
2556 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2557 all_of(Range: Src->indices(), P: NonNegative) &&
2558 all_of(Range: GEP->indices(), P: NonNegative);
2559
2560 BasicBlock *Preheader = L.getLoopPreheader();
2561 IRBuilder<> Builder(Preheader->getTerminator());
2562 Value *NewSrc = Builder.CreateGEP(Ty: GEP->getSourceElementType(), Ptr: SrcPtr,
2563 IdxList: SmallVector<Value *>(GEP->indices()),
2564 Name: "invariant.gep", NW: IsInBounds);
2565 Builder.SetInsertPoint(GEP);
2566 Value *NewGEP = Builder.CreateGEP(Ty: Src->getSourceElementType(), Ptr: NewSrc,
2567 IdxList: SmallVector<Value *>(Src->indices()), Name: "gep",
2568 NW: IsInBounds);
2569 GEP->replaceAllUsesWith(V: NewGEP);
2570 eraseInstruction(I&: *GEP, SafetyInfo, MSSAU);
2571 salvageDebugInfo(I&: *Src);
2572 eraseInstruction(I&: *Src, SafetyInfo, MSSAU);
2573 return true;
2574}
2575
2576/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2577/// C1 and C2 are loop invariants and LV is a loop-variant.
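/// For example (illustrative, assuming the required no-overflow facts can be
/// proven):
///   %lhs = add nuw i32 %iv, %c1
///   %cmp = icmp ult i32 %lhs, %c2
/// becomes
///   %invariant.op = sub nuw i32 %c2, %c1     ; computed in the preheader
///   %cmp          = icmp ult i32 %iv, %invariant.op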
2578static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2579 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2580 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2581 AssumptionCache *AC, DominatorTree *DT) {
2582 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2583 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2584
2585 bool IsSigned = ICmpInst::isSigned(predicate: Pred);
2586
2587 // Try to represent VariantLHS as a sum of invariant and variant operands.
2588 using namespace PatternMatch;
2589 Value *VariantOp, *InvariantOp;
2590 if (IsSigned &&
2591 !match(V: VariantLHS, P: m_NSWAdd(L: m_Value(V&: VariantOp), R: m_Value(V&: InvariantOp))))
2592 return false;
2593 if (!IsSigned &&
2594 !match(V: VariantLHS, P: m_NUWAdd(L: m_Value(V&: VariantOp), R: m_Value(V&: InvariantOp))))
2595 return false;
2596
2597 // LHS itself is a loop-variant, try to represent it in the form:
2598 // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
2599 if (L.isLoopInvariant(V: VariantOp))
2600 std::swap(a&: VariantOp, b&: InvariantOp);
2601 if (L.isLoopInvariant(V: VariantOp) || !L.isLoopInvariant(V: InvariantOp))
2602 return false;
2603
2604 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2605 // freely move values from the left side of the inequality to the right side
2606 // (just as in normal linear arithmetic). Overflows make things much more
2607 // complicated, so we want to avoid them.
2608 auto &DL = L.getHeader()->getDataLayout();
2609 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2610 if (IsSigned && computeOverflowForSignedSub(LHS: InvariantRHS, RHS: InvariantOp, SQ) !=
2611 llvm::OverflowResult::NeverOverflows)
2612 return false;
2613 if (!IsSigned &&
2614 computeOverflowForUnsignedSub(LHS: InvariantRHS, RHS: InvariantOp, SQ) !=
2615 llvm::OverflowResult::NeverOverflows)
2616 return false;
2617 auto *Preheader = L.getLoopPreheader();
2618 assert(Preheader && "Loop is not in simplify form?");
2619 IRBuilder<> Builder(Preheader->getTerminator());
2620 Value *NewCmpOp =
2621 Builder.CreateSub(LHS: InvariantRHS, RHS: InvariantOp, Name: "invariant.op",
2622 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2623 ICmp.setPredicate(Pred);
2624 ICmp.setOperand(i_nocapture: 0, Val_nocapture: VariantOp);
2625 ICmp.setOperand(i_nocapture: 1, Val_nocapture: NewCmpOp);
2626
2627 Instruction &DeadI = cast<Instruction>(Val&: *VariantLHS);
2628 salvageDebugInfo(I&: DeadI);
2629 eraseInstruction(I&: DeadI, SafetyInfo, MSSAU);
2630 return true;
2631}
2632
2633/// Try to reassociate and hoist the following two patterns:
2634/// LV - C1 < C2 --> LV < C1 + C2,
2635/// C1 - LV < C2 --> LV > C1 - C2.
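/// For example (illustrative, assuming the required no-overflow facts can be
/// proven):
///   %lhs = sub nuw i32 %c1, %iv
///   %cmp = icmp ult i32 %lhs, %c2
/// becomes
///   %invariant.op = sub nuw i32 %c1, %c2     ; computed in the preheader
///   %cmp          = icmp ugt i32 %iv, %invariant.op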
2636static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
2637 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2638 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2639 AssumptionCache *AC, DominatorTree *DT) {
2640 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2641 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2642
2643 bool IsSigned = ICmpInst::isSigned(predicate: Pred);
2644
2645 // Try to represent VariantLHS as a difference of variant and invariant operands.
2646 using namespace PatternMatch;
2647 Value *VariantOp, *InvariantOp;
2648 if (IsSigned &&
2649 !match(V: VariantLHS, P: m_NSWSub(L: m_Value(V&: VariantOp), R: m_Value(V&: InvariantOp))))
2650 return false;
2651 if (!IsSigned &&
2652 !match(V: VariantLHS, P: m_NUWSub(L: m_Value(V&: VariantOp), R: m_Value(V&: InvariantOp))))
2653 return false;
2654
2655 bool VariantSubtracted = false;
2656 // LHS itself is a loop-variant, try to represent it in the form:
2657 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2658 // the variant operand goes with minus, we use a slightly different scheme.
2659 if (L.isLoopInvariant(V: VariantOp)) {
2660 std::swap(a&: VariantOp, b&: InvariantOp);
2661 VariantSubtracted = true;
2662 Pred = ICmpInst::getSwappedPredicate(pred: Pred);
2663 }
2664 if (L.isLoopInvariant(V: VariantOp) || !L.isLoopInvariant(V: InvariantOp))
2665 return false;
2666
2667 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2668 // freely move values from left side of inequality to right side (just as in
2669 // normal linear arithmetics). Overflows make things much more complicated, so
2670 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2671 // "C1 - C2" does not overflow.
2672 auto &DL = L.getHeader()->getDataLayout();
2673 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2674 if (VariantSubtracted && IsSigned) {
2675 // C1 - LV < C2 --> LV > C1 - C2
2676 if (computeOverflowForSignedSub(LHS: InvariantOp, RHS: InvariantRHS, SQ) !=
2677 llvm::OverflowResult::NeverOverflows)
2678 return false;
2679 } else if (VariantSubtracted && !IsSigned) {
2680 // C1 - LV < C2 --> LV > C1 - C2
2681 if (computeOverflowForUnsignedSub(LHS: InvariantOp, RHS: InvariantRHS, SQ) !=
2682 llvm::OverflowResult::NeverOverflows)
2683 return false;
2684 } else if (!VariantSubtracted && IsSigned) {
2685 // LV - C1 < C2 --> LV < C1 + C2
2686 if (computeOverflowForSignedAdd(LHS: InvariantOp, RHS: InvariantRHS, SQ) !=
2687 llvm::OverflowResult::NeverOverflows)
2688 return false;
2689 } else { // !VariantSubtracted && !IsSigned
2690 // LV - C1 < C2 --> LV < C1 + C2
2691 if (computeOverflowForUnsignedAdd(LHS: InvariantOp, RHS: InvariantRHS, SQ) !=
2692 llvm::OverflowResult::NeverOverflows)
2693 return false;
2694 }
2695 auto *Preheader = L.getLoopPreheader();
2696 assert(Preheader && "Loop is not in simplify form?");
2697 IRBuilder<> Builder(Preheader->getTerminator());
2698 Value *NewCmpOp =
2699 VariantSubtracted
2700 ? Builder.CreateSub(LHS: InvariantOp, RHS: InvariantRHS, Name: "invariant.op",
2701 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned)
2702 : Builder.CreateAdd(LHS: InvariantOp, RHS: InvariantRHS, Name: "invariant.op",
2703 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2704 ICmp.setPredicate(Pred);
2705 ICmp.setOperand(i_nocapture: 0, Val_nocapture: VariantOp);
2706 ICmp.setOperand(i_nocapture: 1, Val_nocapture: NewCmpOp);
2707
2708 Instruction &DeadI = cast<Instruction>(Val&: *VariantLHS);
2709 salvageDebugInfo(I&: DeadI);
2710 eraseInstruction(I&: DeadI, SafetyInfo, MSSAU);
2711 return true;
2712}
2713
2714/// Reassociate and hoist add/sub expressions.
2715static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2716 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2717 DominatorTree *DT) {
2718 using namespace PatternMatch;
2719 CmpPredicate Pred;
2720 Value *LHS, *RHS;
2721 if (!match(V: &I, P: m_ICmp(Pred, L: m_Value(V&: LHS), R: m_Value(V&: RHS))))
2722 return false;
2723
2724 // Put variant operand to LHS position.
2725 if (L.isLoopInvariant(V: LHS)) {
2726 std::swap(a&: LHS, b&: RHS);
2727 Pred = ICmpInst::getSwappedPredicate(pred: Pred);
2728 }
2729 // We want to delete the initial operation after reassociation, so only do it
2730 // if it has no other uses.
2731 if (L.isLoopInvariant(V: LHS) || !L.isLoopInvariant(V: RHS) || !LHS->hasOneUse())
2732 return false;
2733
2734 // TODO: We could go with smarter context, taking common dominator of all I's
2735 // users instead of I itself.
2736 if (hoistAdd(Pred, VariantLHS: LHS, InvariantRHS: RHS, ICmp&: cast<ICmpInst>(Val&: I), L, SafetyInfo, MSSAU, AC, DT))
2737 return true;
2738
2739 if (hoistSub(Pred, VariantLHS: LHS, InvariantRHS: RHS, ICmp&: cast<ICmpInst>(Val&: I), L, SafetyInfo, MSSAU, AC, DT))
2740 return true;
2741
2742 return false;
2743}
2744
2745static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
2746 unsigned FPOpcode) {
2747 if (I->getOpcode() == IntOpcode)
2748 return true;
2749 if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
2750 I->hasNoSignedZeros())
2751 return true;
2752 return false;
2753}
2754
2755/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2756/// A1, A2, ... and C are loop invariants into expressions like
2757/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2758/// invariant expressions. This functions returns true only if any hoisting has
2759/// actually occurred.
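///
/// For example (illustrative IR; %a1, %a2 and %c are loop-invariant, and the
/// preheader value names are chosen for exposition only):
///   %m1  = mul i32 %a1, %b1
///   %m2  = mul i32 %a2, %b2
///   %s   = add i32 %m1, %m2
///   %res = mul i32 %s, %c
/// becomes
///   preheader: %f1 = mul i32 %a1, %c
///              %f2 = mul i32 %a2, %c
///   loop:      %m1.reass = mul i32 %f1, %b1
///              %m2.reass = mul i32 %f2, %b2
///              %s        = add i32 %m1.reass, %m2.reass
/// and all uses of %res are replaced with %s.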
static bool hoistMulAddAssociation(Instruction &I, Loop &L,
                                   ICFLoopSafetyInfo &SafetyInfo,
                                   MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                                   DominatorTree *DT) {
  if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
    return false;
  Value *VariantOp = I.getOperand(0);
  Value *InvariantOp = I.getOperand(1);
  if (L.isLoopInvariant(VariantOp))
    std::swap(VariantOp, InvariantOp);
  if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
    return false;
  Value *Factor = InvariantOp;

  // First, we need to make sure we should do the transformation.
  SmallVector<Use *> Changes;
  SmallVector<BinaryOperator *> Adds;
  SmallVector<BinaryOperator *> Worklist;
  if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
    Worklist.push_back(VariantBinOp);
  while (!Worklist.empty()) {
    BinaryOperator *BO = Worklist.pop_back_val();
    if (!BO->hasOneUse())
      return false;
    if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
        isa<BinaryOperator>(BO->getOperand(0)) &&
        isa<BinaryOperator>(BO->getOperand(1))) {
      Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
      Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
      Adds.push_back(BO);
      continue;
    }
    if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
        L.isLoopInvariant(BO))
      return false;
    Use &U0 = BO->getOperandUse(0);
    Use &U1 = BO->getOperandUse(1);
    if (L.isLoopInvariant(U0))
      Changes.push_back(&U0);
    else if (L.isLoopInvariant(U1))
      Changes.push_back(&U1);
    else
      return false;
    unsigned Limit = I.getType()->isIntOrIntVectorTy()
                         ? IntAssociationUpperLimit
                         : FPAssociationUpperLimit;
    if (Changes.size() > Limit)
      return false;
  }
  if (Changes.empty())
    return false;

  // Drop the poison flags for any adds we looked through.
  if (I.getType()->isIntOrIntVectorTy()) {
    for (auto *Add : Adds)
      Add->dropPoisonGeneratingFlags();
  }

  // We know we should do it so let's do the transformation.
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  for (auto *U : Changes) {
    assert(L.isLoopInvariant(U->get()));
    auto *Ins = cast<BinaryOperator>(U->getUser());
    Value *Mul;
    if (I.getType()->isIntOrIntVectorTy()) {
      Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
      // Drop the poison flags on the original multiply.
      Ins->dropPoisonGeneratingFlags();
    } else
      Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");

    // Rewrite the reassociable instruction.
    unsigned OpIdx = U->getOperandNo();
    auto *LHS = OpIdx == 0 ? Mul : Ins->getOperand(0);
    auto *RHS = OpIdx == 1 ? Mul : Ins->getOperand(1);
    auto *NewBO =
        BinaryOperator::Create(Ins->getOpcode(), LHS, RHS,
                               Ins->getName() + ".reass", Ins->getIterator());
    NewBO->setDebugLoc(DebugLoc::getDropped());
    NewBO->copyIRFlags(Ins);
    if (VariantOp == Ins)
      VariantOp = NewBO;
    Ins->replaceAllUsesWith(NewBO);
    eraseInstruction(*Ins, SafetyInfo, MSSAU);
  }

  I.replaceAllUsesWith(VariantOp);
  eraseInstruction(I, SafetyInfo, MSSAU);
  return true;
}

/// Reassociate associative binary expressions of the form
///
/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
/// 2. "(C1 op LV) op C2" ==> "LV op (C1 op C2)"
/// 3. "C2 op (C1 op LV)" ==> "LV op (C1 op C2)"
/// 4. "C2 op (LV op C1)" ==> "LV op (C1 op C2)"
///
/// where op is an associative BinOp, LV is a loop variant, and C1 and C2 are
/// loop invariants that we want to hoist, noting that associativity implies
/// commutativity.
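///
/// For example (illustrative IR for pattern 1 with op == add; %inv1 and %inv2
/// are loop-invariant, %iv is loop-variant):
///   %t   = add i32 %iv, %inv1
///   %res = add i32 %t, %inv2
/// becomes
///   preheader: %invariant.op = add i32 %inv1, %inv2
///   loop:      %res.reass    = add i32 %iv, %invariant.op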
static bool hoistBOAssociation(Instruction &I, Loop &L,
                               ICFLoopSafetyInfo &SafetyInfo,
                               MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                               DominatorTree *DT) {
  auto *BO = dyn_cast<BinaryOperator>(&I);
  if (!BO || !BO->isAssociative())
    return false;

  Instruction::BinaryOps Opcode = BO->getOpcode();
  bool LVInRHS = L.isLoopInvariant(BO->getOperand(0));
  auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(LVInRHS));
  if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
      BO0->hasNUsesOrMore(BO0->getType()->isIntegerTy() ? 2 : 3))
    return false;

  Value *LV = BO0->getOperand(0);
  Value *C1 = BO0->getOperand(1);
  Value *C2 = BO->getOperand(!LVInRHS);

  assert(BO->isCommutative() && BO0->isCommutative() &&
         "Associativity implies commutativity");
  if (L.isLoopInvariant(LV) && !L.isLoopInvariant(C1))
    std::swap(LV, C1);
  if (L.isLoopInvariant(LV) || !L.isLoopInvariant(C1) || !L.isLoopInvariant(C2))
    return false;

  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");

  IRBuilder<> Builder(Preheader->getTerminator());
  auto *Inv = Builder.CreateBinOp(Opcode, C1, C2, "invariant.op");

  auto *NewBO = BinaryOperator::Create(
      Opcode, LV, Inv, BO->getName() + ".reass", BO->getIterator());
  NewBO->setDebugLoc(DebugLoc::getDropped());

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FMul) {
    // Intersect FMF flags for FADD and FMUL.
    FastMathFlags Intersect = BO->getFastMathFlags() & BO0->getFastMathFlags();
    if (auto *I = dyn_cast<Instruction>(Inv))
      I->setFastMathFlags(Intersect);
    NewBO->setFastMathFlags(Intersect);
  } else {
    OverflowTracking Flags;
    Flags.AllKnownNonNegative = false;
    Flags.AllKnownNonZero = false;
    Flags.mergeFlags(*BO);
    Flags.mergeFlags(*BO0);
    // If `Inv` was not constant-folded, a new Instruction has been created.
    if (auto *I = dyn_cast<Instruction>(Inv))
      Flags.applyFlags(*I);
    Flags.applyFlags(*NewBO);
  }

  BO->replaceAllUsesWith(NewBO);
  eraseInstruction(*BO, SafetyInfo, MSSAU);

  // (LV op C1) might not be erased if it has more uses than the one we just
  // replaced.
  if (BO0->use_empty()) {
    salvageDebugInfo(*BO0);
    eraseInstruction(*BO0, SafetyInfo, MSSAU);
  }

  return true;
}

static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT) {
  // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
  // into (x < min(INV1, INV2)), and hoisting the invariant part of this
  // expression out of the loop.
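  // E.g. (illustrative): "icmp ult i32 %x, %inv1" and "icmp ult i32 %x, %inv2"
  // combined by an "and" can become a single compare of %x against a umin of
  // %inv1 and %inv2 that is computed in the preheader.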
  if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
    ++NumHoisted;
    ++NumMinMaxHoisted;
    return true;
  }

  // Try to hoist GEPs by reassociation.
  if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumGEPsHoisted;
    return true;
  }

  // Try to hoist add/sub's by reassociation.
  if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumAddSubHoisted;
    return true;
  }

  bool IsInt = I.getType()->isIntOrIntVectorTy();
  if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    if (IsInt)
      ++NumIntAssociationsHoisted;
    else
      ++NumFPAssociationsHoisted;
    return true;
  }

  if (hoistBOAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumBOAssociationsHoisted;
    return true;
  }

  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}
