1//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
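//
// As an illustrative sketch (not the full set of idioms handled below), a
// loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = 0;
//
// can be replaced by a single memset covering the whole range, and
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i];
//
// by a single memcpy, provided the accesses are strided, every byte of the
// range is written, and nothing else in the loop reads or writes the
// affected memory.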
12//
// If compiling for code size, we avoid idiom recognition if the resulting
14// code could be larger than the code for the original loop. One way this could
15// happen is if the loop is not removable after idiom recognition due to the
16// presence of non-idiom instructions. The initial implementation of the
17// heuristics applies to idioms in multi-block loops.
18//
19//===----------------------------------------------------------------------===//
20//
21// TODO List:
22//
23// Future loop memory idioms to recognize: memcmp, etc.
24//
25// This could recognize common matrix multiplies and dot product idioms and
26// replace them with calls to BLAS (if linked in??).
27//
28//===----------------------------------------------------------------------===//
29
30#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
31#include "llvm/ADT/APInt.h"
32#include "llvm/ADT/ArrayRef.h"
33#include "llvm/ADT/DenseMap.h"
34#include "llvm/ADT/MapVector.h"
35#include "llvm/ADT/SetVector.h"
36#include "llvm/ADT/SmallPtrSet.h"
37#include "llvm/ADT/SmallVector.h"
38#include "llvm/ADT/Statistic.h"
39#include "llvm/ADT/StringRef.h"
40#include "llvm/Analysis/AliasAnalysis.h"
41#include "llvm/Analysis/CmpInstAnalysis.h"
42#include "llvm/Analysis/HashRecognize.h"
43#include "llvm/Analysis/LoopAccessAnalysis.h"
44#include "llvm/Analysis/LoopInfo.h"
45#include "llvm/Analysis/LoopPass.h"
46#include "llvm/Analysis/MemoryLocation.h"
47#include "llvm/Analysis/MemorySSA.h"
48#include "llvm/Analysis/MemorySSAUpdater.h"
49#include "llvm/Analysis/MustExecute.h"
50#include "llvm/Analysis/OptimizationRemarkEmitter.h"
51#include "llvm/Analysis/ScalarEvolution.h"
52#include "llvm/Analysis/ScalarEvolutionExpressions.h"
53#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
54#include "llvm/Analysis/TargetLibraryInfo.h"
55#include "llvm/Analysis/TargetTransformInfo.h"
56#include "llvm/Analysis/ValueTracking.h"
57#include "llvm/IR/BasicBlock.h"
58#include "llvm/IR/Constant.h"
59#include "llvm/IR/Constants.h"
60#include "llvm/IR/DataLayout.h"
61#include "llvm/IR/DebugLoc.h"
62#include "llvm/IR/DerivedTypes.h"
63#include "llvm/IR/Dominators.h"
64#include "llvm/IR/GlobalValue.h"
65#include "llvm/IR/GlobalVariable.h"
66#include "llvm/IR/IRBuilder.h"
67#include "llvm/IR/InstrTypes.h"
68#include "llvm/IR/Instruction.h"
69#include "llvm/IR/Instructions.h"
70#include "llvm/IR/IntrinsicInst.h"
71#include "llvm/IR/Intrinsics.h"
72#include "llvm/IR/LLVMContext.h"
73#include "llvm/IR/Module.h"
74#include "llvm/IR/PassManager.h"
75#include "llvm/IR/PatternMatch.h"
76#include "llvm/IR/ProfDataUtils.h"
77#include "llvm/IR/Type.h"
78#include "llvm/IR/User.h"
79#include "llvm/IR/Value.h"
80#include "llvm/IR/ValueHandle.h"
81#include "llvm/Support/Casting.h"
82#include "llvm/Support/CommandLine.h"
83#include "llvm/Support/Debug.h"
84#include "llvm/Support/InstructionCost.h"
85#include "llvm/Support/raw_ostream.h"
86#include "llvm/Transforms/Utils/BuildLibCalls.h"
87#include "llvm/Transforms/Utils/Local.h"
88#include "llvm/Transforms/Utils/LoopUtils.h"
89#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
90#include <algorithm>
91#include <cassert>
92#include <cstdint>
93#include <utility>
94
95using namespace llvm;
96using namespace SCEVPatternMatch;
97
98#define DEBUG_TYPE "loop-idiom"
99
100STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
101STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
102STATISTIC(NumMemMove, "Number of memmove's formed from loop load+stores");
103STATISTIC(NumStrLen, "Number of strlen's and wcslen's formed from loop loads");
104STATISTIC(
105 NumShiftUntilBitTest,
    "Number of uncountable loops recognized as 'shift until bit test' idiom");
107STATISTIC(NumShiftUntilZero,
108 "Number of uncountable loops recognized as 'shift until zero' idiom");
109
110namespace llvm {
111bool DisableLIRP::All;
112static cl::opt<bool, true>
113 DisableLIRPAll("disable-" DEBUG_TYPE "-all",
114 cl::desc("Options to disable Loop Idiom Recognize Pass."),
115 cl::location(L&: DisableLIRP::All), cl::init(Val: false),
116 cl::ReallyHidden);
117
118bool DisableLIRP::Memset;
119static cl::opt<bool, true>
120 DisableLIRPMemset("disable-" DEBUG_TYPE "-memset",
121 cl::desc("Proceed with loop idiom recognize pass, but do "
122 "not convert loop(s) to memset."),
123 cl::location(L&: DisableLIRP::Memset), cl::init(Val: false),
124 cl::ReallyHidden);
125
126bool DisableLIRP::Memcpy;
127static cl::opt<bool, true>
128 DisableLIRPMemcpy("disable-" DEBUG_TYPE "-memcpy",
129 cl::desc("Proceed with loop idiom recognize pass, but do "
130 "not convert loop(s) to memcpy."),
131 cl::location(L&: DisableLIRP::Memcpy), cl::init(Val: false),
132 cl::ReallyHidden);
133
134bool DisableLIRP::Strlen;
135static cl::opt<bool, true>
136 DisableLIRPStrlen("disable-loop-idiom-strlen",
137 cl::desc("Proceed with loop idiom recognize pass, but do "
138 "not convert loop(s) to strlen."),
139 cl::location(L&: DisableLIRP::Strlen), cl::init(Val: false),
140 cl::ReallyHidden);
141
142bool DisableLIRP::Wcslen;
static cl::opt<bool, true>
    DisableLIRPWcslen("disable-loop-idiom-wcslen",
                      cl::desc("Proceed with loop idiom recognize pass, but do "
                               "not convert loop(s) to wcslen."),
                      cl::location(DisableLIRP::Wcslen), cl::init(false),
                      cl::ReallyHidden);
149
150bool DisableLIRP::HashRecognize;
151static cl::opt<bool, true>
152 DisableLIRPHashRecognize("disable-" DEBUG_TYPE "-hashrecognize",
153 cl::desc("Proceed with loop idiom recognize pass, "
154 "but do not optimize CRC loops."),
155 cl::location(L&: DisableLIRP::HashRecognize),
156 cl::init(Val: false), cl::ReallyHidden);
157
158static cl::opt<bool> UseLIRCodeSizeHeurs(
159 "use-lir-code-size-heurs",
160 cl::desc("Use loop idiom recognition code size heuristics when compiling "
161 "with -Os/-Oz"),
162 cl::init(Val: true), cl::Hidden);
163
164static cl::opt<bool> ForceMemsetPatternIntrinsic(
165 "loop-idiom-force-memset-pattern-intrinsic",
166 cl::desc("Use memset.pattern intrinsic whenever possible"), cl::init(Val: false),
167 cl::Hidden);
168
169extern cl::opt<bool> ProfcheckDisableMetadataFixes;
170
171} // namespace llvm
172
173namespace {
174
175class LoopIdiomRecognize {
176 Loop *CurLoop = nullptr;
177 AliasAnalysis *AA;
178 DominatorTree *DT;
179 LoopInfo *LI;
180 ScalarEvolution *SE;
181 TargetLibraryInfo *TLI;
182 const TargetTransformInfo *TTI;
183 const DataLayout *DL;
184 OptimizationRemarkEmitter &ORE;
185 bool ApplyCodeSizeHeuristics;
186 std::unique_ptr<MemorySSAUpdater> MSSAU;
187
188public:
189 explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
190 LoopInfo *LI, ScalarEvolution *SE,
191 TargetLibraryInfo *TLI,
192 const TargetTransformInfo *TTI, MemorySSA *MSSA,
193 const DataLayout *DL,
194 OptimizationRemarkEmitter &ORE)
195 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) {
196 if (MSSA)
197 MSSAU = std::make_unique<MemorySSAUpdater>(args&: MSSA);
198 }
199
200 bool runOnLoop(Loop *L);
201
202private:
203 using StoreList = SmallVector<StoreInst *, 8>;
204 using StoreListMap = MapVector<Value *, StoreList>;
205
206 StoreListMap StoreRefsForMemset;
207 StoreListMap StoreRefsForMemsetPattern;
208 StoreList StoreRefsForMemcpy;
209 bool HasMemset;
210 bool HasMemsetPattern;
211 bool HasMemcpy;
212
213 /// Return code for isLegalStore()
214 enum LegalStoreKind {
215 None = 0,
216 Memset,
217 MemsetPattern,
218 Memcpy,
219 UnorderedAtomicMemcpy,
220 DontUse // Dummy retval never to be used. Allows catching errors in retval
221 // handling.
222 };
223
224 /// \name Countable Loop Idiom Handling
225 /// @{
226
227 bool runOnCountableLoop();
228 bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
229 SmallVectorImpl<BasicBlock *> &ExitBlocks);
230
231 void collectStores(BasicBlock *BB);
232 LegalStoreKind isLegalStore(StoreInst *SI);
233 enum class ForMemset { No, Yes };
234 bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
235 ForMemset For);
236
237 template <typename MemInst>
238 bool processLoopMemIntrinsic(
239 BasicBlock *BB,
240 bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *),
241 const SCEV *BECount);
242 bool processLoopMemCpy(MemCpyInst *MCI, const SCEV *BECount);
243 bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
244
245 bool processLoopStridedStore(Value *DestPtr, const SCEV *StoreSizeSCEV,
246 MaybeAlign StoreAlignment, Value *StoredVal,
247 Instruction *TheStore,
248 SmallPtrSetImpl<Instruction *> &Stores,
249 const SCEVAddRecExpr *Ev, const SCEV *BECount,
250 bool IsNegStride, bool IsLoopMemset = false);
251 bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
252 bool processLoopStoreOfLoopLoad(Value *DestPtr, Value *SourcePtr,
253 const SCEV *StoreSize, MaybeAlign StoreAlign,
254 MaybeAlign LoadAlign, Instruction *TheStore,
255 Instruction *TheLoad,
256 const SCEVAddRecExpr *StoreEv,
257 const SCEVAddRecExpr *LoadEv,
258 const SCEV *BECount);
259 bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
260 bool IsLoopMemset = false);
261 bool optimizeCRCLoop(const PolynomialInfo &Info);
262
263 /// @}
264 /// \name Noncountable Loop Idiom Handling
265 /// @{
266
267 bool runOnNoncountableLoop();
268
269 bool recognizePopcount();
270 void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
271 PHINode *CntPhi, Value *Var);
272 bool isProfitableToInsertFFS(Intrinsic::ID IntrinID, Value *InitX,
273 bool ZeroCheck, size_t CanonicalSize);
274 bool insertFFSIfProfitable(Intrinsic::ID IntrinID, Value *InitX,
275 Instruction *DefX, PHINode *CntPhi,
276 Instruction *CntInst);
277 bool recognizeAndInsertFFS(); /// Find First Set: ctlz or cttz
278 bool recognizeShiftUntilLessThan();
279 void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
280 Instruction *CntInst, PHINode *CntPhi,
281 Value *Var, Instruction *DefX,
282 const DebugLoc &DL, bool ZeroCheck,
283 bool IsCntPhiUsedOutsideLoop,
284 bool InsertSub = false);
285
286 bool recognizeShiftUntilBitTest();
287 bool recognizeShiftUntilZero();
288 bool recognizeAndInsertStrLen();
289
290 /// @}
291};
292} // end anonymous namespace
293
294PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
295 LoopStandardAnalysisResults &AR,
296 LPMUpdater &) {
297 if (DisableLIRP::All)
298 return PreservedAnalyses::all();
299
300 const auto *DL = &L.getHeader()->getDataLayout();
301
302 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
303 // pass. Function analyses need to be preserved across loop transformations
304 // but ORE cannot be preserved (see comment before the pass definition).
305 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
306
307 LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI,
308 AR.MSSA, DL, ORE);
309 if (!LIR.runOnLoop(L: &L))
310 return PreservedAnalyses::all();
311
312 auto PA = getLoopPassPreservedAnalyses();
313 if (AR.MSSA)
314 PA.preserve<MemorySSAAnalysis>();
315 return PA;
316}
317
318static void deleteDeadInstruction(Instruction *I) {
319 I->replaceAllUsesWith(V: PoisonValue::get(T: I->getType()));
320 I->eraseFromParent();
321}
322
323//===----------------------------------------------------------------------===//
324//
325// Implementation of LoopIdiomRecognize
326//
327//===----------------------------------------------------------------------===//
328
329bool LoopIdiomRecognize::runOnLoop(Loop *L) {
330 CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it; just give up.
333 if (!L->getLoopPreheader())
334 return false;
335
336 // Disable loop idiom recognition if the function's name is a common idiom.
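  // (Otherwise we could, e.g., recognize the loop inside a function named
  // "memset" and replace it with a call to memset, i.e. a call to itself.)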
337 StringRef Name = L->getHeader()->getParent()->getName();
338 if (Name == "memset" || Name == "memcpy" || Name == "strlen" ||
339 Name == "wcslen")
340 return false;
341
342 // Determine if code size heuristics need to be applied.
343 ApplyCodeSizeHeuristics =
344 L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;
345
346 HasMemset = TLI->has(F: LibFunc_memset);
347 // TODO: Unconditionally enable use of the memset pattern intrinsic (or at
348 // least, opt-in via target hook) once we are confident it will never result
  // in worse codegen than without. For now, use it only when the target
  // supports the memset_pattern16 libcall (or when this is overridden by the
  // command line option).
352 HasMemsetPattern = TLI->has(F: LibFunc_memset_pattern16);
353 HasMemcpy = TLI->has(F: LibFunc_memcpy);
354
355 if (HasMemset || HasMemsetPattern || ForceMemsetPatternIntrinsic ||
356 HasMemcpy || !DisableLIRP::HashRecognize)
357 if (SE->hasLoopInvariantBackedgeTakenCount(L))
358 return runOnCountableLoop();
359
360 return runOnNoncountableLoop();
361}
362
363bool LoopIdiomRecognize::runOnCountableLoop() {
364 const SCEV *BECount = SE->getBackedgeTakenCount(L: CurLoop);
365 assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
367 "backedge-taken count");
368
369 // If this loop executes exactly one time, then it should be peeled, not
370 // optimized by this pass.
371 if (BECount->isZero())
372 return false;
373
374 SmallVector<BasicBlock *, 8> ExitBlocks;
375 CurLoop->getUniqueExitBlocks(ExitBlocks);
376
377 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
378 << CurLoop->getHeader()->getParent()->getName()
379 << "] Countable Loop %" << CurLoop->getHeader()->getName()
380 << "\n");
381
382 // The following transforms hoist stores/memsets into the loop pre-header.
383 // Give up if the loop has instructions that may throw.
384 SimpleLoopSafetyInfo SafetyInfo;
385 SafetyInfo.computeLoopSafetyInfo(CurLoop);
386 if (SafetyInfo.anyBlockMayThrow())
387 return false;
388
389 bool MadeChange = false;
390
391 // Scan all the blocks in the loop that are not in subloops.
392 for (auto *BB : CurLoop->getBlocks()) {
393 // Ignore blocks in subloops.
394 if (LI->getLoopFor(BB) != CurLoop)
395 continue;
396
397 MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
398 }
399
400 // Optimize a CRC loop if HashRecognize found one, provided we're not
401 // optimizing for size.
402 if (!DisableLIRP::HashRecognize && !ApplyCodeSizeHeuristics)
403 if (auto Res = HashRecognize(*CurLoop, *SE).getResult())
404 optimizeCRCLoop(Info: *Res);
405
406 return MadeChange;
407}
408
409static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
410 const SCEVConstant *ConstStride = cast<SCEVConstant>(Val: StoreEv->getOperand(i: 1));
411 return ConstStride->getAPInt();
412}
413
/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset.pattern intrinsic, return the Constant that should
/// be passed in. Otherwise, return null.
///
/// TODO: this function could allow more constants than it does today (e.g.
/// those over 16 bytes) now that it has transitioned to being used for the
/// memset.pattern intrinsic rather than directly for the memset_pattern16
/// libcall.
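///
/// For example (a sketch of the rules below): a strided store of the i32
/// constant 0x01020304 is not a bytewise splat, so it cannot become a plain
/// memset, but it is an acceptable pattern value here.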
422static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
423 // FIXME: This could check for UndefValue because it can be merged into any
424 // other valid pattern.
425
426 // If the value isn't a constant, we can't promote it to being in a constant
427 // array. We could theoretically do a store to an alloca or something, but
428 // that doesn't seem worthwhile.
429 Constant *C = dyn_cast<Constant>(Val: V);
430 if (!C || isa<ConstantExpr>(Val: C))
431 return nullptr;
432
433 // Only handle simple values that are a power of two bytes in size.
434 uint64_t Size = DL->getTypeSizeInBits(Ty: V->getType());
435 if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
436 return nullptr;
437
438 // Don't care enough about darwin/ppc to implement this.
439 if (DL->isBigEndian())
440 return nullptr;
441
442 // Convert to size in bytes.
443 Size /= 8;
444
  // TODO: If C is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
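  // (Sketch: a 32-byte <8 x i32> splat constant has two identical 16-byte
  //  halves and could in principle be emitted as a 16-byte pattern.)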
447 if (Size > 16)
448 return nullptr;
449
450 // For now, don't handle types that aren't int, floats, or pointers.
451 Type *CTy = C->getType();
452 if (!CTy->isIntOrPtrTy() && !CTy->isFloatingPointTy())
453 return nullptr;
454
455 return C;
456}
457
458LoopIdiomRecognize::LegalStoreKind
459LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
460 // Don't touch volatile stores.
461 if (SI->isVolatile())
462 return LegalStoreKind::None;
463 // We only want simple or unordered-atomic stores.
464 if (!SI->isUnordered())
465 return LegalStoreKind::None;
466
467 // Avoid merging nontemporal stores.
468 if (SI->getMetadata(KindID: LLVMContext::MD_nontemporal))
469 return LegalStoreKind::None;
470
471 Value *StoredVal = SI->getValueOperand();
472 Value *StorePtr = SI->getPointerOperand();
473
  // Don't convert stores of non-integral pointer types to memsets (which store
  // integers).
476 if (DL->isNonIntegralPointerType(Ty: StoredVal->getType()->getScalarType()))
477 return LegalStoreKind::None;
478
479 // Reject stores that are so large that they overflow an unsigned.
480 // When storing out scalable vectors we bail out for now, since the code
481 // below currently only works for constant strides.
482 TypeSize SizeInBits = DL->getTypeSizeInBits(Ty: StoredVal->getType());
483 if (SizeInBits.isScalable() || (SizeInBits.getFixedValue() & 7) ||
484 (SizeInBits.getFixedValue() >> 32) != 0)
485 return LegalStoreKind::None;
486
487 // See if the pointer expression is an AddRec like {base,+,1} on the current
488 // loop, which indicates a strided store. If we have something else, it's a
489 // random store we can't handle.
490 const SCEV *StoreEv = SE->getSCEV(V: StorePtr);
491 const SCEVConstant *Stride;
492 if (!match(S: StoreEv, P: m_scev_AffineAddRec(Op0: m_SCEV(), Op1: m_SCEVConstant(V&: Stride),
493 L: m_SpecificLoop(L: CurLoop))))
494 return LegalStoreKind::None;
495
496 // See if the store can be turned into a memset.
497
498 // If the stored value is a byte-wise value (like i32 -1), then it may be
499 // turned into a memset of i8 -1, assuming that all the consecutive bytes
500 // are stored. A store of i32 0x01020304 can never be turned into a memset,
501 // but it can be turned into memset_pattern if the target supports it.
502 Value *SplatValue = isBytewiseValue(V: StoredVal, DL: *DL);
503
  // Note: memset and memset_pattern on unordered-atomic stores are not yet
  // supported.
505 bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
506
507 // If we're allowed to form a memset, and the stored value would be
508 // acceptable for memset, use it.
509 if (!UnorderedAtomic && HasMemset && SplatValue && !DisableLIRP::Memset &&
510 // Verify that the stored value is loop invariant. If not, we can't
511 // promote the memset.
512 CurLoop->isLoopInvariant(V: SplatValue)) {
513 // It looks like we can use SplatValue.
514 return LegalStoreKind::Memset;
515 }
516 if (!UnorderedAtomic && (HasMemsetPattern || ForceMemsetPatternIntrinsic) &&
517 !DisableLIRP::Memset &&
      // Don't create memset_pattern16s for pointers in non-zero address
      // spaces.
519 StorePtr->getType()->getPointerAddressSpace() == 0 &&
520 getMemSetPatternValue(V: StoredVal, DL)) {
521 // It looks like we can use PatternValue!
522 return LegalStoreKind::MemsetPattern;
523 }
524
525 // Otherwise, see if the store can be turned into a memcpy.
526 if (HasMemcpy && !DisableLIRP::Memcpy) {
527 // Check to see if the stride matches the size of the store. If so, then we
528 // know that every byte is touched in the loop.
529 unsigned StoreSize = DL->getTypeStoreSize(Ty: SI->getValueOperand()->getType());
530 APInt StrideAP = Stride->getAPInt();
531 if (StoreSize != StrideAP && StoreSize != -StrideAP)
532 return LegalStoreKind::None;
533
534 // The store must be feeding a non-volatile load.
535 LoadInst *LI = dyn_cast<LoadInst>(Val: SI->getValueOperand());
536
537 // Only allow non-volatile loads
538 if (!LI || LI->isVolatile())
539 return LegalStoreKind::None;
540 // Only allow simple or unordered-atomic loads
541 if (!LI->isUnordered())
542 return LegalStoreKind::None;
543
544 // See if the pointer expression is an AddRec like {base,+,1} on the current
545 // loop, which indicates a strided load. If we have something else, it's a
546 // random load we can't handle.
547 const SCEV *LoadEv = SE->getSCEV(V: LI->getPointerOperand());
548
549 // The store and load must share the same stride.
550 if (!match(S: LoadEv, P: m_scev_AffineAddRec(Op0: m_SCEV(), Op1: m_scev_Specific(S: Stride),
551 L: m_SpecificLoop(L: CurLoop))))
552 return LegalStoreKind::None;
553
554 // Success. This store can be converted into a memcpy.
555 UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
556 return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
557 : LegalStoreKind::Memcpy;
558 }
559 // This store can't be transformed into a memset/memcpy.
560 return LegalStoreKind::None;
561}
562
563void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
564 StoreRefsForMemset.clear();
565 StoreRefsForMemsetPattern.clear();
566 StoreRefsForMemcpy.clear();
567 for (Instruction &I : *BB) {
568 StoreInst *SI = dyn_cast<StoreInst>(Val: &I);
569 if (!SI)
570 continue;
571
572 // Make sure this is a strided store with a constant stride.
573 switch (isLegalStore(SI)) {
574 case LegalStoreKind::None:
575 // Nothing to do
576 break;
577 case LegalStoreKind::Memset: {
578 // Find the base pointer.
579 Value *Ptr = getUnderlyingObject(V: SI->getPointerOperand());
580 StoreRefsForMemset[Ptr].push_back(Elt: SI);
581 } break;
582 case LegalStoreKind::MemsetPattern: {
583 // Find the base pointer.
584 Value *Ptr = getUnderlyingObject(V: SI->getPointerOperand());
585 StoreRefsForMemsetPattern[Ptr].push_back(Elt: SI);
586 } break;
587 case LegalStoreKind::Memcpy:
588 case LegalStoreKind::UnorderedAtomicMemcpy:
589 StoreRefsForMemcpy.push_back(Elt: SI);
590 break;
591 default:
592 assert(false && "unhandled return value");
593 break;
594 }
595 }
596}
597
598/// runOnLoopBlock - Process the specified block, which lives in a counted loop
599/// with the specified backedge count. This block is known to be in the current
600/// loop and not in any subloops.
601bool LoopIdiomRecognize::runOnLoopBlock(
602 BasicBlock *BB, const SCEV *BECount,
603 SmallVectorImpl<BasicBlock *> &ExitBlocks) {
604 // We can only promote stores in this block if they are unconditionally
605 // executed in the loop. For a block to be unconditionally executed, it has
606 // to dominate all the exit blocks of the loop. Verify this now.
607 for (BasicBlock *ExitBlock : ExitBlocks)
608 if (!DT->dominates(A: BB, B: ExitBlock))
609 return false;
610
611 bool MadeChange = false;
612 // Look for store instructions, which may be optimized to memset/memcpy.
613 collectStores(BB);
614
615 // Look for a single store or sets of stores with a common base, which can be
616 // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
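  // A sketch of the hand-unrolled case:
  //
  //   for (i = 0; i < n; i += 2) {
  //     A[i] = 0;
  //     A[i + 1] = 0;
  //   }
  //
  // Here the two stores per iteration form a consecutive chain that together
  // touches every byte of the range.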
618 for (auto &SL : StoreRefsForMemset)
619 MadeChange |= processLoopStores(SL&: SL.second, BECount, For: ForMemset::Yes);
620
621 for (auto &SL : StoreRefsForMemsetPattern)
622 MadeChange |= processLoopStores(SL&: SL.second, BECount, For: ForMemset::No);
623
  // Optimize the store into a memcpy, if it feeds a similarly strided load.
625 for (auto &SI : StoreRefsForMemcpy)
626 MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);
627
628 MadeChange |= processLoopMemIntrinsic<MemCpyInst>(
629 BB, Processor: &LoopIdiomRecognize::processLoopMemCpy, BECount);
630 MadeChange |= processLoopMemIntrinsic<MemSetInst>(
631 BB, Processor: &LoopIdiomRecognize::processLoopMemSet, BECount);
632
633 return MadeChange;
634}
635
636/// See if this store(s) can be promoted to a memset.
637bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
638 const SCEV *BECount, ForMemset For) {
639 // Try to find consecutive stores that can be transformed into memsets.
640 SetVector<StoreInst *> Heads, Tails;
641 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
642
643 // Do a quadratic search on all of the given stores and find
644 // all of the pairs of stores that follow each other.
645 SmallVector<unsigned, 16> IndexQueue;
646 for (unsigned i = 0, e = SL.size(); i < e; ++i) {
647 assert(SL[i]->isSimple() && "Expected only non-volatile stores.");
648
649 Value *FirstStoredVal = SL[i]->getValueOperand();
650 Value *FirstStorePtr = SL[i]->getPointerOperand();
651 const SCEVAddRecExpr *FirstStoreEv =
652 cast<SCEVAddRecExpr>(Val: SE->getSCEV(V: FirstStorePtr));
653 APInt FirstStride = getStoreStride(StoreEv: FirstStoreEv);
654 unsigned FirstStoreSize = DL->getTypeStoreSize(Ty: SL[i]->getValueOperand()->getType());
655
656 // See if we can optimize just this store in isolation.
657 if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
658 Heads.insert(X: SL[i]);
659 continue;
660 }
661
662 Value *FirstSplatValue = nullptr;
663 Constant *FirstPatternValue = nullptr;
664
665 if (For == ForMemset::Yes)
666 FirstSplatValue = isBytewiseValue(V: FirstStoredVal, DL: *DL);
667 else
668 FirstPatternValue = getMemSetPatternValue(V: FirstStoredVal, DL);
669
670 assert((FirstSplatValue || FirstPatternValue) &&
671 "Expected either splat value or pattern value.");
672
673 IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 down to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance of finding a memset
    // opportunity.
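    // (Sketch: with e == 5 and i == 2, IndexQueue is visited as 3, 4, 1, 0.)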
678 unsigned j = 0;
679 for (j = i + 1; j < e; ++j)
680 IndexQueue.push_back(Elt: j);
681 for (j = i; j > 0; --j)
682 IndexQueue.push_back(Elt: j - 1);
683
684 for (auto &k : IndexQueue) {
685 assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
686 Value *SecondStorePtr = SL[k]->getPointerOperand();
687 const SCEVAddRecExpr *SecondStoreEv =
688 cast<SCEVAddRecExpr>(Val: SE->getSCEV(V: SecondStorePtr));
689 APInt SecondStride = getStoreStride(StoreEv: SecondStoreEv);
690
691 if (FirstStride != SecondStride)
692 continue;
693
694 Value *SecondStoredVal = SL[k]->getValueOperand();
695 Value *SecondSplatValue = nullptr;
696 Constant *SecondPatternValue = nullptr;
697
698 if (For == ForMemset::Yes)
699 SecondSplatValue = isBytewiseValue(V: SecondStoredVal, DL: *DL);
700 else
701 SecondPatternValue = getMemSetPatternValue(V: SecondStoredVal, DL);
702
703 assert((SecondSplatValue || SecondPatternValue) &&
704 "Expected either splat value or pattern value.");
705
706 if (isConsecutiveAccess(A: SL[i], B: SL[k], DL: *DL, SE&: *SE, CheckType: false)) {
707 if (For == ForMemset::Yes) {
708 if (isa<UndefValue>(Val: FirstSplatValue))
709 FirstSplatValue = SecondSplatValue;
710 if (FirstSplatValue != SecondSplatValue)
711 continue;
712 } else {
713 if (isa<UndefValue>(Val: FirstPatternValue))
714 FirstPatternValue = SecondPatternValue;
715 if (FirstPatternValue != SecondPatternValue)
716 continue;
717 }
718 Tails.insert(X: SL[k]);
719 Heads.insert(X: SL[i]);
720 ConsecutiveChain[SL[i]] = SL[k];
721 break;
722 }
723 }
724 }
725
726 // We may run into multiple chains that merge into a single chain. We mark the
727 // stores that we transformed so that we don't visit the same store twice.
728 SmallPtrSet<Value *, 16> TransformedStores;
729 bool Changed = false;
730
731 // For stores that start but don't end a link in the chain:
732 for (StoreInst *I : Heads) {
733 if (Tails.count(key: I))
734 continue;
735
736 // We found a store instr that starts a chain. Now follow the chain and try
737 // to transform it.
738 SmallPtrSet<Instruction *, 8> AdjacentStores;
739 StoreInst *HeadStore = I;
740 unsigned StoreSize = 0;
741
742 // Collect the chain into a list.
743 while (Tails.count(key: I) || Heads.count(key: I)) {
744 if (TransformedStores.count(Ptr: I))
745 break;
746 AdjacentStores.insert(Ptr: I);
747
748 StoreSize += DL->getTypeStoreSize(Ty: I->getValueOperand()->getType());
749 // Move to the next value in the chain.
750 I = ConsecutiveChain[I];
751 }
752
753 Value *StoredVal = HeadStore->getValueOperand();
754 Value *StorePtr = HeadStore->getPointerOperand();
755 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(Val: SE->getSCEV(V: StorePtr));
756 APInt Stride = getStoreStride(StoreEv);
757
758 // Check to see if the stride matches the size of the stores. If so, then
759 // we know that every byte is touched in the loop.
760 if (StoreSize != Stride && StoreSize != -Stride)
761 continue;
762
763 bool IsNegStride = StoreSize == -Stride;
764
765 Type *IntIdxTy = DL->getIndexType(PtrTy: StorePtr->getType());
766 const SCEV *StoreSizeSCEV = SE->getConstant(Ty: IntIdxTy, V: StoreSize);
767 if (processLoopStridedStore(DestPtr: StorePtr, StoreSizeSCEV,
768 StoreAlignment: MaybeAlign(HeadStore->getAlign()), StoredVal,
769 TheStore: HeadStore, Stores&: AdjacentStores, Ev: StoreEv, BECount,
770 IsNegStride)) {
771 TransformedStores.insert_range(R&: AdjacentStores);
772 Changed = true;
773 }
774 }
775
776 return Changed;
777}
778
779/// processLoopMemIntrinsic - Template function for calling different processor
780/// functions based on mem intrinsic type.
781template <typename MemInst>
782bool LoopIdiomRecognize::processLoopMemIntrinsic(
783 BasicBlock *BB,
784 bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *),
785 const SCEV *BECount) {
786 bool MadeChange = false;
787 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
788 Instruction *Inst = &*I++;
789 // Look for memory instructions, which may be optimized to a larger one.
790 if (MemInst *MI = dyn_cast<MemInst>(Inst)) {
791 WeakTrackingVH InstPtr(&*I);
792 if (!(this->*Processor)(MI, BECount))
793 continue;
794 MadeChange = true;
795
796 // If processing the instruction invalidated our iterator, start over from
797 // the top of the block.
798 if (!InstPtr)
799 I = BB->begin();
800 }
801 }
802 return MadeChange;
803}
804
805/// processLoopMemCpy - See if this memcpy can be promoted to a large memcpy
806bool LoopIdiomRecognize::processLoopMemCpy(MemCpyInst *MCI,
807 const SCEV *BECount) {
808 // We can only handle non-volatile memcpys with a constant size.
809 if (MCI->isVolatile() || !isa<ConstantInt>(Val: MCI->getLength()))
810 return false;
811
812 // If we're not allowed to hack on memcpy, we fail.
813 if ((!HasMemcpy && !MCI->isForceInlined()) || DisableLIRP::Memcpy)
814 return false;
815
816 Value *Dest = MCI->getDest();
817 Value *Source = MCI->getSource();
818 if (!Dest || !Source)
819 return false;
820
821 // See if the load and store pointer expressions are AddRec like {base,+,1} on
822 // the current loop, which indicates a strided load and store. If we have
823 // something else, it's a random load or store we can't handle.
824 const SCEV *StoreEv = SE->getSCEV(V: Dest);
825 const SCEV *LoadEv = SE->getSCEV(V: Source);
826 const APInt *StoreStrideValue, *LoadStrideValue;
827 if (!match(S: StoreEv,
828 P: m_scev_AffineAddRec(Op0: m_SCEV(), Op1: m_scev_APInt(C&: StoreStrideValue),
829 L: m_SpecificLoop(L: CurLoop))) ||
830 !match(S: LoadEv,
831 P: m_scev_AffineAddRec(Op0: m_SCEV(), Op1: m_scev_APInt(C&: LoadStrideValue),
832 L: m_SpecificLoop(L: CurLoop))))
833 return false;
834
835 // Reject memcpys that are so large that they overflow an unsigned.
836 uint64_t SizeInBytes = cast<ConstantInt>(Val: MCI->getLength())->getZExtValue();
837 if ((SizeInBytes >> 32) != 0)
838 return false;
839
840 // Huge stride value - give up
841 if (StoreStrideValue->getBitWidth() > 64 ||
842 LoadStrideValue->getBitWidth() > 64)
843 return false;
844
845 if (SizeInBytes != *StoreStrideValue && SizeInBytes != -*StoreStrideValue) {
846 ORE.emit(RemarkBuilder: [&]() {
847 return OptimizationRemarkMissed(DEBUG_TYPE, "SizeStrideUnequal", MCI)
848 << ore::NV("Inst", "memcpy") << " in "
849 << ore::NV("Function", MCI->getFunction())
850 << " function will not be hoisted: "
851 << ore::NV("Reason", "memcpy size is not equal to stride");
852 });
853 return false;
854 }
855
856 int64_t StoreStrideInt = StoreStrideValue->getSExtValue();
857 int64_t LoadStrideInt = LoadStrideValue->getSExtValue();
858 // Check if the load stride matches the store stride.
859 if (StoreStrideInt != LoadStrideInt)
860 return false;
861
862 return processLoopStoreOfLoopLoad(
863 DestPtr: Dest, SourcePtr: Source, StoreSize: SE->getConstant(Ty: Dest->getType(), V: SizeInBytes),
864 StoreAlign: MCI->getDestAlign(), LoadAlign: MCI->getSourceAlign(), TheStore: MCI, TheLoad: MCI,
865 StoreEv: cast<SCEVAddRecExpr>(Val: StoreEv), LoadEv: cast<SCEVAddRecExpr>(Val: LoadEv), BECount);
866}
867
868/// processLoopMemSet - See if this memset can be promoted to a large memset.
869bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
870 const SCEV *BECount) {
871 // We can only handle non-volatile memsets.
872 if (MSI->isVolatile())
873 return false;
874
875 // If we're not allowed to hack on memset, we fail.
876 if (!HasMemset || DisableLIRP::Memset)
877 return false;
878
879 Value *Pointer = MSI->getDest();
880
881 // See if the pointer expression is an AddRec like {base,+,1} on the current
882 // loop, which indicates a strided store. If we have something else, it's a
883 // random store we can't handle.
884 const SCEV *Ev = SE->getSCEV(V: Pointer);
885 const SCEV *PointerStrideSCEV;
886 if (!match(S: Ev, P: m_scev_AffineAddRec(Op0: m_SCEV(), Op1: m_SCEV(V&: PointerStrideSCEV),
887 L: m_SpecificLoop(L: CurLoop)))) {
888 LLVM_DEBUG(dbgs() << " Pointer is not affine, abort\n");
889 return false;
890 }
891
892 const SCEV *MemsetSizeSCEV = SE->getSCEV(V: MSI->getLength());
893
894 bool IsNegStride = false;
895 const bool IsConstantSize = isa<ConstantInt>(Val: MSI->getLength());
896
897 if (IsConstantSize) {
898 // Memset size is constant.
899 // Check if the pointer stride matches the memset size. If so, then
900 // we know that every byte is touched in the loop.
901 LLVM_DEBUG(dbgs() << " memset size is constant\n");
902 uint64_t SizeInBytes = cast<ConstantInt>(Val: MSI->getLength())->getZExtValue();
903 const APInt *Stride;
904 if (!match(S: PointerStrideSCEV, P: m_scev_APInt(C&: Stride)))
905 return false;
906
907 if (SizeInBytes != *Stride && SizeInBytes != -*Stride)
908 return false;
909
910 IsNegStride = SizeInBytes == -*Stride;
911 } else {
912 // Memset size is non-constant.
913 // Check if the pointer stride matches the memset size.
    // To be conservative, the pass does not promote pointers that aren't in
    // address space zero. Also, the pass only handles memset lengths and
    // strides that are invariant for the top-level loop.
917 LLVM_DEBUG(dbgs() << " memset size is non-constant\n");
918 if (Pointer->getType()->getPointerAddressSpace() != 0) {
919 LLVM_DEBUG(dbgs() << " pointer is not in address space zero, "
920 << "abort\n");
921 return false;
922 }
923 if (!SE->isLoopInvariant(S: MemsetSizeSCEV, L: CurLoop)) {
924 LLVM_DEBUG(dbgs() << " memset size is not a loop-invariant, "
925 << "abort\n");
926 return false;
927 }
928
929 // Compare positive direction PointerStrideSCEV with MemsetSizeSCEV
930 IsNegStride = PointerStrideSCEV->isNonConstantNegative();
931 const SCEV *PositiveStrideSCEV =
932 IsNegStride ? SE->getNegativeSCEV(V: PointerStrideSCEV)
933 : PointerStrideSCEV;
934 LLVM_DEBUG(dbgs() << " MemsetSizeSCEV: " << *MemsetSizeSCEV << "\n"
935 << " PositiveStrideSCEV: " << *PositiveStrideSCEV
936 << "\n");
937
938 if (PositiveStrideSCEV != MemsetSizeSCEV) {
939 // If an expression is covered by the loop guard, compare again and
940 // proceed with optimization if equal.
941 const SCEV *FoldedPositiveStride =
942 SE->applyLoopGuards(Expr: PositiveStrideSCEV, L: CurLoop);
943 const SCEV *FoldedMemsetSize =
944 SE->applyLoopGuards(Expr: MemsetSizeSCEV, L: CurLoop);
945
946 LLVM_DEBUG(dbgs() << " Try to fold SCEV based on loop guard\n"
947 << " FoldedMemsetSize: " << *FoldedMemsetSize << "\n"
948 << " FoldedPositiveStride: " << *FoldedPositiveStride
949 << "\n");
950
951 if (FoldedPositiveStride != FoldedMemsetSize) {
        LLVM_DEBUG(dbgs() << "  SCEVs don't match, abort\n");
953 return false;
954 }
955 }
956 }
957
958 // Verify that the memset value is loop invariant. If not, we can't promote
959 // the memset.
960 Value *SplatValue = MSI->getValue();
961 if (!SplatValue || !CurLoop->isLoopInvariant(V: SplatValue))
962 return false;
963
964 SmallPtrSet<Instruction *, 1> MSIs;
965 MSIs.insert(Ptr: MSI);
966 return processLoopStridedStore(DestPtr: Pointer, StoreSizeSCEV: SE->getSCEV(V: MSI->getLength()),
967 StoreAlignment: MSI->getDestAlign(), StoredVal: SplatValue, TheStore: MSI, Stores&: MSIs,
968 Ev: cast<SCEVAddRecExpr>(Val: Ev), BECount, IsNegStride,
969 /*IsLoopMemset=*/true);
970}
971
972/// mayLoopAccessLocation - Return true if the specified loop might access the
973/// specified pointer location, which is a loop-strided access. The 'Access'
974/// argument specifies what the verboten forms of access are (read or write).
975static bool
976mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
977 const SCEV *BECount, const SCEV *StoreSizeSCEV,
978 AliasAnalysis &AA,
979 SmallPtrSetImpl<Instruction *> &IgnoredInsts) {
980 // Get the location that may be stored across the loop. Since the access is
981 // strided positively through memory, we say that the modified location starts
982 // at the pointer and has infinite size.
983 LocationSize AccessSize = LocationSize::afterPointer();
984
985 // If the loop iterates a fixed number of times, we can refine the access size
986 // to be exactly the size of the memset, which is (BECount+1)*StoreSize
987 const APInt *BECst, *ConstSize;
988 if (match(S: BECount, P: m_scev_APInt(C&: BECst)) &&
989 match(S: StoreSizeSCEV, P: m_scev_APInt(C&: ConstSize))) {
990 std::optional<uint64_t> BEInt = BECst->tryZExtValue();
991 std::optional<uint64_t> SizeInt = ConstSize->tryZExtValue();
992 // FIXME: Should this check for overflow?
993 if (BEInt && SizeInt)
994 AccessSize = LocationSize::precise(Value: (*BEInt + 1) * *SizeInt);
995 }
996
  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] over 100 iterations will always
  // return may-alias with a store to &A[100]; we need StoreLoc to be "A" with
  // a size of 100, which will then no-alias a store to &A[100].
1001 MemoryLocation StoreLoc(Ptr, AccessSize);
1002
1003 for (BasicBlock *B : L->blocks())
1004 for (Instruction &I : *B)
1005 if (!IgnoredInsts.contains(Ptr: &I) &&
1006 isModOrRefSet(MRI: AA.getModRefInfo(I: &I, OptLoc: StoreLoc) & Access))
1007 return true;
1008 return false;
1009}
1010
1011// If we have a negative stride, Start refers to the end of the memory location
1012// we're trying to memset. Therefore, we need to recompute the base pointer,
1013// which is just Start - BECount*Size.
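//
// For instance (sketch): a loop that stores i32 elements downwards from
// &A[n-1] has Start == &A[n-1] and BECount == n-1, so the base pointer must
// be recomputed as &A[n-1] - (n-1)*4 == &A[0].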
1014static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
1015 Type *IntPtr, const SCEV *StoreSizeSCEV,
1016 ScalarEvolution *SE) {
1017 const SCEV *Index = SE->getTruncateOrZeroExtend(V: BECount, Ty: IntPtr);
1018 if (!StoreSizeSCEV->isOne()) {
1019 // index = back edge count * store size
1020 Index = SE->getMulExpr(LHS: Index,
1021 RHS: SE->getTruncateOrZeroExtend(V: StoreSizeSCEV, Ty: IntPtr),
1022 Flags: SCEV::FlagNUW);
1023 }
1024 // base pointer = start - index * store size
1025 return SE->getMinusSCEV(LHS: Start, RHS: Index);
1026}
1027
1028/// Compute the number of bytes as a SCEV from the backedge taken count.
1029///
1030/// This also maps the SCEV into the provided type and tries to handle the
1031/// computation in a way that will fold cleanly.
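///
/// For example (sketch): with a backedge-taken count of n-1 and a 4-byte
/// store size, this returns the SCEV for (n-1 + 1) * 4, i.e. 4*n bytes.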
1032static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
1033 const SCEV *StoreSizeSCEV, Loop *CurLoop,
1034 const DataLayout *DL, ScalarEvolution *SE) {
1035 const SCEV *TripCountSCEV =
1036 SE->getTripCountFromExitCount(ExitCount: BECount, EvalTy: IntPtr, L: CurLoop);
1037 return SE->getMulExpr(LHS: TripCountSCEV,
1038 RHS: SE->getTruncateOrZeroExtend(V: StoreSizeSCEV, Ty: IntPtr),
1039 Flags: SCEV::FlagNUW);
1040}
1041
1042/// processLoopStridedStore - We see a strided store of some value. If we can
1043/// transform this into a memset or memset_pattern in the loop preheader, do so.
1044bool LoopIdiomRecognize::processLoopStridedStore(
1045 Value *DestPtr, const SCEV *StoreSizeSCEV, MaybeAlign StoreAlignment,
1046 Value *StoredVal, Instruction *TheStore,
1047 SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
1048 const SCEV *BECount, bool IsNegStride, bool IsLoopMemset) {
1049 Module *M = TheStore->getModule();
1050
  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
1054 unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
1055 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1056 IRBuilder<> Builder(Preheader->getTerminator());
1057 SCEVExpander Expander(*SE, "loop-idiom");
1058 SCEVExpanderCleaner ExpCleaner(Expander);
1059
1060 Type *DestInt8PtrTy = Builder.getPtrTy(AddrSpace: DestAS);
1061 Type *IntIdxTy = DL->getIndexType(PtrTy: DestPtr->getType());
1062
1063 bool Changed = false;
1064 const SCEV *Start = Ev->getStart();
1065 // Handle negative strided loops.
1066 if (IsNegStride)
1067 Start = getStartForNegStride(Start, BECount, IntPtr: IntIdxTy, StoreSizeSCEV, SE);
1068
1069 // TODO: ideally we should still be able to generate memset if SCEV expander
1070 // is taught to generate the dependencies at the latest point.
1071 if (!Expander.isSafeToExpand(S: Start))
1072 return Changed;
1073
1074 // Okay, we have a strided store "p[i]" of a splattable value. We can turn
1075 // this into a memset in the loop preheader now if we want. However, this
1076 // would be unsafe to do if there is anything else in the loop that may read
1077 // or write to the aliased location. Check for any overlap by generating the
1078 // base pointer and checking the region.
1079 Value *BasePtr =
1080 Expander.expandCodeFor(SH: Start, Ty: DestInt8PtrTy, I: Preheader->getTerminator());
1081
1082 // From here on out, conservatively report to the pass manager that we've
1083 // changed the IR, even if we later clean up these added instructions. There
1084 // may be structural differences e.g. in the order of use lists not accounted
1085 // for in just a textual dump of the IR. This is written as a variable, even
1086 // though statically all the places this dominates could be replaced with
1087 // 'true', with the hope that anyone trying to be clever / "more precise" with
1088 // the return value will read this comment, and leave them alone.
1089 Changed = true;
1090
1091 if (mayLoopAccessLocation(Ptr: BasePtr, Access: ModRefInfo::ModRef, L: CurLoop, BECount,
1092 StoreSizeSCEV, AA&: *AA, IgnoredInsts&: Stores))
1093 return Changed;
1094
1095 if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
1096 return Changed;
1097
1098 // Okay, everything looks good, insert the memset.
1099 Value *SplatValue = isBytewiseValue(V: StoredVal, DL: *DL);
1100 Constant *PatternValue = nullptr;
1101 if (!SplatValue)
1102 PatternValue = getMemSetPatternValue(V: StoredVal, DL);
1103
1104 // MemsetArg is the number of bytes for the memset libcall, and the number
1105 // of pattern repetitions if the memset.pattern intrinsic is being used.
1106 Value *MemsetArg;
1107 std::optional<int64_t> BytesWritten;
1108
1109 if (PatternValue && (HasMemsetPattern || ForceMemsetPatternIntrinsic)) {
1110 const SCEV *TripCountS =
1111 SE->getTripCountFromExitCount(ExitCount: BECount, EvalTy: IntIdxTy, L: CurLoop);
1112 if (!Expander.isSafeToExpand(S: TripCountS))
1113 return Changed;
1114 const SCEVConstant *ConstStoreSize = dyn_cast<SCEVConstant>(Val: StoreSizeSCEV);
1115 if (!ConstStoreSize)
1116 return Changed;
1117 Value *TripCount = Expander.expandCodeFor(SH: TripCountS, Ty: IntIdxTy,
1118 I: Preheader->getTerminator());
1119 uint64_t PatternRepsPerTrip =
1120 (ConstStoreSize->getValue()->getZExtValue() * 8) /
1121 DL->getTypeSizeInBits(Ty: PatternValue->getType());
    // If ConstStoreSize is not equal to the width of PatternValue, then
    // MemsetArg is TripCount * (ConstStoreSize / PatternValueWidth). Else
    // MemsetArg is just TripCount.
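    // For example (sketch): two adjacent i32 stores of the same constant,
    // merged into an 8-byte chain with an i32 pattern value, give
    // PatternRepsPerTrip == 2, so MemsetArg becomes TripCount * 2.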
1125 MemsetArg =
1126 PatternRepsPerTrip == 1
1127 ? TripCount
1128 : Builder.CreateMul(LHS: TripCount,
1129 RHS: Builder.getIntN(N: IntIdxTy->getIntegerBitWidth(),
1130 C: PatternRepsPerTrip));
1131 if (auto *CI = dyn_cast<ConstantInt>(Val: TripCount))
1132 BytesWritten =
1133 CI->getZExtValue() * ConstStoreSize->getValue()->getZExtValue();
1134
1135 } else {
1136 const SCEV *NumBytesS =
1137 getNumBytes(BECount, IntPtr: IntIdxTy, StoreSizeSCEV, CurLoop, DL, SE);
1138
1139 // TODO: ideally we should still be able to generate memset if SCEV expander
1140 // is taught to generate the dependencies at the latest point.
1141 if (!Expander.isSafeToExpand(S: NumBytesS))
1142 return Changed;
1143 MemsetArg =
1144 Expander.expandCodeFor(SH: NumBytesS, Ty: IntIdxTy, I: Preheader->getTerminator());
1145 if (auto *CI = dyn_cast<ConstantInt>(Val: MemsetArg))
1146 BytesWritten = CI->getZExtValue();
1147 }
1148 assert(MemsetArg && "MemsetArg should have been set");
1149
1150 AAMDNodes AATags = TheStore->getAAMetadata();
1151 for (Instruction *Store : Stores)
1152 AATags = AATags.merge(Other: Store->getAAMetadata());
1153 if (BytesWritten)
1154 AATags = AATags.extendTo(Len: BytesWritten.value());
1155 else
1156 AATags = AATags.extendTo(Len: -1);
1157
1158 CallInst *NewCall;
1159 if (SplatValue) {
1160 NewCall = Builder.CreateMemSet(Ptr: BasePtr, Val: SplatValue, Size: MemsetArg,
1161 Align: MaybeAlign(StoreAlignment),
1162 /*isVolatile=*/false, AAInfo: AATags);
1163 } else if (ForceMemsetPatternIntrinsic ||
1164 isLibFuncEmittable(M, TLI, TheLibFunc: LibFunc_memset_pattern16)) {
1165 assert(isa<SCEVConstant>(StoreSizeSCEV) && "Expected constant store size");
1166
1167 NewCall = Builder.CreateIntrinsic(
1168 ID: Intrinsic::experimental_memset_pattern,
1169 Types: {DestInt8PtrTy, PatternValue->getType(), IntIdxTy},
1170 Args: {BasePtr, PatternValue, MemsetArg,
1171 ConstantInt::getFalse(Context&: M->getContext())});
1172 if (StoreAlignment)
1173 cast<MemSetPatternInst>(Val: NewCall)->setDestAlignment(*StoreAlignment);
1174 NewCall->setAAMetadata(AATags);
1175 } else {
1176 // Neither a memset, nor memset_pattern16
1177 return Changed;
1178 }
1179
1180 NewCall->setDebugLoc(TheStore->getDebugLoc());
1181
1182 if (MSSAU) {
1183 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1184 I: NewCall, Definition: nullptr, BB: NewCall->getParent(), Point: MemorySSA::BeforeTerminator);
1185 MSSAU->insertDef(Def: cast<MemoryDef>(Val: NewMemAcc), RenameUses: true);
1186 }
1187
1188 LLVM_DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
1189 << " from store to: " << *Ev << " at: " << *TheStore
1190 << "\n");
1191
1192 ORE.emit(RemarkBuilder: [&]() {
1193 OptimizationRemark R(DEBUG_TYPE, "ProcessLoopStridedStore",
1194 NewCall->getDebugLoc(), Preheader);
1195 R << "Transformed loop-strided store in "
1196 << ore::NV("Function", TheStore->getFunction())
1197 << " function into a call to "
1198 << ore::NV("NewFunction", NewCall->getCalledFunction())
1199 << "() intrinsic";
1200 if (!Stores.empty())
1201 R << ore::setExtraArgs();
1202 for (auto *I : Stores) {
1203 R << ore::NV("FromBlock", I->getParent()->getName())
1204 << ore::NV("ToBlock", Preheader->getName());
1205 }
1206 return R;
1207 });
1208
1209 // Okay, the memset has been formed. Zap the original store and anything that
1210 // feeds into it.
1211 for (auto *I : Stores) {
1212 if (MSSAU)
1213 MSSAU->removeMemoryAccess(I, OptimizePhis: true);
1214 deleteDeadInstruction(I);
1215 }
1216 if (MSSAU && VerifyMemorySSA)
1217 MSSAU->getMemorySSA()->verifyMemorySSA();
1218 ++NumMemSet;
1219 ExpCleaner.markResultUsed();
1220 return true;
1221}
1222
/// If the stored value is a strided load in the same loop with the same stride,
/// this may be transformable into a memcpy. This kicks in for stuff like
1225/// for (i) A[i] = B[i];
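/// If the source and destination are later found to overlap within the same
/// underlying object, a memmove may be emitted instead (see MemmoveVerifier
/// below).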
1226bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
1227 const SCEV *BECount) {
1228 assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");
1229
1230 Value *StorePtr = SI->getPointerOperand();
1231 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(Val: SE->getSCEV(V: StorePtr));
1232 unsigned StoreSize = DL->getTypeStoreSize(Ty: SI->getValueOperand()->getType());
1233
1234 // The store must be feeding a non-volatile load.
1235 LoadInst *LI = cast<LoadInst>(Val: SI->getValueOperand());
1236 assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");
1237
1238 // See if the pointer expression is an AddRec like {base,+,1} on the current
1239 // loop, which indicates a strided load. If we have something else, it's a
1240 // random load we can't handle.
1241 Value *LoadPtr = LI->getPointerOperand();
1242 const SCEVAddRecExpr *LoadEv = cast<SCEVAddRecExpr>(Val: SE->getSCEV(V: LoadPtr));
1243
1244 const SCEV *StoreSizeSCEV = SE->getConstant(Ty: StorePtr->getType(), V: StoreSize);
1245 return processLoopStoreOfLoopLoad(DestPtr: StorePtr, SourcePtr: LoadPtr, StoreSize: StoreSizeSCEV,
1246 StoreAlign: SI->getAlign(), LoadAlign: LI->getAlign(), TheStore: SI, TheLoad: LI,
1247 StoreEv, LoadEv, BECount);
1248}
1249
1250namespace {
1251class MemmoveVerifier {
1252public:
1253 explicit MemmoveVerifier(const Value &LoadBasePtr, const Value &StoreBasePtr,
1254 const DataLayout &DL)
1255 : DL(DL), BP1(llvm::GetPointerBaseWithConstantOffset(
1256 Ptr: LoadBasePtr.stripPointerCasts(), Offset&: LoadOff, DL)),
1257 BP2(llvm::GetPointerBaseWithConstantOffset(
1258 Ptr: StoreBasePtr.stripPointerCasts(), Offset&: StoreOff, DL)),
1259 IsSameObject(BP1 == BP2) {}
1260
1261 bool loadAndStoreMayFormMemmove(unsigned StoreSize, bool IsNegStride,
1262 const Instruction &TheLoad,
1263 bool IsMemCpy) const {
1264 if (IsMemCpy) {
1265 // Ensure that LoadBasePtr is after StoreBasePtr or before StoreBasePtr
1266 // for negative stride.
1267 if ((!IsNegStride && LoadOff <= StoreOff) ||
1268 (IsNegStride && LoadOff >= StoreOff))
1269 return false;
1270 } else {
1271 // Ensure that LoadBasePtr is after StoreBasePtr or before StoreBasePtr
1272 // for negative stride. LoadBasePtr shouldn't overlap with StoreBasePtr.
1273 int64_t LoadSize =
1274 DL.getTypeSizeInBits(Ty: TheLoad.getType()).getFixedValue() / 8;
1275 if (BP1 != BP2 || LoadSize != int64_t(StoreSize))
1276 return false;
1277 if ((!IsNegStride && LoadOff < StoreOff + int64_t(StoreSize)) ||
1278 (IsNegStride && LoadOff + LoadSize > StoreOff))
1279 return false;
1280 }
1281 return true;
1282 }
1283
1284private:
1285 const DataLayout &DL;
1286 int64_t LoadOff = 0;
1287 int64_t StoreOff = 0;
1288 const Value *BP1;
1289 const Value *BP2;
1290
1291public:
1292 const bool IsSameObject;
1293};
1294} // namespace
1295
1296bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
1297 Value *DestPtr, Value *SourcePtr, const SCEV *StoreSizeSCEV,
1298 MaybeAlign StoreAlign, MaybeAlign LoadAlign, Instruction *TheStore,
1299 Instruction *TheLoad, const SCEVAddRecExpr *StoreEv,
1300 const SCEVAddRecExpr *LoadEv, const SCEV *BECount) {
1301
1302 // FIXME: until llvm.memcpy.inline supports dynamic sizes, we need to
1303 // conservatively bail here, since otherwise we may have to transform
1304 // llvm.memcpy.inline into llvm.memcpy which is illegal.
1305 if (auto *MCI = dyn_cast<MemCpyInst>(Val: TheStore); MCI && MCI->isForceInlined())
1306 return false;
1307
  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
1311 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1312 IRBuilder<> Builder(Preheader->getTerminator());
1313 SCEVExpander Expander(*SE, "loop-idiom");
1314
1315 SCEVExpanderCleaner ExpCleaner(Expander);
1316
1317 bool Changed = false;
1318 const SCEV *StrStart = StoreEv->getStart();
1319 unsigned StrAS = DestPtr->getType()->getPointerAddressSpace();
1320 Type *IntIdxTy = Builder.getIntNTy(N: DL->getIndexSizeInBits(AS: StrAS));
1321
1322 APInt Stride = getStoreStride(StoreEv);
1323 const SCEVConstant *ConstStoreSize = dyn_cast<SCEVConstant>(Val: StoreSizeSCEV);
1324
1325 // TODO: Deal with non-constant size; Currently expect constant store size
1326 assert(ConstStoreSize && "store size is expected to be a constant");
1327
1328 int64_t StoreSize = ConstStoreSize->getValue()->getZExtValue();
1329 bool IsNegStride = StoreSize == -Stride;
1330
1331 // Handle negative strided loops.
1332 if (IsNegStride)
1333 StrStart =
1334 getStartForNegStride(Start: StrStart, BECount, IntPtr: IntIdxTy, StoreSizeSCEV, SE);
1335
1336 // Okay, we have a strided store "p[i]" of a loaded value. We can turn
1337 // this into a memcpy in the loop preheader now if we want. However, this
1338 // would be unsafe to do if there is anything else in the loop that may read
1339 // or write the memory region we're storing to. This includes the load that
1340 // feeds the stores. Check for an alias by generating the base address and
1341 // checking everything.
1342 Value *StoreBasePtr = Expander.expandCodeFor(
1343 SH: StrStart, Ty: Builder.getPtrTy(AddrSpace: StrAS), I: Preheader->getTerminator());
1344
1345 // From here on out, conservatively report to the pass manager that we've
1346 // changed the IR, even if we later clean up these added instructions. There
1347 // may be structural differences e.g. in the order of use lists not accounted
1348 // for in just a textual dump of the IR. This is written as a variable, even
1349 // though statically all the places this dominates could be replaced with
1350 // 'true', with the hope that anyone trying to be clever / "more precise" with
1351 // the return value will read this comment, and leave them alone.
1352 Changed = true;
1353
1354 SmallPtrSet<Instruction *, 2> IgnoredInsts;
1355 IgnoredInsts.insert(Ptr: TheStore);
1356
1357 bool IsMemCpy = isa<MemCpyInst>(Val: TheStore);
1358 const StringRef InstRemark = IsMemCpy ? "memcpy" : "load and store";
1359
1360 bool LoopAccessStore =
1361 mayLoopAccessLocation(Ptr: StoreBasePtr, Access: ModRefInfo::ModRef, L: CurLoop, BECount,
1362 StoreSizeSCEV, AA&: *AA, IgnoredInsts);
1363 if (LoopAccessStore) {
    // For the memmove case it's not enough to guarantee that the loop doesn't
    // access TheStore and TheLoad. Additionally, we need to make sure that
    // TheStore is the only user of TheLoad.
1367 if (!TheLoad->hasOneUse())
1368 return Changed;
1369 IgnoredInsts.insert(Ptr: TheLoad);
1370 if (mayLoopAccessLocation(Ptr: StoreBasePtr, Access: ModRefInfo::ModRef, L: CurLoop,
1371 BECount, StoreSizeSCEV, AA&: *AA, IgnoredInsts)) {
1372 ORE.emit(RemarkBuilder: [&]() {
1373 return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessStore",
1374 TheStore)
1375 << ore::NV("Inst", InstRemark) << " in "
1376 << ore::NV("Function", TheStore->getFunction())
1377 << " function will not be hoisted: "
1378 << ore::NV("Reason", "The loop may access store location");
1379 });
1380 return Changed;
1381 }
1382 IgnoredInsts.erase(Ptr: TheLoad);
1383 }
1384
1385 const SCEV *LdStart = LoadEv->getStart();
1386 unsigned LdAS = SourcePtr->getType()->getPointerAddressSpace();
1387
1388 // Handle negative strided loops.
1389 if (IsNegStride)
1390 LdStart =
1391 getStartForNegStride(Start: LdStart, BECount, IntPtr: IntIdxTy, StoreSizeSCEV, SE);
1392
1393 // For a memcpy, we have to make sure that the input array is not being
1394 // mutated by the loop.
1395 Value *LoadBasePtr = Expander.expandCodeFor(SH: LdStart, Ty: Builder.getPtrTy(AddrSpace: LdAS),
1396 I: Preheader->getTerminator());
1397
1398 // If the store is a memcpy instruction, we must check if it will write to
1399 // the load memory locations. So remove it from the ignored stores.
1400 MemmoveVerifier Verifier(*LoadBasePtr, *StoreBasePtr, *DL);
1401 if (IsMemCpy && !Verifier.IsSameObject)
1402 IgnoredInsts.erase(Ptr: TheStore);
1403 if (mayLoopAccessLocation(Ptr: LoadBasePtr, Access: ModRefInfo::Mod, L: CurLoop, BECount,
1404 StoreSizeSCEV, AA&: *AA, IgnoredInsts)) {
1405 ORE.emit(RemarkBuilder: [&]() {
1406 return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessLoad", TheLoad)
1407 << ore::NV("Inst", InstRemark) << " in "
1408 << ore::NV("Function", TheStore->getFunction())
1409 << " function will not be hoisted: "
1410 << ore::NV("Reason", "The loop may access load location");
1411 });
1412 return Changed;
1413 }
1414
1415 bool IsAtomic = TheStore->isAtomic() || TheLoad->isAtomic();
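  // Use memmove rather than memcpy whenever the source and destination may
  // overlap: either the original memcpy's operands were proven to be the same
  // underlying object, or the loop itself was seen to access the store
  // location.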
1416 bool UseMemMove = IsMemCpy ? Verifier.IsSameObject : LoopAccessStore;
1417
1418 if (IsAtomic) {
1419 // For now don't support unordered atomic memmove.
1420 if (UseMemMove)
1421 return Changed;
1422
1423 // We cannot allow unaligned ops for unordered load/store, so reject
1424 // anything where the alignment isn't at least the element size.
1425 assert((StoreAlign && LoadAlign) &&
1426 "Expect unordered load/store to have align.");
1427 if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
1428 return Changed;
1429
1430 // If the element.atomic memcpy is not lowered into explicit
1431 // loads/stores later, then it will be lowered into an element-size
1432 // specific lib call. If the lib call doesn't exist for our store size, then
1433 // we shouldn't generate the memcpy.
1434 if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
1435 return Changed;
1436 }
1437
1438 if (UseMemMove)
1439 if (!Verifier.loadAndStoreMayFormMemmove(StoreSize, IsNegStride, TheLoad: *TheLoad,
1440 IsMemCpy))
1441 return Changed;
1442
1443 if (avoidLIRForMultiBlockLoop())
1444 return Changed;
1445
1446 // Okay, everything is safe, we can transform this!
1447
1448 const SCEV *NumBytesS =
1449 getNumBytes(BECount, IntPtr: IntIdxTy, StoreSizeSCEV, CurLoop, DL, SE);
1450
1451 Value *NumBytes =
1452 Expander.expandCodeFor(SH: NumBytesS, Ty: IntIdxTy, I: Preheader->getTerminator());
1453
1454 AAMDNodes AATags = TheLoad->getAAMetadata();
1455 AAMDNodes StoreAATags = TheStore->getAAMetadata();
1456 AATags = AATags.merge(Other: StoreAATags);
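  // When the size is a known constant, extend the AA metadata to cover the
  // full [0, NumBytes) range; otherwise extend it with an unknown length.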
1457 if (auto CI = dyn_cast<ConstantInt>(Val: NumBytes))
1458 AATags = AATags.extendTo(Len: CI->getZExtValue());
1459 else
1460 AATags = AATags.extendTo(Len: -1);
1461
1462 CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store is atomic, then it must necessarily be unordered
  //  by the previous checks.
1466 if (!IsAtomic) {
1467 if (UseMemMove)
1468 NewCall = Builder.CreateMemMove(Dst: StoreBasePtr, DstAlign: StoreAlign, Src: LoadBasePtr,
1469 SrcAlign: LoadAlign, Size: NumBytes,
1470 /*isVolatile=*/false, AAInfo: AATags);
1471 else
1472 NewCall =
1473 Builder.CreateMemCpy(Dst: StoreBasePtr, DstAlign: StoreAlign, Src: LoadBasePtr, SrcAlign: LoadAlign,
1474 Size: NumBytes, /*isVolatile=*/false, AAInfo: AATags);
1475 } else {
1476 // Create the call.
1477 // Note that unordered atomic loads/stores are *required* by the spec to
1478 // have an alignment but non-atomic loads/stores may not.
1479 NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
1480 Dst: StoreBasePtr, DstAlign: *StoreAlign, Src: LoadBasePtr, SrcAlign: *LoadAlign, Size: NumBytes, ElementSize: StoreSize,
1481 AAInfo: AATags);
1482 }
1483 NewCall->setDebugLoc(TheStore->getDebugLoc());
1484
1485 if (MSSAU) {
1486 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1487 I: NewCall, Definition: nullptr, BB: NewCall->getParent(), Point: MemorySSA::BeforeTerminator);
1488 MSSAU->insertDef(Def: cast<MemoryDef>(Val: NewMemAcc), RenameUses: true);
1489 }
1490
1491 LLVM_DEBUG(dbgs() << " Formed new call: " << *NewCall << "\n"
1492 << " from load ptr=" << *LoadEv << " at: " << *TheLoad
1493 << "\n"
1494 << " from store ptr=" << *StoreEv << " at: " << *TheStore
1495 << "\n");
1496
1497 ORE.emit(RemarkBuilder: [&]() {
1498 return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad",
1499 NewCall->getDebugLoc(), Preheader)
1500 << "Formed a call to "
1501 << ore::NV("NewFunction", NewCall->getCalledFunction())
1502 << "() intrinsic from " << ore::NV("Inst", InstRemark)
1503 << " instruction in " << ore::NV("Function", TheStore->getFunction())
1504 << " function"
1505 << ore::setExtraArgs()
1506 << ore::NV("FromBlock", TheStore->getParent()->getName())
1507 << ore::NV("ToBlock", Preheader->getName());
1508 });
1509
1510 // Okay, a new call to memcpy/memmove has been formed. Zap the original store
1511 // and anything that feeds into it.
1512 if (MSSAU)
1513 MSSAU->removeMemoryAccess(I: TheStore, OptimizePhis: true);
1514 deleteDeadInstruction(I: TheStore);
1515 if (MSSAU && VerifyMemorySSA)
1516 MSSAU->getMemorySSA()->verifyMemorySSA();
1517 if (UseMemMove)
1518 ++NumMemMove;
1519 else
1520 ++NumMemCpy;
1521 ExpCleaner.markResultUsed();
1522 return true;
1523}
1524
1525// When compiling for codesize we avoid idiom recognition for a multi-block loop
1526// unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop.
1527//
1528bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
1529 bool IsLoopMemset) {
1530 if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
1531 if (CurLoop->isOutermost() && (!IsMemset || !IsLoopMemset)) {
1532 LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
1533 << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
1534 << " avoided: multi-block top-level loop\n");
1535 return true;
1536 }
1537 }
1538
1539 return false;
1540}
1541
1542bool LoopIdiomRecognize::optimizeCRCLoop(const PolynomialInfo &Info) {
1543 // FIXME: Hexagon has a special HexagonLoopIdiom that optimizes CRC using
1544 // carry-less multiplication instructions, which is more efficient than our
1545 // Sarwate table-lookup optimization. Hence, until we're able to emit
1546 // target-specific instructions for Hexagon, subsuming HexagonLoopIdiom,
1547 // disable the optimization for Hexagon.
1548 Module &M = *CurLoop->getHeader()->getModule();
1549 Triple TT(M.getTargetTriple());
1550 if (TT.getArch() == Triple::hexagon)
1551 return false;
1552
1553 // First, create a new GlobalVariable corresponding to the
1554 // Sarwate-lookup-table.
1555 Type *CRCTy = Info.LHS->getType();
1556 unsigned CRCBW = CRCTy->getIntegerBitWidth();
1557 std::array<Constant *, 256> CRCConstants;
1558 transform(Range: HashRecognize::genSarwateTable(GenPoly: Info.RHS, ByteOrderSwapped: Info.ByteOrderSwapped),
1559 d_first: CRCConstants.begin(),
1560 F: [CRCTy](const APInt &E) { return ConstantInt::get(Ty: CRCTy, V: E); });
1561 Constant *ConstArray =
1562 ConstantArray::get(T: ArrayType::get(ElementType: CRCTy, NumElements: 256), V: CRCConstants);
1563 GlobalVariable *GV =
1564 new GlobalVariable(M, ConstArray->getType(), true,
1565 GlobalValue::PrivateLinkage, ConstArray, ".crctable");
1566
1567 PHINode *IV = CurLoop->getCanonicalInductionVariable();
1568 SmallVector<PHINode *, 2> Cleanup;
1569
1570 // Next, mark all PHIs for removal except IV.
1571 {
1572 for (PHINode &PN : CurLoop->getHeader()->phis()) {
1573 if (&PN == IV)
1574 continue;
1575 PN.replaceAllUsesWith(V: PoisonValue::get(T: PN.getType()));
1576 Cleanup.push_back(Elt: &PN);
1577 }
1578 }
1579
1580 // Next, fix up the trip count.
1581 {
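    // The bit-at-a-time CRC loop ran Info.TripCount iterations; the
    // table-lookup loop consumes one byte per iteration, so its backedge-taken
    // count becomes (TripCount / 8) - 1.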
1582 unsigned NewBTC = (Info.TripCount / 8) - 1;
1583 BasicBlock *LoopBlk = CurLoop->getLoopLatch();
1584 BranchInst *BrInst = cast<BranchInst>(Val: LoopBlk->getTerminator());
1585 CmpPredicate ExitPred = BrInst->getSuccessor(i: 0) == LoopBlk
1586 ? ICmpInst::Predicate::ICMP_NE
1587 : ICmpInst::Predicate::ICMP_EQ;
1588 Instruction *ExitCond = CurLoop->getLatchCmpInst();
1589 Value *ExitLimit = ConstantInt::get(Ty: IV->getType(), V: NewBTC);
1590 IRBuilder<> Builder(ExitCond);
1591 Value *NewExitCond =
1592 Builder.CreateICmp(P: ExitPred, LHS: IV, RHS: ExitLimit, Name: "exit.cond");
1593 ExitCond->replaceAllUsesWith(V: NewExitCond);
1594 deleteDeadInstruction(I: ExitCond);
1595 }
1596
1597 // Finally, fill the loop with the Sarwate-table-lookup logic, and replace all
1598 // uses of ComputedValue.
1599 //
1600 // Little-endian:
1601 // crc = (crc >> 8) ^ tbl[(iv'th byte of data) ^ (bottom byte of crc)]
1602 // Big-Endian:
1603 // crc = (crc << 8) ^ tbl[(iv'th byte of data) ^ (top byte of crc)]
1604 {
1605 auto LoByte = [](IRBuilderBase &Builder, Value *Op, const Twine &Name) {
1606 return Builder.CreateZExtOrTrunc(
1607 V: Op, DestTy: IntegerType::getInt8Ty(C&: Op->getContext()), Name);
1608 };
1609 auto HiIdx = [LoByte, CRCBW](IRBuilderBase &Builder, Value *Op,
1610 const Twine &Name) {
1611 Type *OpTy = Op->getType();
1612
1613 // When the bitwidth of the CRC mismatches the Op's bitwidth, we need to
1614 // use the CRC's bitwidth as the reference for shifting right.
1615 return LoByte(Builder,
1616 CRCBW > 8 ? Builder.CreateLShr(
1617 LHS: Op, RHS: ConstantInt::get(Ty: OpTy, V: CRCBW - 8), Name)
1618 : Op,
1619 Name + ".lo.byte");
1620 };
1621
1622 IRBuilder<> Builder(CurLoop->getHeader(),
1623 CurLoop->getHeader()->getFirstNonPHIIt());
1624
1625 // Create the CRC PHI, and initialize its incoming value to the initial
1626 // value of CRC.
1627 PHINode *CRCPhi = Builder.CreatePHI(Ty: CRCTy, NumReservedValues: 2, Name: "crc");
1628 CRCPhi->addIncoming(V: Info.LHS, BB: CurLoop->getLoopPreheader());
1629
1630 // CRC is now an evolving variable, initialized to the PHI.
1631 Value *CRC = CRCPhi;
1632
1633 // TableIndexer = ((top|bottom) byte of CRC). It is XOR'ed with (iv'th byte
1634 // of LHSAux), if LHSAux is non-nullptr.
1635 Value *Indexer = CRC;
1636 if (Value *Data = Info.LHSAux) {
1637 Type *DataTy = Data->getType();
1638
      // To extract the iv'th byte of LHSAux, we multiply iv by 8 and shift the
      // data right by that amount, taking the low byte (in the little-endian
      // case), or shift left by that amount, taking the high byte (in the
      // big-endian case).
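      // For example, in the little-endian case with iv == 2, the data is
      // shifted right by 16 bits, so the low byte of the result is the third
      // byte of LHSAux.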
1643 Value *IVBits = Builder.CreateZExtOrTrunc(
1644 V: Builder.CreateShl(LHS: IV, RHS: 3, Name: "iv.bits"), DestTy: DataTy, Name: "iv.indexer");
1645 Value *DataIndexer =
1646 Info.ByteOrderSwapped
1647 ? Builder.CreateShl(LHS: Data, RHS: IVBits, Name: "data.indexer")
1648 : Builder.CreateLShr(LHS: Data, RHS: IVBits, Name: "data.indexer");
1649 Indexer = Builder.CreateXor(
1650 LHS: DataIndexer,
1651 RHS: Builder.CreateZExtOrTrunc(V: Indexer, DestTy: DataTy, Name: "crc.indexer.cast"),
1652 Name: "crc.data.indexer");
1653 }
1654
1655 Indexer = Info.ByteOrderSwapped ? HiIdx(Builder, Indexer, "indexer.hi")
1656 : LoByte(Builder, Indexer, "indexer.lo");
1657
1658 // Always index into a GEP using the index type.
1659 Indexer = Builder.CreateZExt(
1660 V: Indexer, DestTy: SE->getDataLayout().getIndexType(PtrTy: GV->getType()),
1661 Name: "indexer.ext");
1662
1663 // CRCTableLd = CRCTable[(iv'th byte of data) ^ (top|bottom) byte of CRC].
1664 Value *CRCTableGEP =
1665 Builder.CreateInBoundsGEP(Ty: CRCTy, Ptr: GV, IdxList: Indexer, Name: "tbl.ptradd");
1666 Value *CRCTableLd = Builder.CreateLoad(Ty: CRCTy, Ptr: CRCTableGEP, Name: "tbl.ld");
1667
1668 // CRCNext = (CRC (<<|>>) 8) ^ CRCTableLd, or simply CRCTableLd in case of
1669 // CRC-8.
1670 Value *CRCNext = CRCTableLd;
1671 if (CRCBW > 8) {
1672 Value *CRCShift = Info.ByteOrderSwapped
1673 ? Builder.CreateShl(LHS: CRC, RHS: 8, Name: "crc.be.shift")
1674 : Builder.CreateLShr(LHS: CRC, RHS: 8, Name: "crc.le.shift");
1675 CRCNext = Builder.CreateXor(LHS: CRCShift, RHS: CRCTableLd, Name: "crc.next");
1676 }
1677
1678 // Connect the back-edge for the loop, and RAUW the ComputedValue.
1679 CRCPhi->addIncoming(V: CRCNext, BB: CurLoop->getLoopLatch());
1680 Info.ComputedValue->replaceUsesOutsideBlock(V: CRCNext,
1681 BB: CurLoop->getLoopLatch());
1682 }
1683
1684 // Cleanup.
1685 {
1686 for (PHINode *PN : Cleanup)
1687 RecursivelyDeleteDeadPHINode(PN);
1688 SE->forgetLoop(L: CurLoop);
1689 }
1690 return true;
1691}
1692
1693bool LoopIdiomRecognize::runOnNoncountableLoop() {
1694 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
1695 << CurLoop->getHeader()->getParent()->getName()
1696 << "] Noncountable Loop %"
1697 << CurLoop->getHeader()->getName() << "\n");
1698
1699 return recognizePopcount() || recognizeAndInsertFFS() ||
1700 recognizeShiftUntilBitTest() || recognizeShiftUntilZero() ||
1701 recognizeShiftUntilLessThan() || recognizeAndInsertStrLen();
1702}
1703
1704/// Check if the given conditional branch is based on the comparison between
1705/// a variable and zero, and if the variable is non-zero or zero (JmpOnZero is
1706/// true), the control yields to the loop entry. If the branch matches the
1707/// behavior, the variable involved in the comparison is returned. This function
1708/// will be called to see if the precondition and postcondition of the loop are
1709/// in desirable form.
1710static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry,
1711 bool JmpOnZero = false) {
1712 if (!BI || !BI->isConditional())
1713 return nullptr;
1714
1715 ICmpInst *Cond = dyn_cast<ICmpInst>(Val: BI->getCondition());
1716 if (!Cond)
1717 return nullptr;
1718
1719 auto *CmpZero = dyn_cast<ConstantInt>(Val: Cond->getOperand(i_nocapture: 1));
1720 if (!CmpZero || !CmpZero->isZero())
1721 return nullptr;
1722
1723 BasicBlock *TrueSucc = BI->getSuccessor(i: 0);
1724 BasicBlock *FalseSucc = BI->getSuccessor(i: 1);
1725 if (JmpOnZero)
1726 std::swap(a&: TrueSucc, b&: FalseSucc);
1727
1728 ICmpInst::Predicate Pred = Cond->getPredicate();
1729 if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) ||
1730 (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry))
1731 return Cond->getOperand(i_nocapture: 0);
1732
1733 return nullptr;
1734}
1735
1736namespace {
1737
1738class StrlenVerifier {
1739public:
1740 explicit StrlenVerifier(const Loop *CurLoop, ScalarEvolution *SE,
1741 const TargetLibraryInfo *TLI)
1742 : CurLoop(CurLoop), SE(SE), TLI(TLI) {}
1743
1744 bool isValidStrlenIdiom() {
1745 // Give up if the loop has multiple blocks, multiple backedges, or
1746 // multiple exit blocks
1747 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1 ||
1748 !CurLoop->getUniqueExitBlock())
1749 return false;
1750
1751 // It should have a preheader and a branch instruction.
1752 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1753 if (!Preheader)
1754 return false;
1755
1756 BranchInst *EntryBI = dyn_cast<BranchInst>(Val: Preheader->getTerminator());
1757 if (!EntryBI)
1758 return false;
1759
    // The loop exit must be conditioned on an icmp against 0, the null
    // terminator. The icmp operand has to be a load through a pointer that is
    // incremented by one element on every iteration of the loop.
1763 BasicBlock *LoopBody = *CurLoop->block_begin();
1764
1765 // Skip if the body is too big as it most likely is not a strlen idiom.
1766 if (!LoopBody || LoopBody->size() >= 15)
1767 return false;
1768
1769 BranchInst *LoopTerm = dyn_cast<BranchInst>(Val: LoopBody->getTerminator());
1770 Value *LoopCond = matchCondition(BI: LoopTerm, LoopEntry: LoopBody);
1771 if (!LoopCond)
1772 return false;
1773
1774 LoadInst *LoopLoad = dyn_cast<LoadInst>(Val: LoopCond);
1775 if (!LoopLoad || LoopLoad->getPointerAddressSpace() != 0)
1776 return false;
1777
1778 OperandType = LoopLoad->getType();
1779 if (!OperandType || !OperandType->isIntegerTy())
1780 return false;
1781
    // See if the pointer expression is an AddRec with a constant step a, of the
    // form {n,+,a}, where a is the width of the char type.
1784 Value *IncPtr = LoopLoad->getPointerOperand();
1785 const SCEV *LoadEv = SE->getSCEV(V: IncPtr);
1786 const APInt *Step;
1787 if (!match(S: LoadEv,
1788 P: m_scev_AffineAddRec(Op0: m_SCEV(V&: LoadBaseEv), Op1: m_scev_APInt(C&: Step))))
1789 return false;
1790
1791 LLVM_DEBUG(dbgs() << "pointer load scev: " << *LoadEv << "\n");
1792
1793 unsigned StepSize = Step->getZExtValue();
1794
1795 // Verify that StepSize is consistent with platform char width.
1796 OpWidth = OperandType->getIntegerBitWidth();
1797 unsigned WcharSize = TLI->getWCharSize(M: *LoopLoad->getModule());
1798 if (OpWidth != StepSize * 8)
1799 return false;
1800 if (OpWidth != 8 && OpWidth != 16 && OpWidth != 32)
1801 return false;
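    // For wide-character loops the loaded element width must match the
    // target's wchar_t width; otherwise the wcslen call emitted later would
    // operate on the wrong element size.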
1802 if (OpWidth >= 16)
1803 if (OpWidth != WcharSize * 8)
1804 return false;
1805
1806 // Scan every instruction in the loop to ensure there are no side effects.
1807 for (Instruction &I : *LoopBody)
1808 if (I.mayHaveSideEffects())
1809 return false;
1810
1811 BasicBlock *LoopExitBB = CurLoop->getExitBlock();
1812 if (!LoopExitBB)
1813 return false;
1814
1815 for (PHINode &PN : LoopExitBB->phis()) {
1816 if (!SE->isSCEVable(Ty: PN.getType()))
1817 return false;
1818
1819 const SCEV *Ev = SE->getSCEV(V: &PN);
1820 if (!Ev)
1821 return false;
1822
1823 LLVM_DEBUG(dbgs() << "loop exit phi scev: " << *Ev << "\n");
1824
      // Since we verified that the loop computes a valid strlen idiom, we can
      // expand every lcssa phi with step {n,+,1} as (n + strlen) and use
      // SCEVExpander to materialize the loop output.
1828 const SCEVAddRecExpr *AddRecEv = dyn_cast<SCEVAddRecExpr>(Val: Ev);
1829 if (!AddRecEv || !AddRecEv->isAffine())
1830 return false;
1831
      // We only want an AddRecExpr with a constant recurrence step. This is
      // good enough for all the idioms we want to recognize. Later we expand and
      // materialize the recurrence as {base,+,a} -> (base + a * strlen).
1835 if (!isa<SCEVConstant>(Val: AddRecEv->getStepRecurrence(SE&: *SE)))
1836 return false;
1837 }
1838
1839 return true;
1840 }
1841
1842public:
1843 const Loop *CurLoop;
1844 ScalarEvolution *SE;
1845 const TargetLibraryInfo *TLI;
1846
1847 unsigned OpWidth;
1848 ConstantInt *StepSizeCI;
1849 const SCEV *LoadBaseEv;
1850 Type *OperandType;
1851};
1852
1853} // namespace
1854
1855/// The Strlen Idiom we are trying to detect has the following structure
1856///
1857/// preheader:
1858/// ...
1859/// br label %body, ...
1860///
1861/// body:
1862/// ... ; %0 is incremented by a gep
1863/// %1 = load i8, ptr %0, align 1
1864/// %2 = icmp eq i8 %1, 0
1865/// br i1 %2, label %exit, label %body
1866///
1867/// exit:
1868/// %lcssa = phi [%0, %body], ...
1869///
1870/// We expect the strlen idiom to have a load of a character type that
1871/// is compared against '\0', and such load pointer operand must have scev
1872/// expression of the form {%str,+,c} where c is a ConstantInt of the
1873/// appropiate character width for the idiom, and %str is the base of the string
1874/// And, that all lcssa phis have the form {...,+,n} where n is a constant,
1875///
1876/// When transforming the output of the strlen idiom, the lccsa phi are
1877/// expanded using SCEVExpander as {base scev,+,a} -> (base scev + a * strlen)
1878/// and all subsequent uses are replaced. For example,
1879///
1880/// \code{.c}
1881/// const char* base = str;
1882/// while (*str != '\0')
1883/// ++str;
1884/// size_t result = str - base;
1885/// \endcode
1886///
1887/// will be transformed as follows: The idiom will be replaced by a strlen
1888/// computation to compute the address of the null terminator of the string.
1889///
1890/// \code{.c}
1891/// const char* base = str;
1892/// const char* end = base + strlen(str);
1893/// size_t result = end - base;
1894/// \endcode
1895///
1896/// In the case we index by an induction variable, as long as the induction
1897/// variable has a constant int increment, we can replace all such indvars
1898/// with the closed form computation of strlen
1899///
1900/// \code{.c}
1901/// size_t i = 0;
1902/// while (str[i] != '\0')
1903/// ++i;
1904/// size_t result = i;
1905/// \endcode
1906///
1907/// Will be replaced by
1908///
1909/// \code{.c}
1910/// size_t i = 0 + strlen(str);
1911/// size_t result = i;
1912/// \endcode
1913///
1914bool LoopIdiomRecognize::recognizeAndInsertStrLen() {
1915 if (DisableLIRP::All)
1916 return false;
1917
1918 StrlenVerifier Verifier(CurLoop, SE, TLI);
1919
1920 if (!Verifier.isValidStrlenIdiom())
1921 return false;
1922
1923 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1924 BasicBlock *LoopBody = *CurLoop->block_begin();
1925 BasicBlock *LoopExitBB = CurLoop->getExitBlock();
1926 BranchInst *LoopTerm = dyn_cast<BranchInst>(Val: LoopBody->getTerminator());
1927 assert(Preheader && LoopBody && LoopExitBB && LoopTerm &&
1928 "Should be verified to be valid by StrlenVerifier");
1929
1930 if (Verifier.OpWidth == 8) {
1931 if (DisableLIRP::Strlen)
1932 return false;
1933 if (!isLibFuncEmittable(M: Preheader->getModule(), TLI, TheLibFunc: LibFunc_strlen))
1934 return false;
1935 } else {
1936 if (DisableLIRP::Wcslen)
1937 return false;
1938 if (!isLibFuncEmittable(M: Preheader->getModule(), TLI, TheLibFunc: LibFunc_wcslen))
1939 return false;
1940 }
1941
1942 IRBuilder<> Builder(Preheader->getTerminator());
1943 Builder.SetCurrentDebugLocation(CurLoop->getStartLoc());
1944 SCEVExpander Expander(*SE, "strlen_idiom");
  Value *MaterializedBase = Expander.expandCodeFor(
1946 SH: Verifier.LoadBaseEv, Ty: Verifier.LoadBaseEv->getType(),
1947 I: Builder.GetInsertPoint());
1948
1949 Value *StrLenFunc = nullptr;
1950 if (Verifier.OpWidth == 8) {
    StrLenFunc = emitStrLen(Ptr: MaterializedBase, B&: Builder, DL: *DL, TLI);
1952 } else {
    StrLenFunc = emitWcsLen(Ptr: MaterializedBase, B&: Builder, DL: *DL, TLI);
1954 }
1955 assert(StrLenFunc && "Failed to emit strlen function.");
1956
1957 const SCEV *StrlenEv = SE->getSCEV(V: StrLenFunc);
1958 SmallVector<PHINode *, 4> Cleanup;
1959 for (PHINode &PN : LoopExitBB->phis()) {
    // We can now materialize the loop output as all phis have SCEV {base,+,a}.
1961 // We expand the phi as:
1962 // %strlen = call i64 @strlen(%str)
1963 // %phi.new = base expression + step * %strlen
1964 const SCEV *Ev = SE->getSCEV(V: &PN);
1965 const SCEVAddRecExpr *AddRecEv = dyn_cast<SCEVAddRecExpr>(Val: Ev);
1966 const SCEVConstant *Step =
1967 dyn_cast<SCEVConstant>(Val: AddRecEv->getStepRecurrence(SE&: *SE));
1968 const SCEV *Base = AddRecEv->getStart();
1969
    // It is safe to truncate to the base type since, if the base is narrower
    // than size_t, the equivalent user code would have to truncate anyway.
1972 const SCEV *NewEv = SE->getAddExpr(
1973 LHS: Base, RHS: SE->getMulExpr(LHS: Step, RHS: SE->getTruncateOrSignExtend(
1974 V: StrlenEv, Ty: Base->getType())));
1975
1976 Value *MaterializedPHI = Expander.expandCodeFor(SH: NewEv, Ty: NewEv->getType(),
1977 I: Builder.GetInsertPoint());
1978 Expander.clear();
1979 PN.replaceAllUsesWith(V: MaterializedPHI);
1980 Cleanup.push_back(Elt: &PN);
1981 }
1982
  // All LCSSA loop phis are now dead; the leftover dead loop body can be
  // cleaned up by later passes.
1985 for (PHINode *PN : Cleanup)
1986 RecursivelyDeleteDeadPHINode(PN);
1987
  // LoopDeletion only deletes invariant loops with a known trip count. Update
  // the condition so it will reliably delete the now-invariant loop.
1990 assert(LoopTerm->getNumSuccessors() == 2 &&
1991 (LoopTerm->getSuccessor(0) == LoopBody ||
1992 LoopTerm->getSuccessor(1) == LoopBody) &&
1993 "loop body must have a successor that is it self");
1994 ConstantInt *NewLoopCond = LoopTerm->getSuccessor(i: 0) == LoopBody
1995 ? Builder.getFalse()
1996 : Builder.getTrue();
1997 LoopTerm->setCondition(NewLoopCond);
1998 SE->forgetLoop(L: CurLoop);
1999
2000 ++NumStrLen;
2001 LLVM_DEBUG(dbgs() << " Formed strlen idiom: " << *StrLenFunc << "\n");
2002 ORE.emit(RemarkBuilder: [&]() {
2003 return OptimizationRemark(DEBUG_TYPE, "recognizeAndInsertStrLen",
2004 CurLoop->getStartLoc(), Preheader)
2005 << "Transformed " << StrLenFunc->getName() << " loop idiom";
2006 });
2007
2008 return true;
2009}
2010
2011/// Check if the given conditional branch is based on an unsigned less-than
2012/// comparison between a variable and a constant, and if the comparison is false
2013/// the control yields to the loop entry. If the branch matches the behaviour,
2014/// the variable involved in the comparison is returned.
2015static Value *matchShiftULTCondition(BranchInst *BI, BasicBlock *LoopEntry,
2016 APInt &Threshold) {
2017 if (!BI || !BI->isConditional())
2018 return nullptr;
2019
2020 ICmpInst *Cond = dyn_cast<ICmpInst>(Val: BI->getCondition());
2021 if (!Cond)
2022 return nullptr;
2023
2024 ConstantInt *CmpConst = dyn_cast<ConstantInt>(Val: Cond->getOperand(i_nocapture: 1));
2025 if (!CmpConst)
2026 return nullptr;
2027
2028 BasicBlock *FalseSucc = BI->getSuccessor(i: 1);
2029 ICmpInst::Predicate Pred = Cond->getPredicate();
2030
2031 if (Pred == ICmpInst::ICMP_ULT && FalseSucc == LoopEntry) {
2032 Threshold = CmpConst->getValue();
2033 return Cond->getOperand(i_nocapture: 0);
2034 }
2035
2036 return nullptr;
2037}
2038
2039// Check if the recurrence variable `VarX` is in the right form to create
2040// the idiom. Returns the value coerced to a PHINode if so.
2041static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
2042 BasicBlock *LoopEntry) {
2043 auto *PhiX = dyn_cast<PHINode>(Val: VarX);
2044 if (PhiX && PhiX->getParent() == LoopEntry &&
2045 (PhiX->getOperand(i_nocapture: 0) == DefX || PhiX->getOperand(i_nocapture: 1) == DefX))
2046 return PhiX;
2047 return nullptr;
2048}
2049
2050/// Return true if the idiom is detected in the loop.
2051///
2052/// Additionally:
2053/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
2054/// or nullptr if there is no such.
2055/// 2) \p CntPhi is set to the corresponding phi node
2056/// or nullptr if there is no such.
2057/// 3) \p InitX is set to the value whose CTLZ could be used.
2058/// 4) \p DefX is set to the instruction calculating Loop exit condition.
2059/// 5) \p Threshold is set to the constant involved in the unsigned less-than
2060/// comparison.
2061///
2062/// The core idiom we are trying to detect is:
2063/// \code
2064/// if (x0 < 2)
2065/// goto loop-exit // the precondition of the loop
2066/// cnt0 = init-val
2067/// do {
2068/// x = phi (x0, x.next); //PhiX
2069/// cnt = phi (cnt0, cnt.next)
2070///
2071/// cnt.next = cnt + 1;
2072/// ...
2073/// x.next = x >> 1; // DefX
2074/// } while (x >= 4)
2075/// loop-exit:
2076/// \endcode
2077static bool detectShiftUntilLessThanIdiom(Loop *CurLoop, const DataLayout &DL,
2078 Intrinsic::ID &IntrinID,
2079 Value *&InitX, Instruction *&CntInst,
2080 PHINode *&CntPhi, Instruction *&DefX,
2081 APInt &Threshold) {
2082 BasicBlock *LoopEntry;
2083
2084 DefX = nullptr;
2085 CntInst = nullptr;
2086 CntPhi = nullptr;
2087 LoopEntry = *(CurLoop->block_begin());
2088
2089 // step 1: Check if the loop-back branch is in desirable form.
2090 if (Value *T = matchShiftULTCondition(
2091 BI: dyn_cast<BranchInst>(Val: LoopEntry->getTerminator()), LoopEntry,
2092 Threshold))
2093 DefX = dyn_cast<Instruction>(Val: T);
2094 else
2095 return false;
2096
2097 // step 2: Check the recurrence of variable X
2098 if (!DefX || !isa<PHINode>(Val: DefX))
2099 return false;
2100
2101 PHINode *VarPhi = cast<PHINode>(Val: DefX);
2102 int Idx = VarPhi->getBasicBlockIndex(BB: LoopEntry);
2103 if (Idx == -1)
2104 return false;
2105
2106 DefX = dyn_cast<Instruction>(Val: VarPhi->getIncomingValue(i: Idx));
2107 if (!DefX || DefX->getNumOperands() == 0 || DefX->getOperand(i: 0) != VarPhi)
2108 return false;
2109
2110 // step 3: detect instructions corresponding to "x.next = x >> 1"
2111 if (DefX->getOpcode() != Instruction::LShr)
2112 return false;
2113
2114 IntrinID = Intrinsic::ctlz;
2115 ConstantInt *Shft = dyn_cast<ConstantInt>(Val: DefX->getOperand(i: 1));
2116 if (!Shft || !Shft->isOne())
2117 return false;
2118
2119 InitX = VarPhi->getIncomingValueForBlock(BB: CurLoop->getLoopPreheader());
2120
  // step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
  // or cnt.next = cnt + -1.
2123 // TODO: We can skip the step. If loop trip count is known (CTLZ),
2124 // then all uses of "cnt.next" could be optimized to the trip count
2125 // plus "cnt0". Currently it is not optimized.
2126 // This step could be used to detect POPCNT instruction:
2127 // cnt.next = cnt + (x.next & 1)
2128 for (Instruction &Inst :
2129 llvm::make_range(x: LoopEntry->getFirstNonPHIIt(), y: LoopEntry->end())) {
2130 if (Inst.getOpcode() != Instruction::Add)
2131 continue;
2132
2133 ConstantInt *Inc = dyn_cast<ConstantInt>(Val: Inst.getOperand(i: 1));
2134 if (!Inc || (!Inc->isOne() && !Inc->isMinusOne()))
2135 continue;
2136
2137 PHINode *Phi = getRecurrenceVar(VarX: Inst.getOperand(i: 0), DefX: &Inst, LoopEntry);
2138 if (!Phi)
2139 continue;
2140
2141 CntInst = &Inst;
2142 CntPhi = Phi;
2143 break;
2144 }
2145 if (!CntInst)
2146 return false;
2147
2148 return true;
2149}
2150
2151/// Return true iff the idiom is detected in the loop.
2152///
2153/// Additionally:
2154/// 1) \p CntInst is set to the instruction counting the population bit.
2155/// 2) \p CntPhi is set to the corresponding phi node.
2156/// 3) \p Var is set to the value whose population bits are being counted.
2157///
2158/// The core idiom we are trying to detect is:
2159/// \code
2160/// if (x0 != 0)
2161/// goto loop-exit // the precondition of the loop
2162/// cnt0 = init-val;
2163/// do {
2164/// x1 = phi (x0, x2);
2165/// cnt1 = phi(cnt0, cnt2);
2166///
2167/// cnt2 = cnt1 + 1;
2168/// ...
2169/// x2 = x1 & (x1 - 1);
2170/// ...
2171/// } while(x != 0);
2172///
2173/// loop-exit:
2174/// \endcode
2175static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
2176 Instruction *&CntInst, PHINode *&CntPhi,
2177 Value *&Var) {
  // step 1: Check to see if the loop-back branch matches this pattern:
  // "if (a != 0) goto loop-entry".
2180 BasicBlock *LoopEntry;
2181 Instruction *DefX2, *CountInst;
2182 Value *VarX1, *VarX0;
2183 PHINode *PhiX, *CountPhi;
2184
2185 DefX2 = CountInst = nullptr;
2186 VarX1 = VarX0 = nullptr;
2187 PhiX = CountPhi = nullptr;
2188 LoopEntry = *(CurLoop->block_begin());
2189
2190 // step 1: Check if the loop-back branch is in desirable form.
2191 {
2192 if (Value *T = matchCondition(
2193 BI: dyn_cast<BranchInst>(Val: LoopEntry->getTerminator()), LoopEntry))
2194 DefX2 = dyn_cast<Instruction>(Val: T);
2195 else
2196 return false;
2197 }
2198
2199 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
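  // (x1 & (x1 - 1) clears the lowest set bit of x1, so the loop body executes
  //  once per set bit of the input.)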
2200 {
2201 if (!DefX2 || DefX2->getOpcode() != Instruction::And)
2202 return false;
2203
2204 BinaryOperator *SubOneOp;
2205
2206 if ((SubOneOp = dyn_cast<BinaryOperator>(Val: DefX2->getOperand(i: 0))))
2207 VarX1 = DefX2->getOperand(i: 1);
2208 else {
2209 VarX1 = DefX2->getOperand(i: 0);
2210 SubOneOp = dyn_cast<BinaryOperator>(Val: DefX2->getOperand(i: 1));
2211 }
2212 if (!SubOneOp || SubOneOp->getOperand(i_nocapture: 0) != VarX1)
2213 return false;
2214
2215 ConstantInt *Dec = dyn_cast<ConstantInt>(Val: SubOneOp->getOperand(i_nocapture: 1));
2216 if (!Dec ||
2217 !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
2218 (SubOneOp->getOpcode() == Instruction::Add &&
2219 Dec->isMinusOne()))) {
2220 return false;
2221 }
2222 }
2223
2224 // step 3: Check the recurrence of variable X
2225 PhiX = getRecurrenceVar(VarX: VarX1, DefX: DefX2, LoopEntry);
2226 if (!PhiX)
2227 return false;
2228
  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
2230 {
2231 CountInst = nullptr;
2232 for (Instruction &Inst :
2233 llvm::make_range(x: LoopEntry->getFirstNonPHIIt(), y: LoopEntry->end())) {
2234 if (Inst.getOpcode() != Instruction::Add)
2235 continue;
2236
2237 ConstantInt *Inc = dyn_cast<ConstantInt>(Val: Inst.getOperand(i: 1));
2238 if (!Inc || !Inc->isOne())
2239 continue;
2240
2241 PHINode *Phi = getRecurrenceVar(VarX: Inst.getOperand(i: 0), DefX: &Inst, LoopEntry);
2242 if (!Phi)
2243 continue;
2244
      // Check if the result of the instruction is live out of the loop.
2246 bool LiveOutLoop = false;
2247 for (User *U : Inst.users()) {
2248 if ((cast<Instruction>(Val: U))->getParent() != LoopEntry) {
2249 LiveOutLoop = true;
2250 break;
2251 }
2252 }
2253
2254 if (LiveOutLoop) {
2255 CountInst = &Inst;
2256 CountPhi = Phi;
2257 break;
2258 }
2259 }
2260
2261 if (!CountInst)
2262 return false;
2263 }
2264
2265 // step 5: check if the precondition is in this form:
2266 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
2267 {
2268 auto *PreCondBr = dyn_cast<BranchInst>(Val: PreCondBB->getTerminator());
2269 Value *T = matchCondition(BI: PreCondBr, LoopEntry: CurLoop->getLoopPreheader());
2270 if (T != PhiX->getOperand(i_nocapture: 0) && T != PhiX->getOperand(i_nocapture: 1))
2271 return false;
2272
2273 CntInst = CountInst;
2274 CntPhi = CountPhi;
2275 Var = T;
2276 }
2277
2278 return true;
2279}
2280
2281/// Return true if the idiom is detected in the loop.
2282///
2283/// Additionally:
2284/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
2285/// or nullptr if there is no such.
2286/// 2) \p CntPhi is set to the corresponding phi node
2287/// or nullptr if there is no such.
2288/// 3) \p Var is set to the value whose CTLZ could be used.
2289/// 4) \p DefX is set to the instruction calculating Loop exit condition.
2290///
2291/// The core idiom we are trying to detect is:
2292/// \code
2293/// if (x0 == 0)
2294/// goto loop-exit // the precondition of the loop
2295/// cnt0 = init-val;
2296/// do {
2297/// x = phi (x0, x.next); //PhiX
2298/// cnt = phi(cnt0, cnt.next);
2299///
2300/// cnt.next = cnt + 1;
2301/// ...
2302/// x.next = x >> 1; // DefX
2303/// ...
2304/// } while(x.next != 0);
2305///
2306/// loop-exit:
2307/// \endcode
2308static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
2309 Intrinsic::ID &IntrinID, Value *&InitX,
2310 Instruction *&CntInst, PHINode *&CntPhi,
2311 Instruction *&DefX) {
2312 BasicBlock *LoopEntry;
2313 Value *VarX = nullptr;
2314
2315 DefX = nullptr;
2316 CntInst = nullptr;
2317 CntPhi = nullptr;
2318 LoopEntry = *(CurLoop->block_begin());
2319
2320 // step 1: Check if the loop-back branch is in desirable form.
2321 if (Value *T = matchCondition(
2322 BI: dyn_cast<BranchInst>(Val: LoopEntry->getTerminator()), LoopEntry))
2323 DefX = dyn_cast<Instruction>(Val: T);
2324 else
2325 return false;
2326
2327 // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1"
2328 if (!DefX || !DefX->isShift())
2329 return false;
2330 IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
2331 Intrinsic::ctlz;
2332 ConstantInt *Shft = dyn_cast<ConstantInt>(Val: DefX->getOperand(i: 1));
2333 if (!Shft || !Shft->isOne())
2334 return false;
2335 VarX = DefX->getOperand(i: 0);
2336
2337 // step 3: Check the recurrence of variable X
2338 PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
2339 if (!PhiX)
2340 return false;
2341
2342 InitX = PhiX->getIncomingValueForBlock(BB: CurLoop->getLoopPreheader());
2343
2344 // Make sure the initial value can't be negative otherwise the ashr in the
2345 // loop might never reach zero which would make the loop infinite.
2346 if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(V: InitX, SQ: DL))
2347 return false;
2348
  // step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
  // or cnt.next = cnt + -1.
2351 // TODO: We can skip the step. If loop trip count is known (CTLZ),
2352 // then all uses of "cnt.next" could be optimized to the trip count
2353 // plus "cnt0". Currently it is not optimized.
2354 // This step could be used to detect POPCNT instruction:
2355 // cnt.next = cnt + (x.next & 1)
2356 for (Instruction &Inst :
2357 llvm::make_range(x: LoopEntry->getFirstNonPHIIt(), y: LoopEntry->end())) {
2358 if (Inst.getOpcode() != Instruction::Add)
2359 continue;
2360
2361 ConstantInt *Inc = dyn_cast<ConstantInt>(Val: Inst.getOperand(i: 1));
2362 if (!Inc || (!Inc->isOne() && !Inc->isMinusOne()))
2363 continue;
2364
2365 PHINode *Phi = getRecurrenceVar(VarX: Inst.getOperand(i: 0), DefX: &Inst, LoopEntry);
2366 if (!Phi)
2367 continue;
2368
2369 CntInst = &Inst;
2370 CntPhi = Phi;
2371 break;
2372 }
2373 if (!CntInst)
2374 return false;
2375
2376 return true;
2377}
2378
2379// Check if CTLZ / CTTZ intrinsic is profitable. Assume it is always
2380// profitable if we delete the loop.
2381bool LoopIdiomRecognize::isProfitableToInsertFFS(Intrinsic::ID IntrinID,
2382 Value *InitX, bool ZeroCheck,
2383 size_t CanonicalSize) {
2384 const Value *Args[] = {InitX,
2385 ConstantInt::getBool(Context&: InitX->getContext(), V: ZeroCheck)};
2386
  // @llvm.dbg intrinsics don't count as they have no semantic effect.
2388 auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();
2389 uint32_t HeaderSize =
2390 std::distance(first: InstWithoutDebugIt.begin(), last: InstWithoutDebugIt.end());
2391
2392 IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args);
2393 InstructionCost Cost = TTI->getIntrinsicInstrCost(
2394 ICA: Attrs, CostKind: TargetTransformInfo::TCK_SizeAndLatency);
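  // If the loop is exactly the canonical idiom, it becomes dead and is deleted
  // after the transform, so it is always profitable. Otherwise, only proceed
  // when the intrinsic is no more expensive than a basic instruction.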
2395 if (HeaderSize != CanonicalSize && Cost > TargetTransformInfo::TCC_Basic)
2396 return false;
2397
2398 return true;
2399}
2400
2401/// Convert CTLZ / CTTZ idiom loop into countable loop.
2402/// If CTLZ / CTTZ inserted as a new trip count returns true; otherwise,
2403/// returns false.
2404bool LoopIdiomRecognize::insertFFSIfProfitable(Intrinsic::ID IntrinID,
2405 Value *InitX, Instruction *DefX,
2406 PHINode *CntPhi,
2407 Instruction *CntInst) {
2408 bool IsCntPhiUsedOutsideLoop = false;
2409 for (User *U : CntPhi->users())
2410 if (!CurLoop->contains(Inst: cast<Instruction>(Val: U))) {
2411 IsCntPhiUsedOutsideLoop = true;
2412 break;
2413 }
2414 bool IsCntInstUsedOutsideLoop = false;
2415 for (User *U : CntInst->users())
2416 if (!CurLoop->contains(Inst: cast<Instruction>(Val: U))) {
2417 IsCntInstUsedOutsideLoop = true;
2418 break;
2419 }
2420 // If both CntInst and CntPhi are used outside the loop the profitability
2421 // is questionable.
2422 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
2423 return false;
2424
  // For some CPUs the result of the CTLZ(X) intrinsic is undefined when X is
  // 0. If we cannot guarantee X != 0, we need to check this when expanding the
  // intrinsic.
2428 bool ZeroCheck = false;
  // It is safe to assume the Preheader exists, as it was checked in the
  // parent function runOnLoop.
2431 BasicBlock *PH = CurLoop->getLoopPreheader();
2432
  // If we are using the count instruction outside the loop, make sure we
  // have a zero check as a precondition. Without the check the loop would run
  // one iteration before any check of the input value. This means 0 and 1
  // would have identical behavior in the original loop, and thus the CTLZ-based
  // count would be wrong for an input of 0 unless the precondition rules that
  // case out.
2437 if (!IsCntPhiUsedOutsideLoop) {
2438 auto *PreCondBB = PH->getSinglePredecessor();
2439 if (!PreCondBB)
2440 return false;
2441 auto *PreCondBI = dyn_cast<BranchInst>(Val: PreCondBB->getTerminator());
2442 if (!PreCondBI)
2443 return false;
2444 if (matchCondition(BI: PreCondBI, LoopEntry: PH) != InitX)
2445 return false;
2446 ZeroCheck = true;
2447 }
2448
2449 // FFS idiom loop has only 6 instructions:
2450 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
2451 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
2452 // %shr = ashr %n.addr.0, 1
2453 // %tobool = icmp eq %shr, 0
2454 // %inc = add nsw %i.0, 1
2455 // br i1 %tobool
2456 size_t IdiomCanonicalSize = 6;
2457 if (!isProfitableToInsertFFS(IntrinID, InitX, ZeroCheck, CanonicalSize: IdiomCanonicalSize))
2458 return false;
2459
2460 transformLoopToCountable(IntrinID, PreCondBB: PH, CntInst, CntPhi, Var: InitX, DefX,
2461 DL: DefX->getDebugLoc(), ZeroCheck,
2462 IsCntPhiUsedOutsideLoop);
2463 return true;
2464}
2465
2466/// Recognize CTLZ or CTTZ idiom in a non-countable loop and convert the loop
2467/// to countable (with CTLZ / CTTZ trip count). If CTLZ / CTTZ inserted as a new
2468/// trip count returns true; otherwise, returns false.
2469bool LoopIdiomRecognize::recognizeAndInsertFFS() {
2470 // Give up if the loop has multiple blocks or multiple backedges.
2471 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
2472 return false;
2473
2474 Intrinsic::ID IntrinID;
2475 Value *InitX;
2476 Instruction *DefX = nullptr;
2477 PHINode *CntPhi = nullptr;
2478 Instruction *CntInst = nullptr;
2479
2480 if (!detectShiftUntilZeroIdiom(CurLoop, DL: *DL, IntrinID, InitX, CntInst, CntPhi,
2481 DefX))
2482 return false;
2483
2484 return insertFFSIfProfitable(IntrinID, InitX, DefX, CntPhi, CntInst);
2485}
2486
2487bool LoopIdiomRecognize::recognizeShiftUntilLessThan() {
2488 // Give up if the loop has multiple blocks or multiple backedges.
2489 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
2490 return false;
2491
2492 Intrinsic::ID IntrinID;
2493 Value *InitX;
2494 Instruction *DefX = nullptr;
2495 PHINode *CntPhi = nullptr;
2496 Instruction *CntInst = nullptr;
2497
2498 APInt LoopThreshold;
2499 if (!detectShiftUntilLessThanIdiom(CurLoop, DL: *DL, IntrinID, InitX, CntInst,
2500 CntPhi, DefX, Threshold&: LoopThreshold))
2501 return false;
2502
2503 if (LoopThreshold == 2) {
2504 // Treat as regular FFS.
2505 return insertFFSIfProfitable(IntrinID, InitX, DefX, CntPhi, CntInst);
2506 }
2507
2508 // Look for Floor Log2 Idiom.
2509 if (LoopThreshold != 4)
2510 return false;
2511
2512 // Abort if CntPhi is used outside of the loop.
2513 for (User *U : CntPhi->users())
2514 if (!CurLoop->contains(Inst: cast<Instruction>(Val: U)))
2515 return false;
2516
  // It is safe to assume the Preheader exists, as it was checked in the
  // parent function runOnLoop.
2519 BasicBlock *PH = CurLoop->getLoopPreheader();
2520 auto *PreCondBB = PH->getSinglePredecessor();
2521 if (!PreCondBB)
2522 return false;
2523 auto *PreCondBI = dyn_cast<BranchInst>(Val: PreCondBB->getTerminator());
2524 if (!PreCondBI)
2525 return false;
2526
2527 APInt PreLoopThreshold;
2528 if (matchShiftULTCondition(BI: PreCondBI, LoopEntry: PH, Threshold&: PreLoopThreshold) != InitX ||
2529 PreLoopThreshold != 2)
2530 return false;
2531
2532 bool ZeroCheck = true;
2533
2534 // the loop has only 6 instructions:
2535 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
2536 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
2537 // %shr = ashr %n.addr.0, 1
2538 // %tobool = icmp ult %n.addr.0, C
2539 // %inc = add nsw %i.0, 1
2540 // br i1 %tobool
2541 size_t IdiomCanonicalSize = 6;
2542 if (!isProfitableToInsertFFS(IntrinID, InitX, ZeroCheck, CanonicalSize: IdiomCanonicalSize))
2543 return false;
2544
2545 // log2(x) = w − 1 − clz(x)
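  // e.g. for a 32-bit x = 13 (0b1101), clz(x) = 28, so
  // floor(log2(x)) = 32 - 1 - 28 = 3.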
2546 transformLoopToCountable(IntrinID, PreCondBB: PH, CntInst, CntPhi, Var: InitX, DefX,
2547 DL: DefX->getDebugLoc(), ZeroCheck,
2548 /*IsCntPhiUsedOutsideLoop=*/false,
2549 /*InsertSub=*/true);
2550 return true;
2551}
2552
2553/// Recognizes a population count idiom in a non-countable loop.
2554///
2555/// If detected, transforms the relevant code to issue the popcount intrinsic
2556/// function call, and returns true; otherwise, returns false.
2557bool LoopIdiomRecognize::recognizePopcount() {
2558 if (TTI->getPopcntSupport(IntTyWidthInBit: 32) != TargetTransformInfo::PSK_FastHardware)
2559 return false;
2560
  // Counting the population is usually done with a few arithmetic
  // instructions. Such instructions can be easily "absorbed" by vacant slots in
  // a non-compact loop. Therefore, recognizing the popcount idiom only makes
  // sense in a compact loop.
2565
2566 // Give up if the loop has multiple blocks or multiple backedges.
2567 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
2568 return false;
2569
2570 BasicBlock *LoopBody = *(CurLoop->block_begin());
2571 if (LoopBody->size() >= 20) {
2572 // The loop is too big, bail out.
2573 return false;
2574 }
2575
2576 // It should have a preheader containing nothing but an unconditional branch.
2577 BasicBlock *PH = CurLoop->getLoopPreheader();
2578 if (!PH || &PH->front() != PH->getTerminator())
2579 return false;
2580 auto *EntryBI = dyn_cast<BranchInst>(Val: PH->getTerminator());
2581 if (!EntryBI || EntryBI->isConditional())
2582 return false;
2583
2584 // It should have a precondition block where the generated popcount intrinsic
2585 // function can be inserted.
2586 auto *PreCondBB = PH->getSinglePredecessor();
2587 if (!PreCondBB)
2588 return false;
2589 auto *PreCondBI = dyn_cast<BranchInst>(Val: PreCondBB->getTerminator());
2590 if (!PreCondBI || PreCondBI->isUnconditional())
2591 return false;
2592
2593 Instruction *CntInst;
2594 PHINode *CntPhi;
2595 Value *Val;
2596 if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Var&: Val))
2597 return false;
2598
2599 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Var: Val);
2600 return true;
2601}
2602
2603static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
2604 const DebugLoc &DL) {
2605 Value *Ops[] = {Val};
2606 Type *Tys[] = {Val->getType()};
2607
2608 CallInst *CI = IRBuilder.CreateIntrinsic(ID: Intrinsic::ctpop, Types: Tys, Args: Ops);
2609 CI->setDebugLoc(DL);
2610
2611 return CI;
2612}
2613
2614static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
2615 const DebugLoc &DL, bool ZeroCheck,
2616 Intrinsic::ID IID) {
2617 Value *Ops[] = {Val, IRBuilder.getInt1(V: ZeroCheck)};
2618 Type *Tys[] = {Val->getType()};
2619
2620 CallInst *CI = IRBuilder.CreateIntrinsic(ID: IID, Types: Tys, Args: Ops);
2621 CI->setDebugLoc(DL);
2622
2623 return CI;
2624}
2625
2626/// Transform the following loop (Using CTLZ, CTTZ is similar):
2627/// loop:
2628/// CntPhi = PHI [Cnt0, CntInst]
2629/// PhiX = PHI [InitX, DefX]
2630/// CntInst = CntPhi + 1
2631/// DefX = PhiX >> 1
2632/// LOOP_BODY
2633/// Br: loop if (DefX != 0)
2634/// Use(CntPhi) or Use(CntInst)
2635///
2636/// Into:
2637/// If CntPhi used outside the loop:
2638/// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
2639/// Count = CountPrev + 1
2640/// else
2641/// Count = BitWidth(InitX) - CTLZ(InitX)
2642/// loop:
2643/// CntPhi = PHI [Cnt0, CntInst]
2644/// PhiX = PHI [InitX, DefX]
2645/// PhiCount = PHI [Count, Dec]
2646/// CntInst = CntPhi + 1
2647/// DefX = PhiX >> 1
2648/// Dec = PhiCount - 1
2649/// LOOP_BODY
2650/// Br: loop if (Dec != 0)
2651/// Use(CountPrev + Cnt0) // Use(CntPhi)
2652/// or
2653/// Use(Count + Cnt0) // Use(CntInst)
2654///
2655/// If LOOP_BODY is empty the loop will be deleted.
2656/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
2657void LoopIdiomRecognize::transformLoopToCountable(
2658 Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
2659 PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
2660 bool ZeroCheck, bool IsCntPhiUsedOutsideLoop, bool InsertSub) {
2661 BranchInst *PreheaderBr = cast<BranchInst>(Val: Preheader->getTerminator());
2662
2663 // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block
2664 IRBuilder<> Builder(PreheaderBr);
2665 Builder.SetCurrentDebugLocation(DL);
2666
  // If there are no uses of CntPhi outside the loop, create:
  //   Count = BitWidth - CTLZ(InitX);
  //   NewCount = Count;
  // If there are uses of CntPhi outside the loop, create:
  //   NewCount = BitWidth - CTLZ(InitX >> 1);
  //   Count = NewCount + 1;
2673 Value *InitXNext;
2674 if (IsCntPhiUsedOutsideLoop) {
2675 if (DefX->getOpcode() == Instruction::AShr)
2676 InitXNext = Builder.CreateAShr(LHS: InitX, RHS: 1);
2677 else if (DefX->getOpcode() == Instruction::LShr)
2678 InitXNext = Builder.CreateLShr(LHS: InitX, RHS: 1);
2679 else if (DefX->getOpcode() == Instruction::Shl) // cttz
2680 InitXNext = Builder.CreateShl(LHS: InitX, RHS: 1);
2681 else
2682 llvm_unreachable("Unexpected opcode!");
2683 } else
2684 InitXNext = InitX;
2685 Value *Count =
2686 createFFSIntrinsic(IRBuilder&: Builder, Val: InitXNext, DL, ZeroCheck, IID: IntrinID);
2687 Type *CountTy = Count->getType();
2688 Count = Builder.CreateSub(
2689 LHS: ConstantInt::get(Ty: CountTy, V: CountTy->getIntegerBitWidth()), RHS: Count);
2690 if (InsertSub)
2691 Count = Builder.CreateSub(LHS: Count, RHS: ConstantInt::get(Ty: CountTy, V: 1));
2692 Value *NewCount = Count;
2693 if (IsCntPhiUsedOutsideLoop)
2694 Count = Builder.CreateAdd(LHS: Count, RHS: ConstantInt::get(Ty: CountTy, V: 1));
2695
2696 NewCount = Builder.CreateZExtOrTrunc(V: NewCount, DestTy: CntInst->getType());
2697
2698 Value *CntInitVal = CntPhi->getIncomingValueForBlock(BB: Preheader);
2699 if (cast<ConstantInt>(Val: CntInst->getOperand(i: 1))->isOne()) {
2700 // If the counter was being incremented in the loop, add NewCount to the
2701 // counter's initial value, but only if the initial value is not zero.
2702 ConstantInt *InitConst = dyn_cast<ConstantInt>(Val: CntInitVal);
2703 if (!InitConst || !InitConst->isZero())
2704 NewCount = Builder.CreateAdd(LHS: NewCount, RHS: CntInitVal);
2705 } else {
2706 // If the count was being decremented in the loop, subtract NewCount from
2707 // the counter's initial value.
2708 NewCount = Builder.CreateSub(LHS: CntInitVal, RHS: NewCount);
2709 }
2710
2711 // Step 2: Insert new IV and loop condition:
2712 // loop:
2713 // ...
2714 // PhiCount = PHI [Count, Dec]
2715 // ...
2716 // Dec = PhiCount - 1
2717 // ...
2718 // Br: loop if (Dec != 0)
2719 BasicBlock *Body = *(CurLoop->block_begin());
2720 auto *LbBr = cast<BranchInst>(Val: Body->getTerminator());
2721 ICmpInst *LbCond = cast<ICmpInst>(Val: LbBr->getCondition());
2722
2723 PHINode *TcPhi = PHINode::Create(Ty: CountTy, NumReservedValues: 2, NameStr: "tcphi");
2724 TcPhi->insertBefore(InsertPos: Body->begin());
2725
2726 Builder.SetInsertPoint(LbCond);
2727 Instruction *TcDec = cast<Instruction>(Val: Builder.CreateSub(
2728 LHS: TcPhi, RHS: ConstantInt::get(Ty: CountTy, V: 1), Name: "tcdec", HasNUW: false, HasNSW: true));
2729
2730 TcPhi->addIncoming(V: Count, BB: Preheader);
2731 TcPhi->addIncoming(V: TcDec, BB: Body);
2732
2733 CmpInst::Predicate Pred =
2734 (LbBr->getSuccessor(i: 0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
2735 LbCond->setPredicate(Pred);
2736 LbCond->setOperand(i_nocapture: 0, Val_nocapture: TcDec);
2737 LbCond->setOperand(i_nocapture: 1, Val_nocapture: ConstantInt::get(Ty: CountTy, V: 0));
2738
2739 // Step 3: All the references to the original counter outside
2740 // the loop are replaced with the NewCount
2741 if (IsCntPhiUsedOutsideLoop)
2742 CntPhi->replaceUsesOutsideBlock(V: NewCount, BB: Body);
2743 else
2744 CntInst->replaceUsesOutsideBlock(V: NewCount, BB: Body);
2745
2746 // step 4: Forget the "non-computable" trip-count SCEV associated with the
2747 // loop. The loop would otherwise not be deleted even if it becomes empty.
2748 SE->forgetLoop(L: CurLoop);
2749}
2750
2751void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
2752 Instruction *CntInst,
2753 PHINode *CntPhi, Value *Var) {
2754 BasicBlock *PreHead = CurLoop->getLoopPreheader();
2755 auto *PreCondBr = cast<BranchInst>(Val: PreCondBB->getTerminator());
2756 const DebugLoc &DL = CntInst->getDebugLoc();
2757
2758 // Assuming before transformation, the loop is following:
2759 // if (x) // the precondition
2760 // do { cnt++; x &= x - 1; } while(x);
2761
2762 // Step 1: Insert the ctpop instruction at the end of the precondition block
2763 IRBuilder<> Builder(PreCondBr);
2764 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
2765 {
2766 PopCnt = createPopcntIntrinsic(IRBuilder&: Builder, Val: Var, DL);
2767 NewCount = PopCntZext =
2768 Builder.CreateZExtOrTrunc(V: PopCnt, DestTy: cast<IntegerType>(Val: CntPhi->getType()));
2769
2770 if (NewCount != PopCnt)
2771 (cast<Instruction>(Val: NewCount))->setDebugLoc(DL);
2772
2773 // TripCnt is exactly the number of iterations the loop has
2774 TripCnt = NewCount;
2775
2776 // If the population counter's initial value is not zero, insert Add Inst.
2777 Value *CntInitVal = CntPhi->getIncomingValueForBlock(BB: PreHead);
2778 ConstantInt *InitConst = dyn_cast<ConstantInt>(Val: CntInitVal);
2779 if (!InitConst || !InitConst->isZero()) {
2780 NewCount = Builder.CreateAdd(LHS: NewCount, RHS: CntInitVal);
2781 (cast<Instruction>(Val: NewCount))->setDebugLoc(DL);
2782 }
2783 }
2784
2785 // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
  // "if (NewCount == 0) loop-exit". Without this change, the intrinsic
  // call would be partially dead code, and downstream passes would drag
  // it back from the precondition block to the preheader.
2789 {
2790 ICmpInst *PreCond = cast<ICmpInst>(Val: PreCondBr->getCondition());
2791
2792 Value *Opnd0 = PopCntZext;
2793 Value *Opnd1 = ConstantInt::get(Ty: PopCntZext->getType(), V: 0);
2794 if (PreCond->getOperand(i_nocapture: 0) != Var)
2795 std::swap(a&: Opnd0, b&: Opnd1);
2796
2797 ICmpInst *NewPreCond = cast<ICmpInst>(
2798 Val: Builder.CreateICmp(P: PreCond->getPredicate(), LHS: Opnd0, RHS: Opnd1));
2799 PreCondBr->setCondition(NewPreCond);
2800
2801 RecursivelyDeleteTriviallyDeadInstructions(V: PreCond, TLI);
2802 }
2803
  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a noncountable
  // loop into a countable one. The benefit is twofold:
2807 //
2808 // - If the loop only counts population, the entire loop becomes dead after
2809 // the transformation. It is a lot easier to prove a countable loop dead
2810 // than to prove a noncountable one. (In some C dialects, an infinite loop
2811 // isn't dead even if it computes nothing useful. In general, DCE needs
  //    to prove a noncountable loop finite before safely deleting it.)
2813 //
2814 // - If the loop also performs something else, it remains alive.
2815 // Since it is transformed to countable form, it can be aggressively
2816 // optimized by some optimizations which are in general not applicable
2817 // to a noncountable loop.
2818 //
  // After this step, this loop (conceptually) would look like the following:
2820 // newcnt = __builtin_ctpop(x);
2821 // t = newcnt;
2822 // if (x)
  //     do { cnt++; x &= x - 1; t--; } while (t > 0);
2824 BasicBlock *Body = *(CurLoop->block_begin());
2825 {
2826 auto *LbBr = cast<BranchInst>(Val: Body->getTerminator());
2827 ICmpInst *LbCond = cast<ICmpInst>(Val: LbBr->getCondition());
2828 Type *Ty = TripCnt->getType();
2829
2830 PHINode *TcPhi = PHINode::Create(Ty, NumReservedValues: 2, NameStr: "tcphi");
2831 TcPhi->insertBefore(InsertPos: Body->begin());
2832
2833 Builder.SetInsertPoint(LbCond);
2834 Instruction *TcDec = cast<Instruction>(
2835 Val: Builder.CreateSub(LHS: TcPhi, RHS: ConstantInt::get(Ty, V: 1),
2836 Name: "tcdec", HasNUW: false, HasNSW: true));
2837
2838 TcPhi->addIncoming(V: TripCnt, BB: PreHead);
2839 TcPhi->addIncoming(V: TcDec, BB: Body);
2840
2841 CmpInst::Predicate Pred =
2842 (LbBr->getSuccessor(i: 0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
2843 LbCond->setPredicate(Pred);
2844 LbCond->setOperand(i_nocapture: 0, Val_nocapture: TcDec);
2845 LbCond->setOperand(i_nocapture: 1, Val_nocapture: ConstantInt::get(Ty, V: 0));
2846 }
2847
2848 // Step 4: All the references to the original population counter outside
2849 // the loop are replaced with the NewCount -- the value returned from
2850 // __builtin_ctpop().
2851 CntInst->replaceUsesOutsideBlock(V: NewCount, BB: Body);
2852
2853 // step 5: Forget the "non-computable" trip-count SCEV associated with the
2854 // loop. The loop would otherwise not be deleted even if it becomes empty.
2855 SE->forgetLoop(L: CurLoop);
2856}
2857
2858/// Match loop-invariant value.
2859template <typename SubPattern_t> struct match_LoopInvariant {
2860 SubPattern_t SubPattern;
2861 const Loop *L;
2862
2863 match_LoopInvariant(const SubPattern_t &SP, const Loop *L)
2864 : SubPattern(SP), L(L) {}
2865
2866 template <typename ITy> bool match(ITy *V) const {
2867 return L->isLoopInvariant(V) && SubPattern.match(V);
2868 }
2869};
2870
2871/// Matches if the value is loop-invariant.
2872template <typename Ty>
2873inline match_LoopInvariant<Ty> m_LoopInvariant(const Ty &M, const Loop *L) {
2874 return match_LoopInvariant<Ty>(M, L);
2875}
2876
2877/// Return true if the idiom is detected in the loop.
2878///
2879/// The core idiom we are trying to detect is:
2880/// \code
2881/// entry:
2882/// <...>
2883/// %bitmask = shl i32 1, %bitpos
2884/// br label %loop
2885///
2886/// loop:
2887/// %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ]
2888/// %x.curr.bitmasked = and i32 %x.curr, %bitmask
2889/// %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0
2890/// %x.next = shl i32 %x.curr, 1
2891/// <...>
2892/// br i1 %x.curr.isbitunset, label %loop, label %end
2893///
2894/// end:
2895/// %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
2896/// %x.next.res = phi i32 [ %x.next, %loop ] <...>
2897/// <...>
2898/// \endcode
2899static bool detectShiftUntilBitTestIdiom(Loop *CurLoop, Value *&BaseX,
2900 Value *&BitMask, Value *&BitPos,
2901 Value *&CurrX, Instruction *&NextX) {
2902 LLVM_DEBUG(dbgs() << DEBUG_TYPE
2903 " Performing shift-until-bittest idiom detection.\n");
2904
2905 // Give up if the loop has multiple blocks or multiple backedges.
2906 if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) {
2907 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n");
2908 return false;
2909 }
2910
2911 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
2912 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
2913 assert(LoopPreheaderBB && "There is always a loop preheader.");
2914
2915 using namespace PatternMatch;
2916
2917 // Step 1: Check if the loop backedge is in desirable form.
2918
2919 CmpPredicate Pred;
2920 Value *CmpLHS, *CmpRHS;
2921 BasicBlock *TrueBB, *FalseBB;
2922 if (!match(V: LoopHeaderBB->getTerminator(),
2923 P: m_Br(C: m_ICmp(Pred, L: m_Value(V&: CmpLHS), R: m_Value(V&: CmpRHS)),
2924 T: m_BasicBlock(V&: TrueBB), F: m_BasicBlock(V&: FalseBB)))) {
2925 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n");
2926 return false;
2927 }
2928
2929 // Step 2: Check if the backedge's condition is in desirable form.
2930
2931 auto MatchVariableBitMask = [&]() {
2932 return ICmpInst::isEquality(P: Pred) && match(V: CmpRHS, P: m_Zero()) &&
2933 match(V: CmpLHS,
2934 P: m_c_And(L: m_Value(V&: CurrX),
2935 R: m_CombineAnd(
2936 L: m_Value(V&: BitMask),
2937 R: m_LoopInvariant(M: m_Shl(L: m_One(), R: m_Value(V&: BitPos)),
2938 L: CurLoop))));
2939 };
2940
2941 auto MatchDecomposableConstantBitMask = [&]() {
2942 auto Res = llvm::decomposeBitTestICmp(
2943 LHS: CmpLHS, RHS: CmpRHS, Pred, /*LookThroughTrunc=*/true,
2944 /*AllowNonZeroC=*/false, /*DecomposeAnd=*/true);
2945 if (Res && Res->Mask.isPowerOf2()) {
2946 assert(ICmpInst::isEquality(Res->Pred));
2947 Pred = Res->Pred;
2948 CurrX = Res->X;
2949 BitMask = ConstantInt::get(Ty: CurrX->getType(), V: Res->Mask);
2950 BitPos = ConstantInt::get(Ty: CurrX->getType(), V: Res->Mask.logBase2());
2951 return true;
2952 }
2953 return false;
2954 };
2955
2956 if (!MatchVariableBitMask() && !MatchDecomposableConstantBitMask()) {
2957 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge comparison.\n");
2958 return false;
2959 }
2960
2961 // Step 3: Check if the recurrence is in desirable form.
2962 auto *CurrXPN = dyn_cast<PHINode>(Val: CurrX);
2963 if (!CurrXPN || CurrXPN->getParent() != LoopHeaderBB) {
2964 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n");
2965 return false;
2966 }
2967
2968 BaseX = CurrXPN->getIncomingValueForBlock(BB: LoopPreheaderBB);
2969 NextX =
2970 dyn_cast<Instruction>(Val: CurrXPN->getIncomingValueForBlock(BB: LoopHeaderBB));
2971
2972 assert(CurLoop->isLoopInvariant(BaseX) &&
2973 "Expected BaseX to be available in the preheader!");
2974
2975 if (!NextX || !match(V: NextX, P: m_Shl(L: m_Specific(V: CurrX), R: m_One()))) {
2976 // FIXME: support right-shift?
2977 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n");
2978 return false;
2979 }
2980
2981 // Step 4: Check if the backedge's destinations are in desirable form.
2982
2983 assert(ICmpInst::isEquality(Pred) &&
2984 "Should only get equality predicates here.");
2985
2986 // cmp-br is commutative, so canonicalize to a single variant.
2987 if (Pred != ICmpInst::Predicate::ICMP_EQ) {
2988 Pred = ICmpInst::getInversePredicate(pred: Pred);
2989 std::swap(a&: TrueBB, b&: FalseBB);
2990 }
2991
2992 // We expect to exit the loop when the comparison yields false,
2993 // so when it yields true we should branch back to the loop header.
2994 if (TrueBB != LoopHeaderBB) {
2995 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n");
2996 return false;
2997 }
2998
2999 // Okay, idiom checks out.
3000 return true;
3001}
3002
3003/// Look for the following loop:
3004/// \code
3005/// entry:
3006/// <...>
3007/// %bitmask = shl i32 1, %bitpos
3008/// br label %loop
3009///
3010/// loop:
3011/// %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ]
3012/// %x.curr.bitmasked = and i32 %x.curr, %bitmask
3013/// %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0
3014/// %x.next = shl i32 %x.curr, 1
3015/// <...>
3016/// br i1 %x.curr.isbitunset, label %loop, label %end
3017///
3018/// end:
3019/// %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
3020/// %x.next.res = phi i32 [ %x.next, %loop ] <...>
3021/// <...>
3022/// \endcode
3023///
3024/// And transform it into:
3025/// \code
3026/// entry:
3027/// %bitmask = shl i32 1, %bitpos
3028/// %lowbitmask = add i32 %bitmask, -1
3029/// %mask = or i32 %lowbitmask, %bitmask
3030/// %x.masked = and i32 %x, %mask
3031/// %x.masked.numleadingzeros = call i32 @llvm.ctlz.i32(i32 %x.masked,
3032/// i1 true)
3033/// %x.masked.numactivebits = sub i32 32, %x.masked.numleadingzeros
3034/// %x.masked.leadingonepos = add i32 %x.masked.numactivebits, -1
3035/// %backedgetakencount = sub i32 %bitpos, %x.masked.leadingonepos
3036/// %tripcount = add i32 %backedgetakencount, 1
3037/// %x.curr = shl i32 %x, %backedgetakencount
3038/// %x.next = shl i32 %x, %tripcount
3039/// br label %loop
3040///
3041/// loop:
3042/// %loop.iv = phi i32 [ 0, %entry ], [ %loop.iv.next, %loop ]
3043/// %loop.iv.next = add nuw i32 %loop.iv, 1
3044/// %loop.ivcheck = icmp eq i32 %loop.iv.next, %tripcount
3045/// <...>
3046/// br i1 %loop.ivcheck, label %end, label %loop
3047///
3048/// end:
3049/// %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
3050/// %x.next.res = phi i32 [ %x.next, %loop ] <...>
3051/// <...>
3052/// \endcode
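///
/// Worked example of the precomputation (an illustrative sketch): for
/// i32 %x = 1 and %bitpos = 5, %mask = 63, %x.masked = 1, and ctlz(1) = 31,
/// so %x.masked.numactivebits = 1 and %x.masked.leadingonepos = 0; thus
/// %backedgetakencount = 5, %tripcount = 6, %x.curr = 1 << 5 = 32 and
/// %x.next = 1 << 6 = 64, matching the six iterations the original loop
/// would execute.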
3053bool LoopIdiomRecognize::recognizeShiftUntilBitTest() {
3054 bool MadeChange = false;
3055
3056 Value *X, *BitMask, *BitPos, *XCurr;
3057 Instruction *XNext;
3058 if (!detectShiftUntilBitTestIdiom(CurLoop, BaseX&: X, BitMask, BitPos, CurrX&: XCurr,
3059 NextX&: XNext)) {
3060 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3061 " shift-until-bittest idiom detection failed.\n");
3062 return MadeChange;
3063 }
3064 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom detected!\n");
3065
3066 // Ok, it is the idiom we were looking for; we *could* transform this loop,
3067 // but is it profitable to transform?
3068
3069 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
3070 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
3071 assert(LoopPreheaderBB && "There is always a loop preheader.");
3072
3073 BasicBlock *SuccessorBB = CurLoop->getExitBlock();
3074 assert(SuccessorBB && "There is only a single successor.");
3075
3076 IRBuilder<> Builder(LoopPreheaderBB->getTerminator());
3077 Builder.SetCurrentDebugLocation(cast<Instruction>(Val: XCurr)->getDebugLoc());
3078
3079 Intrinsic::ID IntrID = Intrinsic::ctlz;
3080 Type *Ty = X->getType();
3081 unsigned Bitwidth = Ty->getScalarSizeInBits();
3082
3083 TargetTransformInfo::TargetCostKind CostKind =
3084 TargetTransformInfo::TCK_SizeAndLatency;
3085
3086 // The rewrite is considered to be unprofitable if and only if the
3087 // intrinsic/shift we'll use are not cheap. Note that we are okay with *just*
3088 // making the loop countable, even if nothing else changes.
3089 IntrinsicCostAttributes Attrs(
3090 IntrID, Ty, {PoisonValue::get(T: Ty), /*is_zero_poison=*/Builder.getTrue()});
3091 InstructionCost Cost = TTI->getIntrinsicInstrCost(ICA: Attrs, CostKind);
3092 if (Cost > TargetTransformInfo::TCC_Basic) {
3093 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3094 " Intrinsic is too costly, not beneficial\n");
3095 return MadeChange;
3096 }
3097 if (TTI->getArithmeticInstrCost(Opcode: Instruction::Shl, Ty, CostKind) >
3098 TargetTransformInfo::TCC_Basic) {
3099 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Shift is too costly, not beneficial\n");
3100 return MadeChange;
3101 }
3102
3103 // Ok, transform appears worthwhile.
3104 MadeChange = true;
3105
3106 if (!isGuaranteedNotToBeUndefOrPoison(V: BitPos)) {
3107 // BitMask may be computed from BitPos; freeze BitPos so that we can
3108 // safely increase its use count.
3109 std::optional<BasicBlock::iterator> InsertPt = std::nullopt;
3110 if (auto *BitPosI = dyn_cast<Instruction>(Val: BitPos))
3111 InsertPt = BitPosI->getInsertionPointAfterDef();
3112 else
3113 InsertPt = DT->getRoot()->getFirstNonPHIOrDbgOrAlloca();
3114 if (!InsertPt)
3115 return false;
3116 FreezeInst *BitPosFrozen =
3117 new FreezeInst(BitPos, BitPos->getName() + ".fr", *InsertPt);
3118 BitPos->replaceUsesWithIf(New: BitPosFrozen, ShouldReplace: [BitPosFrozen](Use &U) {
3119 return U.getUser() != BitPosFrozen;
3120 });
3121 BitPos = BitPosFrozen;
3122 }
3123
3124 // Step 1: Compute the loop trip count.
3125
3126 Value *LowBitMask = Builder.CreateAdd(LHS: BitMask, RHS: Constant::getAllOnesValue(Ty),
3127 Name: BitPos->getName() + ".lowbitmask");
3128 Value *Mask =
3129 Builder.CreateOr(LHS: LowBitMask, RHS: BitMask, Name: BitPos->getName() + ".mask");
3130 Value *XMasked = Builder.CreateAnd(LHS: X, RHS: Mask, Name: X->getName() + ".masked");
3131 CallInst *XMaskedNumLeadingZeros = Builder.CreateIntrinsic(
3132 ID: IntrID, Types: Ty, Args: {XMasked, /*is_zero_poison=*/Builder.getTrue()},
3133 /*FMFSource=*/nullptr, Name: XMasked->getName() + ".numleadingzeros");
3134 Value *XMaskedNumActiveBits = Builder.CreateSub(
3135 LHS: ConstantInt::get(Ty, V: Ty->getScalarSizeInBits()), RHS: XMaskedNumLeadingZeros,
3136 Name: XMasked->getName() + ".numactivebits", /*HasNUW=*/true,
3137 /*HasNSW=*/Bitwidth != 2);
3138 Value *XMaskedLeadingOnePos =
3139 Builder.CreateAdd(LHS: XMaskedNumActiveBits, RHS: Constant::getAllOnesValue(Ty),
3140 Name: XMasked->getName() + ".leadingonepos", /*HasNUW=*/false,
3141 /*HasNSW=*/Bitwidth > 2);
3142
3143 Value *LoopBackedgeTakenCount = Builder.CreateSub(
3144 LHS: BitPos, RHS: XMaskedLeadingOnePos, Name: CurLoop->getName() + ".backedgetakencount",
3145 /*HasNUW=*/true, /*HasNSW=*/true);
3146 // We know the loop's backedge-taken count, but what's the loop's trip count?
3147 // Note that NUW is always safe here, while NSW only holds for bitwidths != 2.
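// E.g. for i2, %bitpos can be 1 with X == 1, giving a backedge-taken count
// of 1 and a trip count of 2; the bit pattern for 2 is -2 as a signed i2,
// so an `add nsw` would be poison here, while `add nuw` is still fine.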
3148 Value *LoopTripCount =
3149 Builder.CreateAdd(LHS: LoopBackedgeTakenCount, RHS: ConstantInt::get(Ty, V: 1),
3150 Name: CurLoop->getName() + ".tripcount", /*HasNUW=*/true,
3151 /*HasNSW=*/Bitwidth != 2);
3152
3153 // Step 2: Compute the recurrence's final value without a loop.
3154
3155 // NewX is always safe to compute, because `LoopBackedgeTakenCount`
3156 // will always be smaller than `bitwidth(X)`, i.e. we never get poison.
3157 Value *NewX = Builder.CreateShl(LHS: X, RHS: LoopBackedgeTakenCount);
3158 NewX->takeName(V: XCurr);
3159 if (auto *I = dyn_cast<Instruction>(Val: NewX))
3160 I->copyIRFlags(V: XNext, /*IncludeWrapFlags=*/true);
3161
3162 Value *NewXNext;
3163 // Rewriting XNext is more complicated, however, because `X << LoopTripCount`
3164 // will be poison iff `LoopTripCount == bitwidth(X)` (which will happen
3165 // iff `BitPos` is `bitwidth(x) - 1` and `X` is `1`). So unless we know
3166 // that isn't the case, we'll need to emit an alternative, safe IR.
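// For example, for i32 X == 1 and BitPos == 31 the trip count is 32, so
// `shl i32 %x, 32` would be poison; the fallback below instead computes
// (X << 31) << 1 == 0, which is exactly the value %x.next takes on the
// final iteration of the original loop.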
3167 if (XNext->hasNoSignedWrap() || XNext->hasNoUnsignedWrap() ||
3168 PatternMatch::match(
3169 V: BitPos, P: PatternMatch::m_SpecificInt_ICMP(
3170 Predicate: ICmpInst::ICMP_NE, Threshold: APInt(Ty->getScalarSizeInBits(),
3171 Ty->getScalarSizeInBits() - 1))))
3172 NewXNext = Builder.CreateShl(LHS: X, RHS: LoopTripCount);
3173 else {
3174 // Otherwise, just additionally shift by one. It's the smallest solution;
3175 // alternatively, we could check that NewX is INT_MIN (or that BitPos is
3176 // bitwidth(x) - 1) and select 0 instead.
3177 NewXNext = Builder.CreateShl(LHS: NewX, RHS: ConstantInt::get(Ty, V: 1));
3178 }
3179
3180 NewXNext->takeName(V: XNext);
3181 if (auto *I = dyn_cast<Instruction>(Val: NewXNext))
3182 I->copyIRFlags(V: XNext, /*IncludeWrapFlags=*/true);
3183
3184 // Step 3: Adjust the successor basic block to receive the computed
3185 // recurrence's final value instead of the recurrence itself.
3186
3187 XCurr->replaceUsesOutsideBlock(V: NewX, BB: LoopHeaderBB);
3188 XNext->replaceUsesOutsideBlock(V: NewXNext, BB: LoopHeaderBB);
3189
3190 // Step 4: Rewrite the loop into a countable form, with canonical IV.
3191
3192 // The new canonical induction variable.
3193 Builder.SetInsertPoint(TheBB: LoopHeaderBB, IP: LoopHeaderBB->begin());
3194 auto *IV = Builder.CreatePHI(Ty, NumReservedValues: 2, Name: CurLoop->getName() + ".iv");
3195
3196 // The induction itself.
3197 // Note that NUW is always safe here, while NSW only holds for bitwidths != 2.
3198 Builder.SetInsertPoint(LoopHeaderBB->getTerminator());
3199 auto *IVNext =
3200 Builder.CreateAdd(LHS: IV, RHS: ConstantInt::get(Ty, V: 1), Name: IV->getName() + ".next",
3201 /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
3202
3203 // The loop trip count check.
3204 auto *IVCheck = Builder.CreateICmpEQ(LHS: IVNext, RHS: LoopTripCount,
3205 Name: CurLoop->getName() + ".ivcheck");
3206 SmallVector<uint32_t> BranchWeights;
3207 const bool HasBranchWeights =
3208 !ProfcheckDisableMetadataFixes &&
3209 extractBranchWeights(I: *LoopHeaderBB->getTerminator(), Weights&: BranchWeights);
3210
3211 auto *BI = Builder.CreateCondBr(Cond: IVCheck, True: SuccessorBB, False: LoopHeaderBB);
3212 if (HasBranchWeights) {
3213 if (SuccessorBB == LoopHeaderBB->getTerminator()->getSuccessor(Idx: 1))
3214 std::swap(a&: BranchWeights[0], b&: BranchWeights[1]);
3215 // We're not changing the loop profile, so we can reuse the original loop's
3216 // profile.
3217 setBranchWeights(I&: *BI, Weights: BranchWeights,
3218 /*IsExpected=*/false);
3219 }
3220
3221 LoopHeaderBB->getTerminator()->eraseFromParent();
3222
3223 // Populate the IV PHI.
3224 IV->addIncoming(V: ConstantInt::get(Ty, V: 0), BB: LoopPreheaderBB);
3225 IV->addIncoming(V: IVNext, BB: LoopHeaderBB);
3226
3227 // Step 5: Forget the "non-computable" trip-count SCEV associated with the
3228 // loop. The loop would otherwise not be deleted even if it becomes empty.
3229
3230 SE->forgetLoop(L: CurLoop);
3231
3232 // Other passes will take care of actually deleting the loop if possible.
3233
3234 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom optimized!\n");
3235
3236 ++NumShiftUntilBitTest;
3237 return MadeChange;
3238}
3239
3240/// Return true if the idiom is detected in the loop.
3241///
3242/// The core idiom we are trying to detect is:
3243/// \code
3244/// entry:
3245/// <...>
3246/// %start = <...>
3247/// %extraoffset = <...>
3248/// <...>
3249/// br label %for.cond
3250///
3251/// loop:
3252/// %iv = phi i8 [ %start, %entry ], [ %iv.next, %for.cond ]
3253/// %nbits = add nsw i8 %iv, %extraoffset
3254/// %val.shifted = {{l,a}shr,shl} i8 %val, %nbits
3255/// %val.shifted.iszero = icmp eq i8 %val.shifted, 0
3256/// %iv.next = add i8 %iv, 1
3257/// <...>
3258/// br i1 %val.shifted.iszero, label %end, label %loop
3259///
3260/// end:
3261/// %iv.res = phi i8 [ %iv, %loop ] <...>
3262/// %nbits.res = phi i8 [ %nbits, %loop ] <...>
3263/// %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...>
3264/// %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...>
3265/// %iv.next.res = phi i8 [ %iv.next, %loop ] <...>
3266/// <...>
3267/// \endcode
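///
/// In C terms, this is roughly (an illustrative sketch, not literal source):
/// \code
///   iv = start;
///   while ((val >> (iv + extraoffset)) != 0) // or <<, or an arithmetic >>
///     ++iv;
/// \endcode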
3268static bool detectShiftUntilZeroIdiom(Loop *CurLoop, ScalarEvolution *SE,
3269 Instruction *&ValShiftedIsZero,
3270 Intrinsic::ID &IntrinID, Instruction *&IV,
3271 Value *&Start, Value *&Val,
3272 const SCEV *&ExtraOffsetExpr,
3273 bool &InvertedCond) {
3274 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3275 " Performing shift-until-zero idiom detection.\n");
3276
3277 // Give up if the loop has multiple blocks or multiple backedges.
3278 if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) {
3279 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n");
3280 return false;
3281 }
3282
3283 Instruction *ValShifted, *NBits, *IVNext;
3284 Value *ExtraOffset;
3285
3286 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
3287 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
3288 assert(LoopPreheaderBB && "There is always a loop preheader.");
3289
3290 using namespace PatternMatch;
3291
3292 // Step 1: Check if the loop backedge and its condition are in desirable form.
3293
3294 CmpPredicate Pred;
3295 BasicBlock *TrueBB, *FalseBB;
3296 if (!match(V: LoopHeaderBB->getTerminator(),
3297 P: m_Br(C: m_Instruction(I&: ValShiftedIsZero), T: m_BasicBlock(V&: TrueBB),
3298 F: m_BasicBlock(V&: FalseBB))) ||
3299 !match(V: ValShiftedIsZero,
3300 P: m_ICmp(Pred, L: m_Instruction(I&: ValShifted), R: m_Zero())) ||
3301 !ICmpInst::isEquality(P: Pred)) {
3302 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n");
3303 return false;
3304 }
3305
3306 // Step 2: Check if the comparison's operand is in desirable form.
3307 // FIXME: Val could be a one-input PHI node, which we should look past.
3308 if (!match(V: ValShifted, P: m_Shift(L: m_LoopInvariant(M: m_Value(V&: Val), L: CurLoop),
3309 R: m_Instruction(I&: NBits)))) {
3310 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad comparison value computation.\n");
3311 return false;
3312 }
3313 IntrinID = ValShifted->getOpcode() == Instruction::Shl ? Intrinsic::cttz
3314 : Intrinsic::ctlz;
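// (For a non-zero %val, a left shift first becomes zero after
// bitwidth - cttz(%val) steps, while a right shift first becomes zero after
// bitwidth - ctlz(%val) steps, hence the choice of intrinsic.)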
3315
3316 // Step 3: Check if the shift amount is in desirable form.
3317
3318 if (match(V: NBits, P: m_c_Add(L: m_Instruction(I&: IV),
3319 R: m_LoopInvariant(M: m_Value(V&: ExtraOffset), L: CurLoop))) &&
3320 (NBits->hasNoSignedWrap() || NBits->hasNoUnsignedWrap()))
3321 ExtraOffsetExpr = SE->getNegativeSCEV(V: SE->getSCEV(V: ExtraOffset));
3322 else if (match(V: NBits,
3323 P: m_Sub(L: m_Instruction(I&: IV),
3324 R: m_LoopInvariant(M: m_Value(V&: ExtraOffset), L: CurLoop))) &&
3325 NBits->hasNoSignedWrap())
3326 ExtraOffsetExpr = SE->getSCEV(V: ExtraOffset);
3327 else {
3328 IV = NBits;
3329 ExtraOffsetExpr = SE->getZero(Ty: NBits->getType());
3330 }
3331
3332 // Step 4: Check if the recurrence is in desirable form.
3333 auto *IVPN = dyn_cast<PHINode>(Val: IV);
3334 if (!IVPN || IVPN->getParent() != LoopHeaderBB) {
3335 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n");
3336 return false;
3337 }
3338
3339 Start = IVPN->getIncomingValueForBlock(BB: LoopPreheaderBB);
3340 IVNext = dyn_cast<Instruction>(Val: IVPN->getIncomingValueForBlock(BB: LoopHeaderBB));
3341
3342 if (!IVNext || !match(V: IVNext, P: m_Add(L: m_Specific(V: IVPN), R: m_One()))) {
3343 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n");
3344 return false;
3345 }
3346
3347 // Step 5: Check if the backedge's destinations are in desirable form.
3348
3349 assert(ICmpInst::isEquality(Pred) &&
3350 "Should only get equality predicates here.");
3351
3352 // cmp-br is commutative, so canonicalize to a single variant.
3353 InvertedCond = Pred != ICmpInst::Predicate::ICMP_EQ;
3354 if (InvertedCond) {
3355 Pred = ICmpInst::getInversePredicate(pred: Pred);
3356 std::swap(a&: TrueBB, b&: FalseBB);
3357 }
3358
3359 // We expect to exit the loop when the comparison yields true,
3360 // so when it yields false we should branch back to the loop header.
3361 if (FalseBB != LoopHeaderBB) {
3362 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n");
3363 return false;
3364 }
3365
3366 // The new, countable loop will certainly only run a known number of
3367 // iterations; it won't be infinite. But the old loop might be infinite
3368 // under certain conditions. For logical shifts, the value will become zero
3369 // after at most bitwidth(%Val) loop iterations. However, for an arithmetic
3370 // right-shift, if the sign bit was set, the value will never become zero,
3371 // and the loop may never finish.
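// E.g. for i8 %val = -1, `ashr i8 -1, %nbits` stays -1 for any in-range
// shift amount, so the original loop would never terminate on its own.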
3372 if (ValShifted->getOpcode() == Instruction::AShr &&
3373 !isMustProgress(L: CurLoop) && !SE->isKnownNonNegative(S: SE->getSCEV(V: Val))) {
3374 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Can not prove the loop is finite.\n");
3375 return false;
3376 }
3377
3378 // Okay, idiom checks out.
3379 return true;
3380}
3381
3382/// Look for the following loop:
3383/// \code
3384/// entry:
3385/// <...>
3386/// %start = <...>
3387/// %extraoffset = <...>
3388/// <...>
3389/// br label %loop
3390///
3391/// loop:
3392/// %iv = phi i8 [ %start, %entry ], [ %iv.next, %loop ]
3393/// %nbits = add nsw i8 %iv, %extraoffset
3394/// %val.shifted = {{l,a}shr,shl} i8 %val, %nbits
3395/// %val.shifted.iszero = icmp eq i8 %val.shifted, 0
3396/// %iv.next = add i8 %iv, 1
3397/// <...>
3398/// br i1 %val.shifted.iszero, label %end, label %loop
3399///
3400/// end:
3401/// %iv.res = phi i8 [ %iv, %loop ] <...>
3402/// %nbits.res = phi i8 [ %nbits, %loop ] <...>
3403/// %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...>
3404/// %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...>
3405/// %iv.next.res = phi i8 [ %iv.next, %loop ] <...>
3406/// <...>
3407/// \endcode
3408///
3409/// And transform it into:
3410/// \code
3411/// entry:
3412/// <...>
3413/// %start = <...>
3414/// %extraoffset = <...>
3415/// <...>
3416/// %val.numleadingzeros = call i8 @llvm.ct{l,t}z.i8(i8 %val, i1 0)
3417/// %val.numactivebits = sub i8 8, %val.numleadingzeros
3418/// %extraoffset.neg = sub i8 0, %extraoffset
3419/// %tmp = add i8 %val.numactivebits, %extraoffset.neg
3420/// %iv.final = call i8 @llvm.smax.i8(i8 %tmp, i8 %start)
3421 /// %loop.backedgetakencount = sub i8 %iv.final, %start
/// %loop.tripcount = add i8 %loop.backedgetakencount, 1
3422/// br label %loop
3423///
3424/// loop:
3425/// %loop.iv = phi i8 [ 0, %entry ], [ %loop.iv.next, %loop ]
3426/// %loop.iv.next = add i8 %loop.iv, 1
3427/// %loop.ivcheck = icmp eq i8 %loop.iv.next, %loop.tripcount
3428/// %iv = add i8 %loop.iv, %start
3429/// <...>
3430/// br i1 %loop.ivcheck, label %end, label %loop
3431///
3432/// end:
3433/// %iv.res = phi i8 [ %iv.final, %loop ] <...>
3434/// <...>
3435/// \endcode
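///
/// Worked example of the precomputation (an illustrative sketch): for a
/// `lshr` with i8 %val = 26 (0b00011010), %start = 0 and %extraoffset = 0,
/// ctlz(26) = 3, so %val.numactivebits = 5 and %iv.final = smax(5, 0) = 5;
/// the rewritten loop then iterates once per tested %iv value (0 through 5),
/// just like the original loop, which first sees `26 >> 5 == 0` at %iv = 5.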
3436bool LoopIdiomRecognize::recognizeShiftUntilZero() {
3437 bool MadeChange = false;
3438
3439 Instruction *ValShiftedIsZero;
3440 Intrinsic::ID IntrID;
3441 Instruction *IV;
3442 Value *Start, *Val;
3443 const SCEV *ExtraOffsetExpr;
3444 bool InvertedCond;
3445 if (!detectShiftUntilZeroIdiom(CurLoop, SE, ValShiftedIsZero, IntrinID&: IntrID, IV,
3446 Start, Val, ExtraOffsetExpr, InvertedCond)) {
3447 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3448 " shift-until-zero idiom detection failed.\n");
3449 return MadeChange;
3450 }
3451 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom detected!\n");
3452
3453 // Ok, it is the idiom we were looking for; we *could* transform this loop,
3454 // but is it profitable to transform?
3455
3456 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
3457 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
3458 assert(LoopPreheaderBB && "There is always a loop preheader.");
3459
3460 BasicBlock *SuccessorBB = CurLoop->getExitBlock();
3461 assert(SuccessorBB && "There is only a single successor.");
3462
3463 IRBuilder<> Builder(LoopPreheaderBB->getTerminator());
3464 Builder.SetCurrentDebugLocation(IV->getDebugLoc());
3465
3466 Type *Ty = Val->getType();
3467 unsigned Bitwidth = Ty->getScalarSizeInBits();
3468
3469 TargetTransformInfo::TargetCostKind CostKind =
3470 TargetTransformInfo::TCK_SizeAndLatency;
3471
3472 // The rewrite is considered to be unprofitable if and only if the
3473 // intrinsic we'll use is not cheap. Note that we are okay with *just*
3474 // making the loop countable, even if nothing else changes.
3475 IntrinsicCostAttributes Attrs(
3476 IntrID, Ty, {PoisonValue::get(T: Ty), /*is_zero_poison=*/Builder.getFalse()});
3477 InstructionCost Cost = TTI->getIntrinsicInstrCost(ICA: Attrs, CostKind);
3478 if (Cost > TargetTransformInfo::TCC_Basic) {
3479 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3480 " Intrinsic is too costly, not beneficial\n");
3481 return MadeChange;
3482 }
3483
3484 // Ok, transform appears worthwhile.
3485 MadeChange = true;
3486
3487 bool OffsetIsZero = ExtraOffsetExpr->isZero();
3488
3489 // Step 1: Compute the loop's final IV value / trip count.
3490
3491 CallInst *ValNumLeadingZeros = Builder.CreateIntrinsic(
3492 ID: IntrID, Types: Ty, Args: {Val, /*is_zero_poison=*/Builder.getFalse()},
3493 /*FMFSource=*/nullptr, Name: Val->getName() + ".numleadingzeros");
3494 Value *ValNumActiveBits = Builder.CreateSub(
3495 LHS: ConstantInt::get(Ty, V: Ty->getScalarSizeInBits()), RHS: ValNumLeadingZeros,
3496 Name: Val->getName() + ".numactivebits", /*HasNUW=*/true,
3497 /*HasNSW=*/Bitwidth != 2);
3498
3499 SCEVExpander Expander(*SE, "loop-idiom");
3500 Expander.setInsertPoint(&*Builder.GetInsertPoint());
3501 Value *ExtraOffset = Expander.expandCodeFor(SH: ExtraOffsetExpr);
3502
3503 Value *ValNumActiveBitsOffset = Builder.CreateAdd(
3504 LHS: ValNumActiveBits, RHS: ExtraOffset, Name: ValNumActiveBits->getName() + ".offset",
3505 /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true);
3506 Value *IVFinal = Builder.CreateIntrinsic(ID: Intrinsic::smax, Types: {Ty},
3507 Args: {ValNumActiveBitsOffset, Start},
3508 /*FMFSource=*/nullptr, Name: "iv.final");
3509
3510 auto *LoopBackedgeTakenCount = cast<Instruction>(Val: Builder.CreateSub(
3511 LHS: IVFinal, RHS: Start, Name: CurLoop->getName() + ".backedgetakencount",
3512 /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true));
3513 // FIXME: or when the offset was `add nuw`
3514
3515 // We know loop's backedge-taken count, but what's loop's trip count?
3516 Value *LoopTripCount =
3517 Builder.CreateAdd(LHS: LoopBackedgeTakenCount, RHS: ConstantInt::get(Ty, V: 1),
3518 Name: CurLoop->getName() + ".tripcount", /*HasNUW=*/true,
3519 /*HasNSW=*/Bitwidth != 2);
3520
3521 // Step 2: Adjust the successor basic block to receive the original
3522 // induction variable's final value instead of the orig. IV itself.
3523
3524 IV->replaceUsesOutsideBlock(V: IVFinal, BB: LoopHeaderBB);
3525
3526 // Step 3: Rewrite the loop into a countable form, with canonical IV.
3527
3528 // The new canonical induction variable.
3529 Builder.SetInsertPoint(TheBB: LoopHeaderBB, IP: LoopHeaderBB->begin());
3530 auto *CIV = Builder.CreatePHI(Ty, NumReservedValues: 2, Name: CurLoop->getName() + ".iv");
3531
3532 // The induction itself.
3533 Builder.SetInsertPoint(TheBB: LoopHeaderBB, IP: LoopHeaderBB->getFirstNonPHIIt());
3534 auto *CIVNext =
3535 Builder.CreateAdd(LHS: CIV, RHS: ConstantInt::get(Ty, V: 1), Name: CIV->getName() + ".next",
3536 /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
3537
3538 // The loop trip count check.
3539 auto *CIVCheck = Builder.CreateICmpEQ(LHS: CIVNext, RHS: LoopTripCount,
3540 Name: CurLoop->getName() + ".ivcheck");
3541 auto *NewIVCheck = CIVCheck;
3542 if (InvertedCond) {
3543 NewIVCheck = Builder.CreateNot(V: CIVCheck);
3544 NewIVCheck->takeName(V: ValShiftedIsZero);
3545 }
3546
3547 // The original IV, but rebased to be an offset to the CIV.
3548 auto *IVDePHId = Builder.CreateAdd(LHS: CIV, RHS: Start, Name: "", /*HasNUW=*/false,
3549 /*HasNSW=*/true); // FIXME: what about NUW?
3550 IVDePHId->takeName(V: IV);
3551
3552 // The loop terminator.
3553 Builder.SetInsertPoint(LoopHeaderBB->getTerminator());
3554 SmallVector<uint32_t> BranchWeights;
3555 const bool HasBranchWeights =
3556 !ProfcheckDisableMetadataFixes &&
3557 extractBranchWeights(I: *LoopHeaderBB->getTerminator(), Weights&: BranchWeights);
3558
3559 auto *BI = Builder.CreateCondBr(Cond: CIVCheck, True: SuccessorBB, False: LoopHeaderBB);
3560 if (HasBranchWeights) {
3561 if (InvertedCond)
3562 std::swap(a&: BranchWeights[0], b&: BranchWeights[1]);
3563 // We're not changing the loop profile, so we can reuse the original loop's
3564 // profile.
3565 setBranchWeights(I&: *BI, Weights: BranchWeights, /*IsExpected=*/false);
3566 }
3567 LoopHeaderBB->getTerminator()->eraseFromParent();
3568
3569 // Populate the IV PHI.
3570 CIV->addIncoming(V: ConstantInt::get(Ty, V: 0), BB: LoopPreheaderBB);
3571 CIV->addIncoming(V: CIVNext, BB: LoopHeaderBB);
3572
3573 // Step 4: Forget the "non-computable" trip-count SCEV associated with the
3574 // loop. The loop would otherwise not be deleted even if it becomes empty.
3575
3576 SE->forgetLoop(L: CurLoop);
3577
3578 // Step 5: Try to clean up the loop's body somewhat.
3579 IV->replaceAllUsesWith(V: IVDePHId);
3580 IV->eraseFromParent();
3581
3582 ValShiftedIsZero->replaceAllUsesWith(V: NewIVCheck);
3583 ValShiftedIsZero->eraseFromParent();
3584
3585 // Other passes will take care of actually deleting the loop if possible.
3586
3587 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom optimized!\n");
3588
3589 ++NumShiftUntilZero;
3590 return MadeChange;
3591}
3592