//===-------- LoopDataPrefetch.cpp - Loop Data Prefetching Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a Loop Data Prefetching Pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopDataPrefetch.h"
#include "llvm/InitializePasses.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

#define DEBUG_TYPE "loop-data-prefetch"

using namespace llvm;
// Prefetching of write addresses is left to the target by default; this flag,
// when passed explicitly, overrides TTI::enableWritePrefetching().
static cl::opt<bool>
PrefetchWrites("loop-prefetch-writes", cl::Hidden, cl::init(false),
               cl::desc("Prefetch write addresses"));

static cl::opt<unsigned>
    PrefetchDistance("prefetch-distance",
                     cl::desc("Number of instructions to prefetch ahead"),
                     cl::Hidden);

static cl::opt<unsigned>
    MinPrefetchStride("min-prefetch-stride",
                      cl::desc("Min stride to add prefetches"), cl::Hidden);

static cl::opt<unsigned> MaxPrefetchIterationsAhead(
    "max-prefetch-iters-ahead",
    cl::desc("Max number of iterations to prefetch ahead"), cl::Hidden);

STATISTIC(NumPrefetches, "Number of prefetches inserted");

namespace {

/// Loop prefetch implementation class.
class LoopDataPrefetch {
public:
  LoopDataPrefetch(AssumptionCache *AC, DominatorTree *DT, LoopInfo *LI,
                   ScalarEvolution *SE, const TargetTransformInfo *TTI,
                   OptimizationRemarkEmitter *ORE)
      : AC(AC), DT(DT), LI(LI), SE(SE), TTI(TTI), ORE(ORE) {}

  bool run();

private:
  bool runOnLoop(Loop *L);

  /// Check if the stride of the accesses is large enough to
  /// warrant a prefetch.
  bool isStrideLargeEnough(const SCEVAddRecExpr *AR, unsigned TargetMinStride);

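  // Each tunable below prefers an explicit command-line value (detected via
  // getNumOccurrences()) and otherwise falls back to the corresponding
  // TargetTransformInfo hook.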
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) {
    if (MinPrefetchStride.getNumOccurrences() > 0)
      return MinPrefetchStride;
    return TTI->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                     NumPrefetches, HasCall);
  }

  unsigned getPrefetchDistance() {
    if (PrefetchDistance.getNumOccurrences() > 0)
      return PrefetchDistance;
    return TTI->getPrefetchDistance();
  }

  unsigned getMaxPrefetchIterationsAhead() {
    if (MaxPrefetchIterationsAhead.getNumOccurrences() > 0)
      return MaxPrefetchIterationsAhead;
    return TTI->getMaxPrefetchIterationsAhead();
  }

  bool doPrefetchWrites() {
    if (PrefetchWrites.getNumOccurrences() > 0)
      return PrefetchWrites;
    return TTI->enableWritePrefetching();
  }

  AssumptionCache *AC;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  const TargetTransformInfo *TTI;
  OptimizationRemarkEmitter *ORE;
};

/// Legacy class for inserting loop data prefetches.
class LoopDataPrefetchLegacyPass : public FunctionPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopDataPrefetchLegacyPass() : FunctionPass(ID) {
    initializeLoopDataPrefetchLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequiredID(LoopSimplifyID);
    AU.addPreservedID(LoopSimplifyID);
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;
};
} // end anonymous namespace

char LoopDataPrefetchLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDataPrefetchLegacyPass, "loop-data-prefetch",
                      "Loop Data Prefetch", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(LoopDataPrefetchLegacyPass, "loop-data-prefetch",
                    "Loop Data Prefetch", false, false)

FunctionPass *llvm::createLoopDataPrefetchPass() {
  return new LoopDataPrefetchLegacyPass();
}

bool LoopDataPrefetch::isStrideLargeEnough(const SCEVAddRecExpr *AR,
                                           unsigned TargetMinStride) {
  // A minimum stride of one accepts any stride, so there is nothing to check.
  if (TargetMinStride <= 1)
    return true;

  const auto *ConstStride = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
  // If MinStride is set, don't prefetch unless we can ensure that stride is
  // larger.
  if (!ConstStride)
    return false;

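  // E.g., with TargetMinStride == 64, an AddRec stepping 128 bytes per
  // iteration qualifies, while one stepping 4 bytes does not.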
  unsigned AbsStride = std::abs(ConstStride->getAPInt().getSExtValue());
  return TargetMinStride <= AbsStride;
}

PreservedAnalyses LoopDataPrefetchPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  LoopInfo *LI = &AM.getResult<LoopAnalysis>(F);
  ScalarEvolution *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  AssumptionCache *AC = &AM.getResult<AssumptionAnalysis>(F);
  OptimizationRemarkEmitter *ORE =
      &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  const TargetTransformInfo *TTI = &AM.getResult<TargetIRAnalysis>(F);

  LoopDataPrefetch LDP(AC, DT, LI, SE, TTI, ORE);
  bool Changed = LDP.run();

  if (Changed) {
    PreservedAnalyses PA;
    PA.preserve<DominatorTreeAnalysis>();
    PA.preserve<LoopAnalysis>();
    return PA;
  }

  return PreservedAnalyses::all();
}

bool LoopDataPrefetchLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  AssumptionCache *AC =
      &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  OptimizationRemarkEmitter *ORE =
      &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
  const TargetTransformInfo *TTI =
      &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  LoopDataPrefetch LDP(AC, DT, LI, SE, TTI, ORE);
  return LDP.run();
}

bool LoopDataPrefetch::run() {
  // If PrefetchDistance is not set, don't run the pass. This gives an
  // opportunity for targets to run this pass for selected subtargets only
  // (whose TTI sets PrefetchDistance and CacheLineSize).
  if (getPrefetchDistance() == 0 || TTI->getCacheLineSize() == 0) {
    LLVM_DEBUG(dbgs() << "Please set both PrefetchDistance and CacheLineSize "
                         "for loop data prefetch.\n");
    return false;
  }

  bool MadeChange = false;

  for (Loop *I : *LI)
    for (Loop *L : depth_first(I))
      MadeChange |= runOnLoop(L);

  return MadeChange;
}

/// A record for a potential prefetch made during the initial scan of the
/// loop. This is used to let a single prefetch target multiple memory accesses.
struct Prefetch {
  /// The address formula for this prefetch as returned by ScalarEvolution.
  const SCEVAddRecExpr *LSCEVAddRec;
  /// The point of insertion for the prefetch instruction.
  Instruction *InsertPt = nullptr;
  /// True if targeting a write memory access.
  bool Writes = false;
  /// The (first seen) prefetched instruction.
  Instruction *MemI = nullptr;

  /// Constructor to create a new Prefetch for \p I.
  Prefetch(const SCEVAddRecExpr *L, Instruction *I) : LSCEVAddRec(L) {
    addInstruction(I);
  }

  /// Add the instruction \p I to this prefetch. If it's not the first
  /// one, 'InsertPt' and 'Writes' will be updated as required.
  /// \param PtrDiff the known constant address difference to the first added
  /// instruction.
  void addInstruction(Instruction *I, DominatorTree *DT = nullptr,
                      int64_t PtrDiff = 0) {
    if (!InsertPt) {
      MemI = I;
      InsertPt = I;
      Writes = isa<StoreInst>(I);
    } else {
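      // The single prefetch must dominate every access it covers. If this
      // access lives in a different block, hoist the insertion point to the
      // nearest common dominator of the two blocks.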
      BasicBlock *PrefBB = InsertPt->getParent();
      BasicBlock *InsBB = I->getParent();
      if (PrefBB != InsBB) {
        BasicBlock *DomBB = DT->findNearestCommonDominator(PrefBB, InsBB);
        if (DomBB != PrefBB)
          InsertPt = DomBB->getTerminator();
      }

      if (isa<StoreInst>(I) && PtrDiff == 0)
        Writes = true;
    }
  }
};

bool LoopDataPrefetch::runOnLoop(Loop *L) {
  bool MadeChange = false;

  // Only prefetch in the inner-most loop
  if (!L->isInnermost())
    return MadeChange;

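  // Ephemeral values only feed llvm.assume and never materialize in the
  // generated code, so keep them out of the size estimate below.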
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, AC, EphValues);

  // Calculate the number of iterations ahead to prefetch
  CodeMetrics Metrics;
  bool HasCall = false;
  for (const auto BB : L->blocks()) {
    // If the loop already has prefetches, then assume that the user knows
    // what they are doing and don't add any more.
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (F->getIntrinsicID() == Intrinsic::prefetch)
            return MadeChange;
          if (TTI->isLoweredToCall(F))
            HasCall = true;
        } else { // indirect call.
          HasCall = true;
        }
      }
    }
    Metrics.analyzeBasicBlock(BB, *TTI, EphValues);
  }

  if (!Metrics.NumInsts.isValid())
    return MadeChange;

  unsigned LoopSize = Metrics.NumInsts.getValue();
  if (!LoopSize)
    LoopSize = 1;

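  // Convert the target's prefetch distance, given in instructions, into loop
  // iterations; e.g. a 300-instruction distance over a 40-instruction body
  // means prefetching 300 / 40 = 7 iterations ahead.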
  unsigned ItersAhead = getPrefetchDistance() / LoopSize;
  if (!ItersAhead)
    ItersAhead = 1;

  if (ItersAhead > getMaxPrefetchIterationsAhead())
    return MadeChange;

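  // If the loop is known to run at most ItersAhead iterations, every prefetch
  // would target an address past the final iteration; skip the loop.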
  unsigned ConstantMaxTripCount = SE->getSmallConstantMaxTripCount(L);
  if (ConstantMaxTripCount && ConstantMaxTripCount < ItersAhead + 1)
    return MadeChange;

  unsigned NumMemAccesses = 0;
  unsigned NumStridedMemAccesses = 0;
  SmallVector<Prefetch, 16> Prefetches;
  for (const auto BB : L->blocks())
    for (auto &I : *BB) {
      Value *PtrValue;
      Instruction *MemI;

      if (LoadInst *LMemI = dyn_cast<LoadInst>(&I)) {
        MemI = LMemI;
        PtrValue = LMemI->getPointerOperand();
      } else if (StoreInst *SMemI = dyn_cast<StoreInst>(&I)) {
        if (!doPrefetchWrites()) continue;
        MemI = SMemI;
        PtrValue = SMemI->getPointerOperand();
      } else continue;

      unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
      if (!TTI->shouldPrefetchAddressSpace(PtrAddrSpace))
        continue;
      NumMemAccesses++;
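      // A loop-invariant address touches the same cache line on every
      // iteration; there is nothing to prefetch ahead of.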
      if (L->isLoopInvariant(PtrValue))
        continue;

      const SCEV *LSCEV = SE->getSCEV(PtrValue);
      const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
      if (!LSCEVAddRec)
        continue;
      NumStridedMemAccesses++;

      // We don't want to double prefetch individual cache lines. If this
      // access is known to be within one cache line of some other one that
      // has already been prefetched, then don't prefetch this one as well.
      bool DupPref = false;
      for (auto &Pref : Prefetches) {
        const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, Pref.LSCEVAddRec);
        if (const SCEVConstant *ConstPtrDiff =
            dyn_cast<SCEVConstant>(PtrDiff)) {
          int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue());
          if (PD < (int64_t) TTI->getCacheLineSize()) {
            Pref.addInstruction(MemI, DT, PD);
            DupPref = true;
            break;
          }
        }
      }
      if (!DupPref)
        Prefetches.push_back(Prefetch(LSCEVAddRec, MemI));
    }

  unsigned TargetMinStride =
    getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                         Prefetches.size(), HasCall);

  LLVM_DEBUG(dbgs() << "Prefetching " << ItersAhead
                    << " iterations ahead (loop size: " << LoopSize << ") in "
                    << L->getHeader()->getParent()->getName() << ": " << *L);
  LLVM_DEBUG(dbgs() << "Loop has: "
                    << NumMemAccesses << " memory accesses, "
                    << NumStridedMemAccesses << " strided memory accesses, "
                    << Prefetches.size() << " potential prefetch(es), "
                    << "a minimum stride of " << TargetMinStride << ", "
                    << (HasCall ? "calls" : "no calls") << ".\n");

  for (auto &P : Prefetches) {
    // Check if the stride of the accesses is large enough to warrant a
    // prefetch.
    if (!isStrideLargeEnough(P.LSCEVAddRec, TargetMinStride))
      continue;

    BasicBlock *BB = P.InsertPt->getParent();
    SCEVExpander SCEVE(*SE, BB->getDataLayout(), "prefaddr");
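    // Form the address the access will use ItersAhead iterations from now:
    // NextLSCEV = LSCEVAddRec + ItersAhead * Step.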
    const SCEV *NextLSCEV = SE->getAddExpr(P.LSCEVAddRec, SE->getMulExpr(
      SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead),
      P.LSCEVAddRec->getStepRecurrence(*SE)));
    if (!SCEVE.isSafeToExpand(NextLSCEV))
      continue;

    unsigned PtrAddrSpace = NextLSCEV->getType()->getPointerAddressSpace();
    Type *I8Ptr = PointerType::get(BB->getContext(), PtrAddrSpace);
    Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, P.InsertPt);

    IRBuilder<> Builder(P.InsertPt);
    Type *I32 = Type::getInt32Ty(BB->getContext());
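    // llvm.prefetch takes (address, rw, locality, cache-type): rw is 0 for a
    // read and 1 for a write, locality 3 requests maximal temporal locality,
    // and cache-type 1 selects the data cache.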
    Builder.CreateIntrinsic(Intrinsic::prefetch, PrefPtrValue->getType(),
                            {PrefPtrValue, ConstantInt::get(I32, P.Writes),
                             ConstantInt::get(I32, 3),
                             ConstantInt::get(I32, 1)});
    ++NumPrefetches;
    LLVM_DEBUG(dbgs() << "  Access: "
                      << *P.MemI->getOperand(isa<LoadInst>(P.MemI) ? 0 : 1)
                      << ", SCEV: " << *P.LSCEVAddRec << "\n");
    ORE->emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Prefetched", P.MemI)
             << "prefetched memory access";
    });

    MadeChange = true;
  }

  return MadeChange;
}