1//===-- PGOMemOPSizeOpt.cpp - Optimizations based on value profiling ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the transformation that optimizes memory intrinsics
10// such as memcpy using the size value profile. When memory intrinsic size
11// value profile metadata is available, a single memory intrinsic is expanded
12// to a sequence of guarded specialized versions that are called with the
13// hottest size(s), for later expansion into more optimal inline sequences.
14//
15//===----------------------------------------------------------------------===//
16
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/Statistic.h"
19#include "llvm/ADT/StringRef.h"
20#include "llvm/ADT/Twine.h"
21#include "llvm/Analysis/BlockFrequencyInfo.h"
22#include "llvm/Analysis/DomTreeUpdater.h"
23#include "llvm/Analysis/OptimizationRemarkEmitter.h"
24#include "llvm/Analysis/TargetLibraryInfo.h"
25#include "llvm/IR/BasicBlock.h"
26#include "llvm/IR/DerivedTypes.h"
27#include "llvm/IR/Dominators.h"
28#include "llvm/IR/Function.h"
29#include "llvm/IR/IRBuilder.h"
30#include "llvm/IR/InstVisitor.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/Instructions.h"
33#include "llvm/IR/LLVMContext.h"
34#include "llvm/IR/PassManager.h"
35#include "llvm/IR/Type.h"
36#include "llvm/ProfileData/InstrProf.h"
37#define INSTR_PROF_VALUE_PROF_MEMOP_API
38#include "llvm/ProfileData/InstrProfData.inc"
39#include "llvm/Support/Casting.h"
40#include "llvm/Support/CommandLine.h"
41#include "llvm/Support/Debug.h"
42#include "llvm/Support/ErrorHandling.h"
43#include "llvm/Support/MathExtras.h"
44#include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
45#include "llvm/Transforms/Utils/BasicBlockUtils.h"
46#include <cassert>
47#include <cstdint>
48#include <vector>
49
50using namespace llvm;
51
52#define DEBUG_TYPE "pgo-memop-opt"
53
54STATISTIC(NumOfPGOMemOPOpt, "Number of memop intrinsics optimized.");
55STATISTIC(NumOfPGOMemOPAnnotate, "Number of memop intrinsics annotated.");
56
namespace llvm {

// The minimum call count to optimize memory intrinsic calls.
// Callsites whose (scaled) execution count is below this are left alone.
static cl::opt<unsigned>
    MemOPCountThreshold("pgo-memop-count-threshold", cl::Hidden, cl::init(Val: 1000),
                        cl::desc("The minimum count to optimize memory "
                                 "intrinsic calls"));

// Command line option to disable memory intrinsic optimization. The default is
// false. This is for debug purpose.
static cl::opt<bool> DisableMemOPOPT("disable-memop-opt", cl::init(Val: false),
                                     cl::Hidden, cl::desc("Disable optimize"));

// The percent threshold to optimize memory intrinsic calls.
// A size value is only promoted to a switch case when its count is at least
// this percentage of the remaining (not-yet-promoted) total.
static cl::opt<unsigned>
    MemOPPercentThreshold("pgo-memop-percent-threshold", cl::init(Val: 40),
                          cl::Hidden,
                          cl::desc("The percentage threshold for the "
                                   "memory intrinsic calls optimization"));

// Maximum number of versions for optimizing memory intrinsic call.
// A value of 0 means no cap on the number of specialized versions.
static cl::opt<unsigned>
    MemOPMaxVersion("pgo-memop-max-version", cl::init(Val: 3), cl::Hidden,
                    cl::desc("The max version for the optimized memory "
                             " intrinsic calls"));

// Scale the counts from the annotation using the BB count value.
// This re-bases value-profile counts onto the BFI count of the enclosing
// basic block, which may come from a different profile domain.
static cl::opt<bool>
    MemOPScaleCount("pgo-memop-scale-count", cl::init(Val: true), cl::Hidden,
                    cl::desc("Scale the memop size counts using the basic "
                             " block count value"));

// Also size-specialize memcmp/bcmp library calls, not just the memory
// intrinsics. Non-static: NOTE(review) presumably referenced via an extern
// declaration in another TU — confirm before changing linkage.
cl::opt<bool>
    MemOPOptMemcmpBcmp("pgo-memop-optimize-memcmp-bcmp", cl::init(Val: true),
                       cl::Hidden,
                       cl::desc("Size-specialize memcmp and bcmp calls"));

// Upper bound on the constant size considered for specialization; larger
// sizes stay in the default (unspecialized) path.
static cl::opt<unsigned>
    MemOpMaxOptSize("memop-value-prof-max-opt-size", cl::Hidden, cl::init(Val: 128),
                    cl::desc("Optimize the memop size <= this value"));

} // end namespace llvm
99
100namespace {
101
102static const char *getMIName(const MemIntrinsic *MI) {
103 switch (MI->getIntrinsicID()) {
104 case Intrinsic::memcpy:
105 return "memcpy";
106 case Intrinsic::memmove:
107 return "memmove";
108 case Intrinsic::memset:
109 return "memset";
110 default:
111 return "unknown";
112 }
113}
114
115// A class that abstracts a memop (memcpy, memmove, memset, memcmp and bcmp).
116struct MemOp {
117 Instruction *I;
118 MemOp(MemIntrinsic *MI) : I(MI) {}
119 MemOp(CallInst *CI) : I(CI) {}
120 MemIntrinsic *asMI() { return dyn_cast<MemIntrinsic>(Val: I); }
121 CallInst *asCI() { return cast<CallInst>(Val: I); }
122 MemOp clone() {
123 if (auto MI = asMI())
124 return MemOp(cast<MemIntrinsic>(Val: MI->clone()));
125 return MemOp(cast<CallInst>(Val: asCI()->clone()));
126 }
127 Value *getLength() {
128 if (auto MI = asMI())
129 return MI->getLength();
130 return asCI()->getArgOperand(i: 2);
131 }
132 void setLength(Value *Length) {
133 if (auto MI = asMI())
134 return MI->setLength(Length);
135 asCI()->setArgOperand(i: 2, v: Length);
136 }
137 StringRef getFuncName() {
138 if (auto MI = asMI())
139 return MI->getCalledFunction()->getName();
140 return asCI()->getCalledFunction()->getName();
141 }
142 bool isMemmove() {
143 if (auto MI = asMI())
144 if (MI->getIntrinsicID() == Intrinsic::memmove)
145 return true;
146 return false;
147 }
148 bool isMemcmp(TargetLibraryInfo &TLI) {
149 LibFunc Func;
150 if (asMI() == nullptr && TLI.getLibFunc(CB: *asCI(), F&: Func) &&
151 Func == LibFunc_memcmp) {
152 return true;
153 }
154 return false;
155 }
156 bool isBcmp(TargetLibraryInfo &TLI) {
157 LibFunc Func;
158 if (asMI() == nullptr && TLI.getLibFunc(CB: *asCI(), F&: Func) &&
159 Func == LibFunc_bcmp) {
160 return true;
161 }
162 return false;
163 }
164 const char *getName(TargetLibraryInfo &TLI) {
165 if (auto MI = asMI())
166 return getMIName(MI);
167 LibFunc Func;
168 if (TLI.getLibFunc(CB: *asCI(), F&: Func)) {
169 if (Func == LibFunc_memcmp)
170 return "memcmp";
171 if (Func == LibFunc_bcmp)
172 return "bcmp";
173 }
174 llvm_unreachable("Must be MemIntrinsic or memcmp/bcmp CallInst");
175 return nullptr;
176 }
177};
178
179class MemOPSizeOpt : public InstVisitor<MemOPSizeOpt> {
180public:
181 MemOPSizeOpt(Function &Func, BlockFrequencyInfo &BFI,
182 OptimizationRemarkEmitter &ORE, DominatorTree *DT,
183 TargetLibraryInfo &TLI)
184 : Func(Func), BFI(BFI), ORE(ORE), DT(DT), TLI(TLI), Changed(false) {}
185 bool isChanged() const { return Changed; }
186 void perform() {
187 WorkList.clear();
188 visit(F&: Func);
189
190 for (auto &MO : WorkList) {
191 ++NumOfPGOMemOPAnnotate;
192 if (perform(MO)) {
193 Changed = true;
194 ++NumOfPGOMemOPOpt;
195 LLVM_DEBUG(dbgs() << "MemOP call: " << MO.getFuncName()
196 << "is Transformed.\n");
197 }
198 }
199 }
200
201 void visitMemIntrinsic(MemIntrinsic &MI) {
202 Value *Length = MI.getLength();
203 // Not perform on constant length calls.
204 if (isa<ConstantInt>(Val: Length))
205 return;
206 WorkList.push_back(x: MemOp(&MI));
207 }
208
209 void visitCallInst(CallInst &CI) {
210 LibFunc Func;
211 if (TLI.getLibFunc(CB: CI, F&: Func) &&
212 (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
213 !isa<ConstantInt>(Val: CI.getArgOperand(i: 2))) {
214 WorkList.push_back(x: MemOp(&CI));
215 }
216 }
217
218private:
219 Function &Func;
220 BlockFrequencyInfo &BFI;
221 OptimizationRemarkEmitter &ORE;
222 DominatorTree *DT;
223 TargetLibraryInfo &TLI;
224 bool Changed;
225 std::vector<MemOp> WorkList;
226 bool perform(MemOp MO);
227};
228
229static bool isProfitable(uint64_t Count, uint64_t TotalCount) {
230 assert(Count <= TotalCount);
231 if (Count < MemOPCountThreshold)
232 return false;
233 if (Count < TotalCount * MemOPPercentThreshold / 100)
234 return false;
235 return true;
236}
237
238static inline uint64_t getScaledCount(uint64_t Count, uint64_t Num,
239 uint64_t Denom) {
240 if (!MemOPScaleCount)
241 return Count;
242 bool Overflowed;
243 uint64_t ScaleCount = SaturatingMultiply(X: Count, Y: Num, ResultOverflowed: &Overflowed);
244 return ScaleCount / Denom;
245}
246
// Transform one size-profiled memop: promote its hottest single-value size
// buckets to cases of a switch, each case calling a clone of the memop with
// that constant size; the original call survives in the default block.
// Returns true iff the IR was changed.
bool MemOPSizeOpt::perform(MemOp MO) {
  assert(MO.I);
  // memmove is never specialized by this pass.
  if (MO.isMemmove())
    return false;
  // memcmp/bcmp specialization has its own enabling flag.
  if (!MemOPOptMemcmpBcmp && (MO.isMemcmp(TLI) || MO.isBcmp(TLI)))
    return false;

  uint32_t MaxNumVals = INSTR_PROF_NUM_BUCKETS;
  uint64_t TotalCount;
  auto VDs =
      getValueProfDataFromInst(Inst: *MO.I, ValueKind: IPVK_MemOPSize, MaxNumValueData: MaxNumVals, TotalC&: TotalCount);
  // No value profile attached to this instruction — nothing to do.
  if (VDs.empty())
    return false;

  uint64_t ActualCount = TotalCount;
  uint64_t SavedTotalCount = TotalCount;
  // When scaling, re-base the total on the BFI count of the enclosing block;
  // bail out if BFI has no count for it.
  if (MemOPScaleCount) {
    auto BBEdgeCount = BFI.getBlockProfileCount(BB: MO.I->getParent());
    if (!BBEdgeCount)
      return false;
    ActualCount = *BBEdgeCount;
  }

  LLVM_DEBUG(dbgs() << "Read one memory intrinsic profile with count "
                    << ActualCount << "\n");
  LLVM_DEBUG(
      for (auto &VD
           : VDs) { dbgs() << " (" << VD.Value << "," << VD.Count << ")\n"; });

  // Absolute-count gate on the (possibly re-based) total.
  if (ActualCount < MemOPCountThreshold)
    return false;
  // Skip if the total value profiled count is 0, in which case we can't
  // scale up the counts properly (and there is no profitable transformation).
  if (TotalCount == 0)
    return false;

  // From here on, TotalCount is in the BFI domain; SavedTotalCount keeps the
  // original value-profile total for re-annotation and scaling.
  TotalCount = ActualCount;
  if (MemOPScaleCount)
    LLVM_DEBUG(dbgs() << "Scale counts: numerator = " << ActualCount
                      << " denominator = " << SavedTotalCount << "\n");

  // Keeping track of the count of the default case:
  uint64_t RemainCount = TotalCount;
  uint64_t SavedRemainCount = SavedTotalCount;
  SizeIds: the constant sizes promoted to switch cases.
  SmallVector<uint64_t, 16> SizeIds;
  SmallVector<uint64_t, 16> CaseCounts;
  SmallDenseSet<uint64_t, 16> SeenSizeId;
  uint64_t MaxCount = 0;
  unsigned Version = 0;
  // Default case is in the front -- save the slot here.
  CaseCounts.push_back(Elt: 0);
  // Records not promoted to cases; re-annotated on the default call below.
  SmallVector<InstrProfValueData, 24> RemainingVDs;
  for (auto I = VDs.begin(), E = VDs.end(); I != E; ++I) {
    auto &VD = *I;
    int64_t V = VD.Value;
    uint64_t C = VD.Count;
    if (MemOPScaleCount)
      C = getScaledCount(Count: C, Num: ActualCount, Denom: SavedTotalCount);

    // Only single-value buckets no larger than MemOpMaxOptSize can become
    // constant-size cases; range buckets stay on the default path.
    if (!InstrProfIsSingleValRange(Value: V) || V > MemOpMaxOptSize) {
      RemainingVDs.push_back(Elt: VD);
      continue;
    }

    // ValueCounts are sorted on the count. Break at the first un-profitable
    // value.
    if (!isProfitable(Count: C, TotalCount: RemainCount)) {
      RemainingVDs.insert(I: RemainingVDs.end(), From: I, To: E);
      break;
    }

    // Duplicate size values indicate corrupt profile data — give up.
    if (!SeenSizeId.insert(V).second) {
      errs() << "warning: Invalid Profile Data in Function " << Func.getName()
             << ": Two identical values in MemOp value counts.\n";
      return false;
    }

    SizeIds.push_back(Elt: V);
    CaseCounts.push_back(Elt: C);
    if (C > MaxCount)
      MaxCount = C;

    assert(RemainCount >= C);
    RemainCount -= C;
    assert(SavedRemainCount >= VD.Count);
    SavedRemainCount -= VD.Count;

    // Stop once the per-callsite version cap is reached (0 means uncapped).
    if (++Version >= MemOPMaxVersion && MemOPMaxVersion != 0) {
      RemainingVDs.insert(I: RemainingVDs.end(), From: I + 1, To: E);
      break;
    }
  }

  // Nothing was profitable to promote.
  if (Version == 0)
    return false;

  // Slot 0 (reserved above) gets the default case's residual count.
  CaseCounts[0] = RemainCount;
  if (RemainCount > MaxCount)
    MaxCount = RemainCount;

  uint64_t SumForOpt = TotalCount - RemainCount;

  LLVM_DEBUG(dbgs() << "Optimize one memory intrinsic call to " << Version
                    << " Versions (covering " << SumForOpt << " out of "
                    << TotalCount << ")\n");

  // mem_op(..., size)
  // ==>
  // switch (size) {
  // case s1:
  //    mem_op(..., s1);
  //    goto merge_bb;
  // case s2:
  //    mem_op(..., s2);
  //    goto merge_bb;
  // ...
  // default:
  //    mem_op(..., size);
  //    goto merge_bb;
  // }
  // merge_bb:

  BasicBlock *BB = MO.I->getParent();
  LLVM_DEBUG(dbgs() << "\n\n== Basic Block Before ==\n");
  LLVM_DEBUG(dbgs() << *BB << "\n");
  // Remember the original frequency so MergeBB (which every path reaches)
  // keeps it after the splits.
  auto OrigBBFreq = BFI.getBlockFreq(BB);

  // Split twice: BB ends just before the memop, DefaultBB holds the original
  // memop, MergeBB holds everything after it.
  BasicBlock *DefaultBB = SplitBlock(Old: BB, SplitPt: MO.I, DT);
  BasicBlock::iterator It(*MO.I);
  ++It;
  assert(It != DefaultBB->end());
  BasicBlock *MergeBB = SplitBlock(Old: DefaultBB, SplitPt: &(*It), DT);
  MergeBB->setName("MemOP.Merge");
  BFI.setBlockFreq(BB: MergeBB, Freq: OrigBBFreq);
  DefaultBB->setName("MemOP.Default");

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  auto &Ctx = Func.getContext();
  IRBuilder<> IRB(BB);
  // Replace the unconditional branch SplitBlock created with a switch on
  // the dynamic size.
  BB->getTerminator()->eraseFromParent();
  Value *SizeVar = MO.getLength();
  SwitchInst *SI = IRB.CreateSwitch(V: SizeVar, Dest: DefaultBB, NumCases: SizeIds.size());
  Type *MemOpTy = MO.I->getType();
  PHINode *PHI = nullptr;
  // Non-void results (memcmp/bcmp) are merged with a phi in MergeBB.
  if (!MemOpTy->isVoidTy()) {
    // Insert a phi for the return values at the merge block.
    IRBuilder<> IRBM(MergeBB, MergeBB->getFirstNonPHIIt());
    PHI = IRBM.CreatePHI(Ty: MemOpTy, NumReservedValues: SizeIds.size() + 1, Name: "MemOP.RVMerge");
    MO.I->replaceAllUsesWith(V: PHI);
    PHI->addIncoming(V: MO.I, BB: DefaultBB);
  }

  // Clear the value profile data.
  MO.I->setMetadata(KindID: LLVMContext::MD_prof, Node: nullptr);
  // If all promoted, we don't need the MD.prof metadata.
  if (SavedRemainCount > 0 || Version != VDs.size()) {
    // Otherwise we need update with the un-promoted records back.
    annotateValueSite(M&: *Func.getParent(), Inst&: *MO.I, VDs: RemainingVDs, Sum: SavedRemainCount,
                      ValueKind: IPVK_MemOPSize, MaxMDCount: VDs.size());
  }

  LLVM_DEBUG(dbgs() << "\n\n== Basic Block After==\n");

  std::vector<DominatorTree::UpdateType> Updates;
  if (DT)
    Updates.reserve(n: 2 * SizeIds.size());

  // One case block per promoted size: a clone of the memop whose size
  // operand is replaced by the constant, then a jump to MergeBB.
  for (uint64_t SizeId : SizeIds) {
    BasicBlock *CaseBB = BasicBlock::Create(
        Context&: Ctx, Name: Twine("MemOP.Case.") + Twine(SizeId), Parent: &Func, InsertBefore: DefaultBB);
    MemOp NewMO = MO.clone();
    // Fix the argument.
    auto *SizeType = dyn_cast<IntegerType>(Val: NewMO.getLength()->getType());
    assert(SizeType && "Expected integer type size argument.");
    ConstantInt *CaseSizeId = ConstantInt::get(Ty: SizeType, V: SizeId);
    NewMO.setLength(CaseSizeId);
    NewMO.I->insertInto(ParentBB: CaseBB, It: CaseBB->end());
    IRBuilder<> IRBCase(CaseBB);
    IRBCase.CreateBr(Dest: MergeBB);
    SI->addCase(OnVal: CaseSizeId, Dest: CaseBB);
    if (!MemOpTy->isVoidTy())
      PHI->addIncoming(V: NewMO.I, BB: CaseBB);
    if (DT) {
      Updates.push_back(x: {DominatorTree::Insert, CaseBB, MergeBB});
      Updates.push_back(x: {DominatorTree::Insert, BB, CaseBB});
    }
    LLVM_DEBUG(dbgs() << *CaseBB << "\n");
  }
  DTU.applyUpdates(Updates);
  Updates.clear();

  // Attach branch weights so later passes see the per-case profile.
  if (MaxCount)
    setProfMetadata(TI: SI, EdgeCounts: CaseCounts, MaxCount);

  LLVM_DEBUG(dbgs() << *BB << "\n");
  LLVM_DEBUG(dbgs() << *DefaultBB << "\n");
  LLVM_DEBUG(dbgs() << *MergeBB << "\n");

  ORE.emit(RemarkBuilder: [&]() {
    using namespace ore;
    return OptimizationRemark(DEBUG_TYPE, "memopt-opt", MO.I)
           << "optimized " << NV("Memop", MO.getName(TLI)) << " with count "
           << NV("Count", SumForOpt) << " out of " << NV("Total", TotalCount)
           << " for " << NV("Versions", Version) << " versions";
  });

  return true;
}
455} // namespace
456
457static bool PGOMemOPSizeOptImpl(Function &F, BlockFrequencyInfo &BFI,
458 OptimizationRemarkEmitter &ORE,
459 DominatorTree *DT, TargetLibraryInfo &TLI) {
460 if (DisableMemOPOPT)
461 return false;
462
463 if (F.hasOptSize())
464 return false;
465 MemOPSizeOpt MemOPSizeOpt(F, BFI, ORE, DT, TLI);
466 MemOPSizeOpt.perform();
467 return MemOPSizeOpt.isChanged();
468}
469
470PreservedAnalyses PGOMemOPSizeOpt::run(Function &F,
471 FunctionAnalysisManager &FAM) {
472 auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(IR&: F);
473 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
474 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(IR&: F);
475 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(IR&: F);
476 bool Changed = PGOMemOPSizeOptImpl(F, BFI, ORE, DT, TLI);
477 if (!Changed)
478 return PreservedAnalyses::all();
479 auto PA = PreservedAnalyses();
480 PA.preserve<DominatorTreeAnalysis>();
481 return PA;
482}
483