1//===- JumpTableToSwitch.cpp ----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "llvm/Transforms/Scalar/JumpTableToSwitch.h"
10#include "llvm/ADT/DenseSet.h"
11#include "llvm/ADT/STLExtras.h"
12#include "llvm/ADT/SmallVector.h"
13#include "llvm/Analysis/ConstantFolding.h"
14#include "llvm/Analysis/CtxProfAnalysis.h"
15#include "llvm/Analysis/DomTreeUpdater.h"
16#include "llvm/Analysis/OptimizationRemarkEmitter.h"
17#include "llvm/Analysis/PostDominators.h"
18#include "llvm/IR/IRBuilder.h"
19#include "llvm/IR/LLVMContext.h"
20#include "llvm/IR/ProfDataUtils.h"
21#include "llvm/ProfileData/InstrProf.h"
22#include "llvm/Support/CommandLine.h"
23#include "llvm/Support/Error.h"
24#include "llvm/Transforms/Utils/BasicBlockUtils.h"
25#include <limits>
26
27using namespace llvm;
28
29static cl::opt<unsigned>
30 JumpTableSizeThreshold("jump-table-to-switch-size-threshold", cl::Hidden,
31 cl::desc("Only split jump tables with size less or "
32 "equal than JumpTableSizeThreshold."),
33 cl::init(Val: 10));
34
35// TODO: Consider adding a cost model for profitability analysis of this
36// transformation. Currently we replace a jump table with a switch if all the
37// functions in the jump table are smaller than the provided threshold.
38static cl::opt<unsigned> FunctionSizeThreshold(
39 "jump-table-to-switch-function-size-threshold", cl::Hidden,
40 cl::desc("Only split jump tables containing functions whose sizes are less "
41 "or equal than this threshold."),
42 cl::init(Val: 50));
43
namespace llvm {
// Defined elsewhere in LLVM. When set, this pass does not derive branch
// weights from value-profile data and instead marks the generated switch with
// explicitly-unknown branch weights (see use in expandToSwitch below).
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
} // end namespace llvm

#define DEBUG_TYPE "jump-table-to-switch"
49
namespace {
// A matched jump-table pattern: the value used to index into a constant array
// of function pointers, plus the functions stored in that array.
struct JumpTableTy {
  // The single variable offset of the GEP feeding the indirect call; selects
  // the table entry at runtime.
  Value *Index;
  // Table entries in index order. parseJumpTable() guarantees each one is a
  // defined (non-declaration) Function in this module.
  SmallVector<Function *, 10> Funcs;
};
} // anonymous namespace
56
57static std::optional<JumpTableTy> parseJumpTable(GetElementPtrInst *GEP,
58 PointerType *PtrTy) {
59 Constant *Ptr = dyn_cast<Constant>(Val: GEP->getPointerOperand());
60 if (!Ptr)
61 return std::nullopt;
62
63 GlobalVariable *GV = dyn_cast<GlobalVariable>(Val: Ptr);
64 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
65 return std::nullopt;
66
67 Function &F = *GEP->getParent()->getParent();
68 const DataLayout &DL = F.getDataLayout();
69 const unsigned BitWidth =
70 DL.getIndexSizeInBits(AS: GEP->getPointerAddressSpace());
71 SmallMapVector<Value *, APInt, 4> VariableOffsets;
72 APInt ConstantOffset(BitWidth, 0);
73 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
74 return std::nullopt;
75 if (VariableOffsets.size() != 1)
76 return std::nullopt;
77 // TODO: consider supporting more general patterns
78 if (!ConstantOffset.isZero())
79 return std::nullopt;
80 APInt StrideBytes = VariableOffsets.front().second;
81 const uint64_t JumpTableSizeBytes = GV->getGlobalSize(DL);
82 if (JumpTableSizeBytes % StrideBytes.getZExtValue() != 0)
83 return std::nullopt;
84 const uint64_t N = JumpTableSizeBytes / StrideBytes.getZExtValue();
85 if (N > JumpTableSizeThreshold)
86 return std::nullopt;
87
88 JumpTableTy JumpTable;
89 JumpTable.Index = VariableOffsets.front().first;
90 JumpTable.Funcs.reserve(N);
91 for (uint64_t Index = 0; Index < N; ++Index) {
92 // ConstantOffset is zero.
93 APInt Offset = Index * StrideBytes;
94 Constant *C =
95 ConstantFoldLoadFromConst(C: GV->getInitializer(), Ty: PtrTy, Offset, DL);
96 auto *Func = dyn_cast_or_null<Function>(Val: C);
97 if (!Func || Func->isDeclaration() ||
98 Func->getInstructionCount() > FunctionSizeThreshold)
99 return std::nullopt;
100 JumpTable.Funcs.push_back(Elt: Func);
101 }
102 return JumpTable;
103}
104
105static BasicBlock *
106expandToSwitch(CallBase *CB, const JumpTableTy &JT, DomTreeUpdater &DTU,
107 OptimizationRemarkEmitter &ORE,
108 llvm::function_ref<GlobalValue::GUID(const Function &)>
109 GetGuidForFunction) {
110 const bool IsVoid = CB->getType() == Type::getVoidTy(C&: CB->getContext());
111
112 SmallVector<DominatorTree::UpdateType, 8> DTUpdates;
113 BasicBlock *BB = CB->getParent();
114 BasicBlock *Tail = SplitBlock(Old: BB, SplitPt: CB, DTU: &DTU, LI: nullptr, MSSAU: nullptr,
115 BBName: BB->getName() + Twine(".tail"));
116 DTUpdates.push_back(Elt: {DominatorTree::Delete, BB, Tail});
117 BB->getTerminator()->eraseFromParent();
118
119 Function &F = *BB->getParent();
120 BasicBlock *BBUnreachable = BasicBlock::Create(
121 Context&: F.getContext(), Name: "default.switch.case.unreachable", Parent: &F, InsertBefore: Tail);
122 IRBuilder<> BuilderUnreachable(BBUnreachable);
123 BuilderUnreachable.CreateUnreachable();
124
125 IRBuilder<> Builder(BB);
126 SwitchInst *Switch = Builder.CreateSwitch(V: JT.Index, Dest: BBUnreachable);
127 DTUpdates.push_back(Elt: {DominatorTree::Insert, BB, BBUnreachable});
128
129 IRBuilder<> BuilderTail(CB);
130 PHINode *PHI =
131 IsVoid ? nullptr : BuilderTail.CreatePHI(Ty: CB->getType(), NumReservedValues: JT.Funcs.size());
132 const auto *ProfMD = CB->getMetadata(KindID: LLVMContext::MD_prof);
133
134 SmallVector<uint64_t> BranchWeights;
135 DenseMap<GlobalValue::GUID, uint64_t> GuidToCounter;
136 const bool HadProfile = isValueProfileMD(ProfileData: ProfMD);
137 if (HadProfile) {
138 // The assumptions, coming in, are that the functions in JT.Funcs are
139 // defined in this module (from parseJumpTable).
140 assert(llvm::all_of(
141 JT.Funcs, [](const Function *F) { return F && !F->isDeclaration(); }));
142 BranchWeights.reserve(N: JT.Funcs.size() + 1);
143 // The first is the default target, which is the unreachable block created
144 // above.
145 BranchWeights.push_back(Elt: 0U);
146 uint64_t TotalCount = 0;
147 auto Targets = getValueProfDataFromInst(
148 Inst: *CB, ValueKind: InstrProfValueKind::IPVK_IndirectCallTarget,
149 MaxNumValueData: std::numeric_limits<uint32_t>::max(), TotalC&: TotalCount);
150
151 for (const auto &[G, C] : Targets) {
152 [[maybe_unused]] auto It = GuidToCounter.insert(KV: {G, C});
153 assert(It.second);
154 }
155 }
156 for (auto [Index, Func] : llvm::enumerate(First: JT.Funcs)) {
157 BasicBlock *B = BasicBlock::Create(Context&: Func->getContext(),
158 Name: "call." + Twine(Index), Parent: &F, InsertBefore: Tail);
159 DTUpdates.push_back(Elt: {DominatorTree::Insert, BB, B});
160 DTUpdates.push_back(Elt: {DominatorTree::Insert, B, Tail});
161
162 CallBase *Call = cast<CallBase>(Val: CB->clone());
163 // The MD_prof metadata (VP kind), if it existed, can be dropped, it doesn't
164 // make sense on a direct call. Note that the values are used for the branch
165 // weights of the switch.
166 Call->setMetadata(KindID: LLVMContext::MD_prof, Node: nullptr);
167 Call->setCalledFunction(Func);
168 Call->insertInto(ParentBB: B, It: B->end());
169 Switch->addCase(
170 OnVal: cast<ConstantInt>(Val: ConstantInt::get(Ty: JT.Index->getType(), V: Index)), Dest: B);
171 GlobalValue::GUID FctID = GetGuidForFunction(*Func);
172 // It'd be OK to _not_ find target functions in GuidToCounter, e.g. suppose
173 // just some of the jump targets are taken (for the given profile).
174 BranchWeights.push_back(Elt: FctID == 0U ? 0U
175 : GuidToCounter.lookup_or(Val: FctID, Default: 0U));
176 BranchInst::Create(IfTrue: Tail, InsertBefore: B);
177 if (PHI)
178 PHI->addIncoming(V: Call, BB: B);
179 }
180 DTU.applyUpdates(Updates: DTUpdates);
181 ORE.emit(RemarkBuilder: [&]() {
182 return OptimizationRemark(DEBUG_TYPE, "ReplacedJumpTableWithSwitch", CB)
183 << "expanded indirect call into switch";
184 });
185 if (HadProfile && !ProfcheckDisableMetadataFixes) {
186 // At least one of the targets must've been taken.
187 assert(llvm::any_of(BranchWeights, not_equal_to(0)));
188 setBranchWeights(I&: *Switch, Weights: downscaleWeights(Weights: BranchWeights),
189 /*IsExpected=*/false);
190 } else
191 setExplicitlyUnknownBranchWeights(I&: *Switch, DEBUG_TYPE);
192 if (PHI)
193 CB->replaceAllUsesWith(V: PHI);
194 CB->eraseFromParent();
195 return Tail;
196}
197
198PreservedAnalyses JumpTableToSwitchPass::run(Function &F,
199 FunctionAnalysisManager &AM) {
200 OptimizationRemarkEmitter &ORE =
201 AM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
202 DominatorTree *DT = AM.getCachedResult<DominatorTreeAnalysis>(IR&: F);
203 PostDominatorTree *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(IR&: F);
204 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
205 bool Changed = false;
206 auto FuncToGuid = [&](const Function &Fct) {
207 if (Fct.getMetadata(Kind: AssignGUIDPass::GUIDMetadataName))
208 return AssignGUIDPass::getGUID(F: Fct);
209
210 return Function::getGUIDAssumingExternalLinkage(GlobalName: getIRPGOFuncName(F, InLTO));
211 };
212
213 for (BasicBlock &BB : make_early_inc_range(Range&: F)) {
214 BasicBlock *CurrentBB = &BB;
215 while (CurrentBB) {
216 BasicBlock *SplittedOutTail = nullptr;
217 for (Instruction &I : make_early_inc_range(Range&: *CurrentBB)) {
218 auto *Call = dyn_cast<CallInst>(Val: &I);
219 if (!Call || Call->getCalledFunction() || Call->isMustTailCall())
220 continue;
221 auto *L = dyn_cast<LoadInst>(Val: Call->getCalledOperand());
222 // Skip atomic or volatile loads.
223 if (!L || !L->isSimple())
224 continue;
225 auto *GEP = dyn_cast<GetElementPtrInst>(Val: L->getPointerOperand());
226 if (!GEP)
227 continue;
228 auto *PtrTy = dyn_cast<PointerType>(Val: L->getType());
229 assert(PtrTy && "call operand must be a pointer");
230 std::optional<JumpTableTy> JumpTable = parseJumpTable(GEP, PtrTy);
231 if (!JumpTable)
232 continue;
233 SplittedOutTail =
234 expandToSwitch(CB: Call, JT: *JumpTable, DTU, ORE, GetGuidForFunction: FuncToGuid);
235 Changed = true;
236 break;
237 }
238 CurrentBB = SplittedOutTail ? SplittedOutTail : nullptr;
239 }
240 }
241
242 if (!Changed)
243 return PreservedAnalyses::all();
244
245 PreservedAnalyses PA;
246 if (DT)
247 PA.preserve<DominatorTreeAnalysis>();
248 if (PDT)
249 PA.preserve<PostDominatorTreeAnalysis>();
250 return PA;
251}
252