//===-- BasicBlockSections.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// BasicBlockSections implementation.
//
// The purpose of this pass is to assign sections to basic blocks when the
// -fbasic-block-sections= option is used. Further, with profile information,
// only the subset of basic blocks with profiles is placed in separate sections
// and the rest are grouped into a cold section. The exception handling blocks
// are treated specially to ensure they are all in one section.
//
// Basic Block Sections
// ====================
//
// With the -fbasic-block-sections=list option, every function may be split
// into clusters of basic blocks. Every cluster will be emitted into a separate
// section with its basic blocks sequenced in the given order. To get optimal
// performance, the clusters must form an optimal BB layout for the function.
// We insert a symbol at the beginning of every cluster's section to allow the
// linker to reorder the sections in an arbitrary sequence. A global order of
// these sections would encapsulate the function layout.
// For example, consider the following clusters for a function foo (consisting
// of 6 basic blocks 0, 1, ..., 5).
//
//  0 2
//  1 3 5
//
// * Basic blocks 0 and 2 are placed in one section with symbol `foo`
//   referencing the beginning of this section.
// * Basic blocks 1, 3, 5 are placed in a separate section. A new symbol
//   `foo.__part.1` will reference the beginning of this section.
// * Basic block 4 (note that it is not referenced in the list) is placed in
//   one section, and a new symbol `foo.cold` will point to it.
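//
// For illustration only, a cluster list describing this grouping might look
// like the following (a sketch assuming the "!"/"!!" list-file format read by
// BasicBlockSectionsProfileReader, where each "!!" line gives the basic block
// IDs of one cluster):
//
//      !foo
//      !!0 2
//      !!1 3 5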
//
// There are a couple of challenges to be addressed:
//
// 1. The last basic block of every cluster should not have any implicit
//    fallthrough to its next basic block, as that block can be reordered by
//    the linker. The compiler should make these fallthroughs explicit by
//    adding unconditional jumps (see the sketch after this list).
//
// 2. All inter-cluster branch targets would now need to be resolved by the
//    linker as they cannot be calculated at compile time. This is done
//    using static relocations. Further, the compiler tries to use short branch
//    instructions on some ISAs for small branch offsets. This is not possible
//    for inter-cluster branches as the offset is not determined at compile
//    time, and therefore, long branch instructions have to be used for those.
//
// 3. Debug Information (DebugInfo) and Call Frame Information (CFI) emission
//    needs special handling with basic block sections. DebugInfo needs to be
//    emitted with more relocations as basic block sections can break a
//    function into potentially several disjoint pieces, and CFI needs to be
//    emitted per cluster. This also bloats the object file and binary sizes.
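//
// As a sketch of the first challenge (illustrative x86-style assembly; the
// actual branch instruction is target-dependent), a block that ends a section
// and previously fell through must now branch explicitly:
//
//      Before:   bb.1:  ...        ; implicit fallthrough into bb.2
//      After:    bb.1:  ...
//                       jmp bb.2   ; explicit unconditional branch inserted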
//
// Basic Block Address Map
// =======================
//
// With -fbasic-block-address-map, we emit the offsets of BB addresses of
// every function into the .llvm_bb_addr_map section. Along with the function
// symbols, this allows for mapping of virtual addresses in PMU profiles back
// to the corresponding basic blocks. This logic is implemented in AsmPrinter.
// This pass only assigns the BBSectionType of every function to ``labels``.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/BasicBlockSectionUtils.h"
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/TargetMachine.h"
#include <optional>

using namespace llvm;

// Placing the cold clusters in a separate section mitigates the impact of
// poor profiles and allows optimizations such as hugepage mapping to be
// applied at a section granularity. Defaults to ".text.split." which is
// recognized by lld via the `-z keep-text-section-prefix` flag.
cl::opt<std::string> llvm::BBSectionsColdTextPrefix(
    "bbsections-cold-text-prefix",
    cl::desc("The text prefix to use for cold basic block clusters"),
    cl::init(".text.split."), cl::Hidden);
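
// As a usage sketch (the profile file name "clusters.txt" is hypothetical),
// a typical build compiles with the cluster list and links with lld while
// keeping text section prefixes so the cold sections stay grouped:
//
//   clang -fbasic-block-sections=list=clusters.txt -c foo.cc
//   clang foo.o -fuse-ld=lld -Wl,-z,keep-text-section-prefix -o foo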

static cl::opt<bool> BBSectionsDetectSourceDrift(
    "bbsections-detect-source-drift",
    cl::desc("This checks if there is an FDO instr. profile hash "
             "mismatch for this function"),
    cl::init(true), cl::Hidden);

namespace {

class BasicBlockSections : public MachineFunctionPass {
public:
  static char ID;

  BasicBlockSectionsProfileReaderWrapperPass *BBSectionsProfileReader = nullptr;

  BasicBlockSections() : MachineFunctionPass(ID) {
    initializeBasicBlockSectionsPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "Basic Block Sections Analysis";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Identify basic blocks that need separate sections and prepare to emit
  /// them accordingly.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  bool handleBBSections(MachineFunction &MF);
  bool handleBBAddrMap(MachineFunction &MF);
};

} // end anonymous namespace

char BasicBlockSections::ID = 0;
INITIALIZE_PASS_BEGIN(
    BasicBlockSections, "bbsections-prepare",
    "Prepares for basic block sections, by splitting functions "
    "into clusters of basic blocks.",
    false, false)
INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
INITIALIZE_PASS_END(BasicBlockSections, "bbsections-prepare",
                    "Prepares for basic block sections, by splitting functions "
                    "into clusters of basic blocks.",
                    false, false)

// This function updates and optimizes the branching instructions of every
// basic block in a given function to account for changes in the layout.
static void
updateBranches(MachineFunction &MF,
               const SmallVector<MachineBasicBlock *> &PreLayoutFallThroughs) {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
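  // Scratch buffer of branch condition operands; analyzeBranch below fills it
  // in and it is reused across blocks.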
  SmallVector<MachineOperand, 4> Cond;
  for (auto &MBB : MF) {
    auto NextMBBI = std::next(MBB.getIterator());
    auto *FTMBB = PreLayoutFallThroughs[MBB.getNumber()];
    // If this block had a fallthrough before, we need an explicit
    // unconditional branch to that block if either
    // 1- the block ends a section, which means its next block may be
    //    reordered by the linker, or
    // 2- the fallthrough block is not adjacent to the block in the new
    //    order.
    if (FTMBB && (MBB.isEndSection() || &*NextMBBI != FTMBB))
      TII->insertUnconditionalBranch(MBB, FTMBB, MBB.findBranchDebugLoc());

    // We do not optimize branches for machine basic blocks ending sections, as
    // their adjacent block might be reordered by the linker.
    if (MBB.isEndSection())
      continue;

    // It might be possible to optimize branches by flipping the branch
    // condition.
    Cond.clear();
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr; // For analyzeBranch.
    if (TII->analyzeBranch(MBB, TBB, FBB, Cond))
      continue;
    MBB.updateTerminator(FTMBB);
  }
}

// This function sorts basic blocks according to the cluster information.
// All explicitly specified clusters of basic blocks will be ordered
// accordingly. All non-specified BBs go into a separate "Cold" section.
// Additionally, if exception handling landing pads end up in more than one
// cluster, they are moved into a single "Exception" section. Finally, clusters
// are ordered in increasing order of their IDs, with the "Exception" and
// "Cold" sections succeeding all other clusters.
// FuncClusterInfo represents the cluster information for basic blocks. It
// maps from the BBID of basic blocks to their cluster information. If this is
// empty, unique sections are used for all basic blocks in the function.
static void
assignSections(MachineFunction &MF,
               const DenseMap<UniqueBBID, BBClusterInfo> &FuncClusterInfo) {
  assert(MF.hasBBSections() && "BB Sections is not set for function.");
  // This variable stores the section ID of the cluster containing eh_pads (if
  // all eh_pads are in one cluster). If more than one cluster contains
  // eh_pads, we set it equal to ExceptionSectionID.
  std::optional<MBBSectionID> EHPadsSectionID;

  for (auto &MBB : MF) {
    // With the 'all' option, every basic block is placed in a unique section.
    // With the 'list' option, every basic block is placed in a section
    // associated with its cluster, unless we want individual unique sections
    // for every basic block in this function (if FuncClusterInfo is empty).
    if (MF.getTarget().getBBSectionsType() == llvm::BasicBlockSection::All ||
        FuncClusterInfo.empty()) {
      // If unique sections are desired for all basic blocks of the function,
      // we set every basic block's section ID equal to its original position
      // in the layout (which is equal to its number). This ensures that basic
      // blocks are ordered canonically.
      MBB.setSectionID(MBB.getNumber());
    } else {
      auto I = FuncClusterInfo.find(*MBB.getBBID());
      if (I != FuncClusterInfo.end()) {
        MBB.setSectionID(I->second.ClusterID);
      } else {
        const TargetInstrInfo &TII =
            *MBB.getParent()->getSubtarget().getInstrInfo();

        if (TII.isMBBSafeToSplitToCold(MBB)) {
          // BB goes into the special cold section if it is not specified in
          // the cluster info map.
          MBB.setSectionID(MBBSectionID::ColdSectionID);
        }
      }
    }

    if (MBB.isEHPad() && EHPadsSectionID != MBB.getSectionID() &&
        EHPadsSectionID != MBBSectionID::ExceptionSectionID) {
      // If we already have one cluster containing eh_pads, this must be
      // updated to ExceptionSectionID. Otherwise, we set it equal to the
      // current section ID.
      EHPadsSectionID = EHPadsSectionID ? MBBSectionID::ExceptionSectionID
                                        : MBB.getSectionID();
    }
  }

  // If EHPads are in more than one section, this places all of them in the
  // special exception section.
  if (EHPadsSectionID == MBBSectionID::ExceptionSectionID)
    for (auto &MBB : MF)
      if (MBB.isEHPad())
        MBB.setSectionID(*EHPadsSectionID);
}

void llvm::sortBasicBlocksAndUpdateBranches(
    MachineFunction &MF, MachineBasicBlockComparator MBBCmp) {
  [[maybe_unused]] const MachineBasicBlock *EntryBlock = &MF.front();
  SmallVector<MachineBasicBlock *> PreLayoutFallThroughs(MF.getNumBlockIDs());
  for (auto &MBB : MF)
    PreLayoutFallThroughs[MBB.getNumber()] =
        MBB.getFallThrough(/*JumpToFallThrough=*/false);

  MF.sort(MBBCmp);
  assert(&MF.front() == EntryBlock &&
         "Entry block should not be displaced by basic block sections");

  // Set IsBeginSection and IsEndSection according to the assigned section IDs.
  MF.assignBeginEndSections();

  // After reordering basic blocks, we must update basic block branches to
  // insert explicit fallthrough branches when required and optimize branches
  // when possible.
  updateBranches(MF, PreLayoutFallThroughs);
}

// If the exception section begins with a landing pad, that landing pad will
// assume a zero offset (relative to @LPStart) in the LSDA. However, a value of
// zero implies "no landing pad." This function inserts a NOP just before the
// EH pad label to ensure a nonzero offset.
void llvm::avoidZeroOffsetLandingPad(MachineFunction &MF) {
  for (auto &MBB : MF) {
    if (MBB.isBeginSection() && MBB.isEHPad()) {
      MachineBasicBlock::iterator MI = MBB.begin();
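      // Walk forward to the EH label that marks the landing pad; the NOP is
      // inserted immediately before it.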
      while (!MI->isEHLabel())
        ++MI;
      MF.getSubtarget().getInstrInfo()->insertNoop(MBB, MI);
    }
  }
}

bool llvm::hasInstrProfHashMismatch(MachineFunction &MF) {
  if (!BBSectionsDetectSourceDrift)
    return false;

  const char MetadataName[] = "instr_prof_hash_mismatch";
  auto *Existing = MF.getFunction().getMetadata(LLVMContext::MD_annotation);
  if (Existing) {
    MDTuple *Tuple = cast<MDTuple>(Existing);
    for (const auto &N : Tuple->operands())
      if (N.equalsStr(MetadataName))
        return true;
  }

  return false;
}

// Identify, arrange, and modify basic blocks which need separate sections
// according to the specification provided by the -fbasic-block-sections flag.
bool BasicBlockSections::handleBBSections(MachineFunction &MF) {
  auto BBSectionsType = MF.getTarget().getBBSectionsType();
  if (BBSectionsType == BasicBlockSection::None)
    return false;

  // Check for source drift. If the source has changed since the profiles
  // were obtained, optimizing basic blocks might be sub-optimal.
  // This only applies to BasicBlockSection::List as it creates
  // clusters of basic blocks using basic block ids. Source drift can
  // invalidate these groupings, leading to sub-optimal code generation with
  // regard to performance.
  if (BBSectionsType == BasicBlockSection::List &&
      hasInstrProfHashMismatch(MF))
    return false;
  // Renumber blocks before sorting them. This is useful for accessing the
  // original layout positions and finding the original fallthroughs.
  MF.RenumberBlocks();

  DenseMap<UniqueBBID, BBClusterInfo> FuncClusterInfo;
  if (BBSectionsType == BasicBlockSection::List) {
    auto [HasProfile, ClusterInfo] =
        getAnalysis<BasicBlockSectionsProfileReaderWrapperPass>()
            .getClusterInfoForFunction(MF.getName());
    if (!HasProfile)
      return false;
    for (auto &BBClusterInfo : ClusterInfo) {
      FuncClusterInfo.try_emplace(BBClusterInfo.BBID, BBClusterInfo);
    }
  }

  MF.setBBSectionsType(BBSectionsType);
  assignSections(MF, FuncClusterInfo);

  const MachineBasicBlock &EntryBB = MF.front();
  auto EntryBBSectionID = EntryBB.getSectionID();

  // Helper function for ordering BB sections as follows:
  //   * Entry section (section including the entry block).
  //   * Regular sections (in increasing order of their Number).
  //     ...
  //   * Exception section
  //   * Cold section
  auto MBBSectionOrder = [EntryBBSectionID](const MBBSectionID &LHS,
                                            const MBBSectionID &RHS) {
    // We make sure that the section containing the entry block precedes all
    // the other sections.
    if (LHS == EntryBBSectionID || RHS == EntryBBSectionID)
      return LHS == EntryBBSectionID;
    return LHS.Type == RHS.Type ? LHS.Number < RHS.Number : LHS.Type < RHS.Type;
  };

  // We sort all basic blocks to make sure the basic blocks of every cluster
  // are contiguous and ordered accordingly. Furthermore, clusters are ordered
  // in increasing order of their section IDs, with the exception and the
  // cold section placed at the end of the function.
  // Also, we force the entry block of the function to be placed at the
  // beginning of the function, regardless of the requested order.
  auto Comparator = [&](const MachineBasicBlock &X,
                        const MachineBasicBlock &Y) {
    auto XSectionID = X.getSectionID();
    auto YSectionID = Y.getSectionID();
    if (XSectionID != YSectionID)
      return MBBSectionOrder(XSectionID, YSectionID);
    // Make sure that the entry block is placed at the beginning.
    if (&X == &EntryBB || &Y == &EntryBB)
      return &X == &EntryBB;
    // If the two basic blocks are in the same section, the order is decided by
    // their position within the section.
    if (XSectionID.Type == MBBSectionID::SectionType::Default)
      return FuncClusterInfo.lookup(*X.getBBID()).PositionInCluster <
             FuncClusterInfo.lookup(*Y.getBBID()).PositionInCluster;
    return X.getNumber() < Y.getNumber();
  };

  sortBasicBlocksAndUpdateBranches(MF, Comparator);
  avoidZeroOffsetLandingPad(MF);
  return true;
}

// When the BB address map needs to be generated, this renumbers basic blocks
// to make them appear in increasing order of their IDs in the function. This
// avoids the need to store basic block IDs in the BB address map section,
// since they can be determined implicitly.
bool BasicBlockSections::handleBBAddrMap(MachineFunction &MF) {
  if (!MF.getTarget().Options.BBAddrMap)
    return false;
  MF.RenumberBlocks();
  return true;
}

bool BasicBlockSections::runOnMachineFunction(MachineFunction &MF) {
  // First handle the basic block sections.
  auto R1 = handleBBSections(MF);
  // Handle basic block address map after basic block sections are finalized.
  auto R2 = handleBBAddrMap(MF);

  // We renumber blocks, so update the dominator trees we want to preserve.
  if (auto *WP = getAnalysisIfAvailable<MachineDominatorTreeWrapperPass>())
    WP->getDomTree().updateBlockNumbers();
  if (auto *WP = getAnalysisIfAvailable<MachinePostDominatorTreeWrapperPass>())
    WP->getPostDomTree().updateBlockNumbers();

  return R1 || R2;
}

void BasicBlockSections::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<BasicBlockSectionsProfileReaderWrapperPass>();
  AU.addUsedIfAvailable<MachineDominatorTreeWrapperPass>();
  AU.addUsedIfAvailable<MachinePostDominatorTreeWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineFunctionPass *llvm::createBasicBlockSectionsPass() {
  return new BasicBlockSections();
}