1 | //===- BranchFolding.cpp - Fold machine code branch instructions ----------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This pass forwards branches to unconditional branches to make them branch |
10 | // directly to the target block. This pass often results in dead MBB's, which |
11 | // it then removes. |
12 | // |
13 | // Note that this pass must be run after register allocation, it cannot handle |
14 | // SSA form. It also must handle virtual registers for targets that emit virtual |
15 | // ISA (e.g. NVPTX). |
16 | // |
17 | //===----------------------------------------------------------------------===// |
18 | |
19 | #include "BranchFolding.h" |
20 | #include "llvm/ADT/BitVector.h" |
21 | #include "llvm/ADT/STLExtras.h" |
22 | #include "llvm/ADT/SmallSet.h" |
23 | #include "llvm/ADT/SmallVector.h" |
24 | #include "llvm/ADT/Statistic.h" |
25 | #include "llvm/Analysis/ProfileSummaryInfo.h" |
26 | #include "llvm/CodeGen/Analysis.h" |
27 | #include "llvm/CodeGen/MBFIWrapper.h" |
28 | #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" |
29 | #include "llvm/CodeGen/MachineBranchProbabilityInfo.h" |
30 | #include "llvm/CodeGen/MachineFunction.h" |
31 | #include "llvm/CodeGen/MachineFunctionPass.h" |
32 | #include "llvm/CodeGen/MachineInstr.h" |
33 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
34 | #include "llvm/CodeGen/MachineJumpTableInfo.h" |
35 | #include "llvm/CodeGen/MachineLoopInfo.h" |
36 | #include "llvm/CodeGen/MachineOperand.h" |
37 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
38 | #include "llvm/CodeGen/MachineSizeOpts.h" |
39 | #include "llvm/CodeGen/TargetInstrInfo.h" |
40 | #include "llvm/CodeGen/TargetOpcodes.h" |
41 | #include "llvm/CodeGen/TargetPassConfig.h" |
42 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
43 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
44 | #include "llvm/IR/DebugInfoMetadata.h" |
45 | #include "llvm/IR/DebugLoc.h" |
46 | #include "llvm/IR/Function.h" |
47 | #include "llvm/InitializePasses.h" |
48 | #include "llvm/MC/LaneBitmask.h" |
49 | #include "llvm/MC/MCRegisterInfo.h" |
50 | #include "llvm/Pass.h" |
51 | #include "llvm/Support/BlockFrequency.h" |
52 | #include "llvm/Support/BranchProbability.h" |
53 | #include "llvm/Support/CommandLine.h" |
54 | #include "llvm/Support/Debug.h" |
55 | #include "llvm/Support/ErrorHandling.h" |
56 | #include "llvm/Support/raw_ostream.h" |
57 | #include "llvm/Target/TargetMachine.h" |
58 | #include <cassert> |
59 | #include <cstddef> |
60 | #include <iterator> |
61 | #include <numeric> |
62 | |
63 | using namespace llvm; |
64 | |
65 | #define DEBUG_TYPE "branch-folder" |
66 | |
67 | STATISTIC(NumDeadBlocks, "Number of dead blocks removed" ); |
68 | STATISTIC(NumBranchOpts, "Number of branches optimized" ); |
69 | STATISTIC(NumTailMerge , "Number of block tails merged" ); |
70 | STATISTIC(NumHoist , "Number of times common instructions are hoisted" ); |
71 | STATISTIC(NumTailCalls, "Number of tail calls optimized" ); |
72 | |
73 | static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge" , |
74 | cl::init(Val: cl::BOU_UNSET), cl::Hidden); |
75 | |
76 | // Throttle for huge numbers of predecessors (compile speed problems) |
77 | static cl::opt<unsigned> |
78 | TailMergeThreshold("tail-merge-threshold" , |
79 | cl::desc("Max number of predecessors to consider tail merging" ), |
80 | cl::init(Val: 150), cl::Hidden); |
81 | |
82 | // Heuristic for tail merging (and, inversely, tail duplication). |
83 | static cl::opt<unsigned> |
84 | TailMergeSize("tail-merge-size" , |
85 | cl::desc("Min number of instructions to consider tail merging" ), |
86 | cl::init(Val: 3), cl::Hidden); |
87 | |
88 | namespace { |
89 | |
90 | /// BranchFolderPass - Wrap branch folder in a machine function pass. |
91 | class BranchFolderPass : public MachineFunctionPass { |
92 | public: |
93 | static char ID; |
94 | |
95 | explicit BranchFolderPass(): MachineFunctionPass(ID) {} |
96 | |
97 | bool runOnMachineFunction(MachineFunction &MF) override; |
98 | |
99 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
100 | AU.addRequired<MachineBlockFrequencyInfoWrapperPass>(); |
101 | AU.addRequired<MachineBranchProbabilityInfoWrapperPass>(); |
102 | AU.addRequired<ProfileSummaryInfoWrapperPass>(); |
103 | AU.addRequired<TargetPassConfig>(); |
104 | MachineFunctionPass::getAnalysisUsage(AU); |
105 | } |
106 | |
107 | MachineFunctionProperties getRequiredProperties() const override { |
108 | return MachineFunctionProperties().set( |
109 | MachineFunctionProperties::Property::NoPHIs); |
110 | } |
111 | }; |
112 | |
113 | } // end anonymous namespace |
114 | |
115 | char BranchFolderPass::ID = 0; |
116 | |
117 | char &llvm::BranchFolderPassID = BranchFolderPass::ID; |
118 | |
119 | INITIALIZE_PASS(BranchFolderPass, DEBUG_TYPE, |
120 | "Control Flow Optimizer" , false, false) |
121 | |
122 | bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) { |
123 | if (skipFunction(F: MF.getFunction())) |
124 | return false; |
125 | |
126 | TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>(); |
127 | // TailMerge can create jump into if branches that make CFG irreducible for |
128 | // HW that requires structurized CFG. |
129 | bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() && |
130 | PassConfig->getEnableTailMerge(); |
131 | MBFIWrapper MBBFreqInfo( |
132 | getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()); |
133 | BranchFolder Folder( |
134 | EnableTailMerge, /*CommonHoist=*/true, MBBFreqInfo, |
135 | getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI(), |
136 | &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI()); |
137 | return Folder.OptimizeFunction(MF, tii: MF.getSubtarget().getInstrInfo(), |
138 | tri: MF.getSubtarget().getRegisterInfo()); |
139 | } |
140 | |
141 | BranchFolder::BranchFolder(bool DefaultEnableTailMerge, bool CommonHoist, |
142 | MBFIWrapper &FreqInfo, |
143 | const MachineBranchProbabilityInfo &ProbInfo, |
144 | ProfileSummaryInfo *PSI, unsigned MinTailLength) |
145 | : EnableHoistCommonCode(CommonHoist), MinCommonTailLength(MinTailLength), |
146 | MBBFreqInfo(FreqInfo), MBPI(ProbInfo), PSI(PSI) { |
147 | switch (FlagEnableTailMerge) { |
148 | case cl::BOU_UNSET: |
149 | EnableTailMerge = DefaultEnableTailMerge; |
150 | break; |
151 | case cl::BOU_TRUE: EnableTailMerge = true; break; |
152 | case cl::BOU_FALSE: EnableTailMerge = false; break; |
153 | } |
154 | } |
155 | |
156 | void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) { |
157 | assert(MBB->pred_empty() && "MBB must be dead!" ); |
158 | LLVM_DEBUG(dbgs() << "\nRemoving MBB: " << *MBB); |
159 | |
160 | MachineFunction *MF = MBB->getParent(); |
161 | // drop all successors. |
162 | while (!MBB->succ_empty()) |
163 | MBB->removeSuccessor(I: MBB->succ_end()-1); |
164 | |
165 | // Avoid matching if this pointer gets reused. |
166 | TriedMerging.erase(Ptr: MBB); |
167 | |
168 | // Update call site info. |
169 | for (const MachineInstr &MI : *MBB) |
170 | if (MI.shouldUpdateCallSiteInfo()) |
171 | MF->eraseCallSiteInfo(MI: &MI); |
172 | |
173 | // Remove the block. |
174 | MF->erase(MBBI: MBB); |
175 | EHScopeMembership.erase(Val: MBB); |
176 | if (MLI) |
177 | MLI->removeBlock(BB: MBB); |
178 | } |
179 | |
180 | bool BranchFolder::OptimizeFunction(MachineFunction &MF, |
181 | const TargetInstrInfo *tii, |
182 | const TargetRegisterInfo *tri, |
183 | MachineLoopInfo *mli, bool AfterPlacement) { |
184 | if (!tii) return false; |
185 | |
186 | TriedMerging.clear(); |
187 | |
188 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
189 | AfterBlockPlacement = AfterPlacement; |
190 | TII = tii; |
191 | TRI = tri; |
192 | MLI = mli; |
193 | this->MRI = &MRI; |
194 | |
195 | if (MinCommonTailLength == 0) { |
196 | MinCommonTailLength = TailMergeSize.getNumOccurrences() > 0 |
197 | ? TailMergeSize |
198 | : TII->getTailMergeSize(MF); |
199 | } |
200 | |
201 | UpdateLiveIns = MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF); |
202 | if (!UpdateLiveIns) |
203 | MRI.invalidateLiveness(); |
204 | |
205 | bool MadeChange = false; |
206 | |
207 | // Recalculate EH scope membership. |
208 | EHScopeMembership = getEHScopeMembership(MF); |
209 | |
210 | bool MadeChangeThisIteration = true; |
211 | while (MadeChangeThisIteration) { |
212 | MadeChangeThisIteration = TailMergeBlocks(MF); |
213 | // No need to clean up if tail merging does not change anything after the |
214 | // block placement. |
215 | if (!AfterBlockPlacement || MadeChangeThisIteration) |
216 | MadeChangeThisIteration |= OptimizeBranches(MF); |
217 | if (EnableHoistCommonCode) |
218 | MadeChangeThisIteration |= HoistCommonCode(MF); |
219 | MadeChange |= MadeChangeThisIteration; |
220 | } |
221 | |
222 | // See if any jump tables have become dead as the code generator |
223 | // did its thing. |
224 | MachineJumpTableInfo *JTI = MF.getJumpTableInfo(); |
225 | if (!JTI) |
226 | return MadeChange; |
227 | |
228 | // Walk the function to find jump tables that are live. |
229 | BitVector JTIsLive(JTI->getJumpTables().size()); |
230 | for (const MachineBasicBlock &BB : MF) { |
231 | for (const MachineInstr &I : BB) |
232 | for (const MachineOperand &Op : I.operands()) { |
233 | if (!Op.isJTI()) continue; |
234 | |
235 | // Remember that this JT is live. |
236 | JTIsLive.set(Op.getIndex()); |
237 | } |
238 | } |
239 | |
240 | // Finally, remove dead jump tables. This happens when the |
241 | // indirect jump was unreachable (and thus deleted). |
242 | for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i) |
243 | if (!JTIsLive.test(Idx: i)) { |
244 | JTI->RemoveJumpTable(Idx: i); |
245 | MadeChange = true; |
246 | } |
247 | |
248 | return MadeChange; |
249 | } |
250 | |
251 | //===----------------------------------------------------------------------===// |
252 | // Tail Merging of Blocks |
253 | //===----------------------------------------------------------------------===// |
254 | |
255 | /// HashMachineInstr - Compute a hash value for MI and its operands. |
256 | static unsigned HashMachineInstr(const MachineInstr &MI) { |
257 | unsigned Hash = MI.getOpcode(); |
258 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
259 | const MachineOperand &Op = MI.getOperand(i); |
260 | |
261 | // Merge in bits from the operand if easy. We can't use MachineOperand's |
262 | // hash_code here because it's not deterministic and we sort by hash value |
263 | // later. |
264 | unsigned OperandHash = 0; |
265 | switch (Op.getType()) { |
266 | case MachineOperand::MO_Register: |
267 | OperandHash = Op.getReg(); |
268 | break; |
269 | case MachineOperand::MO_Immediate: |
270 | OperandHash = Op.getImm(); |
271 | break; |
272 | case MachineOperand::MO_MachineBasicBlock: |
273 | OperandHash = Op.getMBB()->getNumber(); |
274 | break; |
275 | case MachineOperand::MO_FrameIndex: |
276 | case MachineOperand::MO_ConstantPoolIndex: |
277 | case MachineOperand::MO_JumpTableIndex: |
278 | OperandHash = Op.getIndex(); |
279 | break; |
280 | case MachineOperand::MO_GlobalAddress: |
281 | case MachineOperand::MO_ExternalSymbol: |
282 | // Global address / external symbol are too hard, don't bother, but do |
283 | // pull in the offset. |
284 | OperandHash = Op.getOffset(); |
285 | break; |
286 | default: |
287 | break; |
288 | } |
289 | |
290 | Hash += ((OperandHash << 3) | Op.getType()) << (i & 31); |
291 | } |
292 | return Hash; |
293 | } |
294 | |
295 | /// HashEndOfMBB - Hash the last instruction in the MBB. |
296 | static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) { |
297 | MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr(SkipPseudoOp: false); |
298 | if (I == MBB.end()) |
299 | return 0; |
300 | |
301 | return HashMachineInstr(MI: *I); |
302 | } |
303 | |
304 | /// Whether MI should be counted as an instruction when calculating common tail. |
305 | static bool countsAsInstruction(const MachineInstr &MI) { |
306 | return !(MI.isDebugInstr() || MI.isCFIInstruction()); |
307 | } |
308 | |
309 | /// Iterate backwards from the given iterator \p I, towards the beginning of the |
310 | /// block. If a MI satisfying 'countsAsInstruction' is found, return an iterator |
311 | /// pointing to that MI. If no such MI is found, return the end iterator. |
312 | static MachineBasicBlock::iterator |
313 | skipBackwardPastNonInstructions(MachineBasicBlock::iterator I, |
314 | MachineBasicBlock *MBB) { |
315 | while (I != MBB->begin()) { |
316 | --I; |
317 | if (countsAsInstruction(MI: *I)) |
318 | return I; |
319 | } |
320 | return MBB->end(); |
321 | } |
322 | |
323 | /// Given two machine basic blocks, return the number of instructions they |
324 | /// actually have in common together at their end. If a common tail is found (at |
325 | /// least by one instruction), then iterators for the first shared instruction |
326 | /// in each block are returned as well. |
327 | /// |
328 | /// Non-instructions according to countsAsInstruction are ignored. |
329 | static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1, |
330 | MachineBasicBlock *MBB2, |
331 | MachineBasicBlock::iterator &I1, |
332 | MachineBasicBlock::iterator &I2) { |
333 | MachineBasicBlock::iterator MBBI1 = MBB1->end(); |
334 | MachineBasicBlock::iterator MBBI2 = MBB2->end(); |
335 | |
336 | unsigned TailLen = 0; |
337 | while (true) { |
338 | MBBI1 = skipBackwardPastNonInstructions(I: MBBI1, MBB: MBB1); |
339 | MBBI2 = skipBackwardPastNonInstructions(I: MBBI2, MBB: MBB2); |
340 | if (MBBI1 == MBB1->end() || MBBI2 == MBB2->end()) |
341 | break; |
342 | if (!MBBI1->isIdenticalTo(Other: *MBBI2) || |
343 | // FIXME: This check is dubious. It's used to get around a problem where |
344 | // people incorrectly expect inline asm directives to remain in the same |
345 | // relative order. This is untenable because normal compiler |
346 | // optimizations (like this one) may reorder and/or merge these |
347 | // directives. |
348 | MBBI1->isInlineAsm()) { |
349 | break; |
350 | } |
351 | if (MBBI1->getFlag(Flag: MachineInstr::NoMerge) || |
352 | MBBI2->getFlag(Flag: MachineInstr::NoMerge)) |
353 | break; |
354 | ++TailLen; |
355 | I1 = MBBI1; |
356 | I2 = MBBI2; |
357 | } |
358 | |
359 | return TailLen; |
360 | } |
361 | |
362 | void BranchFolder::replaceTailWithBranchTo(MachineBasicBlock::iterator OldInst, |
363 | MachineBasicBlock &NewDest) { |
364 | if (UpdateLiveIns) { |
365 | // OldInst should always point to an instruction. |
366 | MachineBasicBlock &OldMBB = *OldInst->getParent(); |
367 | LiveRegs.clear(); |
368 | LiveRegs.addLiveOuts(MBB: OldMBB); |
369 | // Move backward to the place where will insert the jump. |
370 | MachineBasicBlock::iterator I = OldMBB.end(); |
371 | do { |
372 | --I; |
373 | LiveRegs.stepBackward(MI: *I); |
374 | } while (I != OldInst); |
375 | |
376 | // Merging the tails may have switched some undef operand to non-undef ones. |
377 | // Add IMPLICIT_DEFS into OldMBB as necessary to have a definition of the |
378 | // register. |
379 | for (MachineBasicBlock::RegisterMaskPair P : NewDest.liveins()) { |
380 | // We computed the liveins with computeLiveIn earlier and should only see |
381 | // full registers: |
382 | assert(P.LaneMask == LaneBitmask::getAll() && |
383 | "Can only handle full register." ); |
384 | MCPhysReg Reg = P.PhysReg; |
385 | if (!LiveRegs.available(MRI: *MRI, Reg)) |
386 | continue; |
387 | DebugLoc DL; |
388 | BuildMI(BB&: OldMBB, I: OldInst, MIMD: DL, MCID: TII->get(Opcode: TargetOpcode::IMPLICIT_DEF), DestReg: Reg); |
389 | } |
390 | } |
391 | |
392 | TII->ReplaceTailWithBranchTo(Tail: OldInst, NewDest: &NewDest); |
393 | ++NumTailMerge; |
394 | } |
395 | |
396 | MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB, |
397 | MachineBasicBlock::iterator BBI1, |
398 | const BasicBlock *BB) { |
399 | if (!TII->isLegalToSplitMBBAt(MBB&: CurMBB, MBBI: BBI1)) |
400 | return nullptr; |
401 | |
402 | MachineFunction &MF = *CurMBB.getParent(); |
403 | |
404 | // Create the fall-through block. |
405 | MachineFunction::iterator MBBI = CurMBB.getIterator(); |
406 | MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(BB); |
407 | CurMBB.getParent()->insert(MBBI: ++MBBI, MBB: NewMBB); |
408 | |
409 | // Move all the successors of this block to the specified block. |
410 | NewMBB->transferSuccessors(FromMBB: &CurMBB); |
411 | |
412 | // Add an edge from CurMBB to NewMBB for the fall-through. |
413 | CurMBB.addSuccessor(Succ: NewMBB); |
414 | |
415 | // Splice the code over. |
416 | NewMBB->splice(Where: NewMBB->end(), Other: &CurMBB, From: BBI1, To: CurMBB.end()); |
417 | |
418 | // NewMBB belongs to the same loop as CurMBB. |
419 | if (MLI) |
420 | if (MachineLoop *ML = MLI->getLoopFor(BB: &CurMBB)) |
421 | ML->addBasicBlockToLoop(NewBB: NewMBB, LI&: *MLI); |
422 | |
423 | // NewMBB inherits CurMBB's block frequency. |
424 | MBBFreqInfo.setBlockFreq(MBB: NewMBB, F: MBBFreqInfo.getBlockFreq(MBB: &CurMBB)); |
425 | |
426 | if (UpdateLiveIns) |
427 | computeAndAddLiveIns(LiveRegs, MBB&: *NewMBB); |
428 | |
429 | // Add the new block to the EH scope. |
430 | const auto &EHScopeI = EHScopeMembership.find(Val: &CurMBB); |
431 | if (EHScopeI != EHScopeMembership.end()) { |
432 | auto n = EHScopeI->second; |
433 | EHScopeMembership[NewMBB] = n; |
434 | } |
435 | |
436 | return NewMBB; |
437 | } |
438 | |
439 | /// EstimateRuntime - Make a rough estimate for how long it will take to run |
440 | /// the specified code. |
441 | static unsigned EstimateRuntime(MachineBasicBlock::iterator I, |
442 | MachineBasicBlock::iterator E) { |
443 | unsigned Time = 0; |
444 | for (; I != E; ++I) { |
445 | if (!countsAsInstruction(MI: *I)) |
446 | continue; |
447 | if (I->isCall()) |
448 | Time += 10; |
449 | else if (I->mayLoadOrStore()) |
450 | Time += 2; |
451 | else |
452 | ++Time; |
453 | } |
454 | return Time; |
455 | } |
456 | |
457 | // CurMBB needs to add an unconditional branch to SuccMBB (we removed these |
458 | // branches temporarily for tail merging). In the case where CurMBB ends |
459 | // with a conditional branch to the next block, optimize by reversing the |
460 | // test and conditionally branching to SuccMBB instead. |
461 | static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB, |
462 | const TargetInstrInfo *TII, const DebugLoc &BranchDL) { |
463 | MachineFunction *MF = CurMBB->getParent(); |
464 | MachineFunction::iterator I = std::next(x: MachineFunction::iterator(CurMBB)); |
465 | MachineBasicBlock *TBB = nullptr, *FBB = nullptr; |
466 | SmallVector<MachineOperand, 4> Cond; |
467 | DebugLoc dl = CurMBB->findBranchDebugLoc(); |
468 | if (!dl) |
469 | dl = BranchDL; |
470 | if (I != MF->end() && !TII->analyzeBranch(MBB&: *CurMBB, TBB, FBB, Cond, AllowModify: true)) { |
471 | MachineBasicBlock *NextBB = &*I; |
472 | if (TBB == NextBB && !Cond.empty() && !FBB) { |
473 | if (!TII->reverseBranchCondition(Cond)) { |
474 | TII->removeBranch(MBB&: *CurMBB); |
475 | TII->insertBranch(MBB&: *CurMBB, TBB: SuccBB, FBB: nullptr, Cond, DL: dl); |
476 | return; |
477 | } |
478 | } |
479 | } |
480 | TII->insertBranch(MBB&: *CurMBB, TBB: SuccBB, FBB: nullptr, |
481 | Cond: SmallVector<MachineOperand, 0>(), DL: dl); |
482 | } |
483 | |
484 | bool |
485 | BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const { |
486 | if (getHash() < o.getHash()) |
487 | return true; |
488 | if (getHash() > o.getHash()) |
489 | return false; |
490 | if (getBlock()->getNumber() < o.getBlock()->getNumber()) |
491 | return true; |
492 | if (getBlock()->getNumber() > o.getBlock()->getNumber()) |
493 | return false; |
494 | return false; |
495 | } |
496 | |
497 | /// CountTerminators - Count the number of terminators in the given |
498 | /// block and set I to the position of the first non-terminator, if there |
499 | /// is one, or MBB->end() otherwise. |
500 | static unsigned CountTerminators(MachineBasicBlock *MBB, |
501 | MachineBasicBlock::iterator &I) { |
502 | I = MBB->end(); |
503 | unsigned NumTerms = 0; |
504 | while (true) { |
505 | if (I == MBB->begin()) { |
506 | I = MBB->end(); |
507 | break; |
508 | } |
509 | --I; |
510 | if (!I->isTerminator()) break; |
511 | ++NumTerms; |
512 | } |
513 | return NumTerms; |
514 | } |
515 | |
516 | /// A no successor, non-return block probably ends in unreachable and is cold. |
517 | /// Also consider a block that ends in an indirect branch to be a return block, |
518 | /// since many targets use plain indirect branches to return. |
519 | static bool blockEndsInUnreachable(const MachineBasicBlock *MBB) { |
520 | if (!MBB->succ_empty()) |
521 | return false; |
522 | if (MBB->empty()) |
523 | return true; |
524 | return !(MBB->back().isReturn() || MBB->back().isIndirectBranch()); |
525 | } |
526 | |
527 | /// ProfitableToMerge - Check if two machine basic blocks have a common tail |
528 | /// and decide if it would be profitable to merge those tails. Return the |
529 | /// length of the common tail and iterators to the first common instruction |
530 | /// in each block. |
531 | /// MBB1, MBB2 The blocks to check |
532 | /// MinCommonTailLength Minimum size of tail block to be merged. |
533 | /// CommonTailLen Out parameter to record the size of the shared tail between |
534 | /// MBB1 and MBB2 |
535 | /// I1, I2 Iterator references that will be changed to point to the first |
536 | /// instruction in the common tail shared by MBB1,MBB2 |
537 | /// SuccBB A common successor of MBB1, MBB2 which are in a canonical form |
538 | /// relative to SuccBB |
539 | /// PredBB The layout predecessor of SuccBB, if any. |
540 | /// EHScopeMembership map from block to EH scope #. |
541 | /// AfterPlacement True if we are merging blocks after layout. Stricter |
542 | /// thresholds apply to prevent undoing tail-duplication. |
543 | static bool |
544 | ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2, |
545 | unsigned MinCommonTailLength, unsigned &CommonTailLen, |
546 | MachineBasicBlock::iterator &I1, |
547 | MachineBasicBlock::iterator &I2, MachineBasicBlock *SuccBB, |
548 | MachineBasicBlock *PredBB, |
549 | DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, |
550 | bool AfterPlacement, |
551 | MBFIWrapper &MBBFreqInfo, |
552 | ProfileSummaryInfo *PSI) { |
553 | // It is never profitable to tail-merge blocks from two different EH scopes. |
554 | if (!EHScopeMembership.empty()) { |
555 | auto EHScope1 = EHScopeMembership.find(Val: MBB1); |
556 | assert(EHScope1 != EHScopeMembership.end()); |
557 | auto EHScope2 = EHScopeMembership.find(Val: MBB2); |
558 | assert(EHScope2 != EHScopeMembership.end()); |
559 | if (EHScope1->second != EHScope2->second) |
560 | return false; |
561 | } |
562 | |
563 | CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2); |
564 | if (CommonTailLen == 0) |
565 | return false; |
566 | LLVM_DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1) |
567 | << " and " << printMBBReference(*MBB2) << " is " |
568 | << CommonTailLen << '\n'); |
569 | |
570 | // Move the iterators to the beginning of the MBB if we only got debug |
571 | // instructions before the tail. This is to avoid splitting a block when we |
572 | // only got debug instructions before the tail (to be invariant on -g). |
573 | if (skipDebugInstructionsForward(It: MBB1->begin(), End: MBB1->end(), SkipPseudoOp: false) == I1) |
574 | I1 = MBB1->begin(); |
575 | if (skipDebugInstructionsForward(It: MBB2->begin(), End: MBB2->end(), SkipPseudoOp: false) == I2) |
576 | I2 = MBB2->begin(); |
577 | |
578 | bool FullBlockTail1 = I1 == MBB1->begin(); |
579 | bool FullBlockTail2 = I2 == MBB2->begin(); |
580 | |
581 | // It's almost always profitable to merge any number of non-terminator |
582 | // instructions with the block that falls through into the common successor. |
583 | // This is true only for a single successor. For multiple successors, we are |
584 | // trading a conditional branch for an unconditional one. |
585 | // TODO: Re-visit successor size for non-layout tail merging. |
586 | if ((MBB1 == PredBB || MBB2 == PredBB) && |
587 | (!AfterPlacement || MBB1->succ_size() == 1)) { |
588 | MachineBasicBlock::iterator I; |
589 | unsigned NumTerms = CountTerminators(MBB: MBB1 == PredBB ? MBB2 : MBB1, I); |
590 | if (CommonTailLen > NumTerms) |
591 | return true; |
592 | } |
593 | |
594 | // If these are identical non-return blocks with no successors, merge them. |
595 | // Such blocks are typically cold calls to noreturn functions like abort, and |
596 | // are unlikely to become a fallthrough target after machine block placement. |
597 | // Tail merging these blocks is unlikely to create additional unconditional |
598 | // branches, and will reduce the size of this cold code. |
599 | if (FullBlockTail1 && FullBlockTail2 && |
600 | blockEndsInUnreachable(MBB: MBB1) && blockEndsInUnreachable(MBB: MBB2)) |
601 | return true; |
602 | |
603 | // If one of the blocks can be completely merged and happens to be in |
604 | // a position where the other could fall through into it, merge any number |
605 | // of instructions, because it can be done without a branch. |
606 | // TODO: If the blocks are not adjacent, move one of them so that they are? |
607 | if (MBB1->isLayoutSuccessor(MBB: MBB2) && FullBlockTail2) |
608 | return true; |
609 | if (MBB2->isLayoutSuccessor(MBB: MBB1) && FullBlockTail1) |
610 | return true; |
611 | |
612 | // If both blocks are identical and end in a branch, merge them unless they |
613 | // both have a fallthrough predecessor and successor. |
614 | // We can only do this after block placement because it depends on whether |
615 | // there are fallthroughs, and we don't know until after layout. |
616 | if (AfterPlacement && FullBlockTail1 && FullBlockTail2) { |
617 | auto BothFallThrough = [](MachineBasicBlock *MBB) { |
618 | if (!MBB->succ_empty() && !MBB->canFallThrough()) |
619 | return false; |
620 | MachineFunction::iterator I(MBB); |
621 | MachineFunction *MF = MBB->getParent(); |
622 | return (MBB != &*MF->begin()) && std::prev(x: I)->canFallThrough(); |
623 | }; |
624 | if (!BothFallThrough(MBB1) || !BothFallThrough(MBB2)) |
625 | return true; |
626 | } |
627 | |
628 | // If both blocks have an unconditional branch temporarily stripped out, |
629 | // count that as an additional common instruction for the following |
630 | // heuristics. This heuristic is only accurate for single-succ blocks, so to |
631 | // make sure that during layout merging and duplicating don't crash, we check |
632 | // for that when merging during layout. |
633 | unsigned EffectiveTailLen = CommonTailLen; |
634 | if (SuccBB && MBB1 != PredBB && MBB2 != PredBB && |
635 | (MBB1->succ_size() == 1 || !AfterPlacement) && |
636 | !MBB1->back().isBarrier() && |
637 | !MBB2->back().isBarrier()) |
638 | ++EffectiveTailLen; |
639 | |
640 | // Check if the common tail is long enough to be worthwhile. |
641 | if (EffectiveTailLen >= MinCommonTailLength) |
642 | return true; |
643 | |
644 | // If we are optimizing for code size, 2 instructions in common is enough if |
645 | // we don't have to split a block. At worst we will be introducing 1 new |
646 | // branch instruction, which is likely to be smaller than the 2 |
647 | // instructions that would be deleted in the merge. |
648 | MachineFunction *MF = MBB1->getParent(); |
649 | bool OptForSize = |
650 | MF->getFunction().hasOptSize() || |
651 | (llvm::shouldOptimizeForSize(MBB: MBB1, PSI, MBFIWrapper: &MBBFreqInfo) && |
652 | llvm::shouldOptimizeForSize(MBB: MBB2, PSI, MBFIWrapper: &MBBFreqInfo)); |
653 | return EffectiveTailLen >= 2 && OptForSize && |
654 | (FullBlockTail1 || FullBlockTail2); |
655 | } |
656 | |
657 | unsigned BranchFolder::ComputeSameTails(unsigned CurHash, |
658 | unsigned MinCommonTailLength, |
659 | MachineBasicBlock *SuccBB, |
660 | MachineBasicBlock *PredBB) { |
661 | unsigned maxCommonTailLength = 0U; |
662 | SameTails.clear(); |
663 | MachineBasicBlock::iterator TrialBBI1, TrialBBI2; |
664 | MPIterator HighestMPIter = std::prev(x: MergePotentials.end()); |
665 | for (MPIterator CurMPIter = std::prev(x: MergePotentials.end()), |
666 | B = MergePotentials.begin(); |
667 | CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) { |
668 | for (MPIterator I = std::prev(x: CurMPIter); I->getHash() == CurHash; --I) { |
669 | unsigned CommonTailLen; |
670 | if (ProfitableToMerge(MBB1: CurMPIter->getBlock(), MBB2: I->getBlock(), |
671 | MinCommonTailLength, |
672 | CommonTailLen, I1&: TrialBBI1, I2&: TrialBBI2, |
673 | SuccBB, PredBB, |
674 | EHScopeMembership, |
675 | AfterPlacement: AfterBlockPlacement, MBBFreqInfo, PSI)) { |
676 | if (CommonTailLen > maxCommonTailLength) { |
677 | SameTails.clear(); |
678 | maxCommonTailLength = CommonTailLen; |
679 | HighestMPIter = CurMPIter; |
680 | SameTails.push_back(x: SameTailElt(CurMPIter, TrialBBI1)); |
681 | } |
682 | if (HighestMPIter == CurMPIter && |
683 | CommonTailLen == maxCommonTailLength) |
684 | SameTails.push_back(x: SameTailElt(I, TrialBBI2)); |
685 | } |
686 | if (I == B) |
687 | break; |
688 | } |
689 | } |
690 | return maxCommonTailLength; |
691 | } |
692 | |
693 | void BranchFolder::RemoveBlocksWithHash(unsigned CurHash, |
694 | MachineBasicBlock *SuccBB, |
695 | MachineBasicBlock *PredBB, |
696 | const DebugLoc &BranchDL) { |
697 | MPIterator CurMPIter, B; |
698 | for (CurMPIter = std::prev(x: MergePotentials.end()), |
699 | B = MergePotentials.begin(); |
700 | CurMPIter->getHash() == CurHash; --CurMPIter) { |
701 | // Put the unconditional branch back, if we need one. |
702 | MachineBasicBlock *CurMBB = CurMPIter->getBlock(); |
703 | if (SuccBB && CurMBB != PredBB) |
704 | FixTail(CurMBB, SuccBB, TII, BranchDL); |
705 | if (CurMPIter == B) |
706 | break; |
707 | } |
708 | if (CurMPIter->getHash() != CurHash) |
709 | CurMPIter++; |
710 | MergePotentials.erase(first: CurMPIter, last: MergePotentials.end()); |
711 | } |
712 | |
713 | bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB, |
714 | MachineBasicBlock *SuccBB, |
715 | unsigned maxCommonTailLength, |
716 | unsigned &commonTailIndex) { |
717 | commonTailIndex = 0; |
718 | unsigned TimeEstimate = ~0U; |
719 | for (unsigned i = 0, e = SameTails.size(); i != e; ++i) { |
720 | // Use PredBB if possible; that doesn't require a new branch. |
721 | if (SameTails[i].getBlock() == PredBB) { |
722 | commonTailIndex = i; |
723 | break; |
724 | } |
725 | // Otherwise, make a (fairly bogus) choice based on estimate of |
726 | // how long it will take the various blocks to execute. |
727 | unsigned t = EstimateRuntime(I: SameTails[i].getBlock()->begin(), |
728 | E: SameTails[i].getTailStartPos()); |
729 | if (t <= TimeEstimate) { |
730 | TimeEstimate = t; |
731 | commonTailIndex = i; |
732 | } |
733 | } |
734 | |
735 | MachineBasicBlock::iterator BBI = |
736 | SameTails[commonTailIndex].getTailStartPos(); |
737 | MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock(); |
738 | |
739 | LLVM_DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size " |
740 | << maxCommonTailLength); |
741 | |
742 | // If the split block unconditionally falls-thru to SuccBB, it will be |
743 | // merged. In control flow terms it should then take SuccBB's name. e.g. If |
744 | // SuccBB is an inner loop, the common tail is still part of the inner loop. |
745 | const BasicBlock *BB = (SuccBB && MBB->succ_size() == 1) ? |
746 | SuccBB->getBasicBlock() : MBB->getBasicBlock(); |
747 | MachineBasicBlock *newMBB = SplitMBBAt(CurMBB&: *MBB, BBI1: BBI, BB); |
748 | if (!newMBB) { |
749 | LLVM_DEBUG(dbgs() << "... failed!" ); |
750 | return false; |
751 | } |
752 | |
753 | SameTails[commonTailIndex].setBlock(newMBB); |
754 | SameTails[commonTailIndex].setTailStartPos(newMBB->begin()); |
755 | |
756 | // If we split PredBB, newMBB is the new predecessor. |
757 | if (PredBB == MBB) |
758 | PredBB = newMBB; |
759 | |
760 | return true; |
761 | } |
762 | |
763 | static void |
764 | mergeOperations(MachineBasicBlock::iterator MBBIStartPos, |
765 | MachineBasicBlock &MBBCommon) { |
766 | MachineBasicBlock *MBB = MBBIStartPos->getParent(); |
767 | // Note CommonTailLen does not necessarily matches the size of |
768 | // the common BB nor all its instructions because of debug |
769 | // instructions differences. |
770 | unsigned CommonTailLen = 0; |
771 | for (auto E = MBB->end(); MBBIStartPos != E; ++MBBIStartPos) |
772 | ++CommonTailLen; |
773 | |
774 | MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin(); |
775 | MachineBasicBlock::reverse_iterator MBBIE = MBB->rend(); |
776 | MachineBasicBlock::reverse_iterator MBBICommon = MBBCommon.rbegin(); |
777 | MachineBasicBlock::reverse_iterator MBBIECommon = MBBCommon.rend(); |
778 | |
779 | while (CommonTailLen--) { |
780 | assert(MBBI != MBBIE && "Reached BB end within common tail length!" ); |
781 | (void)MBBIE; |
782 | |
783 | if (!countsAsInstruction(MI: *MBBI)) { |
784 | ++MBBI; |
785 | continue; |
786 | } |
787 | |
788 | while ((MBBICommon != MBBIECommon) && !countsAsInstruction(MI: *MBBICommon)) |
789 | ++MBBICommon; |
790 | |
791 | assert(MBBICommon != MBBIECommon && |
792 | "Reached BB end within common tail length!" ); |
793 | assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!" ); |
794 | |
795 | // Merge MMOs from memory operations in the common block. |
796 | if (MBBICommon->mayLoadOrStore()) |
797 | MBBICommon->cloneMergedMemRefs(MF&: *MBB->getParent(), MIs: {&*MBBICommon, &*MBBI}); |
798 | // Drop undef flags if they aren't present in all merged instructions. |
799 | for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) { |
800 | MachineOperand &MO = MBBICommon->getOperand(i: I); |
801 | if (MO.isReg() && MO.isUndef()) { |
802 | const MachineOperand &OtherMO = MBBI->getOperand(i: I); |
803 | if (!OtherMO.isUndef()) |
804 | MO.setIsUndef(false); |
805 | } |
806 | } |
807 | |
808 | ++MBBI; |
809 | ++MBBICommon; |
810 | } |
811 | } |
812 | |
813 | void BranchFolder::mergeCommonTails(unsigned commonTailIndex) { |
814 | MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock(); |
815 | |
816 | std::vector<MachineBasicBlock::iterator> NextCommonInsts(SameTails.size()); |
817 | for (unsigned int i = 0 ; i != SameTails.size() ; ++i) { |
818 | if (i != commonTailIndex) { |
819 | NextCommonInsts[i] = SameTails[i].getTailStartPos(); |
820 | mergeOperations(MBBIStartPos: SameTails[i].getTailStartPos(), MBBCommon&: *MBB); |
821 | } else { |
822 | assert(SameTails[i].getTailStartPos() == MBB->begin() && |
823 | "MBB is not a common tail only block" ); |
824 | } |
825 | } |
826 | |
827 | for (auto &MI : *MBB) { |
828 | if (!countsAsInstruction(MI)) |
829 | continue; |
830 | DebugLoc DL = MI.getDebugLoc(); |
831 | for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) { |
832 | if (i == commonTailIndex) |
833 | continue; |
834 | |
835 | auto &Pos = NextCommonInsts[i]; |
836 | assert(Pos != SameTails[i].getBlock()->end() && |
837 | "Reached BB end within common tail" ); |
838 | while (!countsAsInstruction(MI: *Pos)) { |
839 | ++Pos; |
840 | assert(Pos != SameTails[i].getBlock()->end() && |
841 | "Reached BB end within common tail" ); |
842 | } |
843 | assert(MI.isIdenticalTo(*Pos) && "Expected matching MIIs!" ); |
844 | DL = DILocation::getMergedLocation(LocA: DL, LocB: Pos->getDebugLoc()); |
845 | NextCommonInsts[i] = ++Pos; |
846 | } |
847 | MI.setDebugLoc(DL); |
848 | } |
849 | |
850 | if (UpdateLiveIns) { |
851 | LivePhysRegs NewLiveIns(*TRI); |
852 | computeLiveIns(LiveRegs&: NewLiveIns, MBB: *MBB); |
853 | LiveRegs.init(TRI: *TRI); |
854 | |
855 | // The flag merging may lead to some register uses no longer using the |
856 | // <undef> flag, add IMPLICIT_DEFs in the predecessors as necessary. |
857 | for (MachineBasicBlock *Pred : MBB->predecessors()) { |
858 | LiveRegs.clear(); |
859 | LiveRegs.addLiveOuts(MBB: *Pred); |
860 | MachineBasicBlock::iterator InsertBefore = Pred->getFirstTerminator(); |
861 | for (Register Reg : NewLiveIns) { |
862 | if (!LiveRegs.available(MRI: *MRI, Reg)) |
863 | continue; |
864 | |
865 | // Skip the register if we are about to add one of its super registers. |
866 | // TODO: Common this up with the same logic in addLineIns(). |
867 | if (any_of(Range: TRI->superregs(Reg), P: [&](MCPhysReg SReg) { |
868 | return NewLiveIns.contains(Reg: SReg) && !MRI->isReserved(PhysReg: SReg); |
869 | })) |
870 | continue; |
871 | |
872 | DebugLoc DL; |
873 | BuildMI(BB&: *Pred, I: InsertBefore, MIMD: DL, MCID: TII->get(Opcode: TargetOpcode::IMPLICIT_DEF), |
874 | DestReg: Reg); |
875 | } |
876 | } |
877 | |
878 | MBB->clearLiveIns(); |
879 | addLiveIns(MBB&: *MBB, LiveRegs: NewLiveIns); |
880 | } |
881 | } |
882 | |
883 | // See if any of the blocks in MergePotentials (which all have SuccBB as a |
884 | // successor, or all have no successor if it is null) can be tail-merged. |
885 | // If there is a successor, any blocks in MergePotentials that are not |
886 | // tail-merged and are not immediately before Succ must have an unconditional |
887 | // branch to Succ added (but the predecessor/successor lists need no |
888 | // adjustment). The lone predecessor of Succ that falls through into Succ, |
889 | // if any, is given in PredBB. |
890 | // MinCommonTailLength - Except for the special cases below, tail-merge if |
891 | // there are at least this many instructions in common. |
892 | bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB, |
893 | MachineBasicBlock *PredBB, |
894 | unsigned MinCommonTailLength) { |
895 | bool MadeChange = false; |
896 | |
897 | LLVM_DEBUG( |
898 | dbgs() << "\nTryTailMergeBlocks: " ; |
899 | for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs() |
900 | << printMBBReference(*MergePotentials[i].getBlock()) |
901 | << (i == e - 1 ? "" : ", " ); |
902 | dbgs() << "\n" ; if (SuccBB) { |
903 | dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n'; |
904 | if (PredBB) |
905 | dbgs() << " which has fall-through from " |
906 | << printMBBReference(*PredBB) << "\n" ; |
907 | } dbgs() << "Looking for common tails of at least " |
908 | << MinCommonTailLength << " instruction" |
909 | << (MinCommonTailLength == 1 ? "" : "s" ) << '\n';); |
910 | |
911 | // Sort by hash value so that blocks with identical end sequences sort |
912 | // together. |
913 | array_pod_sort(Start: MergePotentials.begin(), End: MergePotentials.end()); |
914 | |
915 | // Walk through equivalence sets looking for actual exact matches. |
916 | while (MergePotentials.size() > 1) { |
917 | unsigned CurHash = MergePotentials.back().getHash(); |
918 | const DebugLoc &BranchDL = MergePotentials.back().getBranchDebugLoc(); |
919 | |
920 | // Build SameTails, identifying the set of blocks with this hash code |
921 | // and with the maximum number of instructions in common. |
922 | unsigned maxCommonTailLength = ComputeSameTails(CurHash, |
923 | MinCommonTailLength, |
924 | SuccBB, PredBB); |
925 | |
926 | // If we didn't find any pair that has at least MinCommonTailLength |
927 | // instructions in common, remove all blocks with this hash code and retry. |
928 | if (SameTails.empty()) { |
929 | RemoveBlocksWithHash(CurHash, SuccBB, PredBB, BranchDL); |
930 | continue; |
931 | } |
932 | |
933 | // If one of the blocks is the entire common tail (and is not the entry |
934 | // block/an EH pad, which we can't jump to), we can treat all blocks with |
935 | // this same tail at once. Use PredBB if that is one of the possibilities, |
936 | // as that will not introduce any extra branches. |
937 | MachineBasicBlock *EntryBB = |
938 | &MergePotentials.front().getBlock()->getParent()->front(); |
939 | unsigned commonTailIndex = SameTails.size(); |
940 | // If there are two blocks, check to see if one can be made to fall through |
941 | // into the other. |
942 | if (SameTails.size() == 2 && |
943 | SameTails[0].getBlock()->isLayoutSuccessor(MBB: SameTails[1].getBlock()) && |
944 | SameTails[1].tailIsWholeBlock() && !SameTails[1].getBlock()->isEHPad()) |
945 | commonTailIndex = 1; |
946 | else if (SameTails.size() == 2 && |
947 | SameTails[1].getBlock()->isLayoutSuccessor( |
948 | MBB: SameTails[0].getBlock()) && |
949 | SameTails[0].tailIsWholeBlock() && |
950 | !SameTails[0].getBlock()->isEHPad()) |
951 | commonTailIndex = 0; |
952 | else { |
953 | // Otherwise just pick one, favoring the fall-through predecessor if |
954 | // there is one. |
955 | for (unsigned i = 0, e = SameTails.size(); i != e; ++i) { |
956 | MachineBasicBlock *MBB = SameTails[i].getBlock(); |
957 | if ((MBB == EntryBB || MBB->isEHPad()) && |
958 | SameTails[i].tailIsWholeBlock()) |
959 | continue; |
960 | if (MBB == PredBB) { |
961 | commonTailIndex = i; |
962 | break; |
963 | } |
964 | if (SameTails[i].tailIsWholeBlock()) |
965 | commonTailIndex = i; |
966 | } |
967 | } |
968 | |
969 | if (commonTailIndex == SameTails.size() || |
970 | (SameTails[commonTailIndex].getBlock() == PredBB && |
971 | !SameTails[commonTailIndex].tailIsWholeBlock())) { |
972 | // None of the blocks consist entirely of the common tail. |
973 | // Split a block so that one does. |
974 | if (!CreateCommonTailOnlyBlock(PredBB, SuccBB, |
975 | maxCommonTailLength, commonTailIndex)) { |
976 | RemoveBlocksWithHash(CurHash, SuccBB, PredBB, BranchDL); |
977 | continue; |
978 | } |
979 | } |
980 | |
981 | MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock(); |
982 | |
983 | // Recompute common tail MBB's edge weights and block frequency. |
984 | setCommonTailEdgeWeights(*MBB); |
985 | |
986 | // Merge debug locations, MMOs and undef flags across identical instructions |
987 | // for common tail. |
988 | mergeCommonTails(commonTailIndex); |
989 | |
990 | // MBB is common tail. Adjust all other BB's to jump to this one. |
991 | // Traversal must be forwards so erases work. |
992 | LLVM_DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB) |
993 | << " for " ); |
994 | for (unsigned int i=0, e = SameTails.size(); i != e; ++i) { |
995 | if (commonTailIndex == i) |
996 | continue; |
997 | LLVM_DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock()) |
998 | << (i == e - 1 ? "" : ", " )); |
999 | // Hack the end off BB i, making it jump to BB commonTailIndex instead. |
1000 | replaceTailWithBranchTo(OldInst: SameTails[i].getTailStartPos(), NewDest&: *MBB); |
1001 | // BB i is no longer a predecessor of SuccBB; remove it from the worklist. |
1002 | MergePotentials.erase(position: SameTails[i].getMPIter()); |
1003 | } |
1004 | LLVM_DEBUG(dbgs() << "\n" ); |
1005 | // We leave commonTailIndex in the worklist in case there are other blocks |
1006 | // that match it with a smaller number of instructions. |
1007 | MadeChange = true; |
1008 | } |
1009 | return MadeChange; |
1010 | } |
1011 | |
1012 | bool BranchFolder::TailMergeBlocks(MachineFunction &MF) { |
1013 | bool MadeChange = false; |
1014 | if (!EnableTailMerge) |
1015 | return MadeChange; |
1016 | |
1017 | // First find blocks with no successors. |
1018 | // Block placement may create new tail merging opportunities for these blocks. |
1019 | MergePotentials.clear(); |
1020 | for (MachineBasicBlock &MBB : MF) { |
1021 | if (MergePotentials.size() == TailMergeThreshold) |
1022 | break; |
1023 | if (!TriedMerging.count(Ptr: &MBB) && MBB.succ_empty()) |
1024 | MergePotentials.push_back(x: MergePotentialsElt(HashEndOfMBB(MBB), &MBB, |
1025 | MBB.findBranchDebugLoc())); |
1026 | } |
1027 | |
1028 | // If this is a large problem, avoid visiting the same basic blocks |
1029 | // multiple times. |
1030 | if (MergePotentials.size() == TailMergeThreshold) |
1031 | for (const MergePotentialsElt &Elt : MergePotentials) |
1032 | TriedMerging.insert(Ptr: Elt.getBlock()); |
1033 | |
1034 | // See if we can do any tail merging on those. |
1035 | if (MergePotentials.size() >= 2) |
1036 | MadeChange |= TryTailMergeBlocks(SuccBB: nullptr, PredBB: nullptr, MinCommonTailLength); |
1037 | |
1038 | // Look at blocks (IBB) with multiple predecessors (PBB). |
1039 | // We change each predecessor to a canonical form, by |
1040 | // (1) temporarily removing any unconditional branch from the predecessor |
1041 | // to IBB, and |
1042 | // (2) alter conditional branches so they branch to the other block |
1043 | // not IBB; this may require adding back an unconditional branch to IBB |
1044 | // later, where there wasn't one coming in. E.g. |
1045 | // Bcc IBB |
1046 | // fallthrough to QBB |
1047 | // here becomes |
1048 | // Bncc QBB |
1049 | // with a conceptual B to IBB after that, which never actually exists. |
1050 | // With those changes, we see whether the predecessors' tails match, |
1051 | // and merge them if so. We change things out of canonical form and |
1052 | // back to the way they were later in the process. (OptimizeBranches |
1053 | // would undo some of this, but we can't use it, because we'd get into |
1054 | // a compile-time infinite loop repeatedly doing and undoing the same |
1055 | // transformations.) |
1056 | |
1057 | for (MachineFunction::iterator I = std::next(x: MF.begin()), E = MF.end(); |
1058 | I != E; ++I) { |
1059 | if (I->pred_size() < 2) continue; |
1060 | SmallPtrSet<MachineBasicBlock *, 8> UniquePreds; |
1061 | MachineBasicBlock *IBB = &*I; |
1062 | MachineBasicBlock *PredBB = &*std::prev(x: I); |
1063 | MergePotentials.clear(); |
1064 | MachineLoop *ML; |
1065 | |
1066 | // Bail if merging after placement and IBB is the loop header because |
1067 | // -- If merging predecessors that belong to the same loop as IBB, the |
1068 | // common tail of merged predecessors may become the loop top if block |
1069 | // placement is called again and the predecessors may branch to this common |
1070 | // tail and require more branches. This can be relaxed if |
1071 | // MachineBlockPlacement::findBestLoopTop is more flexible. |
1072 | // --If merging predecessors that do not belong to the same loop as IBB, the |
1073 | // loop info of IBB's loop and the other loops may be affected. Calling the |
1074 | // block placement again may make big change to the layout and eliminate the |
1075 | // reason to do tail merging here. |
1076 | if (AfterBlockPlacement && MLI) { |
1077 | ML = MLI->getLoopFor(BB: IBB); |
1078 | if (ML && IBB == ML->getHeader()) |
1079 | continue; |
1080 | } |
1081 | |
1082 | for (MachineBasicBlock *PBB : I->predecessors()) { |
1083 | if (MergePotentials.size() == TailMergeThreshold) |
1084 | break; |
1085 | |
1086 | if (TriedMerging.count(Ptr: PBB)) |
1087 | continue; |
1088 | |
1089 | // Skip blocks that loop to themselves, can't tail merge these. |
1090 | if (PBB == IBB) |
1091 | continue; |
1092 | |
1093 | // Visit each predecessor only once. |
1094 | if (!UniquePreds.insert(Ptr: PBB).second) |
1095 | continue; |
1096 | |
1097 | // Skip blocks which may jump to a landing pad or jump from an asm blob. |
1098 | // Can't tail merge these. |
1099 | if (PBB->hasEHPadSuccessor() || PBB->mayHaveInlineAsmBr()) |
1100 | continue; |
1101 | |
1102 | // After block placement, only consider predecessors that belong to the |
1103 | // same loop as IBB. The reason is the same as above when skipping loop |
1104 | // header. |
1105 | if (AfterBlockPlacement && MLI) |
1106 | if (ML != MLI->getLoopFor(BB: PBB)) |
1107 | continue; |
1108 | |
1109 | MachineBasicBlock *TBB = nullptr, *FBB = nullptr; |
1110 | SmallVector<MachineOperand, 4> Cond; |
1111 | if (!TII->analyzeBranch(MBB&: *PBB, TBB, FBB, Cond, AllowModify: true)) { |
1112 | // Failing case: IBB is the target of a cbr, and we cannot reverse the |
1113 | // branch. |
1114 | SmallVector<MachineOperand, 4> NewCond(Cond); |
1115 | if (!Cond.empty() && TBB == IBB) { |
1116 | if (TII->reverseBranchCondition(Cond&: NewCond)) |
1117 | continue; |
1118 | // This is the QBB case described above |
1119 | if (!FBB) { |
1120 | auto Next = ++PBB->getIterator(); |
1121 | if (Next != MF.end()) |
1122 | FBB = &*Next; |
1123 | } |
1124 | } |
1125 | |
1126 | // Remove the unconditional branch at the end, if any. |
1127 | DebugLoc dl = PBB->findBranchDebugLoc(); |
1128 | if (TBB && (Cond.empty() || FBB)) { |
1129 | TII->removeBranch(MBB&: *PBB); |
1130 | if (!Cond.empty()) |
1131 | // reinsert conditional branch only, for now |
1132 | TII->insertBranch(MBB&: *PBB, TBB: (TBB == IBB) ? FBB : TBB, FBB: nullptr, |
1133 | Cond: NewCond, DL: dl); |
1134 | } |
1135 | |
1136 | MergePotentials.push_back( |
1137 | x: MergePotentialsElt(HashEndOfMBB(MBB: *PBB), PBB, dl)); |
1138 | } |
1139 | } |
1140 | |
1141 | // If this is a large problem, avoid visiting the same basic blocks multiple |
1142 | // times. |
1143 | if (MergePotentials.size() == TailMergeThreshold) |
1144 | for (MergePotentialsElt &Elt : MergePotentials) |
1145 | TriedMerging.insert(Ptr: Elt.getBlock()); |
1146 | |
1147 | if (MergePotentials.size() >= 2) |
1148 | MadeChange |= TryTailMergeBlocks(SuccBB: IBB, PredBB, MinCommonTailLength); |
1149 | |
1150 | // Reinsert an unconditional branch if needed. The 1 below can occur as a |
1151 | // result of removing blocks in TryTailMergeBlocks. |
1152 | PredBB = &*std::prev(x: I); // this may have been changed in TryTailMergeBlocks |
1153 | if (MergePotentials.size() == 1 && |
1154 | MergePotentials.begin()->getBlock() != PredBB) |
1155 | FixTail(CurMBB: MergePotentials.begin()->getBlock(), SuccBB: IBB, TII, |
1156 | BranchDL: MergePotentials.begin()->getBranchDebugLoc()); |
1157 | } |
1158 | |
1159 | return MadeChange; |
1160 | } |
1161 | |
1162 | void BranchFolder::setCommonTailEdgeWeights(MachineBasicBlock &TailMBB) { |
1163 | SmallVector<BlockFrequency, 2> EdgeFreqLs(TailMBB.succ_size()); |
1164 | BlockFrequency AccumulatedMBBFreq; |
1165 | |
1166 | // Aggregate edge frequency of successor edge j: |
1167 | // edgeFreq(j) = sum (freq(bb) * edgeProb(bb, j)), |
1168 | // where bb is a basic block that is in SameTails. |
1169 | for (const auto &Src : SameTails) { |
1170 | const MachineBasicBlock *SrcMBB = Src.getBlock(); |
1171 | BlockFrequency BlockFreq = MBBFreqInfo.getBlockFreq(MBB: SrcMBB); |
1172 | AccumulatedMBBFreq += BlockFreq; |
1173 | |
1174 | // It is not necessary to recompute edge weights if TailBB has less than two |
1175 | // successors. |
1176 | if (TailMBB.succ_size() <= 1) |
1177 | continue; |
1178 | |
1179 | auto EdgeFreq = EdgeFreqLs.begin(); |
1180 | |
1181 | for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end(); |
1182 | SuccI != SuccE; ++SuccI, ++EdgeFreq) |
1183 | *EdgeFreq += BlockFreq * MBPI.getEdgeProbability(Src: SrcMBB, Dst: *SuccI); |
1184 | } |
1185 | |
1186 | MBBFreqInfo.setBlockFreq(MBB: &TailMBB, F: AccumulatedMBBFreq); |
1187 | |
1188 | if (TailMBB.succ_size() <= 1) |
1189 | return; |
1190 | |
1191 | auto SumEdgeFreq = |
1192 | std::accumulate(first: EdgeFreqLs.begin(), last: EdgeFreqLs.end(), init: BlockFrequency(0)) |
1193 | .getFrequency(); |
1194 | auto EdgeFreq = EdgeFreqLs.begin(); |
1195 | |
1196 | if (SumEdgeFreq > 0) { |
1197 | for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end(); |
1198 | SuccI != SuccE; ++SuccI, ++EdgeFreq) { |
1199 | auto Prob = BranchProbability::getBranchProbability( |
1200 | Numerator: EdgeFreq->getFrequency(), Denominator: SumEdgeFreq); |
1201 | TailMBB.setSuccProbability(I: SuccI, Prob); |
1202 | } |
1203 | } |
1204 | } |
1205 | |
1206 | //===----------------------------------------------------------------------===// |
1207 | // Branch Optimization |
1208 | //===----------------------------------------------------------------------===// |
1209 | |
1210 | bool BranchFolder::OptimizeBranches(MachineFunction &MF) { |
1211 | bool MadeChange = false; |
1212 | |
1213 | // Make sure blocks are numbered in order |
1214 | MF.RenumberBlocks(); |
1215 | // Renumbering blocks alters EH scope membership, recalculate it. |
1216 | EHScopeMembership = getEHScopeMembership(MF); |
1217 | |
1218 | for (MachineBasicBlock &MBB : |
1219 | llvm::make_early_inc_range(Range: llvm::drop_begin(RangeOrContainer&: MF))) { |
1220 | MadeChange |= OptimizeBlock(MBB: &MBB); |
1221 | |
1222 | // If it is dead, remove it. |
1223 | if (MBB.pred_empty() && !MBB.isMachineBlockAddressTaken()) { |
1224 | RemoveDeadBlock(MBB: &MBB); |
1225 | MadeChange = true; |
1226 | ++NumDeadBlocks; |
1227 | } |
1228 | } |
1229 | |
1230 | return MadeChange; |
1231 | } |
1232 | |
1233 | // Blocks should be considered empty if they contain only debug info; |
1234 | // else the debug info would affect codegen. |
1235 | static bool IsEmptyBlock(MachineBasicBlock *MBB) { |
1236 | return MBB->getFirstNonDebugInstr(SkipPseudoOp: true) == MBB->end(); |
1237 | } |
1238 | |
1239 | // Blocks with only debug info and branches should be considered the same |
1240 | // as blocks with only branches. |
1241 | static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) { |
1242 | MachineBasicBlock::iterator I = MBB->getFirstNonDebugInstr(); |
1243 | assert(I != MBB->end() && "empty block!" ); |
1244 | return I->isBranch(); |
1245 | } |
1246 | |
1247 | /// IsBetterFallthrough - Return true if it would be clearly better to |
1248 | /// fall-through to MBB1 than to fall through into MBB2. This has to return |
1249 | /// a strict ordering, returning true for both (MBB1,MBB2) and (MBB2,MBB1) will |
1250 | /// result in infinite loops. |
1251 | static bool IsBetterFallthrough(MachineBasicBlock *MBB1, |
1252 | MachineBasicBlock *MBB2) { |
1253 | assert(MBB1 && MBB2 && "Unknown MachineBasicBlock" ); |
1254 | |
1255 | // Right now, we use a simple heuristic. If MBB2 ends with a call, and |
1256 | // MBB1 doesn't, we prefer to fall through into MBB1. This allows us to |
1257 | // optimize branches that branch to either a return block or an assert block |
1258 | // into a fallthrough to the return. |
1259 | MachineBasicBlock::iterator MBB1I = MBB1->getLastNonDebugInstr(); |
1260 | MachineBasicBlock::iterator MBB2I = MBB2->getLastNonDebugInstr(); |
1261 | if (MBB1I == MBB1->end() || MBB2I == MBB2->end()) |
1262 | return false; |
1263 | |
1264 | // If there is a clear successor ordering we make sure that one block |
1265 | // will fall through to the next |
1266 | if (MBB1->isSuccessor(MBB: MBB2)) return true; |
1267 | if (MBB2->isSuccessor(MBB: MBB1)) return false; |
1268 | |
1269 | return MBB2I->isCall() && !MBB1I->isCall(); |
1270 | } |
1271 | |
1272 | /// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch |
1273 | /// instructions on the block. |
1274 | static DebugLoc getBranchDebugLoc(MachineBasicBlock &MBB) { |
1275 | MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
1276 | if (I != MBB.end() && I->isBranch()) |
1277 | return I->getDebugLoc(); |
1278 | return DebugLoc(); |
1279 | } |
1280 | |
1281 | static void copyDebugInfoToPredecessor(const TargetInstrInfo *TII, |
1282 | MachineBasicBlock &MBB, |
1283 | MachineBasicBlock &PredMBB) { |
1284 | auto InsertBefore = PredMBB.getFirstTerminator(); |
1285 | for (MachineInstr &MI : MBB.instrs()) |
1286 | if (MI.isDebugInstr()) { |
1287 | TII->duplicate(MBB&: PredMBB, InsertBefore, Orig: MI); |
1288 | LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to pred: " |
1289 | << MI); |
1290 | } |
1291 | } |
1292 | |
1293 | static void copyDebugInfoToSuccessor(const TargetInstrInfo *TII, |
1294 | MachineBasicBlock &MBB, |
1295 | MachineBasicBlock &SuccMBB) { |
1296 | auto InsertBefore = SuccMBB.SkipPHIsAndLabels(I: SuccMBB.begin()); |
1297 | for (MachineInstr &MI : MBB.instrs()) |
1298 | if (MI.isDebugInstr()) { |
1299 | TII->duplicate(MBB&: SuccMBB, InsertBefore, Orig: MI); |
1300 | LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to succ: " |
1301 | << MI); |
1302 | } |
1303 | } |
1304 | |
1305 | // Try to salvage DBG_VALUE instructions from an otherwise empty block. If such |
1306 | // a basic block is removed we would lose the debug information unless we have |
1307 | // copied the information to a predecessor/successor. |
1308 | // |
1309 | // TODO: This function only handles some simple cases. An alternative would be |
1310 | // to run a heavier analysis, such as the LiveDebugValues pass, before we do |
1311 | // branch folding. |
1312 | static void salvageDebugInfoFromEmptyBlock(const TargetInstrInfo *TII, |
1313 | MachineBasicBlock &MBB) { |
1314 | assert(IsEmptyBlock(&MBB) && "Expected an empty block (except debug info)." ); |
1315 | // If this MBB is the only predecessor of a successor it is legal to copy |
1316 | // DBG_VALUE instructions to the beginning of the successor. |
1317 | for (MachineBasicBlock *SuccBB : MBB.successors()) |
1318 | if (SuccBB->pred_size() == 1) |
1319 | copyDebugInfoToSuccessor(TII, MBB, SuccMBB&: *SuccBB); |
1320 | // If this MBB is the only successor of a predecessor it is legal to copy the |
1321 | // DBG_VALUE instructions to the end of the predecessor (just before the |
1322 | // terminators, assuming that the terminator isn't affecting the DBG_VALUE). |
1323 | for (MachineBasicBlock *PredBB : MBB.predecessors()) |
1324 | if (PredBB->succ_size() == 1) |
1325 | copyDebugInfoToPredecessor(TII, MBB, PredMBB&: *PredBB); |
1326 | } |
1327 | |
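/// OptimizeBlock - Analyze and optimize control flow related to MBB and its
/// layout neighbours, returning true if anything changed. Note that the code
/// below unconditionally inspects the block laid out before MBB, so this must
/// not be called on the first block of the function.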
1328 | bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) { |
1329 | bool MadeChange = false; |
1330 | MachineFunction &MF = *MBB->getParent(); |
1331 | ReoptimizeBlock: |
1332 | |
1333 | MachineFunction::iterator FallThrough = MBB->getIterator(); |
1334 | ++FallThrough; |
1335 | |
1336 | // Make sure MBB and FallThrough belong to the same EH scope. |
1337 | bool SameEHScope = true; |
1338 | if (!EHScopeMembership.empty() && FallThrough != MF.end()) { |
1339 | auto MBBEHScope = EHScopeMembership.find(Val: MBB); |
1340 | assert(MBBEHScope != EHScopeMembership.end()); |
1341 | auto FallThroughEHScope = EHScopeMembership.find(Val: &*FallThrough); |
1342 | assert(FallThroughEHScope != EHScopeMembership.end()); |
1343 | SameEHScope = MBBEHScope->second == FallThroughEHScope->second; |
1344 | } |
1345 | |
1346 | // Analyze the branch in the current block. As a side-effect, this may cause |
1347 | // the block to become empty. |
1348 | MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr; |
1349 | SmallVector<MachineOperand, 4> CurCond; |
1350 | bool CurUnAnalyzable = |
1351 | TII->analyzeBranch(MBB&: *MBB, TBB&: CurTBB, FBB&: CurFBB, Cond&: CurCond, AllowModify: true); |
1352 | |
1353 | // If this block is empty, make everyone use its fall-through, not the block |
1354 | // explicitly. Landing pads should not do this since the landing-pad table |
1355 | // points to this block. Blocks with their addresses taken shouldn't be |
1356 | // optimized away. |
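// For example (illustrative only; block names are made up):
//   Pred: ... ; jmp MBB
//   MBB:  (empty, falls through to Next)
//   Next: ...
// becomes
//   Pred: ... ; jmp Next
// after which MBB has no predecessors and is removed as a dead block later.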
1357 | if (IsEmptyBlock(MBB) && !MBB->isEHPad() && !MBB->hasAddressTaken() && |
1358 | SameEHScope) { |
1359 | salvageDebugInfoFromEmptyBlock(TII, MBB&: *MBB); |
1360 | // Dead block? Leave for cleanup later. |
1361 | if (MBB->pred_empty()) return MadeChange; |
1362 | |
1363 | if (FallThrough == MF.end()) { |
1364 | // TODO: Simplify preds to not branch here if possible! |
1365 | } else if (FallThrough->isEHPad()) { |
// Don't rewrite to a landing pad fallthrough. That could lead to the case
1367 | // where a BB jumps to more than one landing pad. |
1368 | // TODO: Is it ever worth rewriting predecessors which don't already |
1369 | // jump to a landing pad, and so can safely jump to the fallthrough? |
1370 | } else if (MBB->isSuccessor(MBB: &*FallThrough)) { |
1371 | // Rewrite all predecessors of the old block to go to the fallthrough |
1372 | // instead. |
1373 | while (!MBB->pred_empty()) { |
1374 | MachineBasicBlock *Pred = *(MBB->pred_end()-1); |
1375 | Pred->ReplaceUsesOfBlockWith(Old: MBB, New: &*FallThrough); |
1376 | } |
// Add the remaining successors of MBB to the successors of FallThrough.
// Those successors are not targets of MBB's branches or fall-through, so
// they must be landing pads.
1380 | for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) |
1381 | if (*SI != &*FallThrough && !FallThrough->isSuccessor(MBB: *SI)) { |
1382 | assert((*SI)->isEHPad() && "Bad CFG" ); |
1383 | FallThrough->copySuccessor(Orig: MBB, I: SI); |
1384 | } |
1385 | // If MBB was the target of a jump table, update jump tables to go to the |
1386 | // fallthrough instead. |
1387 | if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo()) |
1388 | MJTI->ReplaceMBBInJumpTables(Old: MBB, New: &*FallThrough); |
1389 | MadeChange = true; |
1390 | } |
1391 | return MadeChange; |
1392 | } |
1393 | |
1394 | // Check to see if we can simplify the terminator of the block before this |
1395 | // one. |
1396 | MachineBasicBlock &PrevBB = *std::prev(x: MachineFunction::iterator(MBB)); |
1397 | |
1398 | MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr; |
1399 | SmallVector<MachineOperand, 4> PriorCond; |
1400 | bool PriorUnAnalyzable = |
1401 | TII->analyzeBranch(MBB&: PrevBB, TBB&: PriorTBB, FBB&: PriorFBB, Cond&: PriorCond, AllowModify: true); |
1402 | if (!PriorUnAnalyzable) { |
1403 | // If the previous branch is conditional and both conditions go to the same |
1404 | // destination, remove the branch, replacing it with an unconditional one or |
1405 | // a fall-through. |
1406 | if (PriorTBB && PriorTBB == PriorFBB) { |
1407 | DebugLoc dl = getBranchDebugLoc(MBB&: PrevBB); |
1408 | TII->removeBranch(MBB&: PrevBB); |
1409 | PriorCond.clear(); |
1410 | if (PriorTBB != MBB) |
1411 | TII->insertBranch(MBB&: PrevBB, TBB: PriorTBB, FBB: nullptr, Cond: PriorCond, DL: dl); |
1412 | MadeChange = true; |
1413 | ++NumBranchOpts; |
1414 | goto ReoptimizeBlock; |
1415 | } |
1416 | |
1417 | // If the previous block unconditionally falls through to this block and |
1418 | // this block has no other predecessors, move the contents of this block |
1419 | // into the prior block. This doesn't usually happen when SimplifyCFG |
1420 | // has been used, but it can happen if tail merging splits a fall-through |
1421 | // predecessor of a block. |
// This has to check PrevBB.succ_size() because EH edges are ignored by
// analyzeBranch.
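// For example (illustrative only):
//   PrevBB: ...          (no terminator, falls through into MBB)
//   MBB:    <instrs>     (PrevBB is its only predecessor)
// becomes the single block
//   PrevBB: ... ; <instrs>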
1424 | if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 && |
1425 | PrevBB.succ_size() == 1 && PrevBB.isSuccessor(MBB) && |
1426 | !MBB->hasAddressTaken() && !MBB->isEHPad()) { |
1427 | LLVM_DEBUG(dbgs() << "\nMerging into block: " << PrevBB |
1428 | << "From MBB: " << *MBB); |
1429 | // Remove redundant DBG_VALUEs first. |
1430 | if (!PrevBB.empty()) { |
1431 | MachineBasicBlock::iterator PrevBBIter = PrevBB.end(); |
1432 | --PrevBBIter; |
1433 | MachineBasicBlock::iterator MBBIter = MBB->begin(); |
1434 | // Check if DBG_VALUE at the end of PrevBB is identical to the |
1435 | // DBG_VALUE at the beginning of MBB. |
1436 | while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end() |
1437 | && PrevBBIter->isDebugInstr() && MBBIter->isDebugInstr()) { |
1438 | if (!MBBIter->isIdenticalTo(Other: *PrevBBIter)) |
1439 | break; |
1440 | MachineInstr &DuplicateDbg = *MBBIter; |
++MBBIter; --PrevBBIter;
1442 | DuplicateDbg.eraseFromParent(); |
1443 | } |
1444 | } |
1445 | PrevBB.splice(Where: PrevBB.end(), Other: MBB, From: MBB->begin(), To: MBB->end()); |
1446 | PrevBB.removeSuccessor(I: PrevBB.succ_begin()); |
1447 | assert(PrevBB.succ_empty()); |
1448 | PrevBB.transferSuccessors(FromMBB: MBB); |
1449 | MadeChange = true; |
1450 | return MadeChange; |
1451 | } |
1452 | |
1453 | // If the previous branch *only* branches to *this* block (conditional or |
1454 | // not) remove the branch. |
1455 | if (PriorTBB == MBB && !PriorFBB) { |
1456 | TII->removeBranch(MBB&: PrevBB); |
1457 | MadeChange = true; |
1458 | ++NumBranchOpts; |
1459 | goto ReoptimizeBlock; |
1460 | } |
1461 | |
// If the prior block branches somewhere else when the condition is true and
// branches here when the condition is false, remove the unconditional second
// branch: this block is the layout fall-through, so the conditional branch
// alone is enough.
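// For example (illustrative only; block names are made up):
//   PrevBB: ... ; jcc OtherBB ; jmp MBB
//   MBB:    ...  (the layout successor of PrevBB)
// becomes
//   PrevBB: ... ; jcc OtherBB   (false path now falls through into MBB)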
1464 | if (PriorFBB == MBB) { |
1465 | DebugLoc dl = getBranchDebugLoc(MBB&: PrevBB); |
1466 | TII->removeBranch(MBB&: PrevBB); |
1467 | TII->insertBranch(MBB&: PrevBB, TBB: PriorTBB, FBB: nullptr, Cond: PriorCond, DL: dl); |
1468 | MadeChange = true; |
1469 | ++NumBranchOpts; |
1470 | goto ReoptimizeBlock; |
1471 | } |
1472 | |
1473 | // If the prior block branches here on true and somewhere else on false, and |
1474 | // if the branch condition is reversible, reverse the branch to create a |
1475 | // fall-through. |
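// For example (illustrative only; block names are made up):
//   PrevBB: ... ; jcc MBB ; jmp OtherBB
//   MBB:    ...  (the layout successor of PrevBB)
// becomes
//   PrevBB: ... ; jncc OtherBB  (true path now falls through into MBB)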
1476 | if (PriorTBB == MBB) { |
1477 | SmallVector<MachineOperand, 4> NewPriorCond(PriorCond); |
1478 | if (!TII->reverseBranchCondition(Cond&: NewPriorCond)) { |
1479 | DebugLoc dl = getBranchDebugLoc(MBB&: PrevBB); |
1480 | TII->removeBranch(MBB&: PrevBB); |
1481 | TII->insertBranch(MBB&: PrevBB, TBB: PriorFBB, FBB: nullptr, Cond: NewPriorCond, DL: dl); |
1482 | MadeChange = true; |
1483 | ++NumBranchOpts; |
1484 | goto ReoptimizeBlock; |
1485 | } |
1486 | } |
1487 | |
1488 | // If this block has no successors (e.g. it is a return block or ends with |
1489 | // a call to a no-return function like abort or __cxa_throw) and if the pred |
1490 | // falls through into this block, and if it would otherwise fall through |
1491 | // into the block after this, move this block to the end of the function. |
1492 | // |
// We consider it more likely that execution will stay in the function (e.g.
// due to loops) than that it will exit it. This helps with code such as
// asserts inside loops by moving the cold assert path out of the loop body.
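// For example (illustrative only; block names are made up):
//   PrevBB:   ... ; jcc Next    (else fall through into AssertBB)
//   AssertBB: call abort        (no successors)
//   Next:     ...
// becomes
//   PrevBB:   ... ; jncc AssertBB  (else fall through into Next)
//   Next:     ...
//   ...
//   AssertBB: call abort        (now at the end of the function)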
1496 | if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB && |
1497 | MachineFunction::iterator(PriorTBB) == FallThrough && |
1498 | !MBB->canFallThrough()) { |
1499 | bool DoTransform = true; |
1500 | |
// We have to be careful that the successors of PrevBB aren't both no-successor
// blocks. If neither has successors and PrevBB is the second-to-last block in
// the function, we'd just keep swapping the last two blocks forever. Only do
// the swap if one is clearly a better fall-through than the other.
1506 | if (FallThrough == --MF.end() && |
1507 | !IsBetterFallthrough(MBB1: PriorTBB, MBB2: MBB)) |
1508 | DoTransform = false; |
1509 | |
1510 | if (DoTransform) { |
1511 | // Reverse the branch so we will fall through on the previous true cond. |
1512 | SmallVector<MachineOperand, 4> NewPriorCond(PriorCond); |
1513 | if (!TII->reverseBranchCondition(Cond&: NewPriorCond)) { |
1514 | LLVM_DEBUG(dbgs() << "\nMoving MBB: " << *MBB |
1515 | << "To make fallthrough to: " << *PriorTBB << "\n" ); |
1516 | |
1517 | DebugLoc dl = getBranchDebugLoc(MBB&: PrevBB); |
1518 | TII->removeBranch(MBB&: PrevBB); |
1519 | TII->insertBranch(MBB&: PrevBB, TBB: MBB, FBB: nullptr, Cond: NewPriorCond, DL: dl); |
1520 | |
1521 | // Move this block to the end of the function. |
1522 | MBB->moveAfter(NewBefore: &MF.back()); |
1523 | MadeChange = true; |
1524 | ++NumBranchOpts; |
1525 | return MadeChange; |
1526 | } |
1527 | } |
1528 | } |
1529 | } |
1530 | |
1531 | if (!IsEmptyBlock(MBB)) { |
1532 | MachineInstr &TailCall = *MBB->getFirstNonDebugInstr(); |
1533 | if (TII->isUnconditionalTailCall(MI: TailCall)) { |
1534 | SmallVector<MachineBasicBlock *> PredsChanged; |
1535 | for (auto &Pred : MBB->predecessors()) { |
1536 | MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr; |
1537 | SmallVector<MachineOperand, 4> PredCond; |
1538 | bool PredAnalyzable = |
1539 | !TII->analyzeBranch(MBB&: *Pred, TBB&: PredTBB, FBB&: PredFBB, Cond&: PredCond, AllowModify: true); |
1540 | |
1541 | // Only eliminate if MBB == TBB (Taken Basic Block) |
1542 | if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB && |
1543 | PredTBB != PredFBB) { |
1544 | // The predecessor has a conditional branch to this block which |
1545 | // consists of only a tail call. Try to fold the tail call into the |
1546 | // conditional branch. |
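// For example (illustrative only; names are made up):
//   Pred: ... ; jcc MBB ; ...
//   MBB:  tail call @callee    (nothing else)
// becomes
//   Pred: ... ; conditional tail call @callee ; ...
// after which MBB is no longer a successor of Pred.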
1547 | if (TII->canMakeTailCallConditional(Cond&: PredCond, TailCall)) { |
1548 | // TODO: It would be nice if analyzeBranch() could provide a pointer |
1549 | // to the branch instruction so replaceBranchWithTailCall() doesn't |
1550 | // have to search for it. |
1551 | TII->replaceBranchWithTailCall(MBB&: *Pred, Cond&: PredCond, TailCall); |
1552 | PredsChanged.push_back(Elt: Pred); |
1553 | } |
1554 | } |
1555 | // If the predecessor is falling through to this block, we could reverse |
1556 | // the branch condition and fold the tail call into that. However, after |
1557 | // that we might have to re-arrange the CFG to fall through to the other |
1558 | // block and there is a high risk of regressing code size rather than |
1559 | // improving it. |
1560 | } |
1561 | if (!PredsChanged.empty()) { |
1562 | NumTailCalls += PredsChanged.size(); |
1563 | for (auto &Pred : PredsChanged) |
1564 | Pred->removeSuccessor(Succ: MBB); |
1565 | |
1566 | return true; |
1567 | } |
1568 | } |
1569 | } |
1570 | |
1571 | if (!CurUnAnalyzable) { |
1572 | // If this is a two-way branch, and the FBB branches to this block, reverse |
1573 | // the condition so the single-basic-block loop is faster. Instead of: |
1574 | // Loop: xxx; jcc Out; jmp Loop |
1575 | // we want: |
1576 | // Loop: xxx; jncc Loop; jmp Out |
1577 | if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) { |
1578 | SmallVector<MachineOperand, 4> NewCond(CurCond); |
1579 | if (!TII->reverseBranchCondition(Cond&: NewCond)) { |
1580 | DebugLoc dl = getBranchDebugLoc(MBB&: *MBB); |
1581 | TII->removeBranch(MBB&: *MBB); |
1582 | TII->insertBranch(MBB&: *MBB, TBB: CurFBB, FBB: CurTBB, Cond: NewCond, DL: dl); |
1583 | MadeChange = true; |
1584 | ++NumBranchOpts; |
1585 | goto ReoptimizeBlock; |
1586 | } |
1587 | } |
1588 | |
1589 | // If this branch is the only thing in its block, see if we can forward |
1590 | // other blocks across it. |
1591 | if (CurTBB && CurCond.empty() && !CurFBB && |
1592 | IsBranchOnlyBlock(MBB) && CurTBB != MBB && |
1593 | !MBB->hasAddressTaken() && !MBB->isEHPad()) { |
1594 | DebugLoc dl = getBranchDebugLoc(MBB&: *MBB); |
1595 | // This block may contain just an unconditional branch. Because there can |
1596 | // be 'non-branch terminators' in the block, try removing the branch and |
1597 | // then seeing if the block is empty. |
1598 | TII->removeBranch(MBB&: *MBB); |
1599 | // If the only things remaining in the block are debug info, remove these |
1600 | // as well, so this will behave the same as an empty block in non-debug |
1601 | // mode. |
1602 | if (IsEmptyBlock(MBB)) { |
1603 | // Make the block empty, losing the debug info (we could probably |
1604 | // improve this in some cases.) |
1605 | MBB->erase(I: MBB->begin(), E: MBB->end()); |
1606 | } |
// If this block is just an unconditional branch to CurTBB, we can
// usually completely eliminate the block. The only case in which we cannot
// completely eliminate the block is when the block before this one falls
// through into MBB and we can't analyze the prior block's branch
// condition.
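// For example (illustrative only; block names are made up):
//   Pred1: ... ; jcc MBB ; ...
//   Pred2: ... ; jmp MBB
//   MBB:   jmp CurTBB          (nothing else)
// becomes
//   Pred1: ... ; jcc CurTBB ; ...
//   Pred2: ... ; jmp CurTBB
// leaving MBB dead unless it branches to itself.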
1612 | if (MBB->empty()) { |
1613 | bool PredHasNoFallThrough = !PrevBB.canFallThrough(); |
1614 | if (PredHasNoFallThrough || !PriorUnAnalyzable || |
1615 | !PrevBB.isSuccessor(MBB)) { |
1616 | // If the prior block falls through into us, turn it into an |
1617 | // explicit branch to us to make updates simpler. |
1618 | if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) && |
1619 | PriorTBB != MBB && PriorFBB != MBB) { |
1620 | if (!PriorTBB) { |
1621 | assert(PriorCond.empty() && !PriorFBB && |
1622 | "Bad branch analysis" ); |
1623 | PriorTBB = MBB; |
1624 | } else { |
1625 | assert(!PriorFBB && "Machine CFG out of date!" ); |
1626 | PriorFBB = MBB; |
1627 | } |
1628 | DebugLoc pdl = getBranchDebugLoc(MBB&: PrevBB); |
1629 | TII->removeBranch(MBB&: PrevBB); |
1630 | TII->insertBranch(MBB&: PrevBB, TBB: PriorTBB, FBB: PriorFBB, Cond: PriorCond, DL: pdl); |
1631 | } |
1632 | |
// Iterate through all the predecessors, revectoring each in turn.
1634 | size_t PI = 0; |
1635 | bool DidChange = false; |
1636 | bool HasBranchToSelf = false; |
1637 | while(PI != MBB->pred_size()) { |
1638 | MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI); |
1639 | if (PMBB == MBB) { |
1640 | // If this block has an uncond branch to itself, leave it. |
1641 | ++PI; |
1642 | HasBranchToSelf = true; |
1643 | } else { |
1644 | DidChange = true; |
1645 | PMBB->ReplaceUsesOfBlockWith(Old: MBB, New: CurTBB); |
// Add the remaining successors of MBB to the successors of CurTBB.
// Those successors are not targets of MBB's branches, so they must be
// landing pads.
1649 | for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; |
1650 | ++SI) |
1651 | if (*SI != CurTBB && !CurTBB->isSuccessor(MBB: *SI)) { |
1652 | assert((*SI)->isEHPad() && "Bad CFG" ); |
1653 | CurTBB->copySuccessor(Orig: MBB, I: SI); |
1654 | } |
1655 | // If this change resulted in PMBB ending in a conditional |
1656 | // branch where both conditions go to the same destination, |
1657 | // change this to an unconditional branch. |
1658 | MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr; |
1659 | SmallVector<MachineOperand, 4> NewCurCond; |
1660 | bool NewCurUnAnalyzable = TII->analyzeBranch( |
1661 | MBB&: *PMBB, TBB&: NewCurTBB, FBB&: NewCurFBB, Cond&: NewCurCond, AllowModify: true); |
1662 | if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) { |
1663 | DebugLoc pdl = getBranchDebugLoc(MBB&: *PMBB); |
1664 | TII->removeBranch(MBB&: *PMBB); |
1665 | NewCurCond.clear(); |
1666 | TII->insertBranch(MBB&: *PMBB, TBB: NewCurTBB, FBB: nullptr, Cond: NewCurCond, DL: pdl); |
1667 | MadeChange = true; |
1668 | ++NumBranchOpts; |
1669 | } |
1670 | } |
1671 | } |
1672 | |
// Change any jump tables that targeted MBB to go to CurTBB instead.
1674 | if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo()) |
1675 | MJTI->ReplaceMBBInJumpTables(Old: MBB, New: CurTBB); |
1676 | if (DidChange) { |
1677 | ++NumBranchOpts; |
1678 | MadeChange = true; |
1679 | if (!HasBranchToSelf) return MadeChange; |
1680 | } |
1681 | } |
1682 | } |
1683 | |
1684 | // Add the branch back if the block is more than just an uncond branch. |
1685 | TII->insertBranch(MBB&: *MBB, TBB: CurTBB, FBB: nullptr, Cond: CurCond, DL: dl); |
1686 | } |
1687 | } |
1688 | |
1689 | // If the prior block doesn't fall through into this block, and if this |
1690 | // block doesn't fall through into some other block, see if we can find a |
1691 | // place to move this block where a fall-through will happen. |
1692 | if (!PrevBB.canFallThrough()) { |
1693 | // Now we know that there was no fall-through into this block, check to |
1694 | // see if it has a fall-through into its successor. |
1695 | bool CurFallsThru = MBB->canFallThrough(); |
1696 | |
1697 | if (!MBB->isEHPad()) { |
// Check all the predecessors of this block. If one of them has no
// fall-through and analyzeBranch thinks it _could_ fall through to this
// block, move this block right after it.
1701 | for (MachineBasicBlock *PredBB : MBB->predecessors()) { |
1702 | // Analyze the branch at the end of the pred. |
1703 | MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr; |
1704 | SmallVector<MachineOperand, 4> PredCond; |
1705 | if (PredBB != MBB && !PredBB->canFallThrough() && |
1706 | !TII->analyzeBranch(MBB&: *PredBB, TBB&: PredTBB, FBB&: PredFBB, Cond&: PredCond, AllowModify: true) && |
1707 | (PredTBB == MBB || PredFBB == MBB) && |
1708 | (!CurFallsThru || !CurTBB || !CurFBB) && |
1709 | (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) { |
1710 | // If the current block doesn't fall through, just move it. |
1711 | // If the current block can fall through and does not end with a |
1712 | // conditional branch, we need to append an unconditional jump to |
1713 | // the (current) next block. To avoid a possible compile-time |
1714 | // infinite loop, move blocks only backward in this case. |
1715 | // Also, if there are already 2 branches here, we cannot add a third; |
1716 | // this means we have the case |
1717 | // Bcc next |
1718 | // B elsewhere |
1719 | // next: |
1720 | if (CurFallsThru) { |
1721 | MachineBasicBlock *NextBB = &*std::next(x: MBB->getIterator()); |
1722 | CurCond.clear(); |
1723 | TII->insertBranch(MBB&: *MBB, TBB: NextBB, FBB: nullptr, Cond: CurCond, DL: DebugLoc()); |
1724 | } |
1725 | MBB->moveAfter(NewBefore: PredBB); |
1726 | MadeChange = true; |
1727 | goto ReoptimizeBlock; |
1728 | } |
1729 | } |
1730 | } |
1731 | |
1732 | if (!CurFallsThru) { |
1733 | // Check analyzable branch-successors to see if we can move this block |
1734 | // before one. |
1735 | if (!CurUnAnalyzable) { |
1736 | for (MachineBasicBlock *SuccBB : {CurFBB, CurTBB}) { |
1737 | if (!SuccBB) |
1738 | continue; |
1739 | // Analyze the branch at the end of the block before the succ. |
1740 | MachineFunction::iterator SuccPrev = --SuccBB->getIterator(); |
1741 | |
// If this block doesn't already fall through to that successor, and
// if the succ doesn't already have a block that can fall through into
// it, we can arrange for the fallthrough to happen.
1745 | if (SuccBB != MBB && &*SuccPrev != MBB && |
1746 | !SuccPrev->canFallThrough()) { |
1747 | MBB->moveBefore(NewAfter: SuccBB); |
1748 | MadeChange = true; |
1749 | goto ReoptimizeBlock; |
1750 | } |
1751 | } |
1752 | } |
1753 | |
1754 | // Okay, there is no really great place to put this block. If, however, |
1755 | // the block before this one would be a fall-through if this block were |
1756 | // removed, move this block to the end of the function. There is no real |
1757 | // advantage in "falling through" to an EH block, so we don't want to |
1758 | // perform this transformation for that case. |
1759 | // |
1760 | // Also, Windows EH introduced the possibility of an arbitrary number of |
1761 | // successors to a given block. The analyzeBranch call does not consider |
1762 | // exception handling and so we can get in a state where a block |
1763 | // containing a call is followed by multiple EH blocks that would be |
1764 | // rotated infinitely at the end of the function if the transformation |
1765 | // below were performed for EH "FallThrough" blocks. Therefore, even if |
1766 | // that appears not to be happening anymore, we should assume that it is |
// possible and not remove the "!FallThrough->isEHPad()" condition below.
1768 | MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr; |
1769 | SmallVector<MachineOperand, 4> PrevCond; |
1770 | if (FallThrough != MF.end() && |
1771 | !FallThrough->isEHPad() && |
1772 | !TII->analyzeBranch(MBB&: PrevBB, TBB&: PrevTBB, FBB&: PrevFBB, Cond&: PrevCond, AllowModify: true) && |
1773 | PrevBB.isSuccessor(MBB: &*FallThrough)) { |
1774 | MBB->moveAfter(NewBefore: &MF.back()); |
1775 | MadeChange = true; |
1776 | return MadeChange; |
1777 | } |
1778 | } |
1779 | } |
1780 | |
1781 | return MadeChange; |
1782 | } |
1783 | |
1784 | //===----------------------------------------------------------------------===// |
1785 | // Hoist Common Code |
1786 | //===----------------------------------------------------------------------===// |
1787 | |
1788 | bool BranchFolder::HoistCommonCode(MachineFunction &MF) { |
1789 | bool MadeChange = false; |
1790 | for (MachineBasicBlock &MBB : llvm::make_early_inc_range(Range&: MF)) |
1791 | MadeChange |= HoistCommonCodeInSuccs(MBB: &MBB); |
1792 | |
1793 | return MadeChange; |
1794 | } |
1795 | |
1796 | /// findFalseBlock - BB has a fallthrough. Find its 'false' successor given |
1797 | /// its 'true' successor. |
1798 | static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB, |
1799 | MachineBasicBlock *TrueBB) { |
1800 | for (MachineBasicBlock *SuccBB : BB->successors()) |
1801 | if (SuccBB != TrueBB) |
1802 | return SuccBB; |
1803 | return nullptr; |
1804 | } |
1805 | |
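/// addRegAndItsAliases - Insert Reg into Set. For physical registers this also
/// inserts all of Reg's aliases (the alias iterator includes Reg itself).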
1806 | template <class Container> |
1807 | static void addRegAndItsAliases(Register Reg, const TargetRegisterInfo *TRI, |
1808 | Container &Set) { |
1809 | if (Reg.isPhysical()) { |
1810 | for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) |
1811 | Set.insert(*AI); |
1812 | } else { |
1813 | Set.insert(Reg); |
1814 | } |
1815 | } |
1816 | |
1817 | /// findHoistingInsertPosAndDeps - Find the location to move common instructions |
1818 | /// in successors to. The location is usually just before the terminator, |
1819 | /// however if the terminator is a conditional branch and its previous |
1820 | /// instruction is the flag setting instruction, the previous instruction is |
1821 | /// the preferred location. This function also gathers uses and defs of the |
1822 | /// instructions from the insertion point to the end of the block. The data is |
1823 | /// used by HoistCommonCodeInSuccs to ensure safety. |
1824 | static |
1825 | MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB, |
1826 | const TargetInstrInfo *TII, |
1827 | const TargetRegisterInfo *TRI, |
1828 | SmallSet<Register, 4> &Uses, |
1829 | SmallSet<Register, 4> &Defs) { |
1830 | MachineBasicBlock::iterator Loc = MBB->getFirstTerminator(); |
1831 | if (!TII->isUnpredicatedTerminator(MI: *Loc)) |
1832 | return MBB->end(); |
1833 | |
1834 | for (const MachineOperand &MO : Loc->operands()) { |
1835 | if (!MO.isReg()) |
1836 | continue; |
1837 | Register Reg = MO.getReg(); |
1838 | if (!Reg) |
1839 | continue; |
1840 | if (MO.isUse()) { |
1841 | addRegAndItsAliases(Reg, TRI, Set&: Uses); |
1842 | } else { |
1843 | if (!MO.isDead()) |
1844 | // Don't try to hoist code in the rare case the terminator defines a |
1845 | // register that is later used. |
1846 | return MBB->end(); |
1847 | |
1848 | // If the terminator defines a register, make sure we don't hoist |
1849 | // the instruction whose def might be clobbered by the terminator. |
1850 | addRegAndItsAliases(Reg, TRI, Set&: Defs); |
1851 | } |
1852 | } |
1853 | |
1854 | if (Uses.empty()) |
1855 | return Loc; |
1856 | // If the terminator is the only instruction in the block and Uses is not |
1857 | // empty (or we would have returned above), we can still safely hoist |
1858 | // instructions just before the terminator as long as the Defs/Uses are not |
1859 | // violated (which is checked in HoistCommonCodeInSuccs). |
1860 | if (Loc == MBB->begin()) |
1861 | return Loc; |
1862 | |
// The terminator is probably a conditional branch; try not to separate the
// branch from the condition-setting instruction.
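// For example (illustrative only; register/block names are made up):
//   cmp r1, r2     ; sets the flags
//   jcc OutBB      ; reads the flags
// Inserting hoisted code between the cmp and the jcc could clobber the
// flags, so the preferred insertion point is just before the cmp.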
1865 | MachineBasicBlock::iterator PI = prev_nodbg(It: Loc, Begin: MBB->begin()); |
1866 | |
1867 | bool IsDef = false; |
1868 | for (const MachineOperand &MO : PI->operands()) { |
// If PI has a regmask operand, it is probably a call; it is fine to
// separate it from the branch, so insert just before the branch.
1870 | if (MO.isRegMask()) |
1871 | return Loc; |
1872 | if (!MO.isReg() || MO.isUse()) |
1873 | continue; |
1874 | Register Reg = MO.getReg(); |
1875 | if (!Reg) |
1876 | continue; |
1877 | if (Uses.count(V: Reg)) { |
1878 | IsDef = true; |
1879 | break; |
1880 | } |
1881 | } |
1882 | if (!IsDef) |
1883 | // The condition setting instruction is not just before the conditional |
1884 | // branch. |
1885 | return Loc; |
1886 | |
// Be conservative: don't insert instructions above something that may have
// side effects. And since it's potentially bad to separate the flag-setting
// instruction from the conditional branch, just abort the optimization
// completely.
// Also avoid moving code above a predicated instruction, since it's hard to
// reason about register liveness with predicated instructions.
1893 | bool DontMoveAcrossStore = true; |
1894 | if (!PI->isSafeToMove(AA: nullptr, SawStore&: DontMoveAcrossStore) || TII->isPredicated(MI: *PI)) |
1895 | return MBB->end(); |
1896 | |
1897 | // Find out what registers are live. Note this routine is ignoring other live |
1898 | // registers which are only used by instructions in successor blocks. |
1899 | for (const MachineOperand &MO : PI->operands()) { |
1900 | if (!MO.isReg()) |
1901 | continue; |
1902 | Register Reg = MO.getReg(); |
1903 | if (!Reg) |
1904 | continue; |
1905 | if (MO.isUse()) { |
1906 | addRegAndItsAliases(Reg, TRI, Set&: Uses); |
1907 | } else { |
1908 | if (Uses.erase(V: Reg)) { |
1909 | if (Reg.isPhysical()) { |
1910 | for (MCPhysReg SubReg : TRI->subregs(Reg)) |
1911 | Uses.erase(V: SubReg); // Use sub-registers to be conservative |
1912 | } |
1913 | } |
1914 | addRegAndItsAliases(Reg, TRI, Set&: Defs); |
1915 | } |
1916 | } |
1917 | |
1918 | return PI; |
1919 | } |
1920 | |
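/// HoistCommonCodeInSuccs - If MBB ends in a conditional branch with successors
/// TBB and FBB, hoist instructions that are identical at the start of both TBB
/// and FBB up into MBB, e.g. (illustrative only):
///   MBB: ... ; jcc TBB    (else fall through to FBB)
///   TBB: r1 = op r2, r3 ; ...
///   FBB: r1 = op r2, r3 ; ...
/// becomes
///   MBB: ... ; r1 = op r2, r3 ; jcc TBB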
1921 | bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) { |
1922 | MachineBasicBlock *TBB = nullptr, *FBB = nullptr; |
1923 | SmallVector<MachineOperand, 4> Cond; |
1924 | if (TII->analyzeBranch(MBB&: *MBB, TBB, FBB, Cond, AllowModify: true) || !TBB || Cond.empty()) |
1925 | return false; |
1926 | |
1927 | if (!FBB) FBB = findFalseBlock(BB: MBB, TrueBB: TBB); |
1928 | if (!FBB) |
1929 | // Malformed bcc? True and false blocks are the same? |
1930 | return false; |
1931 | |
// Restrict the optimization to cases where MBB is the only predecessor of
// both successors; there it is an obvious win.
1934 | if (TBB->pred_size() > 1 || FBB->pred_size() > 1) |
1935 | return false; |
1936 | |
1937 | // Find a suitable position to hoist the common instructions to. Also figure |
1938 | // out which registers are used or defined by instructions from the insertion |
1939 | // point to the end of the block. |
1940 | SmallSet<Register, 4> Uses, Defs; |
1941 | MachineBasicBlock::iterator Loc = |
1942 | findHoistingInsertPosAndDeps(MBB, TII, TRI, Uses, Defs); |
1943 | if (Loc == MBB->end()) |
1944 | return false; |
1945 | |
1946 | bool HasDups = false; |
1947 | SmallSet<Register, 4> ActiveDefsSet, AllDefsSet; |
1948 | MachineBasicBlock::iterator TIB = TBB->begin(); |
1949 | MachineBasicBlock::iterator FIB = FBB->begin(); |
1950 | MachineBasicBlock::iterator TIE = TBB->end(); |
1951 | MachineBasicBlock::iterator FIE = FBB->end(); |
1952 | while (TIB != TIE && FIB != FIE) { |
1953 | // Skip dbg_value instructions. These do not count. |
1954 | TIB = skipDebugInstructionsForward(It: TIB, End: TIE, SkipPseudoOp: false); |
1955 | FIB = skipDebugInstructionsForward(It: FIB, End: FIE, SkipPseudoOp: false); |
1956 | if (TIB == TIE || FIB == FIE) |
1957 | break; |
1958 | |
1959 | if (!TIB->isIdenticalTo(Other: *FIB, Check: MachineInstr::CheckKillDead)) |
1960 | break; |
1961 | |
1962 | if (TII->isPredicated(MI: *TIB)) |
1963 | // Hard to reason about register liveness with predicated instruction. |
1964 | break; |
1965 | |
1966 | bool IsSafe = true; |
1967 | for (MachineOperand &MO : TIB->operands()) { |
1968 | // Don't attempt to hoist instructions with register masks. |
1969 | if (MO.isRegMask()) { |
1970 | IsSafe = false; |
1971 | break; |
1972 | } |
1973 | if (!MO.isReg()) |
1974 | continue; |
1975 | Register Reg = MO.getReg(); |
1976 | if (!Reg) |
1977 | continue; |
1978 | if (MO.isDef()) { |
1979 | if (Uses.count(V: Reg)) { |
1980 | // Avoid clobbering a register that's used by the instruction at |
1981 | // the point of insertion. |
1982 | IsSafe = false; |
1983 | break; |
1984 | } |
1985 | |
1986 | if (Defs.count(V: Reg) && !MO.isDead()) { |
1987 | // Don't hoist the instruction if the def would be clobber by the |
1988 | // instruction at the point insertion. FIXME: This is overly |
1989 | // conservative. It should be possible to hoist the instructions |
1990 | // in BB2 in the following example: |
1991 | // BB1: |
1992 | // r1, eflag = op1 r2, r3 |
1993 | // brcc eflag |
1994 | // |
1995 | // BB2: |
1996 | // r1 = op2, ... |
1997 | // = op3, killed r1 |
1998 | IsSafe = false; |
1999 | break; |
2000 | } |
2001 | } else if (!ActiveDefsSet.count(V: Reg)) { |
2002 | if (Defs.count(V: Reg)) { |
2003 | // Use is defined by the instruction at the point of insertion. |
2004 | IsSafe = false; |
2005 | break; |
2006 | } |
2007 | |
2008 | if (MO.isKill() && Uses.count(V: Reg)) |
2009 | // Kills a register that's read by the instruction at the point of |
2010 | // insertion. Remove the kill marker. |
2011 | MO.setIsKill(false); |
2012 | } |
2013 | } |
2014 | if (!IsSafe) |
2015 | break; |
2016 | |
2017 | bool DontMoveAcrossStore = true; |
2018 | if (!TIB->isSafeToMove(AA: nullptr, SawStore&: DontMoveAcrossStore)) |
2019 | break; |
2020 | |
// Remove kills from ActiveDefsSet; these registers had short live ranges.
2022 | for (const MachineOperand &MO : TIB->all_uses()) { |
2023 | if (!MO.isKill()) |
2024 | continue; |
2025 | Register Reg = MO.getReg(); |
2026 | if (!Reg) |
2027 | continue; |
2028 | if (!AllDefsSet.count(V: Reg)) { |
2029 | continue; |
2030 | } |
2031 | if (Reg.isPhysical()) { |
2032 | for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) |
2033 | ActiveDefsSet.erase(V: *AI); |
2034 | } else { |
2035 | ActiveDefsSet.erase(V: Reg); |
2036 | } |
2037 | } |
2038 | |
2039 | // Track local defs so we can update liveins. |
2040 | for (const MachineOperand &MO : TIB->all_defs()) { |
2041 | if (MO.isDead()) |
2042 | continue; |
2043 | Register Reg = MO.getReg(); |
2044 | if (!Reg || Reg.isVirtual()) |
2045 | continue; |
2046 | addRegAndItsAliases(Reg, TRI, Set&: ActiveDefsSet); |
2047 | addRegAndItsAliases(Reg, TRI, Set&: AllDefsSet); |
2048 | } |
2049 | |
2050 | HasDups = true; |
2051 | ++TIB; |
2052 | ++FIB; |
2053 | } |
2054 | |
2055 | if (!HasDups) |
2056 | return false; |
2057 | |
2058 | MBB->splice(Where: Loc, Other: TBB, From: TBB->begin(), To: TIB); |
2059 | FBB->erase(I: FBB->begin(), E: FIB); |
2060 | |
2061 | if (UpdateLiveIns) |
2062 | fullyRecomputeLiveIns(MBBs: {TBB, FBB}); |
2063 | |
2064 | ++NumHoist; |
2065 | return true; |
2066 | } |
2067 | |