//===- ShrinkWrap.cpp - Compute safe point for prolog/epilog insertion ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks for safe points where the prologue and epilogue can be
// inserted.
// The safe point for the prologue (resp. epilogue) is called Save
// (resp. Restore).
// A point is safe for the prologue (resp. epilogue) if and only if
// it 1) dominates (resp. post-dominates) all the frame related operations and
// 2) between two executions of the Save (resp. Restore) point there is an
// execution of the Restore (resp. Save) point.
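//
// Property 2) matters at run time: if the Save point could execute twice with
// no intervening Restore, the second prologue would spill the callee-saved
// registers again after the function may already have clobbered them,
// overwriting the values that belong to the caller.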
//
// For instance, the following points are safe:
// for (int i = 0; i < 10; ++i) {
// Save
// ...
// Restore
// }
// Indeed, the execution looks like Save -> Restore -> Save -> Restore ...
// And the following points are not:
// for (int i = 0; i < 10; ++i) {
// Save
// ...
// }
// for (int i = 0; i < 10; ++i) {
// ...
// Restore
// }
// Indeed, the execution looks like Save -> Save -> ... -> Restore -> Restore.
//
// This pass also ensures that the safe points are 3) cheaper than the regular
// entry and exit blocks.
//
// Property #1 is ensured via the use of MachineDominatorTree and
// MachinePostDominatorTree.
// Property #2 is ensured via property #1 and MachineLoopInfo, i.e., both
// points must be in the same loop.
// Property #3 is ensured via the MachineBlockFrequencyInfo.
//
// If this pass finds points matching all these properties, then
// MachineFrameInfo is updated with this information.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ShrinkWrap.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <memory>

using namespace llvm;

#define DEBUG_TYPE "shrink-wrap"

STATISTIC(NumFunc, "Number of functions");
STATISTIC(NumCandidates, "Number of shrink-wrapping candidates");
STATISTIC(NumCandidatesDropped,
          "Number of shrink-wrapping candidates dropped because of frequency");

static cl::opt<cl::boolOrDefault>
    EnableShrinkWrapOpt("enable-shrink-wrap", cl::Hidden,
                        cl::desc("enable the shrink-wrapping pass"));
static cl::opt<bool> EnablePostShrinkWrapOpt(
    "enable-shrink-wrap-region-split", cl::init(true), cl::Hidden,
    cl::desc("enable splitting of the restore block if possible"));
namespace {

/// Class to determine where the safe points to insert the
/// prologue and epilogue are.
/// Unlike the paper from Fred C. Chow, PLDI'88, that introduces the
/// shrink-wrapping term for prologue/epilogue placement, this pass
/// does not rely on expensive data-flow analysis. Instead we use the
/// dominance properties and loop information to decide which points
/// are safe for such insertion.
class ShrinkWrapImpl {
  /// Hold callee-saved information.
  RegisterClassInfo RCI;
  MachineDominatorTree *MDT = nullptr;
  MachinePostDominatorTree *MPDT = nullptr;

  /// Current safe point found for the prologue.
  /// The prologue will be inserted before the first instruction
  /// in this basic block.
  MachineBasicBlock *Save = nullptr;

  /// Current safe point found for the epilogue.
  /// The epilogue will be inserted before the first terminator instruction
  /// in this basic block.
  MachineBasicBlock *Restore = nullptr;

  /// Hold the information of the basic block frequency.
  /// Used to check the profitability of the new points.
  MachineBlockFrequencyInfo *MBFI = nullptr;

  /// Hold the loop information. Used to determine if Save and Restore
  /// are in the same loop.
  MachineLoopInfo *MLI = nullptr;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  /// Frequency of the Entry block.
  BlockFrequency EntryFreq;

  /// Current opcode for frame setup.
  unsigned FrameSetupOpcode = ~0u;

  /// Current opcode for frame destroy.
  unsigned FrameDestroyOpcode = ~0u;

  /// Stack pointer register, used by llvm.{stacksave,stackrestore}.
  Register SP;

  /// Entry block.
  const MachineBasicBlock *Entry = nullptr;

  using SetOfRegs = SmallSetVector<unsigned, 16>;

  /// Registers that need to be saved for the current function.
  mutable SetOfRegs CurrentCSRs;

  /// Current MachineFunction.
  MachineFunction *MachineFunc = nullptr;

  /// Is `true` for the block numbers where we assume possible stack accesses
  /// or computation of stack-relative addresses on any CFG path including the
  /// block itself. Is `false` for basic blocks where we can guarantee the
  /// opposite. False positives won't lead to incorrect analysis results,
  /// therefore this approach is fair.
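  /// For example, a block that merely materializes the address of a stack
  /// slot into a register taints every block reachable from it, because a
  /// later load or store through that pointer accesses the stack without
  /// mentioning any frame index.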
  BitVector StackAddressUsedBlockInfo;

  /// Check if \p MI uses or defines a callee-saved register or
  /// a frame index. If this is the case, this means \p MI must happen
  /// after Save and before Restore.
  bool useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS,
                       bool StackAddressUsed) const;

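  /// Return the set of callee-saved registers for the current function,
  /// computing it on first use and caching it afterwards.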
  const SetOfRegs &getCurrentCSRs(RegScavenger *RS) const {
    if (CurrentCSRs.empty()) {
      BitVector SavedRegs;
      const TargetFrameLowering *TFI =
          MachineFunc->getSubtarget().getFrameLowering();

      TFI->determineCalleeSaves(*MachineFunc, SavedRegs, RS);

      for (int Reg = SavedRegs.find_first(); Reg != -1;
           Reg = SavedRegs.find_next(Reg))
        CurrentCSRs.insert((unsigned)Reg);
    }
    return CurrentCSRs;
  }

  /// Update the Save and Restore points such that \p MBB is in
  /// the region that is dominated by Save and post-dominated by Restore
  /// and Save and Restore still match the safe point definition.
  /// Such points may not exist and Save and/or Restore may be null after
  /// this call.
  void updateSaveRestorePoints(MachineBasicBlock &MBB, RegScavenger *RS);

  // Try to find safe points based on dominance and block frequency without
  // any change in IR.
  bool performShrinkWrapping(
      const ReversePostOrderTraversal<MachineBasicBlock *> &RPOT,
      RegScavenger *RS);

  /// This function tries to split the restore point if doing so can shrink the
  /// save point further. \return True if the restore point is split.
  bool postShrinkWrapping(bool HasCandidate, MachineFunction &MF,
                          RegScavenger *RS);

  /// This function analyzes if the restore point can be split to create a new
  /// restore point. This function collects
  /// 1. Any preds of the current restore that are reachable by callee save/FI
  /// blocks
  /// - indicated by DirtyPreds
  /// 2. Any preds of the current restore that are not DirtyPreds - indicated
  /// by CleanPreds
  /// Both sets should be non-empty for considering a restore point split.
  bool checkIfRestoreSplittable(
      const MachineBasicBlock *CurRestore,
      const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
      SmallVectorImpl<MachineBasicBlock *> &DirtyPreds,
      SmallVectorImpl<MachineBasicBlock *> &CleanPreds,
      const TargetInstrInfo *TII, RegScavenger *RS);

  /// Initialize the pass for \p MF.
  void init(MachineFunction &MF) {
    RCI.runOnMachineFunction(MF);
    Save = nullptr;
    Restore = nullptr;
    EntryFreq = MBFI->getEntryFreq();
    const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
    FrameSetupOpcode = TII.getCallFrameSetupOpcode();
    FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
    SP = Subtarget.getTargetLowering()->getStackPointerRegisterToSaveRestore();
    Entry = &MF.front();
    CurrentCSRs.clear();
    MachineFunc = &MF;

    ++NumFunc;
  }

  /// Check whether or not Save and Restore points are still interesting for
  /// shrink-wrapping, i.e., whether shrink-wrapping would actually move the
  /// prologue away from the entry block.
  bool ArePointsInteresting() const { return Save != Entry && Save && Restore; }

public:
  ShrinkWrapImpl(MachineDominatorTree *MDT, MachinePostDominatorTree *MPDT,
                 MachineBlockFrequencyInfo *MBFI, MachineLoopInfo *MLI,
                 MachineOptimizationRemarkEmitter *ORE)
      : MDT(MDT), MPDT(MPDT), MBFI(MBFI), MLI(MLI), ORE(ORE) {}

  /// Check if shrink wrapping is enabled for this target and function.
  static bool isShrinkWrapEnabled(const MachineFunction &MF);

  bool run(MachineFunction &MF);
};

class ShrinkWrapLegacy : public MachineFunctionPass {
public:
  static char ID;

  ShrinkWrapLegacy() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    AU.addRequired<MachineLoopInfoWrapperPass>();
    AU.addRequired<MachineOptimizationRemarkEmitterPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setNoVRegs();
  }

  StringRef getPassName() const override { return "Shrink Wrapping analysis"; }

  /// Perform the shrink-wrapping analysis and update
  /// the MachineFrameInfo attached to \p MF with the results.
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end anonymous namespace

char ShrinkWrapLegacy::ID = 0;

char &llvm::ShrinkWrapID = ShrinkWrapLegacy::ID;

INITIALIZE_PASS_BEGIN(ShrinkWrapLegacy, DEBUG_TYPE, "Shrink Wrap Pass", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(ShrinkWrapLegacy, DEBUG_TYPE, "Shrink Wrap Pass", false,
                    false)

bool ShrinkWrapImpl::useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS,
                                     bool StackAddressUsed) const {
  /// Check if \p Op is known to access an address not on the function's stack.
  /// At the moment, accesses where the underlying object is a global, function
  /// argument, or jump table are considered non-stack accesses. Note that the
  /// caller's stack may get accessed when passing an argument via the stack,
  /// but not the stack of the current function.
  ///
  auto IsKnownNonStackPtr = [](MachineMemOperand *Op) {
    if (Op->getValue()) {
      const Value *UO = getUnderlyingObject(Op->getValue());
      if (!UO)
        return false;
      if (auto *Arg = dyn_cast<Argument>(UO))
        return !Arg->hasPassPointeeByValueCopyAttr();
      return isa<GlobalValue>(UO);
    }
    if (const PseudoSourceValue *PSV = Op->getPseudoValue())
      return PSV->isJumpTable() || PSV->isConstantPool();
    return false;
  };
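  // Note that other pseudo source values (e.g. a fixed stack slot or a spill
  // slot) fall through to the final `return false` above and are thus
  // conservatively treated as potential stack accesses.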
  // Load/store operations may access the stack indirectly when we previously
  // computed an address to a stack location.
  if (StackAddressUsed && MI.mayLoadOrStore() &&
      (MI.isCall() || MI.hasUnmodeledSideEffects() || MI.memoperands_empty() ||
       !all_of(MI.memoperands(), IsKnownNonStackPtr)))
    return true;

  if (MI.getOpcode() == FrameSetupOpcode ||
      MI.getOpcode() == FrameDestroyOpcode) {
    LLVM_DEBUG(dbgs() << "Frame instruction: " << MI << '\n');
    return true;
  }
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  for (const MachineOperand &MO : MI.operands()) {
    bool UseOrDefCSR = false;
    if (MO.isReg()) {
      // Ignore instructions like DBG_VALUE which don't read/def the register.
      if (!MO.isDef() && !MO.readsReg())
        continue;
      Register PhysReg = MO.getReg();
      if (!PhysReg)
        continue;
      assert(PhysReg.isPhysical() && "Unallocated register?!");
      // The stack pointer is not normally described as a callee-saved register
      // in calling convention definitions, so we need to watch for it
      // separately. We can ignore an SP mentioned by a call instruction,
      // though, as it's harmless and we do not want to effectively disable
      // tail calls by forcing the restore point to post-dominate them.
      // PPC's LR is also not normally described as a callee-saved register in
      // calling convention definitions, so we need to watch for it, too. An LR
      // mentioned implicitly by a return (or "branch to link register")
      // instruction we can ignore, otherwise we may pessimize shrink-wrapping.
      // PPC's frame pointer (FP) is also not described as a callee-saved
      // register. Until the FP is assigned a physical register, PPC's FP needs
      // to be checked separately.
      UseOrDefCSR = (!MI.isCall() && PhysReg == SP) ||
                    RCI.getLastCalleeSavedAlias(PhysReg) ||
                    (!MI.isReturn() &&
                     TRI->isNonallocatableRegisterCalleeSave(PhysReg)) ||
                    TRI->isVirtualFrameRegister(PhysReg);
    } else if (MO.isRegMask()) {
      // Check if this regmask clobbers any of the CSRs.
      for (unsigned Reg : getCurrentCSRs(RS)) {
        if (MO.clobbersPhysReg(Reg)) {
          UseOrDefCSR = true;
          break;
        }
      }
    }
    // Skip FrameIndex operands in DBG_VALUE instructions.
    if (UseOrDefCSR || (MO.isFI() && !MI.isDebugValue())) {
      LLVM_DEBUG(dbgs() << "Use or define CSR(" << UseOrDefCSR << ") or FI("
                        << MO.isFI() << "): " << MI << '\n');
      return true;
    }
  }
  return false;
}

/// Helper function to find the immediate (post) dominator.
template <typename ListOfBBs, typename DominanceAnalysis>
static MachineBasicBlock *FindIDom(MachineBasicBlock &Block, ListOfBBs BBs,
                                   DominanceAnalysis &Dom, bool Strict = true) {
  MachineBasicBlock *IDom = Dom.findNearestCommonDominator(iterator_range(BBs));
  if (Strict && IDom == &Block)
    return nullptr;
  return IDom;
}
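
// For example, FindIDom<>(*Restore, Restore->successors(), *MPDT) steps to the
// immediate post-dominator of Restore; with Strict set, it returns nullptr
// rather than yielding Restore itself when no strict post-dominator exists.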

static bool isAnalyzableBB(const TargetInstrInfo &TII,
                           MachineBasicBlock &Entry) {
  // Check if the block is analyzable.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  return !TII.analyzeBranch(Entry, TBB, FBB, Cond);
}

/// Determines if any predecessor of MBB is on the path from a block that has a
/// use or def of CSRs/FI to MBB.
/// ReachableByDirty: All blocks reachable from a block that has a use or def
/// of CSRs/FI.
static bool
hasDirtyPred(const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
             const MachineBasicBlock &MBB) {
  for (const MachineBasicBlock *PredBB : MBB.predecessors())
    if (ReachableByDirty.count(PredBB))
      return true;
  return false;
}

/// Derives the list of all the basic blocks reachable from MBB.
static void markAllReachable(DenseSet<const MachineBasicBlock *> &Visited,
                             const MachineBasicBlock &MBB) {
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB.successors());
  Visited.insert(&MBB);
  while (!Worklist.empty()) {
    MachineBasicBlock *SuccMBB = Worklist.pop_back_val();
    if (!Visited.insert(SuccMBB).second)
      continue;
    Worklist.append(SuccMBB->succ_begin(), SuccMBB->succ_end());
  }
}

/// Collect blocks reachable by use or def of CSRs/FI.
static void collectBlocksReachableByDirty(
    const DenseSet<const MachineBasicBlock *> &DirtyBBs,
    DenseSet<const MachineBasicBlock *> &ReachableByDirty) {
  for (const MachineBasicBlock *MBB : DirtyBBs) {
    if (ReachableByDirty.count(MBB))
      continue;
    // Mark all descendants as reachable.
    markAllReachable(ReachableByDirty, *MBB);
  }
}

/// \return true if there is a clean path from SavePoint to the original
/// Restore.
static bool
isSaveReachableThroughClean(const MachineBasicBlock *SavePoint,
                            ArrayRef<MachineBasicBlock *> CleanPreds) {
  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(CleanPreds);
  while (!Worklist.empty()) {
    MachineBasicBlock *CleanBB = Worklist.pop_back_val();
    if (CleanBB == SavePoint)
      return true;
    if (!Visited.insert(CleanBB).second || !CleanBB->pred_size())
      continue;
    Worklist.append(CleanBB->pred_begin(), CleanBB->pred_end());
  }
  return false;
}
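
// Note: postShrinkWrapping() rejects a candidate save point when this function
// returns true, because a clean path from the save point to the original
// restore block would execute the prologue and then bypass the split-off
// restore block that holds the epilogue, leaving the stack frame unbalanced.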

/// This function updates the branches post restore point split.
///
/// Restore point has been split.
/// Old restore point: MBB
/// New restore point: NMBB
/// Any basic block (say BBToUpdate) which had a fallthrough to MBB
/// previously should
/// 1. Fallthrough to NMBB iff NMBB is inserted immediately above MBB in the
/// block layout OR
/// 2. Branch unconditionally to NMBB iff NMBB is inserted at any other place.
static void updateTerminator(MachineBasicBlock *BBToUpdate,
                             MachineBasicBlock *NMBB,
                             const TargetInstrInfo *TII) {
  DebugLoc DL = BBToUpdate->findBranchDebugLoc();
  // If NMBB isn't the new layout successor for BBToUpdate, insert an
  // unconditional branch to it.
  if (!BBToUpdate->isLayoutSuccessor(NMBB))
    TII->insertUnconditionalBranch(*BBToUpdate, NMBB, DL);
}

/// This function splits the restore point and returns the new restore
/// point/BB.
///
/// DirtyPreds: Predecessors of \p MBB that are ReachableByDirty
///
/// Decision has been made to split the restore point.
/// Old restore point: \p MBB
/// New restore point: \p NMBB
/// This function makes the necessary block layout changes so that
/// 1. \p NMBB points to \p MBB unconditionally
/// 2. All dirtyPreds that previously pointed to \p MBB point to \p NMBB
static MachineBasicBlock *
tryToSplitRestore(MachineBasicBlock *MBB,
                  ArrayRef<MachineBasicBlock *> DirtyPreds,
                  const TargetInstrInfo *TII) {
  MachineFunction *MF = MBB->getParent();

  // Get the list of DirtyPreds which have a fallthrough to MBB before the
  // block layout change. This is just to ensure that if the NMBB is inserted
  // after MBB, then we create an unconditional branch from
  // DirtyPred/CleanPred to NMBB.
  SmallPtrSet<MachineBasicBlock *, 8> MBBFallthrough;
  for (MachineBasicBlock *BB : DirtyPreds)
    if (BB->getFallThrough(false) == MBB)
      MBBFallthrough.insert(BB);

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  // Insert this block at the end of the function. Inserting in between may
  // interfere with control flow optimizer decisions.
  MF->insert(MF->end(), NMBB);

  for (const MachineBasicBlock::RegisterMaskPair &LI : MBB->liveins())
    NMBB->addLiveIn(LI.PhysReg);

  TII->insertUnconditionalBranch(*NMBB, MBB, DebugLoc());

  // After splitting, all predecessors of the restore point should be dirty
  // blocks.
  for (MachineBasicBlock *SuccBB : DirtyPreds)
    SuccBB->ReplaceUsesOfBlockWith(MBB, NMBB);

  NMBB->addSuccessor(MBB);

  for (MachineBasicBlock *BBToUpdate : MBBFallthrough)
    updateTerminator(BBToUpdate, NMBB, TII);

  return NMBB;
}
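
// After the split, the CFG around the restore point looks like:
//
//   DirtyPreds --> NMBB --> MBB <-- CleanPreds
//
// The epilogue will be inserted in NMBB, so it only executes on paths that
// went through the prologue; clean predecessors still reach MBB directly.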

/// This function undoes the restore point split done earlier.
///
/// DirtyPreds: All predecessors of \p NMBB that are ReachableByDirty.
///
/// The restore point was split and the change needs to be rolled back. Make
/// the necessary changes to reset the restore point from \p NMBB to \p MBB.
static void rollbackRestoreSplit(MachineFunction &MF, MachineBasicBlock *NMBB,
                                 MachineBasicBlock *MBB,
                                 ArrayRef<MachineBasicBlock *> DirtyPreds,
                                 const TargetInstrInfo *TII) {
  // For a BB, if NMBB is a fallthrough in the current layout, then in the new
  // layout a. BB should fallthrough to MBB OR b. BB should unconditionally
  // branch to MBB.
  SmallPtrSet<MachineBasicBlock *, 8> NMBBFallthrough;
  for (MachineBasicBlock *BB : DirtyPreds)
    if (BB->getFallThrough(false) == NMBB)
      NMBBFallthrough.insert(BB);

  NMBB->removeSuccessor(MBB);
  for (MachineBasicBlock *SuccBB : DirtyPreds)
    SuccBB->ReplaceUsesOfBlockWith(NMBB, MBB);

  NMBB->erase(NMBB->begin(), NMBB->end());
  NMBB->eraseFromParent();

  for (MachineBasicBlock *BBToUpdate : NMBBFallthrough)
    updateTerminator(BBToUpdate, MBB, TII);
}

// A block is deemed fit for a restore point split iff there exist
// 1. DirtyPreds - preds of CurRestore reachable from a use or def of CSRs/FI
// 2. CleanPreds - preds of CurRestore that aren't DirtyPreds
bool ShrinkWrapImpl::checkIfRestoreSplittable(
    const MachineBasicBlock *CurRestore,
    const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
    SmallVectorImpl<MachineBasicBlock *> &DirtyPreds,
    SmallVectorImpl<MachineBasicBlock *> &CleanPreds,
    const TargetInstrInfo *TII, RegScavenger *RS) {
  for (const MachineInstr &MI : *CurRestore)
    if (useOrDefCSROrFI(MI, RS, /*StackAddressUsed=*/true))
      return false;

  for (MachineBasicBlock *PredBB : CurRestore->predecessors()) {
    if (!isAnalyzableBB(*TII, *PredBB))
      return false;

    if (ReachableByDirty.count(PredBB))
      DirtyPreds.push_back(PredBB);
    else
      CleanPreds.push_back(PredBB);
  }

  return !(CleanPreds.empty() || DirtyPreds.empty());
}

bool ShrinkWrapImpl::postShrinkWrapping(bool HasCandidate, MachineFunction &MF,
                                        RegScavenger *RS) {
  if (!EnablePostShrinkWrapOpt)
    return false;

  MachineBasicBlock *InitSave = nullptr;
  MachineBasicBlock *InitRestore = nullptr;

  if (HasCandidate) {
    InitSave = Save;
    InitRestore = Restore;
  } else {
    InitRestore = nullptr;
    InitSave = &MF.front();
    for (MachineBasicBlock &MBB : MF) {
      if (MBB.isEHFuncletEntry())
        return false;
      if (MBB.isReturnBlock()) {
        // Do not support multiple restore points.
        if (InitRestore)
          return false;
        InitRestore = &MBB;
      }
    }
  }

  if (!InitSave || !InitRestore || InitRestore == InitSave ||
      !MDT->dominates(InitSave, InitRestore) ||
      !MPDT->dominates(InitRestore, InitSave))
    return false;

  // Bail out of the optimization if any basic block is the target of an
  // INLINEASM_BR instruction.
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isInlineAsmBrIndirectTarget())
      return false;

  DenseSet<const MachineBasicBlock *> DirtyBBs;
  for (MachineBasicBlock &MBB : MF) {
    if (!MDT->isReachableFromEntry(&MBB))
      continue;
    if (MBB.isEHPad()) {
      DirtyBBs.insert(&MBB);
      continue;
    }
    for (const MachineInstr &MI : MBB)
      if (useOrDefCSROrFI(MI, RS, /*StackAddressUsed=*/true)) {
        DirtyBBs.insert(&MBB);
        break;
      }
  }

  // Find blocks reachable from the use or def of CSRs/FI.
  DenseSet<const MachineBasicBlock *> ReachableByDirty;
  collectBlocksReachableByDirty(DirtyBBs, ReachableByDirty);

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<MachineBasicBlock *, 2> DirtyPreds;
  SmallVector<MachineBasicBlock *, 2> CleanPreds;
  if (!checkIfRestoreSplittable(InitRestore, ReachableByDirty, DirtyPreds,
                                CleanPreds, TII, RS))
    return false;

  // Try to find a new save point that dominates all the dirty blocks.
  MachineBasicBlock *NewSave =
      FindIDom<>(**DirtyPreds.begin(), DirtyPreds, *MDT, false);

  while (NewSave && (hasDirtyPred(ReachableByDirty, *NewSave) ||
                     EntryFreq < MBFI->getBlockFreq(NewSave) ||
                     /* The entry frequency has been observed to be higher
                        than that of a loop block in some cases. */
                     MLI->getLoopFor(NewSave))) {
    SmallVector<MachineBasicBlock *> ReachablePreds;
    for (auto *BB : NewSave->predecessors())
      if (MDT->isReachableFromEntry(BB))
        ReachablePreds.push_back(BB);
    if (ReachablePreds.empty())
      break;

    NewSave = FindIDom<>(**ReachablePreds.begin(), ReachablePreds, *MDT,
                         false);
  }

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!NewSave || NewSave == InitSave ||
      isSaveReachableThroughClean(NewSave, CleanPreds) ||
      !TFI->canUseAsPrologue(*NewSave))
    return false;

  // Now we know that splitting the restore point can isolate it from clean
  // blocks and that doing so can shrink the save point.
  MachineBasicBlock *NewRestore =
      tryToSplitRestore(InitRestore, DirtyPreds, TII);

  // Make sure the new restore point is valid as an epilogue, depending on the
  // target.
  if (!TFI->canUseAsEpilogue(*NewRestore)) {
    rollbackRestoreSplit(MF, NewRestore, InitRestore, DirtyPreds, TII);
    return false;
  }

  Save = NewSave;
  Restore = NewRestore;

  MDT->recalculate(MF);
  MPDT->recalculate(MF);

  assert((MDT->dominates(Save, Restore) && MPDT->dominates(Restore, Save)) &&
         "Incorrect save or restore point due to dominance relations");
  assert((!MLI->getLoopFor(Save) && !MLI->getLoopFor(Restore)) &&
         "Unexpected save or restore point in a loop");
  assert((EntryFreq >= MBFI->getBlockFreq(Save) &&
          EntryFreq >= MBFI->getBlockFreq(Restore)) &&
         "Incorrect save or restore point based on block frequency");
  return true;
}

void ShrinkWrapImpl::updateSaveRestorePoints(MachineBasicBlock &MBB,
                                             RegScavenger *RS) {
  // Get rid of the easy cases first.
  if (!Save)
    Save = &MBB;
  else
    Save = MDT->findNearestCommonDominator(Save, &MBB);
  assert(Save);

  if (!Restore)
    Restore = &MBB;
  else if (MPDT->getNode(&MBB)) // If the block is not in the post dom tree, it
                                // means the block never returns. If that's the
                                // case, we don't want to call
                                // `findNearestCommonDominator`, which will
                                // return `Restore`.
    Restore = MPDT->findNearestCommonDominator(Restore, &MBB);
  else
    Restore = nullptr; // Abort, we can't find a restore point in this case.

  // Make sure we would be able to insert the restore code before the
  // terminator.
  if (Restore == &MBB) {
    for (const MachineInstr &Terminator : MBB.terminators()) {
      if (!useOrDefCSROrFI(Terminator, RS, /*StackAddressUsed=*/true))
        continue;
      // One of the terminators needs to happen before the restore point.
      if (MBB.succ_empty()) {
        Restore = nullptr; // Abort, we can't find a restore point in this
                           // case.
        break;
      }
      // Look for a restore point that post-dominates all the successors.
      // The immediate post-dominator is what we are looking for.
      Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
      break;
    }
  }

  if (!Restore) {
    LLVM_DEBUG(
        dbgs() << "Restore point needs to be spanned on several blocks\n");
    return;
  }

  // Make sure Save and Restore are suitable for shrink-wrapping:
  // 1. all paths from Save need to lead to Restore before exiting.
  // 2. all paths to Restore need to go through Save from Entry.
  // We achieve that by making sure that:
  // A. Save dominates Restore.
  // B. Restore post-dominates Save.
  // C. Save and Restore are in the same loop.
  bool SaveDominatesRestore = false;
  bool RestorePostDominatesSave = false;
  while (Restore &&
         (!(SaveDominatesRestore = MDT->dominates(Save, Restore)) ||
          !(RestorePostDominatesSave = MPDT->dominates(Restore, Save)) ||
          // Post-dominance is not enough in loops to ensure that all uses/defs
          // are after the prologue and before the epilogue at runtime.
          // E.g.,
          // while(1) {
          //   Save
          //   Restore
          //   if (...)
          //     break;
          //   use/def CSRs
          // }
          // All the uses/defs of CSRs are dominated by Save and post-dominated
          // by Restore. However, the CSR uses are still reachable after
          // Restore and before Save are executed.
          //
          // For now, just push the restore/save points outside of loops.
          // FIXME: Refine the criteria to still find interesting cases
          // for loops.
          MLI->getLoopFor(Save) || MLI->getLoopFor(Restore))) {
    // Fix (A).
    if (!SaveDominatesRestore) {
      Save = MDT->findNearestCommonDominator(Save, Restore);
      continue;
    }
    // Fix (B).
    if (!RestorePostDominatesSave)
      Restore = MPDT->findNearestCommonDominator(Restore, Save);

    // Fix (C).
    if (Restore && (MLI->getLoopFor(Save) || MLI->getLoopFor(Restore))) {
      if (MLI->getLoopDepth(Save) > MLI->getLoopDepth(Restore)) {
        // Push Save outside of this loop if the immediate dominator is
        // different from the save block. If the immediate dominator is not
        // different, bail out.
        SmallVector<MachineBasicBlock *> Preds;
        for (auto *PBB : Save->predecessors())
          if (MDT->isReachableFromEntry(PBB))
            Preds.push_back(PBB);
        Save = FindIDom<>(*Save, Preds, *MDT);
        if (!Save)
          break;
      } else {
        // If the loop does not exit, there is no point in looking
        // for a post-dominator outside the loop.
        SmallVector<MachineBasicBlock *, 4> ExitBlocks;
        MLI->getLoopFor(Restore)->getExitingBlocks(ExitBlocks);
        // Push Restore outside of this loop.
        // Look for the immediate post-dominator of the loop exits.
        MachineBasicBlock *IPdom = Restore;
        for (MachineBasicBlock *LoopExitBB : ExitBlocks) {
          IPdom = FindIDom<>(*IPdom, LoopExitBB->successors(), *MPDT);
          if (!IPdom)
            break;
        }
        // If the immediate post-dominator is not in a less nested loop,
        // then we are stuck in a program with an infinite loop.
        // In that case, we will not find a safe point, hence, bail out.
        if (IPdom && MLI->getLoopDepth(IPdom) < MLI->getLoopDepth(Restore))
          Restore = IPdom;
        else {
          Restore = nullptr;
          break;
        }
      }
    }
  }
}

static bool giveUpWithRemarks(MachineOptimizationRemarkEmitter *ORE,
                              StringRef RemarkName, StringRef RemarkMessage,
                              const DiagnosticLocation &Loc,
                              const MachineBasicBlock *MBB) {
  ORE->emit([&]() {
    return MachineOptimizationRemarkMissed(DEBUG_TYPE, RemarkName, Loc, MBB)
           << RemarkMessage;
  });

  LLVM_DEBUG(dbgs() << RemarkMessage << '\n');
  return false;
}

bool ShrinkWrapImpl::performShrinkWrapping(
    const ReversePostOrderTraversal<MachineBasicBlock *> &RPOT,
    RegScavenger *RS) {
  for (MachineBasicBlock *MBB : RPOT) {
    LLVM_DEBUG(dbgs() << "Look into: " << printMBBReference(*MBB) << '\n');

    if (MBB->isEHFuncletEntry())
      return giveUpWithRemarks(ORE, "UnsupportedEHFunclets",
                               "EH Funclets are not supported yet.",
                               MBB->front().getDebugLoc(), MBB);

    if (MBB->isEHPad() || MBB->isInlineAsmBrIndirectTarget()) {
      // Push the prologue and epilogue outside of the region that may throw
      // (or jump out via inlineasm_br), by making sure that all the landing
      // pads are at least at the boundary of the save and restore points. The
      // problem is that a basic block can jump out from the middle in these
      // cases, which we do not handle.
      updateSaveRestorePoints(*MBB, RS);
      if (!ArePointsInteresting()) {
        LLVM_DEBUG(dbgs() << "EHPad/inlineasm_br prevents shrink-wrapping\n");
        return false;
      }
      continue;
    }

    bool StackAddressUsed = false;
    // Check if we found any stack accesses in the predecessors. We are not
    // doing a full dataflow analysis here to keep things simple but just
    // rely on a reverse post-order traversal (RPOT) to guarantee predecessors
    // are already processed except for loops (and accept the conservative
    // result for loops).
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (StackAddressUsedBlockInfo.test(Pred->getNumber())) {
        StackAddressUsed = true;
        break;
      }
    }

    for (const MachineInstr &MI : *MBB) {
      if (useOrDefCSROrFI(MI, RS, StackAddressUsed)) {
        // Save (resp. restore) point must dominate (resp. post dominate)
        // MI. Look for the proper basic block for those.
        updateSaveRestorePoints(*MBB, RS);
        // If we are at a point where we cannot improve the placement of
        // save/restore instructions, just give up.
        if (!ArePointsInteresting()) {
          LLVM_DEBUG(dbgs() << "No Shrink wrap candidate found\n");
          return false;
        }
        // No need to look for other instructions, this basic block
        // will already be part of the handled region.
        StackAddressUsed = true;
        break;
      }
    }
    StackAddressUsedBlockInfo[MBB->getNumber()] = StackAddressUsed;
  }
  if (!ArePointsInteresting()) {
    // If the points are not interesting at this point, then they must be null
    // because it means we did not encounter any frame/CSR related code.
    // Otherwise, we would have returned from the previous loop.
    assert(!Save && !Restore && "We miss a shrink-wrap opportunity?!");
    LLVM_DEBUG(dbgs() << "Nothing to shrink-wrap\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "\n ** Results **\nFrequency of the Entry: "
                    << EntryFreq.getFrequency() << '\n');

  const TargetFrameLowering *TFI =
      MachineFunc->getSubtarget().getFrameLowering();
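  // If the candidate points are hotter than the entry block or rejected by
  // the target, walk the Save point up the dominator tree (or the Restore
  // point down the post-dominator tree) and re-validate, until a profitable
  // pair is found or we run out of blocks.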
  do {
    LLVM_DEBUG(dbgs() << "Shrink wrap candidates (#, Name, Freq):\nSave: "
                      << printMBBReference(*Save) << ' '
                      << printBlockFreq(*MBFI, *Save)
                      << "\nRestore: " << printMBBReference(*Restore) << ' '
                      << printBlockFreq(*MBFI, *Restore) << '\n');

    bool IsSaveCheap, TargetCanUseSaveAsPrologue = false;
    if (((IsSaveCheap = EntryFreq >= MBFI->getBlockFreq(Save)) &&
         EntryFreq >= MBFI->getBlockFreq(Restore)) &&
        ((TargetCanUseSaveAsPrologue = TFI->canUseAsPrologue(*Save)) &&
         TFI->canUseAsEpilogue(*Restore)))
      break;
    LLVM_DEBUG(
        dbgs() << "New points are too expensive or invalid for the target\n");
    MachineBasicBlock *NewBB;
    if (!IsSaveCheap || !TargetCanUseSaveAsPrologue) {
      Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
      if (!Save)
        break;
      NewBB = Save;
    } else {
      // Restore is expensive.
      Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
      if (!Restore)
        break;
      NewBB = Restore;
    }
    updateSaveRestorePoints(*NewBB, RS);
  } while (Save && Restore);

  if (!ArePointsInteresting()) {
    ++NumCandidatesDropped;
    return false;
  }
  return true;
}

bool ShrinkWrapImpl::run(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');

  init(MF);

  ReversePostOrderTraversal<MachineBasicBlock *> RPOT(&*MF.begin());
  if (containsIrreducibleCFG<MachineBasicBlock *>(RPOT, *MLI)) {
    // If MF is irreducible, a block may be in a loop without
    // MachineLoopInfo reporting it. I.e., we may use the
    // post-dominance property in loops, which leads to incorrect
    // results. Moreover, we may miss that the prologue and
    // epilogue are not in the same loop, leading to unbalanced
    // construction/deconstruction of the stack frame.
    return giveUpWithRemarks(ORE, "UnsupportedIrreducibleCFG",
                             "Irreducible CFGs are not supported yet.",
                             MF.getFunction().getSubprogram(), &MF.front());
  }

  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  std::unique_ptr<RegScavenger> RS(
      TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr);

  bool Changed = false;

  // Initially, conservatively assume that stack addresses can be used in each
  // basic block and change the state only for those basic blocks for which we
  // were able to prove the opposite.
  StackAddressUsedBlockInfo.resize(MF.getNumBlockIDs(), true);
  bool HasCandidate = performShrinkWrapping(RPOT, RS.get());
  StackAddressUsedBlockInfo.clear();
  Changed = postShrinkWrapping(HasCandidate, MF, RS.get());
  if (!HasCandidate && !Changed)
    return false;
  if (!ArePointsInteresting())
    return Changed;

  LLVM_DEBUG(dbgs() << "Final shrink wrap candidates:\nSave: "
                    << printMBBReference(*Save) << ' '
                    << "\nRestore: " << printMBBReference(*Restore) << '\n');

  MachineFrameInfo &MFI = MF.getFrameInfo();

  // The lists of CalleeSavedInfo for the registers will be filled in during
  // the prolog/epilog insertion pass.
  SaveRestorePoints SavePoints({{Save, {}}});
  SaveRestorePoints RestorePoints({{Restore, {}}});

  MFI.setSavePoints(SavePoints);
  MFI.setRestorePoints(RestorePoints);
  ++NumCandidates;
  return Changed;
}

bool ShrinkWrapLegacy::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()) || MF.empty() ||
      !ShrinkWrapImpl::isShrinkWrapEnabled(MF))
    return false;

  MachineDominatorTree *MDT =
      &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
  MachinePostDominatorTree *MPDT =
      &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
  MachineBlockFrequencyInfo *MBFI =
      &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI();
  MachineLoopInfo *MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  MachineOptimizationRemarkEmitter *ORE =
      &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  return ShrinkWrapImpl(MDT, MPDT, MBFI, MLI, ORE).run(MF);
}

PreservedAnalyses ShrinkWrapPass::run(MachineFunction &MF,
                                      MachineFunctionAnalysisManager &MFAM) {
  MFPropsModifier _(*this, MF);
  if (MF.empty() || !ShrinkWrapImpl::isShrinkWrapEnabled(MF))
    return PreservedAnalyses::all();

  MachineDominatorTree &MDT = MFAM.getResult<MachineDominatorTreeAnalysis>(MF);
  MachinePostDominatorTree &MPDT =
      MFAM.getResult<MachinePostDominatorTreeAnalysis>(MF);
  MachineBlockFrequencyInfo &MBFI =
      MFAM.getResult<MachineBlockFrequencyAnalysis>(MF);
  MachineLoopInfo &MLI = MFAM.getResult<MachineLoopAnalysis>(MF);
  MachineOptimizationRemarkEmitter &ORE =
      MFAM.getResult<MachineOptimizationRemarkEmitterAnalysis>(MF);

  ShrinkWrapImpl(&MDT, &MPDT, &MBFI, &MLI, &ORE).run(MF);
  return PreservedAnalyses::all();
}

bool ShrinkWrapImpl::isShrinkWrapEnabled(const MachineFunction &MF) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  switch (EnableShrinkWrapOpt) {
  case cl::BOU_UNSET:
    return TFI->enableShrinkWrapping(MF) &&
           // Windows with CFI has some limitations that make it impossible
           // to use shrink-wrapping.
           !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
           // Sanitizers look at the value of the stack at the location
           // of the crash. Since a crash can happen anywhere, the
           // frame must be lowered before anything else happens for the
           // sanitizers to be able to get a correct stack frame.
           !(MF.getFunction().hasFnAttribute(Attribute::SanitizeAddress) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeThread) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeMemory) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeType) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress));
  // If EnableShrinkWrap is set, it takes precedence over whatever the
  // target sets. The rationale is that we assume we want to test
  // something related to shrink-wrapping.
  case cl::BOU_TRUE:
    return true;
  case cl::BOU_FALSE:
    return false;
  }
  llvm_unreachable("Invalid shrink-wrapping state");
}