//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a variant of the UnifyFunctionExitNodes pass. Rather than ensuring
// there is at most one ret and one unreachable instruction, it ensures there
// is at most one divergent exiting block.
//
// StructurizeCFG can't deal with multi-exit regions formed by branches to
// multiple return nodes. It is not desirable to structurize regions with
// uniform branches, and unifying those with divergent branches into the same
// return block would inhibit the use of scalar branching. StructurizeCFG also
// can't deal with the case where one branch goes to a return and another to an
// unreachable; replace the unreachable with a return in that case.
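//
// For illustration only (a simplified conceptual sketch, not literal pass
// output): given a divergent branch where one path returns and the other hits
// an unreachable,
//
//   bb:
//     br i1 %divergent.cond, label %ret.bb, label %unreachable.bb
//   ret.bb:
//     ret void
//   unreachable.bb:
//     unreachable
//
// the unreachable is rewritten into a call to @llvm.amdgcn.unreachable
// followed by a return, and the returning blocks are then funneled into a
// single UnifiedReturnBlock so that StructurizeCFG sees only one divergent
// exit.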
//
//===----------------------------------------------------------------------===//

#include "AMDGPUUnifyDivergentExitNodes.h"
#include "AMDGPU.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"

namespace {

class AMDGPUUnifyDivergentExitNodesImpl {
private:
  const TargetTransformInfo *TTI = nullptr;

public:
  AMDGPUUnifyDivergentExitNodesImpl() = delete;
  AMDGPUUnifyDivergentExitNodesImpl(const TargetTransformInfo *TTI)
      : TTI(TTI) {}

  // We can preserve non-critical-edgeness when we unify function exit nodes
  BasicBlock *unifyReturnBlockSet(Function &F, DomTreeUpdater &DTU,
                                  ArrayRef<BasicBlock *> ReturningBlocks,
                                  StringRef Name);
  bool run(Function &F, DominatorTree *DT, const PostDominatorTree &PDT,
           const UniformityInfo &UA);
};

class AMDGPUUnifyDivergentExitNodes : public FunctionPass {
public:
  static char ID;
  AMDGPUUnifyDivergentExitNodes() : FunctionPass(ID) {}
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};
} // end anonymous namespace

char AMDGPUUnifyDivergentExitNodes::ID = 0;

char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;

INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                      "Unify divergent function exit nodes", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                    "Unify divergent function exit nodes", false, false)

void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const {
  if (RequireAndPreserveDomTree)
    AU.addRequired<DominatorTreeWrapperPass>();

  AU.addRequired<PostDominatorTreeWrapperPass>();

  AU.addRequired<UniformityInfoWrapperPass>();

  if (RequireAndPreserveDomTree) {
    AU.addPreserved<DominatorTreeWrapperPass>();
    // FIXME: preserve PostDominatorTreeWrapperPass
  }

  // We preserve the non-critical-edgeness property
  AU.addPreservedID(BreakCriticalEdgesID);

  FunctionPass::getAnalysisUsage(AU);

  AU.addRequired<TargetTransformInfoWrapperPass>();
}

/// \returns true if \p BB is reachable through only uniform branches.
/// XXX - Is there a more efficient way to find this?
static bool isUniformlyReached(const UniformityInfo &UA, BasicBlock &BB) {
  SmallVector<BasicBlock *, 8> Stack(predecessors(&BB));
  SmallPtrSet<BasicBlock *, 8> Visited;

  while (!Stack.empty()) {
    BasicBlock *Top = Stack.pop_back_val();
    if (!UA.isUniform(Top->getTerminator()))
      return false;

    for (BasicBlock *Pred : predecessors(Top)) {
      if (Visited.insert(Pred).second)
        Stack.push_back(Pred);
    }
  }

  return true;
}

BasicBlock *AMDGPUUnifyDivergentExitNodesImpl::unifyReturnBlockSet(
    Function &F, DomTreeUpdater &DTU, ArrayRef<BasicBlock *> ReturningBlocks,
    StringRef Name) {
  // We need to insert a new basic block into the function, add a PHI node
  // (if the function returns values), and convert all of the return
  // instructions into unconditional branches to it.
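  //
  // Illustrative sketch only (names and types are hypothetical): for a
  // function returning i32, two blocks ending in "ret i32 %a" and "ret i32 %b"
  // are redirected to branch to the new block, which then contains:
  //   %UnifiedRetVal = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
  //   ret i32 %UnifiedRetVal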
  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);
  IRBuilder<> B(NewRetBlock);

  PHINode *PN = nullptr;
  if (F.getReturnType()->isVoidTy()) {
    B.CreateRetVoid();
  } else {
    // If the function doesn't return void... add a PHI node to the block...
    PN = B.CreatePHI(F.getReturnType(), ReturningBlocks.size(),
                     "UnifiedRetVal");
    B.CreateRet(PN);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  std::vector<DominatorTree::UpdateType> Updates;
  Updates.reserve(ReturningBlocks.size());
  for (BasicBlock *BB : ReturningBlocks) {
    // Add an incoming element to the PHI node for every return instruction
    // that is merging into this new block...
    if (PN)
      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);

    // Remove and delete the return inst.
    BB->getTerminator()->eraseFromParent();
    BranchInst::Create(NewRetBlock, BB);
    Updates.emplace_back(DominatorTree::Insert, BB, NewRetBlock);
  }

  if (RequireAndPreserveDomTree)
    DTU.applyUpdates(Updates);
  Updates.clear();

  for (BasicBlock *BB : ReturningBlocks) {
    // Clean up a possible branch to an unconditional branch to the return.
    simplifyCFG(BB, *TTI, RequireAndPreserveDomTree ? &DTU : nullptr,
                SimplifyCFGOptions().bonusInstThreshold(2));
  }

  return NewRetBlock;
}

bool AMDGPUUnifyDivergentExitNodesImpl::run(Function &F, DominatorTree *DT,
                                            const PostDominatorTree &PDT,
                                            const UniformityInfo &UA) {
  assert(hasOnlySimpleTerminator(F) && "Unsupported block terminator.");

  if (PDT.root_size() == 0 ||
      (PDT.root_size() == 1 &&
       !isa<BranchInst>(PDT.getRoot()->getTerminator())))
    return false;

  // Loop over all of the blocks in a function, tracking all of the blocks that
  // return.
  SmallVector<BasicBlock *, 4> ReturningBlocks;
  SmallVector<BasicBlock *, 4> UnreachableBlocks;

  // Dummy return block for infinite loop.
  BasicBlock *DummyReturnBB = nullptr;

  bool Changed = false;
  std::vector<DominatorTree::UpdateType> Updates;

  // TODO: For now we unify all exit blocks, even if they are uniformly
  // reachable, whenever there are any exits that are not uniformly reached.
  // This works around a limitation of the structurizer, which cannot handle
  // multiple function exits. Once the structurizer is able to handle multiple
  // function exits, we should only unify UnreachableBlocks that are not
  // uniformly reachable.
  bool HasDivergentExitBlock = llvm::any_of(
      PDT.roots(), [&](auto BB) { return !isUniformlyReached(UA, *BB); });

  for (BasicBlock *BB : PDT.roots()) {
    if (auto *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      auto *CI = dyn_cast_or_null<CallInst>(RI->getPrevNode());
      if (CI && CI->isMustTailCall())
        continue;
      if (HasDivergentExitBlock)
        ReturningBlocks.push_back(BB);
    } else if (isa<UnreachableInst>(BB->getTerminator())) {
      if (HasDivergentExitBlock)
        UnreachableBlocks.push_back(BB);
    } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {

      ConstantInt *BoolTrue = ConstantInt::getTrue(F.getContext());
      if (DummyReturnBB == nullptr) {
        DummyReturnBB = BasicBlock::Create(F.getContext(),
                                           "DummyReturnBlock", &F);
        Type *RetTy = F.getReturnType();
        Value *RetVal = RetTy->isVoidTy() ? nullptr : PoisonValue::get(RetTy);
        ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB);
        ReturningBlocks.push_back(DummyReturnBB);
      }

      if (BI->isUnconditional()) {
        BasicBlock *LoopHeaderBB = BI->getSuccessor(0);
        BI->eraseFromParent(); // Delete the unconditional branch.
        // Add a new conditional branch with a dummy edge to the return block.
        BranchInst::Create(LoopHeaderBB, DummyReturnBB, BoolTrue, BB);
        Updates.emplace_back(DominatorTree::Insert, BB, DummyReturnBB);
      } else { // Conditional branch.
        SmallVector<BasicBlock *, 2> Successors(successors(BB));

        // Create a new transition block to hold the conditional branch.
        BasicBlock *TransitionBB = BB->splitBasicBlock(BI, "TransitionBlock");

        Updates.reserve(Updates.size() + 2 * Successors.size() + 2);

        // 'Successors' become successors of TransitionBB instead of BB,
        // and TransitionBB becomes a single successor of BB.
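        //
        // Sketch of the rewrite (S0/S1 stand for the original successors;
        // the edge to DummyReturnBB is never taken at runtime):
        //   before:  BB --cond--> S0, S1
        //   after:   BB --true--> TransitionBB --cond--> S0, S1
        //            BB --false-> DummyReturnBB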
        Updates.emplace_back(DominatorTree::Insert, BB, TransitionBB);
        for (BasicBlock *Successor : Successors) {
          Updates.emplace_back(DominatorTree::Insert, TransitionBB, Successor);
          Updates.emplace_back(DominatorTree::Delete, BB, Successor);
        }

        // Create a branch that will always branch to the transition block and
        // references DummyReturnBB.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(TransitionBB, DummyReturnBB, BoolTrue, BB);
        Updates.emplace_back(DominatorTree::Insert, BB, DummyReturnBB);
      }
      Changed = true;
    }
  }

  if (!UnreachableBlocks.empty()) {
    BasicBlock *UnreachableBlock = nullptr;

    if (UnreachableBlocks.size() == 1) {
      UnreachableBlock = UnreachableBlocks.front();
    } else {
      UnreachableBlock = BasicBlock::Create(F.getContext(),
                                            "UnifiedUnreachableBlock", &F);
      new UnreachableInst(F.getContext(), UnreachableBlock);

      Updates.reserve(Updates.size() + UnreachableBlocks.size());
      for (BasicBlock *BB : UnreachableBlocks) {
        // Remove and delete the unreachable inst.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(UnreachableBlock, BB);
        Updates.emplace_back(DominatorTree::Insert, BB, UnreachableBlock);
      }
      Changed = true;
    }

    if (!ReturningBlocks.empty()) {
      // Don't create a new unreachable inst if we have a return. The
      // structurizer/annotator can't handle multiple exits.

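      // Illustrative sketch of the resulting block (for a void function):
      //   call void @llvm.amdgcn.unreachable()
      //   ret void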
      Type *RetTy = F.getReturnType();
      Value *RetVal = RetTy->isVoidTy() ? nullptr : PoisonValue::get(RetTy);
      // Remove and delete the unreachable inst.
      UnreachableBlock->getTerminator()->eraseFromParent();

      Function *UnreachableIntrin = Intrinsic::getOrInsertDeclaration(
          F.getParent(), Intrinsic::amdgcn_unreachable);

      // Insert a call to an intrinsic tracking that this is an unreachable
      // point, in case we want to kill the active lanes or something later.
      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);

      // Don't create a scalar trap. We would only want to trap if this code was
      // really reached, but a scalar trap would happen even if no lanes
      // actually reached here.
      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
      ReturningBlocks.push_back(UnreachableBlock);
      Changed = true;
    }
  }

  // FIXME: add PDT here once simplifycfg is ready.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  if (RequireAndPreserveDomTree)
    DTU.applyUpdates(Updates);
  Updates.clear();

  // Now handle return blocks.
  if (ReturningBlocks.empty())
    return Changed; // No blocks return
  
  if (ReturningBlocks.size() == 1)
    return Changed; // Already has a single return block

  unifyReturnBlockSet(F, DTU, ReturningBlocks, "UnifiedReturnBlock");
  return true;
}

bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
  DominatorTree *DT = nullptr;
  if (RequireAndPreserveDomTree)
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  const auto &PDT =
      getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  const auto &UA = getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
  const auto *TransformInfo =
      &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  return AMDGPUUnifyDivergentExitNodesImpl(TransformInfo).run(F, DT, PDT, UA);
}

PreservedAnalyses
AMDGPUUnifyDivergentExitNodesPass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  DominatorTree *DT = nullptr;
  if (RequireAndPreserveDomTree)
    DT = &AM.getResult<DominatorTreeAnalysis>(F);

  const auto &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  const auto &UA = AM.getResult<UniformityInfoAnalysis>(F);
  const auto *TransformInfo = &AM.getResult<TargetIRAnalysis>(F);
  return AMDGPUUnifyDivergentExitNodesImpl(TransformInfo).run(F, DT, PDT, UA)
             ? PreservedAnalyses::none()
             : PreservedAnalyses::all();
}