//===-- AMDGPUGlobalISelDivergenceLowering.cpp ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// GlobalISel pass that selects divergent i1 phis as lane mask phis.
/// Lane mask merging uses same algorithm as SDAG in SILowerI1Copies.
/// Handles all cases of temporal divergence.
/// For divergent non-phi i1 and uniform i1 uses outside of the cycle this pass
/// currently depends on LCSSA to insert phis with one incoming.
//
//===----------------------------------------------------------------------===//
17
18#include "AMDGPU.h"
19#include "AMDGPUGlobalISelUtils.h"
20#include "SILowerI1Copies.h"
21#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
22#include "llvm/CodeGen/MachineFunctionPass.h"
23#include "llvm/CodeGen/MachineUniformityAnalysis.h"
24#include "llvm/InitializePasses.h"
25
26#define DEBUG_TYPE "amdgpu-global-isel-divergence-lowering"
27
28using namespace llvm;
29
30namespace {
31
/// Legacy-PM machine function pass that drives divergence lowering for
/// GlobalISel: divergent i1 phis become lane mask phis, and values used
/// outside cycles with divergent exits (temporal divergence) are copied to
/// registers that survive the cycle.
class AMDGPUGlobalISelDivergenceLowering : public MachineFunctionPass {
public:
  static char ID;

public:
  AMDGPUGlobalISelDivergenceLowering() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "AMDGPU GlobalISel divergence lowering";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // The pass inserts copies/phis but never adds or removes basic blocks.
    AU.setPreservesCFG();
    // Dominator trees and uniformity info are consumed by the lowering
    // helper below (via PhiLoweringHelper and MUI queries).
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    AU.addRequired<MachineUniformityAnalysisPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
53
/// GlobalISel-side implementation of the lane-mask phi lowering interface
/// shared with SILowerI1Copies (see SILowerI1Copies.h). Adds temporal
/// divergence lowering on top of the common phi lowering algorithm.
class DivergenceLoweringHelper : public PhiLoweringHelper {
public:
  DivergenceLoweringHelper(MachineFunction *MF, MachineDominatorTree *DT,
                           MachinePostDominatorTree *PDT,
                           MachineUniformityInfo *MUI);

private:
  // Uniformity analysis result; source of the temporal divergence list and
  // of per-register divergence queries.
  MachineUniformityInfo *MUI = nullptr;
  MachineIRBuilder B;
  // Insert a copy of Reg into a fresh lane mask register right after Reg's
  // definition (skipping phis/labels); returns the new register.
  Register buildRegCopyToLaneMask(Register Reg);

public:
  // PhiLoweringHelper interface (see base class for contracts).
  void markAsLaneMask(Register DstReg) const override;
  void getCandidatesForLowering(
      SmallVectorImpl<MachineInstr *> &Vreg1Phis) const override;
  void collectIncomingValuesFromPhi(
      const MachineInstr *MI,
      SmallVectorImpl<Incoming> &Incomings) const override;
  void replaceDstReg(Register NewReg, Register OldReg,
                     MachineBasicBlock *MBB) override;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           Register DstReg, Register PrevReg,
                           Register CurReg) override;
  void constrainAsLaneMask(Incoming &In) override;

  // Temporal divergence lowering for non-i1 and i1 values respectively.
  bool lowerTemporalDivergence();
  bool lowerTemporalDivergenceI1();
};
83
// Note: B is bound to the whole function here; each use sets its own insert
// point before building instructions.
DivergenceLoweringHelper::DivergenceLoweringHelper(
    MachineFunction *MF, MachineDominatorTree *DT,
    MachinePostDominatorTree *PDT, MachineUniformityInfo *MUI)
    : PhiLoweringHelper(MF, DT, PDT), MUI(MUI), B(*MF) {}
88
89// _(s1) -> SReg_32/64(s1)
90void DivergenceLoweringHelper::markAsLaneMask(Register DstReg) const {
91 assert(MRI->getType(DstReg) == LLT::scalar(1));
92
93 if (MRI->getRegClassOrNull(Reg: DstReg)) {
94 if (MRI->constrainRegClass(Reg: DstReg, RC: ST->getBoolRC()))
95 return;
96 llvm_unreachable("Failed to constrain register class");
97 }
98
99 MRI->setRegClass(Reg: DstReg, RC: ST->getBoolRC());
100}
101
102void DivergenceLoweringHelper::getCandidatesForLowering(
103 SmallVectorImpl<MachineInstr *> &Vreg1Phis) const {
104 LLT S1 = LLT::scalar(SizeInBits: 1);
105
106 // Add divergent i1 G_PHIs to the list. Only consider G_PHI instructions,
107 // not PHI instructions that may have been created by earlier lowering stages
108 // (e.g., lowerTemporalDivergenceI1).
109 for (MachineBasicBlock &MBB : *MF) {
110 for (MachineInstr &MI : MBB.phis()) {
111 if (MI.getOpcode() != TargetOpcode::G_PHI)
112 continue;
113 Register Dst = MI.getOperand(i: 0).getReg();
114 if (MRI->getType(Reg: Dst) == S1 && MUI->isDivergent(V: Dst))
115 Vreg1Phis.push_back(Elt: &MI);
116 }
117 }
118}
119
120void DivergenceLoweringHelper::collectIncomingValuesFromPhi(
121 const MachineInstr *MI, SmallVectorImpl<Incoming> &Incomings) const {
122 for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
123 Incomings.emplace_back(Args: MI->getOperand(i).getReg(),
124 Args: MI->getOperand(i: i + 1).getMBB(), Args: Register());
125 }
126}
127
// Redirect OldReg's uses to the freshly built value by inserting
// "OldReg = COPY NewReg" at the first non-phi position of MBB. OldReg keeps
// its uses; only its definition changes.
void DivergenceLoweringHelper::replaceDstReg(Register NewReg, Register OldReg,
                                             MachineBasicBlock *MBB) {
  BuildMI(*MBB, MBB->getFirstNonPHI(), {}, TII->get(AMDGPU::COPY), OldReg)
      .addReg(NewReg);
}
133
// Copy Reg to new lane mask register, insert a copy after instruction that
// defines Reg while skipping phis if needed.
Register DivergenceLoweringHelper::buildRegCopyToLaneMask(Register Reg) {
  Register LaneMask = createLaneMaskReg(MRI, LaneMaskRegAttrs);
  MachineInstr *Instr = MRI->getVRegDef(Reg);
  MachineBasicBlock *MBB = Instr->getParent();
  // SkipPHIsAndLabels keeps the insertion point legal when the def is a phi.
  B.setInsertPt(*MBB, MBB->SkipPHIsAndLabels(std::next(Instr->getIterator())));
  B.buildCopy(LaneMask, Reg);
  return LaneMask;
}
144
// bb.previous
// %PrevReg = ...
//
// bb.current
// %CurReg = ...
//
// %DstReg - not defined
//
// -> (wave32 example, new registers have sreg_32 reg class and S1 LLT)
//
// bb.previous
// %PrevReg = ...
// %PrevRegCopy:sreg_32(s1) = COPY %PrevReg
//
// bb.current
// %CurReg = ...
// %CurRegCopy:sreg_32(s1) = COPY %CurReg
// ...
// %PrevMaskedReg:sreg_32(s1) = ANDN2 %PrevRegCopy, ExecReg - active lanes 0
// %CurMaskedReg:sreg_32(s1) = AND %ExecReg, CurRegCopy - inactive lanes to 0
// %DstReg:sreg_32(s1) = OR %PrevMaskedReg, CurMaskedReg
//
// DstReg = for active lanes rewrite bit in PrevReg with bit from CurReg
void DivergenceLoweringHelper::buildMergeLaneMasks(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
    Register DstReg, Register PrevReg, Register CurReg) {
  // DstReg = (PrevReg & !EXEC) | (CurReg & EXEC)
  // TODO: check if inputs are constants or results of a compare.

  // The copies are inserted next to PrevReg/CurReg's defs (see
  // buildRegCopyToLaneMask), which moves B's insert point — so it must be
  // re-set below before emitting the merge itself.
  Register PrevRegCopy = buildRegCopyToLaneMask(PrevReg);
  Register CurRegCopy = buildRegCopyToLaneMask(CurReg);
  Register PrevMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
  Register CurMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);

  B.setInsertPt(MBB, I);
  B.buildInstr(AndN2Op, {PrevMaskedReg}, {PrevRegCopy, ExecReg});
  B.buildInstr(AndOp, {CurMaskedReg}, {ExecReg, CurRegCopy});
  B.buildInstr(OrOp, {DstReg}, {PrevMaskedReg, CurMaskedReg});
}
184
// GlobalISel has to constrain S1 incoming taken as-is with lane mask register
// class. Insert a copy of Incoming.Reg to new lane mask inside Incoming.Block,
// Incoming.Reg becomes that new lane mask.
void DivergenceLoweringHelper::constrainAsLaneMask(Incoming &In) {
  // Insert before the block's terminators so the copy executes on the edge
  // into the phi.
  B.setInsertPt(*In.Block, In.Block->getFirstTerminator());

  auto Copy = B.buildCopy(LLT::scalar(1), In.Reg);
  MRI->setRegClass(Copy.getReg(0), ST->getBoolRC());
  In.Reg = Copy.getReg(0);
}
195
196void replaceUsesOfRegInInstWith(Register Reg, MachineInstr *Inst,
197 Register NewReg) {
198 for (MachineOperand &Op : Inst->operands()) {
199 if (Op.isReg() && Op.getReg() == Reg)
200 Op.setReg(NewReg);
201 }
202}
203
// Lower temporal divergence for non-i1 uniform values: a value defined inside
// a cycle with divergent exit but used outside it gets copied to a new
// virtual register right after its def, and the outside use is rewritten to
// the copy. Registers that are divergent, i1-typed, or already lane masks are
// skipped (i1 is handled by lowerTemporalDivergenceI1).
bool DivergenceLoweringHelper::lowerTemporalDivergence() {
  AMDGPU::IntrinsicLaneMaskAnalyzer ILMA(*MF);
  // One copy per source register, shared across all its outside uses.
  DenseMap<Register, Register> TDCache;

  for (auto [Reg, UseInst, _] : MUI->getTemporalDivergenceList()) {
    if (MRI->getType(Reg) == LLT::scalar(1) || MUI->isDivergent(Reg) ||
        ILMA.isS32S64LaneMask(Reg))
      continue;

    // Reuse an existing copy if this register was already lowered.
    Register CachedTDCopy = TDCache.lookup(Reg);
    if (CachedTDCopy) {
      replaceUsesOfRegInInstWith(Reg, UseInst, CachedTDCopy);
      continue;
    }

    MachineInstr *Inst = MRI->getVRegDef(Reg);
    MachineBasicBlock *MBB = Inst->getParent();
    B.setInsertPt(*MBB, MBB->SkipPHIsAndLabels(std::next(Inst->getIterator())));

    // The COPY carries an implicit use of exec — presumably to tie it to the
    // exec mask at the def point and keep it from being moved; TODO confirm.
    Register VgprReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
    B.buildInstr(AMDGPU::COPY, {VgprReg}, {Reg})
        .addUse(ExecReg, RegState::Implicit);

    replaceUsesOfRegInInstWith(Reg, UseInst, VgprReg);
    TDCache[Reg] = VgprReg;
  }
  // NOTE(review): copies may have been inserted above, yet this always
  // reports "no change" to the caller's Changed flag — confirm intended.
  return false;
}
232
// Lower temporal divergence for i1 values: the lane mask must be re-merged
// with exec on every iteration of the cycle, so build a merged mask via
// MachineSSAUpdater and rewrite the outside-the-cycle uses to it.
bool DivergenceLoweringHelper::lowerTemporalDivergenceI1() {
  MachineRegisterInfo::VRegAttrs BoolS1 = {ST->getBoolRC(), LLT::scalar(1)};
  initializeLaneMaskRegisterAttributes(BoolS1);
  MachineSSAUpdater SSAUpdater(*MF);

  // In case of use outside multiple nested cycles or multiple uses we only
  // need to merge lane mask across largest relevant cycle.
  // Map: Reg -> (largest relevant cycle, merged mask register).
  SmallDenseMap<Register, std::pair<const MachineCycle *, Register>> LRCCache;
  for (auto [Reg, UseInst, LRC] : MUI->getTemporalDivergenceList()) {
    if (MRI->getType(Reg) != LLT::scalar(1))
      continue;

    auto [LRCCacheIter, RegNotCached] = LRCCache.try_emplace(Reg);
    auto &CycleMergedMask = LRCCacheIter->getSecond();
    const MachineCycle *&CachedLRC = CycleMergedMask.first;
    // Keep the outermost cycle seen so far for this register.
    if (RegNotCached || LRC->contains(CachedLRC)) {
      CachedLRC = LRC;
    }
  }

  for (auto &LRCCacheEntry : LRCCache) {
    Register Reg = LRCCacheEntry.first;
    auto &CycleMergedMask = LRCCacheEntry.getSecond();
    const MachineCycle *Cycle = CycleMergedMask.first;

    Register MergedMask = MRI->createVirtualRegister(BoolS1);
    SSAUpdater.Initialize(MergedMask);

    MachineBasicBlock *MBB = MRI->getVRegDef(Reg)->getParent();
    SSAUpdater.AddAvailableValue(MBB, MergedMask);

    // Give the SSA updater a definition on every path into the cycle. The
    // value from outside is irrelevant (inactive lanes get overwritten on the
    // first merge), so an IMPLICIT_DEF in each outside predecessor suffices.
    for (auto Entry : Cycle->getEntries()) {
      for (MachineBasicBlock *Pred : Entry->predecessors()) {
        if (!Cycle->contains(Pred)) {
          B.setInsertPt(*Pred, Pred->getFirstTerminator());
          auto ImplDef = B.buildInstr(AMDGPU::IMPLICIT_DEF, {BoolS1}, {});
          SSAUpdater.AddAvailableValue(Pred, ImplDef.getReg(0));
        }
      }
    }

    // Merge the current iteration's value into the running mask at the end of
    // the defining block; SSAUpdater wires up the cross-iteration phi.
    buildMergeLaneMasks(*MBB, MBB->getFirstTerminator(), {}, MergedMask,
                        SSAUpdater.GetValueInMiddleOfBlock(MBB), Reg);

    CycleMergedMask.second = MergedMask;
  }

  // Second walk: point every outside use at the merged mask for its register.
  for (auto [Reg, UseInst, Cycle] : MUI->getTemporalDivergenceList()) {
    if (MRI->getType(Reg) != LLT::scalar(1))
      continue;

    replaceUsesOfRegInInstWith(Reg, UseInst, LRCCache.lookup(Reg).second);
  }

  // NOTE(review): instructions may have been inserted, yet "no change" is
  // reported to the caller's Changed flag — confirm intended.
  return false;
}
289
290} // End anonymous namespace.
291
// Legacy pass-manager registration, pass ID, and factory.
INITIALIZE_PASS_BEGIN(AMDGPUGlobalISelDivergenceLowering, DEBUG_TYPE,
                      "AMDGPU GlobalISel divergence lowering", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineUniformityAnalysisPass)
INITIALIZE_PASS_END(AMDGPUGlobalISelDivergenceLowering, DEBUG_TYPE,
                    "AMDGPU GlobalISel divergence lowering", false, false)

char AMDGPUGlobalISelDivergenceLowering::ID = 0;

// Exported ID so other passes can declare a dependency on this one.
char &llvm::AMDGPUGlobalISelDivergenceLoweringID =
    AMDGPUGlobalISelDivergenceLowering::ID;

FunctionPass *llvm::createAMDGPUGlobalISelDivergenceLoweringPass() {
  return new AMDGPUGlobalISelDivergenceLowering();
}
308
// Pass entry point: run the three lowering stages in dependency order on MF.
bool AMDGPUGlobalISelDivergenceLowering::runOnMachineFunction(
    MachineFunction &MF) {
  MachineDominatorTree &DT =
      getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
  MachinePostDominatorTree &PDT =
      getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
  MachineUniformityInfo &MUI =
      getAnalysis<MachineUniformityAnalysisPass>().getUniformityInfo();

  DivergenceLoweringHelper Helper(&MF, &DT, &PDT, &MUI);

  bool Changed = false;
  // Temporal divergence lowering needs to inspect list of instructions used
  // outside cycle with divergent exit provided by uniformity analysis. Uniform
  // instructions from the list require lowering, no instruction is deleted.
  // Thus it needs to be run before lowerPhis that deletes phis that require
  // lowering and replaces them with new instructions.

  // Non-i1 temporal divergence lowering.
  Changed |= Helper.lowerTemporalDivergence();
  // This covers both uniform and divergent i1s. Lane masks are in sgpr and need
  // to be updated in each iteration.
  Changed |= Helper.lowerTemporalDivergenceI1();
  // Temporal divergence lowering of divergent i1 phi used outside of the cycle
  // could also be handled by lowerPhis but we do it in lowerTempDivergenceI1
  // since in some case lowerPhis does unnecessary lane mask merging.
  Changed |= Helper.lowerPhis();
  return Changed;
}
338