//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
//
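// For illustration only (a simplified sketch, not actual pass output), a
// vreg_1 value that crosses blocks, e.g.
//
//   bb.1:
//     %a:vreg_1 = COPY %cc:sreg_64_xexec
//   bb.2:
//     %b:vreg_1 = PHI %a:vreg_1, %bb.1, %c:vreg_1, %bb.0
//
// ends up in a 32/64-bit scalar (lane mask) register, and the PHI is replaced
// by lane-mask merges built in the incoming blocks (see lowerPhis and
// buildMergeLaneMasks below).
//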
//===----------------------------------------------------------------------===//

#include "SILowerI1Copies.h"
#include "AMDGPU.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/CGPassBuilderOption.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static Register
insertUndefLaneMask(MachineBasicBlock *MBB, MachineRegisterInfo *MRI,
                    MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs);

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

class Vreg1LoweringHelper : public PhiLoweringHelper {
public:
  Vreg1LoweringHelper(MachineFunction *MF, MachineDominatorTree *DT,
                      MachinePostDominatorTree *PDT);

private:
  DenseSet<Register> ConstrainRegs;

public:
  void markAsLaneMask(Register DstReg) const override;
  void getCandidatesForLowering(
      SmallVectorImpl<MachineInstr *> &Vreg1Phis) const override;
  void collectIncomingValuesFromPhi(
      const MachineInstr *MI,
      SmallVectorImpl<Incoming> &Incomings) const override;
  void replaceDstReg(Register NewReg, Register OldReg,
                     MachineBasicBlock *MBB) override;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           Register DstReg, Register PrevReg,
                           Register CurReg) override;
  void constrainAsLaneMask(Incoming &In) override;

  bool lowerCopiesFromI1();
  bool lowerCopiesToI1();
  bool cleanConstrainRegs(bool Changed);
  bool isVreg1(Register Reg) const {
    return Reg.isVirtual() && MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
  }
};

Vreg1LoweringHelper::Vreg1LoweringHelper(MachineFunction *MF,
                                         MachineDominatorTree *DT,
                                         MachinePostDominatorTree *PDT)
    : PhiLoweringHelper(MF, DT, PDT) {}

bool Vreg1LoweringHelper::cleanConstrainRegs(bool Changed) {
  assert(Changed || ConstrainRegs.empty());
  for (Register Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
  ConstrainRegs.clear();

  return Changed;
}

/// Helper class that analyzes the relationship between the incoming values of
/// a phi in the control flow graph to determine where an incoming value can
/// simply be taken as a scalar lane mask as-is, and where it needs to be
/// merged with another, previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
///
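/// As an illustration (not taken from a particular test case), consider a phi
/// in D with incoming values from B and C:
///
///      A
///     / \
///    B   C
///     \ /
///      D
///
/// If the wave cannot re-enter B or C before it reconverges in D, both
/// incoming values can be used as lane masks as-is. If instead B ends in a
/// divergent branch whose other successor can reach C, the wave may arrive in
/// C carrying lanes whose phi value was already produced in B, so the incoming
/// value from C has to be merged with a previously defined lane mask.
///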
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.
///
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;
  const SIInstrInfo *TII;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  DenseMap<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT, const SIInstrInfo *TII)
      : PDT(PDT), TII(TII) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  void analyze(MachineBasicBlock &DefBlock, ArrayRef<Incoming> Incomings) {
    assert(Stack.empty());
    ReachableMap.clear();
    ReachableOrdered.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);
    ReachableOrdered.push_back(&DefBlock);

    for (auto Incoming : Incomings) {
      MachineBasicBlock *MBB = Incoming.Block;
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);
      ReachableOrdered.push_back(MBB);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      if (TII->hasDivergentBranch(MBB) && PDT.dominates(&DefBlock, MBB))
        append_range(Stack, MBB->successors());
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!ReachableMap.try_emplace(MBB, false).second)
        continue;
      ReachableOrdered.push_back(MBB);

      append_range(Stack, MBB->successors());
    }

    for (MachineBasicBlock *MBB : ReachableOrdered) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        ReachableMap[MBB] = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (!llvm::is_contained(Predecessors, UnreachablePred))
            Predecessors.push_back(UnreachablePred);
        }
      }
      Stack.clear();
    }
  }
};

/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
///
/// TODO: This rule is conservative because it does not check whether the
/// relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.
///
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def block,
  // level 1 are all blocks reachable including but not going through the def
  // block's IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      MachineRegisterInfo &MRI,
                      MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs,
                      ArrayRef<Incoming> Incomings = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (auto &Incoming : Incomings)
      Dom = DT.findNearestCommonDominator(Dom, Incoming.Block);

    if (!inLoopLevel(*Dom, LoopLevel, Incomings)) {
      SSAUpdater.AddAvailableValue(
          Dom, insertUndefLaneMask(Dom, &MRI, LaneMaskRegAttrs));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Incomings))
          SSAUpdater.AddAvailableValue(
              Pred, insertUndefLaneMask(Pred, &MRI, LaneMaskRegAttrs));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<Incoming> Incomings) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    for (auto &Incoming : Incomings)
      if (Incoming.Block == &MBB)
        return true;

    return false;
  }

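  // Expand the visited region by one level: advance VisitedPostDom to its
  // immediate post-dominator and traverse the newly reachable blocks,
  // updating Visited, the common dominator of this level, and FoundLoopLevel
  // whenever a back edge into the def block is seen.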
  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                    false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

Register
llvm::createLaneMaskReg(MachineRegisterInfo *MRI,
                        MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs) {
  return MRI->createVirtualRegister(LaneMaskRegAttrs);
}

static Register
insertUndefLaneMask(MachineBasicBlock *MBB, MachineRegisterInfo *MRI,
                    MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs) {
  MachineFunction &MF = *MBB->getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  Register UndefReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
  BuildMI(*MBB, MBB->getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}

/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
/// occur around inline assembly. We do this first, before vreg_1 registers
/// are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
  // Only need to run this in SelectionDAG path.
  if (TheMF.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return false;

  Vreg1LoweringHelper Helper(
      &TheMF, &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree(),
      &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree());

  bool Changed = false;
  Changed |= Helper.lowerCopiesFromI1();
  Changed |= Helper.lowerPhis();
  Changed |= Helper.lowerCopiesToI1();
  return Helper.cleanConstrainRegs(Changed);
}

#ifndef NDEBUG
static bool isVRegCompatibleReg(const SIRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                Register Reg) {
  unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
  return Size == 1 || Size == 32;
}
#endif

bool Vreg1LoweringHelper::lowerCopiesFromI1() {
  bool Changed = false;
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      if (!isVreg1(SrcReg))
        continue;

      if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
        continue;

      Changed = true;

      // Copy into a 32-bit vector register.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      DebugLoc DL = MI.getDebugLoc();

      assert(isVRegCompatibleReg(TII->getRegisterInfo(), *MRI, DstReg));
      assert(!MI.getOperand(0).getSubReg());

      ConstrainRegs.insert(SrcReg);
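      // V_CNDMASK_B32 selects src1 (-1) for lanes whose bit is set in the
      // SrcReg lane mask and src0 (0) for all other lanes; the extra zero
      // immediates are the source-modifier operands of the _e64 encoding.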
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
  return Changed;
}

PhiLoweringHelper::PhiLoweringHelper(MachineFunction *MF,
                                     MachineDominatorTree *DT,
                                     MachinePostDominatorTree *PDT)
    : MF(MF), DT(DT), PDT(PDT) {
  MRI = &MF->getRegInfo();

  ST = &MF->getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  IsWave32 = ST->isWave32();

  if (IsWave32) {
    ExecReg = AMDGPU::EXEC_LO;
    MovOp = AMDGPU::S_MOV_B32;
    AndOp = AMDGPU::S_AND_B32;
    OrOp = AMDGPU::S_OR_B32;
    XorOp = AMDGPU::S_XOR_B32;
    AndN2Op = AMDGPU::S_ANDN2_B32;
    OrN2Op = AMDGPU::S_ORN2_B32;
  } else {
    ExecReg = AMDGPU::EXEC;
    MovOp = AMDGPU::S_MOV_B64;
    AndOp = AMDGPU::S_AND_B64;
    OrOp = AMDGPU::S_OR_B64;
    XorOp = AMDGPU::S_XOR_B64;
    AndN2Op = AMDGPU::S_ANDN2_B64;
    OrN2Op = AMDGPU::S_ORN2_B64;
  }
}

bool PhiLoweringHelper::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT, TII);
  SmallVector<MachineInstr *, 4> Vreg1Phis;
  SmallVector<Incoming, 4> Incomings;

  getCandidatesForLowering(Vreg1Phis);
  if (Vreg1Phis.empty())
    return false;

  DT->getBase().updateDFSNumbers();
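  // The DFS numbers computed here are used below to order each phi's incoming
  // blocks by dominance.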
  MachineBasicBlock *PrevMBB = nullptr;
  for (MachineInstr *MI : Vreg1Phis) {
    MachineBasicBlock &MBB = *MI->getParent();
    if (&MBB != PrevMBB) {
      LF.initialize(MBB);
      PrevMBB = &MBB;
    }

    LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);

    Register DstReg = MI->getOperand(0).getReg();
    markAsLaneMask(DstReg);
    initializeLaneMaskRegisterAttributes(DstReg);

    collectIncomingValuesFromPhi(MI, Incomings);

    // Sort the incomings such that incoming values that dominate other
    // incoming values are sorted earlier. This allows us to do some amount of
    // on-the-fly constant folding.
    // Incomings with a smaller DFSNumIn go first; DFSNumIn is 0 for the entry
    // block.
    llvm::sort(Incomings, [this](Incoming LHS, Incoming RHS) {
      return DT->getNode(LHS.Block)->getDFSNumIn() <
             DT->getNode(RHS.Block)->getDFSNumIn();
    });

#ifndef NDEBUG
    PhiRegisters.insert(DstReg);
#endif

    // Phis in a loop that are observed outside the loop receive a simple but
    // conservatively correct treatment.
    std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
    for (MachineInstr &Use : MRI->use_instructions(DstReg))
      DomBlocks.push_back(Use.getParent());

    MachineBasicBlock *PostDomBound =
        PDT->findNearestCommonDominator(DomBlocks);

    // FIXME: This fails to find irreducible cycles. If we have a def (other
    // than a constant) in a pair of blocks that end up looping back to each
    // other, it will be mishandled. Due to structurization this shouldn't
    // occur in practice.
    unsigned FoundLoopLevel = LF.findLoop(PostDomBound);

    SSAUpdater.Initialize(DstReg);

    if (FoundLoopLevel) {
      LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs,
                        Incomings);

      for (auto &Incoming : Incomings) {
        Incoming.UpdatedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
        SSAUpdater.AddAvailableValue(Incoming.Block, Incoming.UpdatedReg);
      }

      for (auto &Incoming : Incomings) {
        MachineBasicBlock &IMBB = *Incoming.Block;
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, Incoming.UpdatedReg,
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), Incoming.Reg);
      }
    } else {
      // The phi is not observed from outside a loop. Use a more accurate
      // lowering.
      PIA.analyze(MBB, Incomings);

      for (MachineBasicBlock *MBB : PIA.predecessors())
        SSAUpdater.AddAvailableValue(
            MBB, insertUndefLaneMask(MBB, MRI, LaneMaskRegAttrs));

      for (auto &Incoming : Incomings) {
        MachineBasicBlock &IMBB = *Incoming.Block;
        if (PIA.isSource(IMBB)) {
          constrainAsLaneMask(Incoming);
          SSAUpdater.AddAvailableValue(&IMBB, Incoming.Reg);
        } else {
          Incoming.UpdatedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
          SSAUpdater.AddAvailableValue(&IMBB, Incoming.UpdatedReg);
        }
      }

      for (auto &Incoming : Incomings) {
        if (!Incoming.UpdatedReg.isValid())
          continue;

        MachineBasicBlock &IMBB = *Incoming.Block;
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, Incoming.UpdatedReg,
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), Incoming.Reg);
      }
    }

    Register NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
    if (NewReg != DstReg) {
      replaceDstReg(NewReg, DstReg, &MBB);
      MI->eraseFromParent();
    }

    Incomings.clear();
  }
  return true;
}

bool Vreg1LoweringHelper::lowerCopiesToI1() {
  bool Changed = false;
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      Changed = true;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      markAsLaneMask(DstReg);
      initializeLaneMaskRegisterAttributes(DstReg);

      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      DebugLoc DL = MI.getDebugLoc();
      Register SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

      if (!SrcReg.isVirtual() || (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
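        // The source is a plain 32-bit value (e.g. a VGPR); convert it to a
        // lane mask by comparing against zero, so that each lane's bit
        // becomes (SrcReg != 0).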
        Register TmpReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addReg(SrcReg)
            .addImm(0);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      } else {
        // SrcReg needs to be live beyond the copy.
        MI.getOperand(1).setIsKill(false);
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
      for (MachineInstr &Use : MRI->use_instructions(DstReg))
        DomBlocks.push_back(Use.getParent());

      MachineBasicBlock *PostDomBound =
          PDT->findNearestCommonDominator(DomBlocks);
      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
  return Changed;
}

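/// Check whether \p Reg is a constant lane mask, looking through plain
/// copies. Returns true for an S_MOV of 0 or -1 (setting \p Val accordingly)
/// and for IMPLICIT_DEF, in which case \p Val is left untouched because any
/// value is acceptable.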
bool PhiLoweringHelper::isConstantLaneMask(Register Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() == AMDGPU::IMPLICIT_DEF)
      return true;

    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!Reg.isVirtual())
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != MovOp)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}

static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}

/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
MachineBasicBlock::iterator
PhiLoweringHelper::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY.
  llvm_unreachable("SCC used by terminator but no def in block");
}

// VReg_1 -> SReg_32 or SReg_64
void Vreg1LoweringHelper::markAsLaneMask(Register DstReg) const {
  MRI->setRegClass(DstReg, ST->getBoolRC());
}

void Vreg1LoweringHelper::getCandidatesForLowering(
    SmallVectorImpl<MachineInstr *> &Vreg1Phis) const {
  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB.phis()) {
      if (isVreg1(MI.getOperand(0).getReg()))
        Vreg1Phis.push_back(&MI);
    }
  }
}

void Vreg1LoweringHelper::collectIncomingValuesFromPhi(
    const MachineInstr *MI, SmallVectorImpl<Incoming> &Incomings) const {
  for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
    assert(i + 1 < MI->getNumOperands());
    Register IncomingReg = MI->getOperand(i).getReg();
    MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
    MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

    if (IncomingDef->getOpcode() == AMDGPU::COPY) {
      IncomingReg = IncomingDef->getOperand(1).getReg();
      assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
      assert(!IncomingDef->getOperand(1).getSubReg());
    } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
      continue;
    } else {
      assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
    }

    Incomings.emplace_back(IncomingReg, IncomingMBB, Register());
  }
}

void Vreg1LoweringHelper::replaceDstReg(Register NewReg, Register OldReg,
                                        MachineBasicBlock *MBB) {
  MRI->replaceRegWith(NewReg, OldReg);
}

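/// Build the merged lane mask DstReg = (PrevReg & ~EXEC) | (CurReg & EXEC) at
/// insertion point \p I, folding the bit operations away when either input is
/// known to be a constant (all-zeros or all-ones) lane mask.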
void Vreg1LoweringHelper::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator I,
                                              const DebugLoc &DL,
                                              Register DstReg, Register PrevReg,
                                              Register CurReg) {
  bool PrevVal = false;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal = false;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
    } else {
      BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
          .addReg(ExecReg)
          .addImm(-1);
    }
    return;
  }

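  // At least one input is not a constant: mask the non-constant inputs with
  // EXEC (PrevReg & ~EXEC, CurReg & EXEC) unless the masking folds away, and
  // combine the two halves below.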
  Register PrevMaskedReg;
  Register CurMaskedReg;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
      BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(ExecReg);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
      BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
          .addReg(CurReg)
          .addReg(ExecReg);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
        .addReg(CurMaskedReg)
        .addReg(ExecReg);
  } else {
    BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
  }
}

void Vreg1LoweringHelper::constrainAsLaneMask(Incoming &In) {}
918 | |