//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
//
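// For example (illustrative sketch; register numbers and classes are
// hypothetical), a value that crosses a block boundary such as
//
//   bb.0:
//     %0:vreg_1 = COPY %1:sreg_64  ; %1 holds a lane mask, e.g. a VCC result
//   bb.1:
//     %2:sreg_64 = COPY %0:vreg_1
//
// is rewritten so that %0 itself becomes a lane mask in a scalar register,
// with merges across divergent control flow expressed through EXEC-masked
// bitwise operations (see buildMergeLaneMasks below).
//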
//===----------------------------------------------------------------------===//

#include "SILowerI1Copies.h"
#include "AMDGPU.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/InitializePasses.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static Register
insertUndefLaneMask(MachineBasicBlock *MBB, MachineRegisterInfo *MRI,
                    MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs);
namespace {

class Vreg1LoweringHelper : public PhiLoweringHelper {
public:
  Vreg1LoweringHelper(MachineFunction *MF, MachineDominatorTree *DT,
                      MachinePostDominatorTree *PDT);

private:
  DenseSet<Register> ConstrainRegs;

public:
  void markAsLaneMask(Register DstReg) const override;
  void getCandidatesForLowering(
      SmallVectorImpl<MachineInstr *> &Vreg1Phis) const override;
  void collectIncomingValuesFromPhi(
      const MachineInstr *MI,
      SmallVectorImpl<Incoming> &Incomings) const override;
  void replaceDstReg(Register NewReg, Register OldReg,
                     MachineBasicBlock *MBB) override;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           Register DstReg, Register PrevReg,
                           Register CurReg) override;
  void constrainAsLaneMask(Incoming &In) override;

  bool lowerCopiesFromI1();
  bool lowerCopiesToI1();
  bool cleanConstrainRegs(bool Changed);
  bool isVreg1(Register Reg) const {
    return Reg.isVirtual() && MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
  }
};

Vreg1LoweringHelper::Vreg1LoweringHelper(MachineFunction *MF,
                                         MachineDominatorTree *DT,
                                         MachinePostDominatorTree *PDT)
    : PhiLoweringHelper(MF, DT, PDT) {}

bool Vreg1LoweringHelper::cleanConstrainRegs(bool Changed) {
  assert(Changed || ConstrainRegs.empty());
  for (Register Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, TII->getRegisterInfo().getWaveMaskRegClass());
  ConstrainRegs.clear();

  return Changed;
}
/// Helper class that analyzes the relationship between incoming values of a
/// phi in the control flow graph to determine where an incoming value can
/// simply be taken as a scalar lane mask as-is, and where it needs to be
/// merged with another, previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
///
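/// Illustrative sketch (hypothetical CFG): suppose block B ends in a
/// divergent branch with successors C and D, and a phi in D has incoming
/// values from both B and C. Starting from B, the wave may reach C before
/// entering D, so C has a predecessor (B) inside the reachable set: the
/// value incoming from C must be merged with the lane mask defined in B.
/// B itself has no predecessor in the set and is a source whose incoming
/// value can be used as-is.
///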
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;
  const SIInstrInfo *TII;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  MapVector<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT, const SIInstrInfo *TII)
      : PDT(PDT), TII(TII) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  void analyze(MachineBasicBlock &DefBlock, ArrayRef<Incoming> Incomings) {
    assert(Stack.empty());
    ReachableMap.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);

    for (auto Incoming : Incomings) {
      MachineBasicBlock *MBB = Incoming.Block;
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      if (TII->hasDivergentBranch(MBB) && PDT.dominates(&DefBlock, MBB))
        append_range(Stack, MBB->successors());
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (ReachableMap.try_emplace(MBB, false).second)
        append_range(Stack, MBB->successors());
    }

    for (auto &[MBB, Reachable] : ReachableMap) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        Reachable = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (!llvm::is_contained(Predecessors, UnreachablePred))
            Predecessors.push_back(UnreachablePred);
        }
      }
      Stack.clear();
    }
  }
};

/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
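///
/// In the diagram above, a def in B that is used in C has C as that nearest
/// common post-dominator, and a backward edge into B (through A) is reachable
/// from B without passing through C, so such a def requires the bitwise
/// lowering.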
///
/// TODO: This rule is conservative because it does not check whether the
///       relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
///
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def block,
  // level 1 is all blocks reachable up to, but not through, the def block's
  // IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      MachineRegisterInfo &MRI,
                      MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs,
                      ArrayRef<Incoming> Incomings = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (auto &Incoming : Incomings)
      Dom = DT.findNearestCommonDominator(Dom, Incoming.Block);

    if (!inLoopLevel(*Dom, LoopLevel, Incomings)) {
      SSAUpdater.AddAvailableValue(
          Dom, insertUndefLaneMask(Dom, &MRI, LaneMaskRegAttrs));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Incomings))
          SSAUpdater.AddAvailableValue(
              Pred, insertUndefLaneMask(Pred, &MRI, LaneMaskRegAttrs));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<Incoming> Incomings) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    for (auto &Incoming : Incomings)
      if (Incoming.Block == &MBB)
        return true;

    return false;
  }

  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.

Register
llvm::createLaneMaskReg(MachineRegisterInfo *MRI,
                        MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs) {
  return MRI->createVirtualRegister(LaneMaskRegAttrs);
}

static Register
insertUndefLaneMask(MachineBasicBlock *MBB, MachineRegisterInfo *MRI,
                    MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs) {
  MachineFunction &MF = *MBB->getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  Register UndefReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
  BuildMI(*MBB, MBB->getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}

#ifndef NDEBUG
static bool isVRegCompatibleReg(const SIRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                Register Reg) {
  unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
  return Size == 1 || Size == 32;
}
#endif
bool Vreg1LoweringHelper::lowerCopiesFromI1() {
  bool Changed = false;
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      if (!isVreg1(SrcReg))
        continue;

      if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
        continue;

      Changed = true;

      // Copy into a 32-bit vector register.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      const DebugLoc &DL = MI.getDebugLoc();

      assert(isVRegCompatibleReg(TII->getRegisterInfo(), *MRI, DstReg));
      assert(!MI.getOperand(0).getSubReg());

      ConstrainRegs.insert(SrcReg);
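      // Per lane: DstReg = SrcReg[lane] ? -1 : 0. V_CNDMASK_B32 selects its
      // src1 operand (-1) for lanes whose bit is set in the SrcReg lane mask
      // and src0 (0) otherwise.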
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
  return Changed;
}

PhiLoweringHelper::PhiLoweringHelper(MachineFunction *MF,
                                     MachineDominatorTree *DT,
                                     MachinePostDominatorTree *PDT)
    : MF(MF), DT(DT), PDT(PDT), ST(&MF->getSubtarget<GCNSubtarget>()),
      LMC(&AMDGPU::LaneMaskConstants::get(*ST)) {
  MRI = &MF->getRegInfo();

  TII = ST->getInstrInfo();
}

bool PhiLoweringHelper::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT, TII);
  SmallVector<MachineInstr *, 4> Vreg1Phis;
  SmallVector<Incoming, 4> Incomings;

  getCandidatesForLowering(Vreg1Phis);
  if (Vreg1Phis.empty())
    return false;

  DT->updateDFSNumbers();
  MachineBasicBlock *PrevMBB = nullptr;
  for (MachineInstr *MI : Vreg1Phis) {
    MachineBasicBlock &MBB = *MI->getParent();
    if (&MBB != PrevMBB) {
      LF.initialize(MBB);
      PrevMBB = &MBB;
    }

    LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);

    Register DstReg = MI->getOperand(0).getReg();
    markAsLaneMask(DstReg);
    initializeLaneMaskRegisterAttributes(DstReg);

    collectIncomingValuesFromPhi(MI, Incomings);

    // Sort the incomings such that incoming values that dominate other
    // incoming values are sorted earlier. This allows us to do some amount of
    // on-the-fly constant folding. Incomings with a smaller DFSNumIn go first;
    // DFSNumIn is 0 for the entry block.
    llvm::sort(Incomings, [this](Incoming LHS, Incoming RHS) {
      return DT->getNode(LHS.Block)->getDFSNumIn() <
             DT->getNode(RHS.Block)->getDFSNumIn();
    });

#ifndef NDEBUG
    PhiRegisters.insert(DstReg);
#endif

    // Phis in a loop that are observed outside the loop receive a simple but
    // conservatively correct treatment.
    std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
    for (MachineInstr &Use : MRI->use_instructions(DstReg))
      DomBlocks.push_back(Use.getParent());

    MachineBasicBlock *PostDomBound =
        PDT->findNearestCommonDominator(DomBlocks);

    // FIXME: This fails to find irreducible cycles. If we have a def (other
    // than a constant) in a pair of blocks that end up looping back to each
    // other, it will be mishandled. Due to structurization this shouldn't
    // occur in practice.
    unsigned FoundLoopLevel = LF.findLoop(PostDomBound);

    SSAUpdater.Initialize(DstReg);

    if (FoundLoopLevel) {
      LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs,
                        Incomings);

      for (auto &Incoming : Incomings) {
        Incoming.UpdatedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
        SSAUpdater.AddAvailableValue(Incoming.Block, Incoming.UpdatedReg);
      }

      for (auto &Incoming : Incomings) {
        MachineBasicBlock &IMBB = *Incoming.Block;
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, Incoming.UpdatedReg,
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), Incoming.Reg);
      }
    } else {
      // The phi is not observed from outside a loop. Use a more accurate
      // lowering.
      PIA.analyze(MBB, Incomings);

      for (MachineBasicBlock *MBB : PIA.predecessors())
        SSAUpdater.AddAvailableValue(
            MBB, insertUndefLaneMask(MBB, MRI, LaneMaskRegAttrs));

      for (auto &Incoming : Incomings) {
        MachineBasicBlock &IMBB = *Incoming.Block;
        if (PIA.isSource(IMBB)) {
          constrainAsLaneMask(Incoming);
          SSAUpdater.AddAvailableValue(&IMBB, Incoming.Reg);
        } else {
          Incoming.UpdatedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
          SSAUpdater.AddAvailableValue(&IMBB, Incoming.UpdatedReg);
        }
      }

      for (auto &Incoming : Incomings) {
        if (!Incoming.UpdatedReg.isValid())
          continue;

        MachineBasicBlock &IMBB = *Incoming.Block;
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, Incoming.UpdatedReg,
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), Incoming.Reg);
      }
    }

    Register NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
    if (NewReg != DstReg) {
      replaceDstReg(NewReg, DstReg, &MBB);
      MI->eraseFromParent();
    }

    Incomings.clear();
  }
  return true;
}

bool Vreg1LoweringHelper::lowerCopiesToI1() {
  bool Changed = false;
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      Changed = true;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      markAsLaneMask(DstReg);
      initializeLaneMaskRegisterAttributes(DstReg);

      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      const DebugLoc &DL = MI.getDebugLoc();
      Register SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

      if (!SrcReg.isVirtual() || (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
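        // Convert a plain 32-bit value into a lane mask: a lane's bit is set
        // iff the lane's SrcReg value is non-zero.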
        Register TmpReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addReg(SrcReg)
            .addImm(0);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      } else {
        // SrcReg needs to stay live beyond the copy.
        MI.getOperand(1).setIsKill(false);
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
      for (MachineInstr &Use : MRI->use_instructions(DstReg))
        DomBlocks.push_back(Use.getParent());

      MachineBasicBlock *PostDomBound =
          PDT->findNearestCommonDominator(DomBlocks);
      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
  return Changed;
}

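/// Determine whether \p Reg is a constant lane mask, looking through any
/// chain of plain copies to the defining instruction. An IMPLICIT_DEF counts
/// as a constant (without setting \p Val); otherwise the def must move the
/// immediate 0 (all lanes false, \p Val = false) or -1 (all lanes true,
/// \p Val = true).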
bool PhiLoweringHelper::isConstantLaneMask(Register Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() == AMDGPU::IMPLICIT_DEF)
      return true;

    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!Reg.isVirtual())
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != LMC->MovOpc)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}

static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}

/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
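///
/// If a terminator reads SCC, back up to just before the last instruction
/// that defines SCC: the SALU lane-mask instructions inserted there clobber
/// SCC themselves, so they must execute before SCC is (re)defined for the
/// terminator.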
MachineBasicBlock::iterator
PhiLoweringHelper::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY
  llvm_unreachable("SCC used by terminator but no def in block");
}

// VReg_1 -> SReg_32 or SReg_64
void Vreg1LoweringHelper::markAsLaneMask(Register DstReg) const {
  MRI->setRegClass(DstReg, ST->getBoolRC());
}

void Vreg1LoweringHelper::getCandidatesForLowering(
    SmallVectorImpl<MachineInstr *> &Vreg1Phis) const {
  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB.phis()) {
      if (isVreg1(MI.getOperand(0).getReg()))
        Vreg1Phis.push_back(&MI);
    }
  }
}

void Vreg1LoweringHelper::collectIncomingValuesFromPhi(
    const MachineInstr *MI, SmallVectorImpl<Incoming> &Incomings) const {
  for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
    assert(i + 1 < MI->getNumOperands());
    Register IncomingReg = MI->getOperand(i).getReg();
    MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
    MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

    if (IncomingDef->getOpcode() == AMDGPU::COPY) {
      IncomingReg = IncomingDef->getOperand(1).getReg();
      assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
      assert(!IncomingDef->getOperand(1).getSubReg());
    } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
      continue;
    } else {
      assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
    }

    Incomings.emplace_back(IncomingReg, IncomingMBB, Register());
  }
}

void Vreg1LoweringHelper::replaceDstReg(Register NewReg, Register OldReg,
                                        MachineBasicBlock *MBB) {
  MRI->replaceRegWith(NewReg, OldReg);
}

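// Merge the lane masks PrevReg and CurReg into DstReg. Semantically this
// computes
//
//   DstReg = (PrevReg & ~EXEC) | (CurReg & EXEC)
//
// i.e. lanes that are currently active take the new value while inactive
// lanes keep the previously accumulated one. The constant cases below fold
// this expression; for example, PrevReg == 0 reduces it to CurReg & EXEC,
// and CurReg == -1 reduces it to PrevReg | EXEC.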
void Vreg1LoweringHelper::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator I,
                                              const DebugLoc &DL,
                                              Register DstReg, Register PrevReg,
                                              Register CurReg) {
  bool PrevVal = false;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal = false;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(LMC->ExecReg);
    } else {
      BuildMI(MBB, I, DL, TII->get(LMC->XorOpc), DstReg)
          .addReg(LMC->ExecReg)
          .addImm(-1);
    }
    return;
  }

  Register PrevMaskedReg;
  Register CurMaskedReg;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
      BuildMI(MBB, I, DL, TII->get(LMC->AndN2Opc), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(LMC->ExecReg);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
      BuildMI(MBB, I, DL, TII->get(LMC->AndOpc), CurMaskedReg)
          .addReg(CurReg)
          .addReg(LMC->ExecReg);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(LMC->OrN2Opc), DstReg)
        .addReg(CurMaskedReg)
        .addReg(LMC->ExecReg);
  } else {
    BuildMI(MBB, I, DL, TII->get(LMC->OrOpc), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : LMC->ExecReg);
  }
}

void Vreg1LoweringHelper::constrainAsLaneMask(Incoming &In) {}

/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as they
/// can occur around inline assembly. We do this first, before vreg_1
/// registers are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
static bool runFixI1Copies(MachineFunction &MF, MachineDominatorTree &MDT,
                           MachinePostDominatorTree &MPDT) {
  // This only needs to run for the SelectionDAG path.
  if (MF.getProperties().hasSelected())
    return false;

  Vreg1LoweringHelper Helper(&MF, &MDT, &MPDT);
  bool Changed = false;
  Changed |= Helper.lowerCopiesFromI1();
  Changed |= Helper.lowerPhis();
  Changed |= Helper.lowerCopiesToI1();
  return Helper.cleanConstrainRegs(Changed);
}

PreservedAnalyses
SILowerI1CopiesPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  MachineDominatorTree &MDT = MFAM.getResult<MachineDominatorTreeAnalysis>(MF);
  MachinePostDominatorTree &MPDT =
      MFAM.getResult<MachinePostDominatorTreeAnalysis>(MF);
  bool Changed = runFixI1Copies(MF, MDT, MPDT);
  if (!Changed)
    return PreservedAnalyses::all();

  // TODO: Probably preserves most.
  return getMachineFunctionPassPreservedAnalyses().preserveSet<CFGAnalyses>();
}

class SILowerI1CopiesLegacy : public MachineFunctionPass {
public:
  static char ID;

  SILowerI1CopiesLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

bool SILowerI1CopiesLegacy::runOnMachineFunction(MachineFunction &MF) {
  MachineDominatorTree &MDT =
      getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
  MachinePostDominatorTree &MPDT =
      getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
  return runFixI1Copies(MF, MDT, MPDT);
}

INITIALIZE_PASS_BEGIN(SILowerI1CopiesLegacy, DEBUG_TYPE, "SI Lower i1 Copies",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(SILowerI1CopiesLegacy, DEBUG_TYPE, "SI Lower i1 Copies",
                    false, false)

char SILowerI1CopiesLegacy::ID = 0;

char &llvm::SILowerI1CopiesLegacyID = SILowerI1CopiesLegacy::ID;

FunctionPass *llvm::createSILowerI1CopiesLegacyPass() {
  return new SILowerI1CopiesLegacy();
}