//===- AMDGPUInsertDelayAlu.cpp - Insert s_delay_alu instructions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Insert s_delay_alu instructions to avoid stalls on GFX11+.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/SetVector.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <utility>

20using namespace llvm;
21
22#define DEBUG_TYPE "amdgpu-insert-delay-alu"
23
24namespace {
25
26class AMDGPUInsertDelayAlu {
27public:
28 const GCNSubtarget *ST;
29 const SIInstrInfo *SII;
30 const TargetRegisterInfo *TRI;
31
32 const TargetSchedModel *SchedModel;
33
34 // Return true if MI waits for all outstanding VALU instructions to complete.
35 static bool instructionWaitsForVALU(const MachineInstr &MI) {
36 // These instruction types wait for VA_VDST==0 before issuing.
37 const uint64_t VA_VDST_0 = SIInstrFlags::DS | SIInstrFlags::EXP |
38 SIInstrFlags::FLAT | SIInstrFlags::MIMG |
39 SIInstrFlags::MTBUF | SIInstrFlags::MUBUF;
40 if (MI.getDesc().TSFlags & VA_VDST_0)
41 return true;
42 if (MI.getOpcode() == AMDGPU::S_SENDMSG_RTN_B32 ||
43 MI.getOpcode() == AMDGPU::S_SENDMSG_RTN_B64)
44 return true;
45 if (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
46 AMDGPU::DepCtr::decodeFieldVaVdst(Encoded: MI.getOperand(i: 0).getImm()) == 0)
47 return true;
48 return false;
49 }
50
51 static bool instructionWaitsForSGPRWrites(const MachineInstr &MI) {
52 // These instruction types wait for VA_SDST==0 before issuing.
53 uint64_t MIFlags = MI.getDesc().TSFlags;
54 if (MIFlags & SIInstrFlags::SMRD)
55 return true;
56
57 if (MIFlags & SIInstrFlags::SALU) {
58 for (auto &Op : MI.operands()) {
59 if (Op.isReg())
60 return true;
61 }
62 }
63 return false;
64 }
65
66 // Types of delay that can be encoded in an s_delay_alu instruction.
67 enum DelayType { VALU, TRANS, SALU, OTHER };
68
69 // Get the delay type for a MachineInstr.
70 DelayType getDelayType(const MachineInstr &MI) {
71 if (SIInstrInfo::isTRANS(MI))
72 return TRANS;
73 // WMMA XDL ops are treated the same as TRANS.
74 if (ST->hasGFX1250Insts() && SII->isXDLWMMA(MI))
75 return TRANS;
76 if (SIInstrInfo::isVALU(MI))
77 return VALU;
78 if (SIInstrInfo::isSALU(MI))
79 return SALU;
80 return OTHER;
81 }
82
83 // Information about the last instruction(s) that wrote to a particular
84 // regunit. In straight-line code there will only be one such instruction, but
85 // when control flow converges we merge the delay information from each path
86 // to represent the union of the worst-case delays of each type.
87 struct DelayInfo {
88 // One larger than the maximum number of (non-TRANS) VALU instructions we
89 // can encode in an s_delay_alu instruction.
90 static constexpr unsigned VALU_MAX = 5;
91
92 // One larger than the maximum number of TRANS instructions we can encode in
93 // an s_delay_alu instruction.
94 static constexpr unsigned TRANS_MAX = 4;
95
96 // One larger than the maximum number of SALU cycles we can encode in an
97 // s_delay_alu instruction.
98 static constexpr unsigned SALU_CYCLES_MAX = 4;
99
100 // If it was written by a (non-TRANS) VALU, remember how many clock cycles
101 // are left until it completes, and how many other (non-TRANS) VALU we have
102 // seen since it was issued.
103 uint8_t VALUCycles = 0;
104 uint8_t VALUNum = VALU_MAX;
105
106 // If it was written by a TRANS, remember how many clock cycles are left
107 // until it completes, and how many other TRANS we have seen since it was
108 // issued.
109 uint8_t TRANSCycles = 0;
110 uint8_t TRANSNum = TRANS_MAX;
111 // Also remember how many other (non-TRANS) VALU we have seen since it was
112 // issued. When an instruction depends on both a prior TRANS and a prior
113 // non-TRANS VALU, this is used to decide whether to encode a wait for just
114 // one or both of them.
115 uint8_t TRANSNumVALU = VALU_MAX;
116
117 // If it was written by an SALU, remember how many clock cycles are left
118 // until it completes.
119 uint8_t SALUCycles = 0;
120
121 DelayInfo() = default;
122
123 DelayInfo(DelayType Type, unsigned Cycles) {
124 switch (Type) {
125 default:
126 llvm_unreachable("unexpected type");
127 case VALU:
128 VALUCycles = Cycles;
129 VALUNum = 0;
130 break;
131 case TRANS:
132 TRANSCycles = Cycles;
133 TRANSNum = 0;
134 TRANSNumVALU = 0;
135 break;
136 case SALU:
137 // Guard against pseudo-instructions like SI_CALL which are marked as
138 // SALU but with a very high latency.
139 SALUCycles = std::min(a: Cycles, b: SALU_CYCLES_MAX);
140 break;
141 }
142 }
143
144 bool operator==(const DelayInfo &RHS) const {
145 return VALUCycles == RHS.VALUCycles && VALUNum == RHS.VALUNum &&
146 TRANSCycles == RHS.TRANSCycles && TRANSNum == RHS.TRANSNum &&
147 TRANSNumVALU == RHS.TRANSNumVALU && SALUCycles == RHS.SALUCycles;
148 }
149
150 bool operator!=(const DelayInfo &RHS) const { return !(*this == RHS); }
151
152 // Merge another DelayInfo into this one, to represent the union of the
153 // worst-case delays of each type.
154 void merge(const DelayInfo &RHS) {
155 VALUCycles = std::max(a: VALUCycles, b: RHS.VALUCycles);
156 VALUNum = std::min(a: VALUNum, b: RHS.VALUNum);
157 TRANSCycles = std::max(a: TRANSCycles, b: RHS.TRANSCycles);
158 TRANSNum = std::min(a: TRANSNum, b: RHS.TRANSNum);
159 TRANSNumVALU = std::min(a: TRANSNumVALU, b: RHS.TRANSNumVALU);
160 SALUCycles = std::max(a: SALUCycles, b: RHS.SALUCycles);
161 }
162
163 // Update this DelayInfo after issuing an instruction of the specified type.
164 // Cycles is the number of cycles it takes to issue the instruction. Return
165 // true if there is no longer any useful delay info.
166 bool advance(DelayType Type, unsigned Cycles) {
167 bool Erase = true;
168
169 VALUNum += (Type == VALU);
170 if (VALUNum >= VALU_MAX || VALUCycles <= Cycles) {
171 // Forget about the VALU instruction. It was too far back or has
172 // definitely completed by now.
173 VALUNum = VALU_MAX;
174 VALUCycles = 0;
175 } else {
176 VALUCycles -= Cycles;
177 Erase = false;
178 }
179
180 TRANSNum += (Type == TRANS);
181 TRANSNumVALU += (Type == VALU);
182 if (TRANSNum >= TRANS_MAX || TRANSCycles <= Cycles) {
183 // Forget about any TRANS instruction. It was too far back or has
184 // definitely completed by now.
185 TRANSNum = TRANS_MAX;
186 TRANSNumVALU = VALU_MAX;
187 TRANSCycles = 0;
188 } else {
189 TRANSCycles -= Cycles;
190 Erase = false;
191 }
192
193 if (SALUCycles <= Cycles) {
194 // Forget about any SALU instruction. It has definitely completed by
195 // now.
196 SALUCycles = 0;
197 } else {
198 SALUCycles -= Cycles;
199 Erase = false;
200 }
201
202 return Erase;
203 }
204
205#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
206 void dump() const {
207 if (VALUCycles)
208 dbgs() << " VALUCycles=" << (int)VALUCycles;
209 if (VALUNum < VALU_MAX)
210 dbgs() << " VALUNum=" << (int)VALUNum;
211 if (TRANSCycles)
212 dbgs() << " TRANSCycles=" << (int)TRANSCycles;
213 if (TRANSNum < TRANS_MAX)
214 dbgs() << " TRANSNum=" << (int)TRANSNum;
215 if (TRANSNumVALU < VALU_MAX)
216 dbgs() << " TRANSNumVALU=" << (int)TRANSNumVALU;
217 if (SALUCycles)
218 dbgs() << " SALUCycles=" << (int)SALUCycles;
219 }
220#endif
221 };
222
223 // A map from regunits to the delay info for that regunit.
224 struct DelayState : DenseMap<MCRegUnit, DelayInfo> {
225 // Merge another DelayState into this one by merging the delay info for each
226 // regunit.
227 void merge(const DelayState &RHS) {
228 for (const auto &KV : RHS) {
229 iterator It;
230 bool Inserted;
231 std::tie(args&: It, args&: Inserted) = insert(KV);
232 if (!Inserted)
233 It->second.merge(RHS: KV.second);
234 }
235 }
236
237 // Advance the delay info for each regunit, erasing any that are no longer
238 // useful.
239 void advance(DelayType Type, unsigned Cycles) {
240 iterator Next;
241 for (auto I = begin(), E = end(); I != E; I = Next) {
242 Next = std::next(x: I);
243 if (I->second.advance(Type, Cycles))
244 erase(I);
245 }
246 }
247
248 void advanceByVALUNum(unsigned VALUNum) {
249 iterator Next;
250 for (auto I = begin(), E = end(); I != E; I = Next) {
251 Next = std::next(x: I);
252 if (I->second.VALUNum >= VALUNum && I->second.VALUCycles > 0) {
253 erase(I);
254 }
255 }
256 }
257
258#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
259 void dump(const TargetRegisterInfo *TRI) const {
260 if (empty()) {
261 dbgs() << " empty\n";
262 return;
263 }
264
265 // Dump DelayInfo for each RegUnit in numerical order.
266 SmallVector<const_iterator, 8> Order;
267 Order.reserve(size());
268 for (const_iterator I = begin(), E = end(); I != E; ++I)
269 Order.push_back(I);
270 llvm::sort(Order, [](const const_iterator &A, const const_iterator &B) {
271 return A->first < B->first;
272 });
273 for (const_iterator I : Order) {
274 dbgs() << " " << printRegUnit(I->first, TRI);
275 I->second.dump();
276 dbgs() << "\n";
277 }
278 }
279#endif
280 };
281
282 // The saved delay state at the end of each basic block.
283 DenseMap<MachineBasicBlock *, DelayState> BlockState;
284
285 // Emit an s_delay_alu instruction if necessary before MI.
286 MachineInstr *emitDelayAlu(MachineInstr &MI, DelayInfo Delay,
287 MachineInstr *LastDelayAlu) {
288 unsigned Imm = 0;
289
290 // Wait for a TRANS instruction.
291 if (Delay.TRANSNum < DelayInfo::TRANS_MAX)
292 Imm |= 4 + Delay.TRANSNum;
293
294 // Wait for a VALU instruction (if it's more recent than any TRANS
295 // instruction that we're also waiting for).
296 if (Delay.VALUNum < DelayInfo::VALU_MAX &&
297 Delay.VALUNum <= Delay.TRANSNumVALU) {
298 if (Imm & 0xf)
299 Imm |= Delay.VALUNum << 7;
300 else
301 Imm |= Delay.VALUNum;
302 }
303
304 // Wait for an SALU instruction.
305 if (Delay.SALUCycles) {
306 assert(Delay.SALUCycles < DelayInfo::SALU_CYCLES_MAX);
307 if (Imm & 0x780) {
308 // We have already encoded a VALU and a TRANS delay. There's no room in
309 // the encoding for an SALU delay as well, so just drop it.
310 } else if (Imm & 0xf) {
311 Imm |= (Delay.SALUCycles + 8) << 7;
312 } else {
313 Imm |= Delay.SALUCycles + 8;
314 }
315 }
316
317 // Don't emit the s_delay_alu instruction if there's nothing to wait for.
318 if (!Imm)
319 return LastDelayAlu;
320
321 // If we only need to wait for one instruction, try encoding it in the last
322 // s_delay_alu that we emitted.
323 if (!(Imm & 0x780) && LastDelayAlu) {
324 unsigned Skip = 0;
325 for (auto I = MachineBasicBlock::instr_iterator(LastDelayAlu),
326 E = MachineBasicBlock::instr_iterator(MI);
327 ++I != E;) {
328 if (I->getOpcode() == AMDGPU::S_SET_VGPR_MSB) {
329 // It is not deterministic whether the skip count counts
330 // S_SET_VGPR_MSB instructions or not, so do not include them in a
331 // skip region.
332 Skip = 6;
333 break;
334 }
335 if (!I->isBundle() && !I->isMetaInstruction())
336 ++Skip;
337 }
338 if (Skip < 6) {
339 MachineOperand &Op = LastDelayAlu->getOperand(i: 0);
340 unsigned LastImm = Op.getImm();
341 assert((LastImm & ~0xf) == 0 &&
342 "Remembered an s_delay_alu with no room for another delay!");
343 LastImm |= Imm << 7 | Skip << 4;
344 Op.setImm(LastImm);
345 return nullptr;
346 }
347 }
348
349 auto &MBB = *MI.getParent();
350 MachineInstr *DelayAlu =
351 BuildMI(BB&: MBB, I&: MI, MIMD: DebugLoc(), MCID: SII->get(Opcode: AMDGPU::S_DELAY_ALU)).addImm(Val: Imm);
352 // Remember the s_delay_alu for next time if there is still room in it to
353 // encode another delay.
354 return (Imm & 0x780) ? nullptr : DelayAlu;
355 }
356
357 bool runOnMachineBasicBlock(MachineBasicBlock &MBB, bool Emit) {
358 DelayState State;
359 for (auto *Pred : MBB.predecessors())
360 State.merge(RHS: BlockState[Pred]);
361
362 LLVM_DEBUG(dbgs() << " State at start of " << printMBBReference(MBB)
363 << "\n";
364 State.dump(TRI););
365
366 bool Changed = false;
367 MachineInstr *LastDelayAlu = nullptr;
368
369 // FIXME: 0 is a valid register unit.
370 MCRegUnit LastSGPRFromVALU = static_cast<MCRegUnit>(0);
371 // Iterate over the contents of bundles, but don't emit any instructions
372 // inside a bundle.
373 for (auto &MI : MBB.instrs()) {
374 if (MI.isBundle() || MI.isMetaInstruction())
375 continue;
376
377 // Ignore some more instructions that do not generate any code.
378 switch (MI.getOpcode()) {
379 case AMDGPU::SI_RETURN_TO_EPILOG:
380 continue;
381 }
382
383 DelayType Type = getDelayType(MI);
384
385 if (instructionWaitsForSGPRWrites(MI)) {
386 auto It = State.find(Val: LastSGPRFromVALU);
387 if (It != State.end()) {
388 DelayInfo Info = It->getSecond();
389 State.advanceByVALUNum(VALUNum: Info.VALUNum);
390 // FIXME: 0 is a valid register unit.
391 LastSGPRFromVALU = static_cast<MCRegUnit>(0);
392 }
393 }
394
395 if (instructionWaitsForVALU(MI)) {
396 // Forget about all outstanding VALU delays.
397 // TODO: This is overkill since it also forgets about SALU delays.
398 State = DelayState();
399 } else if (Type != OTHER) {
400 DelayInfo Delay;
401 // TODO: Scan implicit uses too?
402 for (const auto &Op : MI.explicit_uses()) {
403 if (Op.isReg()) {
404 // One of the operands of the writelane is also the output operand.
405 // This creates the insertion of redundant delays. Hence, we have to
406 // ignore this operand.
407 if (MI.getOpcode() == AMDGPU::V_WRITELANE_B32 && Op.isTied())
408 continue;
409 for (MCRegUnit Unit : TRI->regunits(Reg: Op.getReg())) {
410 auto It = State.find(Val: Unit);
411 if (It != State.end()) {
412 Delay.merge(RHS: It->second);
413 State.erase(Val: Unit);
414 }
415 }
416 }
417 }
418
419 if (SII->isVALU(Opcode: MI.getOpcode())) {
420 for (const auto &Op : MI.defs()) {
421 Register Reg = Op.getReg();
422 if (AMDGPU::isSGPR(Reg, TRI)) {
423 LastSGPRFromVALU = *TRI->regunits(Reg).begin();
424 break;
425 }
426 }
427 }
428
429 if (Emit && !MI.isBundledWithPred()) {
430 // TODO: For VALU->SALU delays should we use s_delay_alu or s_nop or
431 // just ignore them?
432 LastDelayAlu = emitDelayAlu(MI, Delay, LastDelayAlu);
433 }
434 }
435
436 if (Type != OTHER) {
437 // TODO: Scan implicit defs too?
438 for (const auto &Op : MI.defs()) {
439 unsigned Latency = SchedModel->computeOperandLatency(
440 DefMI: &MI, DefOperIdx: Op.getOperandNo(), UseMI: nullptr, UseOperIdx: 0);
441 for (MCRegUnit Unit : TRI->regunits(Reg: Op.getReg()))
442 State[Unit] = DelayInfo(Type, Latency);
443 }
444 }
445
446 // Advance by the number of cycles it takes to issue this instruction.
447 // TODO: Use a more advanced model that accounts for instructions that
448 // take multiple cycles to issue on a particular pipeline.
449 unsigned Cycles = SIInstrInfo::getNumWaitStates(MI);
450 // TODO: In wave64 mode, double the number of cycles for VALU and VMEM
451 // instructions on the assumption that they will usually have to be issued
452 // twice?
453 State.advance(Type, Cycles);
454
455 LLVM_DEBUG(dbgs() << " State after " << MI; State.dump(TRI););
456 }
457
458 if (Emit) {
459 assert(State == BlockState[&MBB] &&
460 "Basic block state should not have changed on final pass!");
461 } else if (DelayState &BS = BlockState[&MBB]; State != BS) {
462 BS = std::move(State);
463 Changed = true;
464 }
465 return Changed;
466 }
467
468 bool run(MachineFunction &MF) {
469 LLVM_DEBUG(dbgs() << "AMDGPUInsertDelayAlu running on " << MF.getName()
470 << "\n");
471
472 ST = &MF.getSubtarget<GCNSubtarget>();
473 if (!ST->hasDelayAlu())
474 return false;
475
476 SII = ST->getInstrInfo();
477 TRI = ST->getRegisterInfo();
478 SchedModel = &SII->getSchedModel();
479
480 // Calculate the delay state for each basic block, iterating until we reach
481 // a fixed point.
482 SetVector<MachineBasicBlock *> WorkList;
483 for (auto &MBB : reverse(C&: MF))
484 WorkList.insert(X: &MBB);
485 while (!WorkList.empty()) {
486 auto &MBB = *WorkList.pop_back_val();
487 bool Changed = runOnMachineBasicBlock(MBB, Emit: false);
488 if (Changed)
489 WorkList.insert_range(R: MBB.successors());
490 }
491
492 LLVM_DEBUG(dbgs() << "Final pass over all BBs\n");
493
494 // Make one last pass over all basic blocks to emit s_delay_alu
495 // instructions.
496 bool Changed = false;
497 for (auto &MBB : MF)
498 Changed |= runOnMachineBasicBlock(MBB, Emit: true);
499 return Changed;
500 }
501};
502
503class AMDGPUInsertDelayAluLegacy : public MachineFunctionPass {
504public:
505 static char ID;
506
507 AMDGPUInsertDelayAluLegacy() : MachineFunctionPass(ID) {}
508
509 void getAnalysisUsage(AnalysisUsage &AU) const override {
510 AU.setPreservesCFG();
511 MachineFunctionPass::getAnalysisUsage(AU);
512 }
513
514 bool runOnMachineFunction(MachineFunction &MF) override {
515 if (skipFunction(F: MF.getFunction()))
516 return false;
517 AMDGPUInsertDelayAlu Impl;
518 return Impl.run(MF);
519 }
520};
521} // namespace
522
523PreservedAnalyses
524AMDGPUInsertDelayAluPass::run(MachineFunction &MF,
525 MachineFunctionAnalysisManager &MFAM) {
526 if (!AMDGPUInsertDelayAlu().run(MF))
527 return PreservedAnalyses::all();
528 auto PA = getMachineFunctionPassPreservedAnalyses();
529 PA.preserveSet<CFGAnalyses>();
530 return PA;
531} // end namespace llvm
532
533char AMDGPUInsertDelayAluLegacy::ID = 0;
534
535char &llvm::AMDGPUInsertDelayAluID = AMDGPUInsertDelayAluLegacy::ID;
536
537INITIALIZE_PASS(AMDGPUInsertDelayAluLegacy, DEBUG_TYPE,
538 "AMDGPU Insert Delay ALU", false, false)
539