//===--------------------- DispatchStage.cpp --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file models the dispatch component of an instruction pipeline.
///
/// The DispatchStage is responsible for updating instruction dependencies
/// and communicating to the simulated instruction scheduler that an instruction
/// is ready to be scheduled for execution.
///
//===----------------------------------------------------------------------===//

#include "llvm/MCA/Stages/DispatchStage.h"
#include "llvm/MCA/HWEventListener.h"
#include "llvm/MCA/HardwareUnits/Scheduler.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

DispatchStage::DispatchStage(const MCSubtargetInfo &Subtarget,
                             const MCRegisterInfo &MRI,
                             unsigned MaxDispatchWidth, RetireControlUnit &R,
                             RegisterFile &F)
    : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
      CarryOver(0U), STI(Subtarget), RCU(R), PRF(F) {
  if (!DispatchWidth)
    DispatchWidth = Subtarget.getSchedModel().IssueWidth;
}

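// Notifies the registered event listeners that instruction IR has been
// dispatched. UsedRegs reports how many physical registers were allocated from
// each register file, and UOps is the number of micro opcodes dispatched for
// IR during this cycle.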
void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
                                                ArrayRef<unsigned> UsedRegs,
                                                unsigned UOps) const {
  LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
  notifyEvent<HWInstructionEvent>(
      HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
}

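// Returns true if the register file(s) can allocate a physical register for
// every register definition of IR. On failure, listeners are notified of a
// RegisterFileStall event.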
bool DispatchStage::checkPRF(const InstRef &IR) const {
  SmallVector<MCPhysReg, 4> RegDefs;
  for (const WriteState &RegDef : IR.getInstruction()->getDefs())
    RegDefs.emplace_back(RegDef.getRegisterID());

  const unsigned RegisterMask = PRF.isAvailable(RegDefs);
  // An all-zero mask means that the register files are available.
  if (RegisterMask) {
    notifyEvent<HWStallEvent>(
        HWStallEvent(HWStallEvent::RegisterFileStall, IR));
    return false;
  }

  return true;
}

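// Returns true if the retire control unit has enough available entries to
// accept the micro opcodes of IR. Otherwise, listeners are notified of a
// RetireControlUnitStall event.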
bool DispatchStage::checkRCU(const InstRef &IR) const {
  const unsigned NumMicroOps = IR.getInstruction()->getNumMicroOps();
  if (RCU.isAvailable(NumMicroOps))
    return true;
  notifyEvent<HWStallEvent>(
      HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
  return false;
}

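// An instruction can be dispatched only if the register files, the retire
// control unit, and the next pipeline stage can all accept it in this cycle.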
bool DispatchStage::canDispatch(const InstRef &IR) const {
  bool CanDispatch = checkRCU(IR);
  CanDispatch &= checkPRF(IR);
  CanDispatch &= checkNextStage(IR);
  return CanDispatch;
}

Error DispatchStage::dispatch(InstRef IR) {
  assert(!CarryOver && "Cannot dispatch another instruction!");
  Instruction &IS = *IR.getInstruction();
  const unsigned NumMicroOps = IS.getNumMicroOps();
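  // Instructions with more micro opcodes than the dispatch width are
  // dispatched across multiple cycles: the remainder is tracked by CarryOver
  // and resumed in cycleStart().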
  if (NumMicroOps > DispatchWidth) {
    assert(AvailableEntries == DispatchWidth);
    AvailableEntries = 0;
    CarryOver = NumMicroOps - DispatchWidth;
    CarriedOver = IR;
  } else {
    assert(AvailableEntries >= NumMicroOps);
    AvailableEntries -= NumMicroOps;
  }

  // Check if this instruction ends the dispatch group.
  if (IS.getEndGroup())
    AvailableEntries = 0;

  // Check if this is an optimizable reg-reg move or an XCHG-like instruction.
  if (IS.isOptimizableMove())
    if (PRF.tryEliminateMoveOrSwap(IS.getDefs(), IS.getUses()))
      IS.setEliminated();

  // A dependency-breaking instruction doesn't have to wait on the register
  // input operands, and it is often optimized at the register renaming stage.
  // Update RAW dependencies if this instruction is not a dependency-breaking
  // instruction. A dependency-breaking instruction is a zero-latency
  // instruction that doesn't consume hardware resources.
  // An example of a dependency-breaking instruction on X86 is a zero-idiom XOR.
  //
  // We also don't update data dependencies for instructions that have been
  // eliminated at the register renaming stage.
  if (!IS.isEliminated()) {
    for (ReadState &RS : IS.getUses())
      PRF.addRegisterRead(RS, STI);
  }

  // By default, a dependency-breaking zero-idiom is expected to be optimized
  // at the register renaming stage. That means no physical register is
  // allocated to the instruction.
  SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
  for (WriteState &WS : IS.getDefs())
    PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);

  // Reserve entries in the reorder buffer.
  unsigned RCUTokenID = RCU.dispatch(IR);
  // Notify the instruction that it has been dispatched.
  IS.dispatch(RCUTokenID);

  // Notify listeners of the "instruction dispatched" event,
  // and move IR to the next stage.
  notifyInstructionDispatched(IR, RegisterFiles,
                              std::min(DispatchWidth, NumMicroOps));
  return moveToTheNextStage(IR);
}

Error DispatchStage::cycleStart() {
  // The retire stage is responsible for calling method `cycleStart`
  // on the PRF.
  if (!CarryOver) {
    AvailableEntries = DispatchWidth;
    return ErrorSuccess();
  }

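  // Resume the instruction carried over from the previous cycle: dispatch as
  // many of its remaining micro opcodes as the dispatch width allows.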
  AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
  unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
  CarryOver -= DispatchedOpcodes;
  assert(CarriedOver && "Invalid dispatched instruction");

  SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
  notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
  if (!CarryOver)
    CarriedOver = InstRef();
  return ErrorSuccess();
}

bool DispatchStage::isAvailable(const InstRef &IR) const {
  // Conservatively bail out if there are no available dispatch entries.
  if (!AvailableEntries)
    return false;

  const Instruction &Inst = *IR.getInstruction();
  unsigned NumMicroOps = Inst.getNumMicroOps();
  unsigned Required = std::min(NumMicroOps, DispatchWidth);
  if (Required > AvailableEntries)
    return false;

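  // Instructions that start a new dispatch group can only be dispatched when
  // no other micro opcodes have been dispatched in this cycle.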
  if (Inst.getBeginGroup() && AvailableEntries != DispatchWidth)
    return false;

  // The dispatch logic doesn't internally buffer instructions. It only accepts
  // instructions that can be successfully moved to the next stage during this
  // same cycle.
  return canDispatch(IR);
}

Error DispatchStage::execute(InstRef &IR) {
  assert(canDispatch(IR) && "Cannot dispatch another instruction!");
  return dispatch(IR);
}

#ifndef NDEBUG
void DispatchStage::dump() const {
  PRF.dump();
  RCU.dump();
}
#endif
} // namespace mca
} // namespace llvm