//===--------------------- DispatchStage.cpp --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file models the dispatch component of an instruction pipeline.
///
/// The DispatchStage is responsible for updating instruction dependencies
/// and communicating to the simulated instruction scheduler that an instruction
/// is ready to be scheduled for execution.
///
//===----------------------------------------------------------------------===//

#include "llvm/MCA/Stages/DispatchStage.h"
#include "llvm/MCA/HWEventListener.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

DispatchStage::DispatchStage(const MCSubtargetInfo &Subtarget,
                             const MCRegisterInfo &MRI,
                             unsigned MaxDispatchWidth, RetireControlUnit &R,
                             RegisterFile &F)
    : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
      CarryOver(0U), STI(Subtarget), RCU(R), PRF(F) {
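  // A dispatch width of zero means: use the issue width declared by the
  // processor scheduling model.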
  if (!DispatchWidth)
    DispatchWidth = Subtarget.getSchedModel().IssueWidth;
}

void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
                                                ArrayRef<unsigned> UsedRegs,
                                                unsigned UOps) const {
  LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
  notifyEvent<HWInstructionEvent>(
      HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
}

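// Checks whether the register file(s) can allocate physical registers for all
// the register writes of IR. On failure, listeners are notified of a
// RegisterFileStall event.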
bool DispatchStage::checkPRF(const InstRef &IR) const {
  SmallVector<MCPhysReg, 4> RegDefs;
  for (const WriteState &RegDef : IR.getInstruction()->getDefs())
    RegDefs.emplace_back(RegDef.getRegisterID());

  const unsigned RegisterMask = PRF.isAvailable(RegDefs);
  // A mask of all zeroes means that every register file is available.
  if (RegisterMask) {
    notifyEvent<HWStallEvent>(
        HWStallEvent(HWStallEvent::RegisterFileStall, IR));
    return false;
  }

  return true;
}

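// Checks whether the retire control unit (i.e. the simulated reorder buffer)
// has enough free slots for the micro opcodes of IR. On failure, listeners are
// notified of a RetireControlUnitStall event.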
bool DispatchStage::checkRCU(const InstRef &IR) const {
  const unsigned NumMicroOps = IR.getInstruction()->getNumMicroOps();
  if (RCU.isAvailable(NumMicroOps))
    return true;
  notifyEvent<HWStallEvent>(
      HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
  return false;
}

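// All three hazard checks are evaluated even if an earlier one fails, so that
// listeners are notified of every stall condition affecting IR during this
// cycle, not just the first one.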
bool DispatchStage::canDispatch(const InstRef &IR) const {
  bool CanDispatch = checkRCU(IR);
  CanDispatch &= checkPRF(IR);
  CanDispatch &= checkNextStage(IR);
  return CanDispatch;
}

Error DispatchStage::dispatch(InstRef IR) {
  assert(!CarryOver && "Cannot dispatch another instruction!");
  Instruction &IS = *IR.getInstruction();
  const unsigned NumMicroOps = IS.getNumMicroOps();
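  // An instruction that declares more micro opcodes than the dispatch width is
  // dispatched across multiple cycles; the opcodes in excess are carried over
  // to the next cycle (see cycleStart()).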
  if (NumMicroOps > DispatchWidth) {
    assert(AvailableEntries == DispatchWidth);
    AvailableEntries = 0;
    CarryOver = NumMicroOps - DispatchWidth;
    CarriedOver = IR;
  } else {
    assert(AvailableEntries >= NumMicroOps);
    AvailableEntries -= NumMicroOps;
  }

  // Check if this instruction ends the dispatch group.
  if (IS.getEndGroup())
    AvailableEntries = 0;

  // Check if this is an optimizable reg-reg move or an XCHG-like instruction.
  if (IS.isOptimizableMove())
    if (PRF.tryEliminateMoveOrSwap(IS.getDefs(), IS.getUses()))
      IS.setEliminated();

  // A dependency-breaking instruction doesn't have to wait on the register
  // input operands, and it is often optimized at the register renaming stage.
  // Update RAW dependencies if this instruction is not a dependency-breaking
  // instruction. A dependency-breaking instruction is a zero-latency
  // instruction that doesn't consume hardware resources.
  // An example of a dependency-breaking instruction on X86 is a zero-idiom XOR.
  //
  // We also don't update data dependencies for instructions that have been
  // eliminated at the register renaming stage.
  if (!IS.isEliminated()) {
    for (ReadState &RS : IS.getUses())
      PRF.addRegisterRead(RS, STI);
  }

  // By default, a dependency-breaking zero-idiom is expected to be optimized
  // at the register renaming stage. That means no physical register is
  // allocated to the instruction.
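  // Vector RegisterFiles is updated by addRegisterWrite() with the number of
  // physical registers allocated from each register file; that information is
  // later reported to listeners via the HWInstructionDispatchedEvent.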
  SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
  for (WriteState &WS : IS.getDefs())
    PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);

  // Reserve entries in the reorder buffer.
  unsigned RCUTokenID = RCU.dispatch(IR);
  // Notify the instruction that it has been dispatched.
  IS.dispatch(RCUTokenID);

  // Notify listeners of the "instruction dispatched" event,
  // and move IR to the next stage.
  notifyInstructionDispatched(IR, RegisterFiles,
                              std::min(DispatchWidth, NumMicroOps));
  return moveToTheNextStage(IR);
}

Error DispatchStage::cycleStart() {
  // The retire stage is responsible for calling method `cycleStart`
  // on the PRF.
  if (!CarryOver) {
    AvailableEntries = DispatchWidth;
    return ErrorSuccess();
  }

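  // The instruction dispatched in a previous cycle still has micro opcodes to
  // dispatch. Those carried-over opcodes consume dispatch entries before any
  // new instruction can be accepted during this cycle.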
  AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
  unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
  CarryOver -= DispatchedOpcodes;
  assert(CarriedOver && "Invalid dispatched instruction");

  SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
  notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
  if (!CarryOver)
    CarriedOver = InstRef();
  return ErrorSuccess();
}

bool DispatchStage::isAvailable(const InstRef &IR) const {
  // Conservatively bail out if there are no available dispatch entries.
  if (!AvailableEntries)
    return false;

  const Instruction &Inst = *IR.getInstruction();
  unsigned NumMicroOps = Inst.getNumMicroOps();
  unsigned Required = std::min(NumMicroOps, DispatchWidth);
  if (Required > AvailableEntries)
    return false;

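  // An instruction that starts a new dispatch group can only be dispatched if
  // the current group is still empty (i.e. no dispatch entry has been consumed
  // yet during this cycle).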
  if (Inst.getBeginGroup() && AvailableEntries != DispatchWidth)
    return false;

  // The dispatch logic doesn't internally buffer instructions. It only accepts
  // instructions that can be successfully moved to the next stage during this
  // same cycle.
  return canDispatch(IR);
}

Error DispatchStage::execute(InstRef &IR) {
  assert(canDispatch(IR) && "Cannot dispatch another instruction!");
  return dispatch(IR);
}

#ifndef NDEBUG
void DispatchStage::dump() const {
  PRF.dump();
  RCU.dump();
}
#endif
} // namespace mca
} // namespace llvm