//===-- SchedClassResolution.cpp --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SchedClassResolution.h"
#include "BenchmarkResult.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MCA/Support.h"
#include "llvm/Support/FormatVariadic.h"
#include <vector>

namespace llvm {
namespace exegesis {

// Return the non-redundant list of WriteProcRes used by the given sched class.
// The LLVM scheduling model is such that each instruction has a certain number
// of uops which consume resources described by WriteProcRes entries. Each
// entry describes how many cycles are spent on a specific ProcRes kind.
// For example, an instruction might have 3 uOps, one dispatching on P0
// (ProcResIdx=1) and two on P06 (ProcResIdx=7).
// Note that LLVM additionally denormalizes resource consumption to include
// usage of super resources by subresources. So in practice if there exists a
// P016 (ProcResIdx=10), then the cycles consumed by P0 are also consumed by
// P06 (ProcResIdx=7) and P016 (ProcResIdx=10), and the resources consumed
// by P06 are also consumed by P016. In the figure below, parenthesized cycles
// denote implied usage of superresources by subresources:
//            P0      P06     P016
//   uOp1     1       (1)     (1)
//   uOp2             1       (1)
//   uOp3             1       (1)
//   =============================
//            1        3       3
// Eventually we end up with three entries for the WriteProcRes of the
// instruction:
//    {ProcResIdx=1,  Cycles=1}  // P0
//    {ProcResIdx=7,  Cycles=3}  // P06
//    {ProcResIdx=10, Cycles=3}  // P016
//
// Note that in this case, P016 contributes no cycles beyond those implied by
// its subunits, so it would be removed by this function.
// FIXME: Merge this with the equivalent in llvm-mca.
static SmallVector<MCWriteProcResEntry, 8>
getNonRedundantWriteProcRes(const MCSchedClassDesc &SCDesc,
                            const MCSubtargetInfo &STI) {
  SmallVector<MCWriteProcResEntry, 8> Result;
  const auto &SM = STI.getSchedModel();
  const unsigned NumProcRes = SM.getNumProcResourceKinds();

  // Collect resource masks.
  SmallVector<uint64_t> ProcResourceMasks(NumProcRes);
  mca::computeProcResourceMasks(SM, ProcResourceMasks);
  // Sort entries by smaller resources for (basic) topological ordering.
  using ResourceMaskAndEntry = std::pair<uint64_t, const MCWriteProcResEntry *>;
  SmallVector<ResourceMaskAndEntry, 8> ResourceMaskAndEntries;
  for (const auto *WPR = STI.getWriteProcResBegin(&SCDesc),
                  *const WPREnd = STI.getWriteProcResEnd(&SCDesc);
       WPR != WPREnd; ++WPR) {
    uint64_t Mask = ProcResourceMasks[WPR->ProcResourceIdx];
    ResourceMaskAndEntries.push_back({Mask, WPR});
  }
  sort(ResourceMaskAndEntries,
       [](const ResourceMaskAndEntry &A, const ResourceMaskAndEntry &B) {
         unsigned popcntA = popcount(A.first);
         unsigned popcntB = popcount(B.first);
         if (popcntA < popcntB)
           return true;
         if (popcntA > popcntB)
           return false;
         return A.first < B.first;
       });
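  // Note: sorting by mask popcount is what makes the single pass below work:
  // individual units (popcount 1) come before the groups that contain them,
  // and smaller groups come before larger ones, so by the time a group is
  // visited, ProcResUnitUsage already accounts for all of its subunits.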

  SmallVector<float, 32> ProcResUnitUsage(NumProcRes);
  for (const ResourceMaskAndEntry &Entry : ResourceMaskAndEntries) {
    const MCWriteProcResEntry *WPR = Entry.second;
    const MCProcResourceDesc *const ProcResDesc =
        SM.getProcResource(WPR->ProcResourceIdx);
    // TODO: Handle AcquireAtCycle in llvm-exegesis and llvm-mca. See
    // https://github.com/llvm/llvm-project/issues/62680 and
    // https://github.com/llvm/llvm-project/issues/62681
    assert(WPR->AcquireAtCycle == 0 &&
           "`llvm-exegesis` does not handle AcquireAtCycle > 0");
    if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
      // This is a ProcResUnit.
      Result.push_back(
          {WPR->ProcResourceIdx, WPR->ReleaseAtCycle, WPR->AcquireAtCycle});
      ProcResUnitUsage[WPR->ProcResourceIdx] += WPR->ReleaseAtCycle;
    } else {
      // This is a ProcResGroup. First see if it contributes any cycles or if
      // it has cycles just from subunits.
      float RemainingCycles = WPR->ReleaseAtCycle;
      for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
           SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
           ++SubResIdx) {
        RemainingCycles -= ProcResUnitUsage[*SubResIdx];
      }
      if (RemainingCycles < 0.01f) {
        // The ProcResGroup contributes no cycles of its own.
        continue;
      }
      // The ProcResGroup contributes `RemainingCycles` cycles of its own.
      Result.push_back({WPR->ProcResourceIdx,
                        static_cast<uint16_t>(std::round(RemainingCycles)),
                        WPR->AcquireAtCycle});
      // Spread the remaining cycles over all subunits.
      for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
           SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
           ++SubResIdx) {
        ProcResUnitUsage[*SubResIdx] += RemainingCycles / ProcResDesc->NumUnits;
      }
    }
  }
  return Result;
}

// Distributes a pressure budget as evenly as possible on the provided subunits
// given the already existing port pressure distribution.
//
// The algorithm is as follows: while there is remaining pressure to
// distribute, find the subunits with minimal pressure, and distribute
// remaining pressure equally up to the pressure of the unit with
// second-to-minimal pressure.
// For example, let's assume we want to distribute 2*P1256
// (Subunits = [P1,P2,P5,P6]), and the starting DensePressure is:
//     DensePressure =  P0   P1   P2   P3   P4   P5   P6   P7
//                      0.1  0.3  0.2  0.0  0.0  0.5  0.5  0.5
//     RemainingPressure = 2.0
// We sort the subunits by pressure:
//     Subunits = [(P2,p=0.2), (P1,p=0.3), (P5,p=0.5), (P6,p=0.5)]
// We first start with the subunits with minimal pressure, which are at
// the beginning of the sorted array. In this example there is one (P2).
// The subunit with second-to-minimal pressure is the next one in the
// array (P1). So we distribute 0.1 pressure to P2, and remove 0.1 cycles
// from the budget.
//     Subunits = [(P2,p=0.3), (P1,p=0.3), (P5,p=0.5), (P6,p=0.5)]
//     RemainingPressure = 1.9
// We repeat this process: distribute 0.2 pressure on each of the minimal
// P2 and P1, decrease the budget by 2*0.2:
//     Subunits = [(P2,p=0.5), (P1,p=0.5), (P5,p=0.5), (P6,p=0.5)]
//     RemainingPressure = 1.5
// There are no second-to-minimal subunits left, so we just share the remaining
// budget (1.5 cycles) equally:
//     Subunits = [(P2,p=0.875), (P1,p=0.875), (P5,p=0.875), (P6,p=0.875)]
//     RemainingPressure = 0.0
// We stop as there is no remaining budget to distribute.
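//
// Note that `Subunits` is taken by value (it is re-sorted locally), while
// `DensePressure` is updated in place.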
static void distributePressure(float RemainingPressure,
                               SmallVector<uint16_t, 32> Subunits,
                               SmallVector<float, 32> &DensePressure) {
  // Find the number of subunits with minimal pressure (they are at the
  // front).
  sort(Subunits, [&DensePressure](const uint16_t A, const uint16_t B) {
    return DensePressure[A] < DensePressure[B];
  });
  const auto getPressureForSubunit = [&DensePressure,
                                      &Subunits](size_t I) -> float & {
    return DensePressure[Subunits[I]];
  };
  size_t NumMinimalSU = 1;
  while (NumMinimalSU < Subunits.size() &&
         getPressureForSubunit(NumMinimalSU) == getPressureForSubunit(0)) {
    ++NumMinimalSU;
  }
  while (RemainingPressure > 0.0f) {
    if (NumMinimalSU == Subunits.size()) {
      // All units are minimal, just distribute evenly and be done.
      for (size_t I = 0; I < NumMinimalSU; ++I) {
        getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
      }
      return;
    }
    // Distribute the remaining pressure equally.
    const float MinimalPressure = getPressureForSubunit(NumMinimalSU - 1);
    const float SecondToMinimalPressure = getPressureForSubunit(NumMinimalSU);
    assert(MinimalPressure < SecondToMinimalPressure);
    const float Increment = SecondToMinimalPressure - MinimalPressure;
    if (RemainingPressure <= NumMinimalSU * Increment) {
      // There is not enough remaining pressure.
      for (size_t I = 0; I < NumMinimalSU; ++I) {
        getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
      }
      return;
    }
    // Bump all minimal pressure subunits to `SecondToMinimalPressure`; each
    // bump consumes `Increment` cycles of the budget.
    for (size_t I = 0; I < NumMinimalSU; ++I) {
      getPressureForSubunit(I) = SecondToMinimalPressure;
      RemainingPressure -= Increment;
    }
    while (NumMinimalSU < Subunits.size() &&
           getPressureForSubunit(NumMinimalSU) == SecondToMinimalPressure) {
      ++NumMinimalSU;
    }
  }
}
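
// Illustrative sketch (hypothetical two-port model): given the non-redundant
// entries {P0 (unit): 1 cycle} and {P06 = {P0,P6} (group): 3 cycles},
// computeIdealizedProcResPressure below first records a pressure of 1.0 on P0,
// then asks distributePressure to spread the 3 group cycles over {P0,P6}:
// P6 is first raised to P0's level (1.0), and the remaining 2 cycles are split
// evenly, yielding an idealized pressure of 2.0 on both P0 and P6 (4 cycles in
// total, matching the 1 + 3 cycles of the input entries).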
std::vector<std::pair<uint16_t, float>>
computeIdealizedProcResPressure(const MCSchedModel &SM,
                                SmallVector<MCWriteProcResEntry, 8> WPRS) {
  // DensePressure[I] is the port pressure for Proc Resource I.
  SmallVector<float, 32> DensePressure(SM.getNumProcResourceKinds());
  sort(WPRS, [](const MCWriteProcResEntry &A, const MCWriteProcResEntry &B) {
    return A.ProcResourceIdx < B.ProcResourceIdx;
  });
  for (const MCWriteProcResEntry &WPR : WPRS) {
    // Get units for the entry.
    const MCProcResourceDesc *const ProcResDesc =
        SM.getProcResource(WPR.ProcResourceIdx);
    if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
      // This is a ProcResUnit.
      DensePressure[WPR.ProcResourceIdx] += WPR.ReleaseAtCycle;
    } else {
      // This is a ProcResGroup.
      SmallVector<uint16_t, 32> Subunits(ProcResDesc->SubUnitsIdxBegin,
                                         ProcResDesc->SubUnitsIdxBegin +
                                             ProcResDesc->NumUnits);
      distributePressure(WPR.ReleaseAtCycle, Subunits, DensePressure);
    }
  }
  // Turn dense pressure into sparse pressure by removing zero entries.
  std::vector<std::pair<uint16_t, float>> Pressure;
  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
    if (DensePressure[I] > 0.0f)
      Pressure.emplace_back(I, DensePressure[I]);
  }
  return Pressure;
}
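
// Note: the constructor below expects an already-resolved (i.e. non-variant)
// sched class id; variant classes should first be resolved through
// resolveSchedClassId().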
ResolvedSchedClass::ResolvedSchedClass(const MCSubtargetInfo &STI,
                                       unsigned ResolvedSchedClassId,
                                       bool WasVariant)
    : SchedClassId(ResolvedSchedClassId),
      SCDesc(STI.getSchedModel().getSchedClassDesc(ResolvedSchedClassId)),
      WasVariant(WasVariant),
      NonRedundantWriteProcRes(getNonRedundantWriteProcRes(*SCDesc, STI)),
      IdealizedProcResPressure(computeIdealizedProcResPressure(
          STI.getSchedModel(), NonRedundantWriteProcRes)) {
  assert((SCDesc == nullptr || !SCDesc->isVariant()) &&
         "ResolvedSchedClass should never be variant");
}

static unsigned ResolveVariantSchedClassId(const MCSubtargetInfo &STI,
                                           const MCInstrInfo &InstrInfo,
                                           unsigned SchedClassId,
                                           const MCInst &MCI) {
  const auto &SM = STI.getSchedModel();
  while (SchedClassId && SM.getSchedClassDesc(SchedClassId)->isVariant()) {
    SchedClassId = STI.resolveVariantSchedClass(SchedClassId, &MCI, &InstrInfo,
                                                SM.getProcessorID());
  }
  return SchedClassId;
}

std::pair<unsigned /*SchedClassId*/, bool /*WasVariant*/>
ResolvedSchedClass::resolveSchedClassId(const MCSubtargetInfo &SubtargetInfo,
                                        const MCInstrInfo &InstrInfo,
                                        const MCInst &MCI) {
  unsigned SchedClassId = InstrInfo.get(MCI.getOpcode()).getSchedClass();
  const bool WasVariant = SchedClassId && SubtargetInfo.getSchedModel()
                                              .getSchedClassDesc(SchedClassId)
                                              ->isVariant();
  SchedClassId =
      ResolveVariantSchedClassId(SubtargetInfo, InstrInfo, SchedClassId, MCI);
  return std::make_pair(SchedClassId, WasVariant);
}
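
// Minimal usage sketch for the two-step resolution above (assuming `STI`,
// `MCII` and `Inst` are a valid MCSubtargetInfo, MCInstrInfo and MCInst for
// the target):
//   const auto [SCId, WasVariant] =
//       ResolvedSchedClass::resolveSchedClassId(STI, MCII, Inst);
//   const ResolvedSchedClass RSC(STI, SCId, WasVariant);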

// Returns a ProcResIdx by id or name; returns 0 if `NameOrId` does not match
// any processor resource.
static unsigned findProcResIdx(const MCSubtargetInfo &STI,
                               const StringRef NameOrId) {
  // Interpret the key as a ProcResIdx.
  unsigned ProcResIdx = 0;
  if (to_integer(NameOrId, ProcResIdx, 10))
    return ProcResIdx;
  // Interpret the key as a ProcRes name.
  const auto &SchedModel = STI.getSchedModel();
  for (int I = 0, E = SchedModel.getNumProcResourceKinds(); I < E; ++I) {
    if (NameOrId == SchedModel.getProcResource(I)->Name)
      return I;
  }
  return 0;
}

std::vector<BenchmarkMeasure> ResolvedSchedClass::getAsPoint(
    Benchmark::ModeE Mode, const MCSubtargetInfo &STI,
    ArrayRef<PerInstructionStats> Representative) const {
  const size_t NumMeasurements = Representative.size();

  std::vector<BenchmarkMeasure> SchedClassPoint(NumMeasurements);

  if (Mode == Benchmark::Latency) {
    assert(NumMeasurements == 1 && "Latency is a single measure.");
    BenchmarkMeasure &LatencyMeasure = SchedClassPoint[0];

    // Find the latency.
    LatencyMeasure.PerInstructionValue = 0.0;

    for (unsigned I = 0; I < SCDesc->NumWriteLatencyEntries; ++I) {
      const MCWriteLatencyEntry *const WLE =
          STI.getWriteLatencyEntry(SCDesc, I);
      LatencyMeasure.PerInstructionValue =
          std::max<double>(LatencyMeasure.PerInstructionValue, WLE->Cycles);
    }
  } else if (Mode == Benchmark::Uops) {
    for (auto I : zip(SchedClassPoint, Representative)) {
      BenchmarkMeasure &Measure = std::get<0>(I);
      const PerInstructionStats &Stats = std::get<1>(I);

      StringRef Key = Stats.key();
      uint16_t ProcResIdx = findProcResIdx(STI, Key);
      if (ProcResIdx > 0) {
        // Find the pressure on ProcResIdx `Key`.
        const auto ProcResPressureIt =
            find_if(IdealizedProcResPressure,
                    [ProcResIdx](const std::pair<uint16_t, float> &WPR) {
                      return WPR.first == ProcResIdx;
                    });
        Measure.PerInstructionValue =
            ProcResPressureIt == IdealizedProcResPressure.end()
                ? 0.0
                : ProcResPressureIt->second;
      } else if (Key == "NumMicroOps") {
        Measure.PerInstructionValue = SCDesc->NumMicroOps;
      } else {
        errs() << "expected `key` to be either a ProcResIdx or a ProcRes "
                  "name, got "
               << Key << "\n";
        return {};
      }
    }
  } else if (Mode == Benchmark::InverseThroughput) {
    assert(NumMeasurements == 1 && "Inverse Throughput is a single measure.");
    BenchmarkMeasure &RThroughputMeasure = SchedClassPoint[0];

    RThroughputMeasure.PerInstructionValue =
        MCSchedModel::getReciprocalThroughput(STI, *SCDesc);
  } else {
    llvm_unreachable("unimplemented measurement matching mode");
  }

  return SchedClassPoint;
}

} // namespace exegesis
} // namespace llvm