//===- MCSchedule.cpp - Scheduling ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the default scheduling model.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <optional>
#include <type_traits>

using namespace llvm;

static_assert(std::is_trivial_v<MCSchedModel>,
              "MCSchedModel is required to be a trivial type");
const MCSchedModel MCSchedModel::Default = {/*IssueWidth=*/DefaultIssueWidth,
                                            /*MicroOpBufferSize=*/DefaultMicroOpBufferSize,
                                            /*LoopMicroOpBufferSize=*/DefaultLoopMicroOpBufferSize,
                                            /*LoadLatency=*/DefaultLoadLatency,
                                            /*HighLatency=*/DefaultHighLatency,
                                            /*MispredictPenalty=*/DefaultMispredictPenalty,
                                            /*PostRAScheduler=*/false,
                                            /*CompleteModel=*/true,
                                            /*EnableIntervals=*/false,
                                            /*ProcID=*/0,
                                            /*ProcResourceTable=*/nullptr,
                                            /*SchedClassTable=*/nullptr,
                                            /*NumProcResourceKinds=*/0,
                                            /*NumSchedClasses=*/0,
                                            /*SchedClassNames=*/nullptr,
                                            /*InstrItineraries=*/nullptr,
                                            /*ExtraProcessorInfo=*/nullptr};

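// Returns the maximum write latency recorded for this scheduling class, or a
// negative (invalid) value if any of its write latency entries is invalid.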
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  int Latency = 0;
  for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
       DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI.getWriteLatencyEntry(&SCDesc, DefIdx);
    // Early exit if we found an invalid latency.
    if (WLEntry->Cycles < 0)
      return WLEntry->Cycles;
    Latency = std::max(Latency, static_cast<int>(WLEntry->Cycles));
  }
  return Latency;
}

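// Returns the latency for a scheduling class index, or 0 if the class is
// invalid. Variant scheduling classes are not supported by this overload.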
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      unsigned SchedClass) const {
  const MCSchedClassDesc &SCDesc = *getSchedClassDesc(SchedClass);
  if (!SCDesc.isValid())
    return 0;
  if (!SCDesc.isVariant())
    return MCSchedModel::computeInstrLatency(STI, SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

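// Computes the latency of an MCInst, first resolving any variant scheduling
// class down to a concrete one for the current processor.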
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  return MCSchedModel::computeInstrLatency<MCSubtargetInfo, MCInstrInfo,
                                           InstrItineraryData, MCInst>(
      STI, MCII, Inst,
      [&](const MCSchedClassDesc *SCDesc) -> const MCSchedClassDesc * {
        if (!SCDesc->isValid())
          return nullptr;

        unsigned CPUID = getProcessorID();
        unsigned SchedClass = 0;
        while (SCDesc->isVariant()) {
          SchedClass =
              STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
          SCDesc = getSchedClassDesc(SchedClass);
        }

        if (!SchedClass) {
          assert(false && "unsupported variant scheduling class");
          return nullptr;
        }

        return SCDesc;
      });
}

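// Returns the reciprocal throughput implied by the most constrained processor
// resource consumed by this scheduling class.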
double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  std::optional<double> MinThroughput;
  const MCSchedModel &SM = STI.getSchedModel();
  const MCWriteProcResEntry *I = STI.getWriteProcResBegin(&SCDesc);
  const MCWriteProcResEntry *E = STI.getWriteProcResEnd(&SCDesc);
  for (; I != E; ++I) {
    if (!I->ReleaseAtCycle || I->ReleaseAtCycle == I->AcquireAtCycle)
      continue;
    assert(I->ReleaseAtCycle > I->AcquireAtCycle && "invalid resource segment");
    unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
    double Throughput =
        double(NumUnits) / double(I->ReleaseAtCycle - I->AcquireAtCycle);
    MinThroughput =
        MinThroughput ? std::min(*MinThroughput, Throughput) : Throughput;
  }
  if (MinThroughput)
    return 1.0 / *MinThroughput;

  // If no throughput value was calculated, assume that we can execute at the
  // maximum issue width scaled by number of micro-ops for the schedule class.
  return ((double)SCDesc.NumMicroOps) / SM.IssueWidth;
}

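// Returns the reciprocal throughput of an MCInst, resolving any variant
// scheduling class for the current processor before the lookup.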
double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);

  // If there's no valid class, assume that the instruction executes/completes
  // at the maximum issue width.
  if (!SCDesc->isValid())
    return 1.0 / IssueWidth;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::getReciprocalThroughput(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

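// Returns the reciprocal throughput derived from the itinerary stages of the
// given scheduling class.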
double
MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
                                      const InstrItineraryData &IID) {
  std::optional<double> Throughput;
  const InstrStage *I = IID.beginStage(SchedClass);
  const InstrStage *E = IID.endStage(SchedClass);
  for (; I != E; ++I) {
    if (!I->getCycles())
      continue;
    double Temp = llvm::popcount(I->getUnits()) * 1.0 / I->getCycles();
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / *Throughput;

  // If there are no execution resources specified for this class, then assume
  // that it can execute at the maximum default issue width.
  return 1.0 / DefaultIssueWidth;
}

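// Returns the largest delay, in cycles, that a read dependent on writes of
// WriteResourceID may incur, based on negative read-advance cycles (0 if no
// matching entry exists).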
unsigned
MCSchedModel::getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                       unsigned WriteResourceID) {
  if (Entries.empty())
    return 0;

  int DelayCycles = 0;
  for (const MCReadAdvanceEntry &E : Entries) {
    if (E.WriteResourceID != WriteResourceID)
      continue;
    DelayCycles = std::min(DelayCycles, E.Cycles);
  }

  return std::abs(DelayCycles);
}

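// Returns the read-advance cycles associated with the highest-latency write
// of the scheduling class, or 0 if no matching read-advance entry exists.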
unsigned MCSchedModel::getBypassDelayCycles(const MCSubtargetInfo &STI,
                                            const MCSchedClassDesc &SCDesc) {

  ArrayRef<MCReadAdvanceEntry> Entries = STI.getReadAdvanceEntries(SCDesc);
  if (Entries.empty())
    return 0;

  unsigned MaxLatency = 0;
  unsigned WriteResourceID = 0;
  unsigned DefEnd = SCDesc.NumWriteLatencyEntries;

  for (unsigned DefIdx = 0; DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI.getWriteLatencyEntry(&SCDesc, DefIdx);
    unsigned Cycles = 0;
    // If the latency is invalid (< 0), treat it as a 0-cycle latency.
    if (WLEntry->Cycles > 0)
      Cycles = (unsigned)WLEntry->Cycles;
    if (Cycles > MaxLatency) {
      MaxLatency = Cycles;
      WriteResourceID = WLEntry->WriteResourceID;
    }
  }

  for (const MCReadAdvanceEntry &E : Entries) {
    if (E.WriteResourceID == WriteResourceID)
      return E.Cycles;
  }

  // Unable to find a matching WriteResourceID in the MCReadAdvanceEntry
  // entries.
  return 0;
}