//===- SubtargetEmitter.cpp - Generate subtarget enumerations -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits subtarget enumerations.
//
//===----------------------------------------------------------------------===//

#include "Common/CodeGenHwModes.h"
#include "Common/CodeGenSchedule.h"
#include "Common/CodeGenTarget.h"
#include "Common/PredicateExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <set>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "subtarget-emitter"

namespace {

/// Sorting predicate to sort record pointers by their
/// FieldName field.
struct LessRecordFieldFieldName {
  bool operator()(const Record *Rec1, const Record *Rec2) const {
    return Rec1->getValueAsString("FieldName") <
           Rec2->getValueAsString("FieldName");
  }
};

class SubtargetEmitter {
  // Each processor has a SchedClassDesc table with an entry for each
  // SchedClass. The SchedClassDesc table indexes into a global write resource
  // table, write latency table, and read advance table.
  struct SchedClassTables {
    std::vector<std::vector<MCSchedClassDesc>> ProcSchedClasses;
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;

    // Reserve an invalid entry at index 0
    SchedClassTables() {
      ProcSchedClasses.resize(1);
      WriteProcResources.resize(1);
      WriteLatencies.resize(1);
      WriterNames.push_back("InvalidWrite");
      ReadAdvanceEntries.resize(1);
    }
  };

  struct LessWriteProcResources {
    bool operator()(const MCWriteProcResEntry &LHS,
                    const MCWriteProcResEntry &RHS) {
      return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
    }
  };

  CodeGenTarget TGT;
  RecordKeeper &Records;
  CodeGenSchedModels &SchedModels;
  std::string Target;

  void Enumeration(raw_ostream &OS, DenseMap<Record *, unsigned> &FeatureMap);
  void EmitSubtargetInfoMacroCalls(raw_ostream &OS);
  unsigned FeatureKeyValues(raw_ostream &OS,
                            const DenseMap<Record *, unsigned> &FeatureMap);
  unsigned CPUKeyValues(raw_ostream &OS,
                        const DenseMap<Record *, unsigned> &FeatureMap);
  void FormItineraryStageString(const std::string &Names, Record *ItinData,
                                std::string &ItinString, unsigned &NStages);
  void FormItineraryOperandCycleString(Record *ItinData,
                                       std::string &ItinString,
                                       unsigned &NOperandCycles);
  void FormItineraryBypassString(const std::string &Names, Record *ItinData,
                                 std::string &ItinString,
                                 unsigned NOperandCycles);
  void EmitStageAndOperandCycleData(
      raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists);
  void EmitItineraries(raw_ostream &OS,
                       std::vector<std::vector<InstrItinerary>> &ProcItinLists);
  unsigned EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                  raw_ostream &OS);
  void EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void EmitProcessorProp(raw_ostream &OS, const Record *R, StringRef Name,
                         char Separator);
  void EmitProcessorResourceSubUnits(const CodeGenProcModel &ProcModel,
                                     raw_ostream &OS);
  void EmitProcessorResources(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  Record *FindWriteResources(const CodeGenSchedRW &SchedWrite,
                             const CodeGenProcModel &ProcModel);
  Record *FindReadAdvance(const CodeGenSchedRW &SchedRead,
                          const CodeGenProcModel &ProcModel);
  void ExpandProcResources(RecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles,
                           std::vector<int64_t> &AcquireAtCycles,
                           const CodeGenProcModel &ProcModel);
  void GenSchedClassTables(const CodeGenProcModel &ProcModel,
                           SchedClassTables &SchedTables);
  void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
  void EmitProcessorModels(raw_ostream &OS);
  void EmitSchedModelHelpers(const std::string &ClassName, raw_ostream &OS);
  void emitSchedModelHelpersImpl(raw_ostream &OS,
                                 bool OnlyExpandMCInstPredicates = false);
  void emitGenMCSubtargetInfo(raw_ostream &OS);
  void EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS);

  void EmitSchedModel(raw_ostream &OS);
  void emitGetMacroFusions(const std::string &ClassName, raw_ostream &OS);
  void EmitHwModeCheck(const std::string &ClassName, raw_ostream &OS);
  void ParseFeaturesFunction(raw_ostream &OS);

public:
  SubtargetEmitter(RecordKeeper &R)
      : TGT(R), Records(R), SchedModels(TGT.getSchedModels()),
        Target(TGT.getName()) {}

  void run(raw_ostream &o);
};

} // end anonymous namespace

//
// Enumeration - Emit the specified class as an enumeration.
//
void SubtargetEmitter::Enumeration(raw_ostream &OS,
                                   DenseMap<Record *, unsigned> &FeatureMap) {
  // Get all records of class and sort
  std::vector<Record *> DefList =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(DefList, LessRecord());

  unsigned N = DefList.size();
  if (N == 0)
    return;
  if (N + 1 > MAX_SUBTARGET_FEATURES)
    PrintFatalError(
        "Too many subtarget features! Bump MAX_SUBTARGET_FEATURES.");

  OS << "namespace " << Target << " {\n";

  // Open enumeration.
  OS << "enum {\n";

  // For each record
  for (unsigned i = 0; i < N; ++i) {
    // Next record
    Record *Def = DefList[i];

    // Get and emit name
    OS << " " << Def->getName() << " = " << i << ",\n";

    // Save the index for this feature.
    FeatureMap[Def] = i;
  }

  OS << " "
     << "NumSubtargetFeatures = " << N << "\n";

  // Close enumeration and namespace
  OS << "};\n";
  OS << "} // end namespace " << Target << "\n";
}
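
// Illustrative sketch (not part of the emitter logic): for a hypothetical
// target "Foo" with two SubtargetFeature records, the block emitted above
// would look roughly like:
//
//   namespace Foo {
//   enum {
//     FeatureA = 0,
//     FeatureB = 1,
//     NumSubtargetFeatures = 2
//   };
//   } // end namespace Foo
//
// "Foo", "FeatureA" and "FeatureB" are made-up names; the exact spacing
// follows the OS << statements above.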

static void printFeatureMask(raw_ostream &OS, RecVec &FeatureList,
                             const DenseMap<Record *, unsigned> &FeatureMap) {
  std::array<uint64_t, MAX_SUBTARGET_WORDS> Mask = {};
  for (const Record *Feature : FeatureList) {
    unsigned Bit = FeatureMap.lookup(Feature);
    Mask[Bit / 64] |= 1ULL << (Bit % 64);
  }

  OS << "{ { { ";
  for (unsigned i = 0; i != Mask.size(); ++i) {
    OS << "0x";
    OS.write_hex(Mask[i]);
    OS << "ULL, ";
  }
  OS << "} } }";
}
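
// Illustrative sketch: assuming MAX_SUBTARGET_WORDS were 4 and FeatureList
// contained the features at bit positions 0 and 2, printFeatureMask() would
// print roughly:
//
//   { { { 0x5ULL, 0x0ULL, 0x0ULL, 0x0ULL, } } }
//
// one hex word per 64-bit element of the generated FeatureBitset initializer.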

/// Emit some information about the SubtargetFeature as calls to a macro so
/// that they can be used from C++.
void SubtargetEmitter::EmitSubtargetInfoMacroCalls(raw_ostream &OS) {
  OS << "\n#ifdef GET_SUBTARGETINFO_MACRO\n";

  std::vector<Record *> FeatureList =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(FeatureList, LessRecordFieldFieldName());

  for (const Record *Feature : FeatureList) {
    const StringRef FieldName = Feature->getValueAsString("FieldName");
    const StringRef Value = Feature->getValueAsString("Value");

    // Only handle boolean features for now, excluding BitVectors and enums.
    const bool IsBool = (Value == "false" || Value == "true") &&
                        !StringRef(FieldName).contains('[');
    if (!IsBool)
      continue;

    // Some features default to true, with values set to false if enabled.
    const char *Default = Value == "false" ? "true" : "false";

    // Define the getter with lowercased first char: xxxYyy() { return XxxYyy; }
    const std::string Getter =
        FieldName.substr(0, 1).lower() + FieldName.substr(1).str();

    OS << "GET_SUBTARGETINFO_MACRO(" << FieldName << ", " << Default << ", "
       << Getter << ")\n";
  }
  OS << "#undef GET_SUBTARGETINFO_MACRO\n";
  OS << "#endif // GET_SUBTARGETINFO_MACRO\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_MC_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_MC_DESC\n\n";

  if (Target == "AArch64")
    OS << "#include \"llvm/TargetParser/AArch64TargetParser.h\"\n\n";
}
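
// Sketch of the intended consumer (not emitted by this function): a target's
// Subtarget class may define the macro before including its generated .inc
// file to declare one member per boolean feature, for example:
//
//   #define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
//     bool ATTRIBUTE = DEFAULT;
//   #include "FooGenSubtargetInfo.inc"
//
// "FooGenSubtargetInfo.inc" is a placeholder name; see the in-tree targets for
// the actual usage pattern.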

//
// FeatureKeyValues - Emit data of all the subtarget features. Used by the
// command line.
//
unsigned SubtargetEmitter::FeatureKeyValues(
    raw_ostream &OS, const DenseMap<Record *, unsigned> &FeatureMap) {
  // Gather and sort all the features
  std::vector<Record *> FeatureList =
      Records.getAllDerivedDefinitions("SubtargetFeature");

  if (FeatureList.empty())
    return 0;

  llvm::sort(FeatureList, LessRecordFieldName());

  // Check that there are no duplicate keys
  std::set<StringRef> UniqueKeys;

  // Begin feature table
  OS << "// Sorted (by key) array of values for CPU features.\n"
     << "extern const llvm::SubtargetFeatureKV " << Target
     << "FeatureKV[] = {\n";

  // For each feature
  unsigned NumFeatures = 0;
  for (const Record *Feature : FeatureList) {
    // Next feature
    StringRef Name = Feature->getName();
    StringRef CommandLineName = Feature->getValueAsString("Name");
    StringRef Desc = Feature->getValueAsString("Desc");

    if (CommandLineName.empty())
      continue;

    // Emit as { "feature", "description", featureEnum, { i1, i2, ..., in } }
    OS << " { "
       << "\"" << CommandLineName << "\", "
       << "\"" << Desc << "\", " << Target << "::" << Name << ", ";

    RecVec ImpliesList = Feature->getValueAsListOfDefs("Implies");

    printFeatureMask(OS, ImpliesList, FeatureMap);

    OS << " },\n";
    ++NumFeatures;

    if (!UniqueKeys.insert(CommandLineName).second)
      PrintFatalError("Duplicate key in SubtargetFeatureKV: " +
                      CommandLineName);
  }

  // End feature table
  OS << "};\n";

  return NumFeatures;
}
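
// Illustrative sketch: each row emitted above has the shape
//
//   { "avx", "Enable AVX instructions", Foo::FeatureAVX,
//     { { { 0x4ULL, 0x0ULL, ... } } } },
//
// i.e. command-line key, description, feature enum value, and the implied
// feature mask from printFeatureMask(). The names and mask are hypothetical.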

//
// CPUKeyValues - Emit data of all the subtarget processors. Used by command
// line.
//
unsigned
SubtargetEmitter::CPUKeyValues(raw_ostream &OS,
                               const DenseMap<Record *, unsigned> &FeatureMap) {
  // Gather and sort processor information
  std::vector<Record *> ProcessorList =
      Records.getAllDerivedDefinitions("Processor");
  llvm::sort(ProcessorList, LessRecordFieldName());

  // Begin processor table
  OS << "// Sorted (by key) array of values for CPU subtype.\n"
     << "extern const llvm::SubtargetSubTypeKV " << Target
     << "SubTypeKV[] = {\n";

  // For each processor
  for (Record *Processor : ProcessorList) {
    StringRef Name = Processor->getValueAsString("Name");
    RecVec FeatureList = Processor->getValueAsListOfDefs("Features");
    RecVec TuneFeatureList = Processor->getValueAsListOfDefs("TuneFeatures");

    // Emit as { "cpu", { f1, ..., fn }, { t1, ..., tn }, &Model },
    OS << " { "
       << "\"" << Name << "\", ";

    printFeatureMask(OS, FeatureList, FeatureMap);
    OS << ", ";
    printFeatureMask(OS, TuneFeatureList, FeatureMap);

    // Emit the scheduler model pointer.
    const std::string &ProcModelName =
        SchedModels.getModelForProc(Processor).ModelName;
    OS << ", &" << ProcModelName << " },\n";
  }

  // End processor table
  OS << "};\n";

  return ProcessorList.size();
}
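
// Illustrative sketch: each SubTypeKV row emitted above pairs a CPU name with
// its feature mask, tune-feature mask, and scheduling model, roughly:
//
//   { "generic", { { { 0x1ULL, ... } } }, { { { 0x0ULL, ... } } },
//     &FooModel },
//
// where "generic" and FooModel are placeholder names.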

//
// FormItineraryStageString - Compose a string containing the stage
// data initialization for the specified itinerary. N is the number
// of stages.
//
void SubtargetEmitter::FormItineraryStageString(const std::string &Name,
                                                Record *ItinData,
                                                std::string &ItinString,
                                                unsigned &NStages) {
  // Get stage list
  RecVec StageList = ItinData->getValueAsListOfDefs("Stages");

  // For each stage
  unsigned N = NStages = StageList.size();
  for (unsigned i = 0; i < N;) {
    // Next stage
    const Record *Stage = StageList[i];

    // Form string as ,{ cycles, u1 | u2 | ... | un, timeinc, kind }
    int Cycles = Stage->getValueAsInt("Cycles");
    ItinString += " { " + itostr(Cycles) + ", ";

    // Get unit list
    RecVec UnitList = Stage->getValueAsListOfDefs("Units");

    // For each unit
    for (unsigned j = 0, M = UnitList.size(); j < M;) {
      // Add name and bitwise or
      ItinString += Name + "FU::" + UnitList[j]->getName().str();
      if (++j < M)
        ItinString += " | ";
    }

    int TimeInc = Stage->getValueAsInt("TimeInc");
    ItinString += ", " + itostr(TimeInc);

    int Kind = Stage->getValueAsInt("Kind");
    ItinString += ", (llvm::InstrStage::ReservationKinds)" + itostr(Kind);

    // Close off stage
    ItinString += " }";
    if (++i < N)
      ItinString += ", ";
  }
}

//
// FormItineraryOperandCycleString - Compose a string containing the
// operand cycle initialization for the specified itinerary. N is the
// number of operands that have cycles specified.
//
void SubtargetEmitter::FormItineraryOperandCycleString(
    Record *ItinData, std::string &ItinString, unsigned &NOperandCycles) {
  // Get operand cycle list
  std::vector<int64_t> OperandCycleList =
      ItinData->getValueAsListOfInts("OperandCycles");

  // For each operand cycle
  NOperandCycles = OperandCycleList.size();
  ListSeparator LS;
  for (int OCycle : OperandCycleList) {
    // Next operand cycle
    ItinString += LS;
    ItinString += " " + itostr(OCycle);
  }
}

void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
                                                 Record *ItinData,
                                                 std::string &ItinString,
                                                 unsigned NOperandCycles) {
  RecVec BypassList = ItinData->getValueAsListOfDefs("Bypasses");
  unsigned N = BypassList.size();
  unsigned i = 0;
  ListSeparator LS;
  for (; i < N; ++i) {
    ItinString += LS;
    ItinString += Name + "Bypass::" + BypassList[i]->getName().str();
  }
  for (; i < NOperandCycles; ++i) {
    ItinString += LS;
    ItinString += " 0";
  }
}
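
// Illustrative sketch: for a hypothetical itinerary namespace "Foo", the three
// helpers above build fragments such as
//
//   stages:         " { 1, FooFU::ALU | FooFU::LSU, 0,
//                       (llvm::InstrStage::ReservationKinds)0 }"
//   operand cycles: " 3, 1"
//   bypasses:       "FooBypass::ALU_FWD, 0"
//
// which EmitStageAndOperandCycleData() below deduplicates and splices into the
// generated tables. All unit and bypass names here are made up.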

//
// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
// cycle tables. Create a list of InstrItinerary objects (ProcItinLists)
// indexed by CodeGenSchedClass::Index.
//
void SubtargetEmitter::EmitStageAndOperandCycleData(
    raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<Record *, 8> ItinsDefSet;

  // Emit functional units for all the itineraries.
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {

    if (!ItinsDefSet.insert(ProcModel.ItinsDef).second)
      continue;

    RecVec FUs = ProcModel.ItinsDef->getValueAsListOfDefs("FU");
    if (FUs.empty())
      continue;

    StringRef Name = ProcModel.ItinsDef->getName();
    OS << "\n// Functional units for \"" << Name << "\"\n"
       << "namespace " << Name << "FU {\n";

    for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
      OS << " const InstrStage::FuncUnits " << FUs[j]->getName()
         << " = 1ULL << " << j << ";\n";

    OS << "} // end namespace " << Name << "FU\n";

    RecVec BPs = ProcModel.ItinsDef->getValueAsListOfDefs("BP");
    if (!BPs.empty()) {
      OS << "\n// Pipeline forwarding paths for itineraries \"" << Name
         << "\"\n"
         << "namespace " << Name << "Bypass {\n";

      OS << " const unsigned NoBypass = 0;\n";
      for (unsigned j = 0, BPN = BPs.size(); j < BPN; ++j)
        OS << " const unsigned " << BPs[j]->getName() << " = 1 << " << j
           << ";\n";

      OS << "} // end namespace " << Name << "Bypass\n";
    }
  }

  // Begin stages table
  std::string StageTable =
      "\nextern const llvm::InstrStage " + Target + "Stages[] = {\n";
  StageTable += " { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";

  // Begin operand cycle table
  std::string OperandCycleTable =
      "extern const unsigned " + Target + "OperandCycles[] = {\n";
  OperandCycleTable += " 0, // No itinerary\n";

  // Begin pipeline bypass table
  std::string BypassTable =
      "extern const unsigned " + Target + "ForwardingPaths[] = {\n";
  BypassTable += " 0, // No itinerary\n";

  // For each itinerary class across all processors, add a unique entry to the
  // stages, operand cycles, and pipeline bypass tables. Then add the new
  // itinerary object with computed offsets to the ProcItinLists result.
  unsigned StageCount = 1, OperandCycleCount = 1;
  std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    // Add process itinerary to the list.
    std::vector<InstrItinerary> &ItinList = ProcItinLists.emplace_back();

    // If this processor defines no itineraries, then leave the itinerary list
    // empty.
    if (!ProcModel.hasItineraries())
      continue;

    StringRef Name = ProcModel.ItinsDef->getName();

    ItinList.resize(SchedModels.numInstrSchedClasses());
    assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");

    for (unsigned SchedClassIdx = 0, SchedClassEnd = ItinList.size();
         SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {

      // Next itinerary data
      Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];

      // Get string and stage count
      std::string ItinStageString;
      unsigned NStages = 0;
      if (ItinData)
        FormItineraryStageString(std::string(Name), ItinData, ItinStageString,
                                 NStages);

      // Get string and operand cycle count
      std::string ItinOperandCycleString;
      unsigned NOperandCycles = 0;
      std::string ItinBypassString;
      if (ItinData) {
        FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
                                        NOperandCycles);

        FormItineraryBypassString(std::string(Name), ItinData,
                                  ItinBypassString, NOperandCycles);
      }

      // Check to see if stage already exists and create if it doesn't
      uint16_t FindStage = 0;
      if (NStages > 0) {
        FindStage = ItinStageMap[ItinStageString];
        if (FindStage == 0) {
          // Emit as { cycles, u1 | u2 | ... | un, timeinc }, // indices
          StageTable += ItinStageString + ", // " + itostr(StageCount);
          if (NStages > 1)
            StageTable += "-" + itostr(StageCount + NStages - 1);
          StageTable += "\n";
          // Record Itin class number.
          ItinStageMap[ItinStageString] = FindStage = StageCount;
          StageCount += NStages;
        }
      }

      // Check to see if operand cycle already exists and create if it doesn't
      uint16_t FindOperandCycle = 0;
      if (NOperandCycles > 0) {
        std::string ItinOperandString =
            ItinOperandCycleString + ItinBypassString;
        FindOperandCycle = ItinOperandMap[ItinOperandString];
        if (FindOperandCycle == 0) {
          // Emit as cycle, // index
          OperandCycleTable += ItinOperandCycleString + ", // ";
          std::string OperandIdxComment = itostr(OperandCycleCount);
          if (NOperandCycles > 1)
            OperandIdxComment +=
                "-" + itostr(OperandCycleCount + NOperandCycles - 1);
          OperandCycleTable += OperandIdxComment + "\n";
          // Record Itin class number.
          ItinOperandMap[ItinOperandCycleString] = FindOperandCycle =
              OperandCycleCount;
          // Emit as bypass, // index
          BypassTable += ItinBypassString + ", // " + OperandIdxComment + "\n";
          OperandCycleCount += NOperandCycles;
        }
      }

      // Set up itinerary as location and location + stage count
      int16_t NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
      InstrItinerary Itinerary = {
          NumUOps,
          FindStage,
          uint16_t(FindStage + NStages),
          FindOperandCycle,
          uint16_t(FindOperandCycle + NOperandCycles),
      };

      // Inject - empty slots will be 0, 0
      ItinList[SchedClassIdx] = Itinerary;
    }
  }

  // Closing stage
  StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
  StageTable += "};\n";

  // Closing operand cycles
  OperandCycleTable += " 0 // End operand cycles\n";
  OperandCycleTable += "};\n";

  BypassTable += " 0 // End bypass tables\n";
  BypassTable += "};\n";

  // Emit tables.
  OS << StageTable;
  OS << OperandCycleTable;
  OS << BypassTable;
}
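
// Illustrative sketch: the function above emits three parallel tables that the
// generated itineraries index into, roughly of the form
//
//   extern const llvm::InstrStage FooStages[] = {
//     { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary
//     { 1, FooFU::ALU, 0, (llvm::InstrStage::ReservationKinds)0 }, // 1
//     ...
//   };
//   extern const unsigned FooOperandCycles[] = { 0 /* No itinerary */, ... };
//   extern const unsigned FooForwardingPaths[] = { 0 /* No itinerary */, ... };
//
// "Foo" and the unit names are placeholders.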

//
// EmitItineraries - Generate data for processor itineraries that were
// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
// Itineraries for each processor. The Itinerary lists are indexed on
// CodeGenSchedClass::Index.
//
void SubtargetEmitter::EmitItineraries(
    raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<Record *, 8> ItinsDefSet;

  // For each processor's machine model
  std::vector<std::vector<InstrItinerary>>::iterator ProcItinListsIter =
      ProcItinLists.begin();
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
                                    PE = SchedModels.procModelEnd();
       PI != PE; ++PI, ++ProcItinListsIter) {

    Record *ItinsDef = PI->ItinsDef;
    if (!ItinsDefSet.insert(ItinsDef).second)
      continue;

    // Get the itinerary list for the processor.
    assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;

    // Empty itineraries aren't referenced anywhere in the tablegen output
    // so don't emit them.
    if (ItinList.empty())
      continue;

    OS << "\n";
    OS << "static const llvm::InstrItinerary ";

    // Begin processor itinerary table
    OS << ItinsDef->getName() << "[] = {\n";

    // For each itinerary class in CodeGenSchedClass::Index order.
    for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
      InstrItinerary &Itinerary = ItinList[j];

      // Emit Itinerary in the form of
      // { NumMicroOps, FirstStage, LastStage, FirstOperandCycle,
      //   LastOperandCycle }, // index class name
      OS << " { " << Itinerary.NumMicroOps << ", " << Itinerary.FirstStage
         << ", " << Itinerary.LastStage << ", " << Itinerary.FirstOperandCycle
         << ", " << Itinerary.LastOperandCycle << " }"
         << ", // " << j << " " << SchedModels.getSchedClass(j).Name << "\n";
    }
    // End processor itinerary table
    OS << " { 0, uint16_t(~0U), uint16_t(~0U), uint16_t(~0U), uint16_t(~0U) }"
          "// end marker\n";
    OS << "};\n";
  }
}
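
// Illustrative sketch: each per-processor itinerary row emitted above packs
// five indices, e.g.
//
//   { 2, 1, 3, 5, 7 }, // 12 IIC_ALU
//
// meaning NumMicroOps = 2, stages [1, 3), operand cycles [5, 7), followed by
// the scheduling class index and name in a trailing comment. The numbers and
// class name are made up.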

// Emit either the value defined in the TableGen Record, or the default
// value defined in the C++ header. The Record is null if the processor does
// not define a model.
void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
                                         StringRef Name, char Separator) {
  OS << " ";
  int V = R ? R->getValueAsInt(Name) : -1;
  if (V >= 0)
    OS << V << Separator << " // " << Name;
  else
    OS << "MCSchedModel::Default" << Name << Separator;
  OS << '\n';
}
676
677void SubtargetEmitter::EmitProcessorResourceSubUnits(
678 const CodeGenProcModel &ProcModel, raw_ostream &OS) {
679 OS << "\nstatic const unsigned " << ProcModel.ModelName
680 << "ProcResourceSubUnits[] = {\n"
681 << " 0, // Invalid\n";
682
683 for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
684 Record *PRDef = ProcModel.ProcResourceDefs[i];
685 if (!PRDef->isSubClassOf(Name: "ProcResGroup"))
686 continue;
687 RecVec ResUnits = PRDef->getValueAsListOfDefs(FieldName: "Resources");
688 for (Record *RUDef : ResUnits) {
689 Record *const RU =
690 SchedModels.findProcResUnits(ProcResKind: RUDef, PM: ProcModel, Loc: PRDef->getLoc());
691 for (unsigned J = 0; J < RU->getValueAsInt(FieldName: "NumUnits"); ++J) {
692 OS << " " << ProcModel.getProcResourceIdx(PRDef: RU) << ", ";
693 }
694 }
695 OS << " // " << PRDef->getName() << "\n";
696 }
697 OS << "};\n";
698}
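
// Illustrative sketch: for a hypothetical model "FooModel" with one
// ProcResGroup covering two single-unit resources, the array emitted above
// looks roughly like
//
//   static const unsigned FooModelProcResourceSubUnits[] = {
//     0, // Invalid
//     1,  2,  // FooPortAny
//   };
//
// where each entry is the 1-based index of a unit belonging to the group.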
699
700static void EmitRetireControlUnitInfo(const CodeGenProcModel &ProcModel,
701 raw_ostream &OS) {
702 int64_t ReorderBufferSize = 0, MaxRetirePerCycle = 0;
703 if (Record *RCU = ProcModel.RetireControlUnit) {
704 ReorderBufferSize =
705 std::max(a: ReorderBufferSize, b: RCU->getValueAsInt(FieldName: "ReorderBufferSize"));
706 MaxRetirePerCycle =
707 std::max(a: MaxRetirePerCycle, b: RCU->getValueAsInt(FieldName: "MaxRetirePerCycle"));
708 }
709
710 OS << ReorderBufferSize << ", // ReorderBufferSize\n ";
711 OS << MaxRetirePerCycle << ", // MaxRetirePerCycle\n ";
712}
713
714static void EmitRegisterFileInfo(const CodeGenProcModel &ProcModel,
715 unsigned NumRegisterFiles,
716 unsigned NumCostEntries, raw_ostream &OS) {
717 if (NumRegisterFiles)
718 OS << ProcModel.ModelName << "RegisterFiles,\n " << (1 + NumRegisterFiles);
719 else
720 OS << "nullptr,\n 0";
721
722 OS << ", // Number of register files.\n ";
723 if (NumCostEntries)
724 OS << ProcModel.ModelName << "RegisterCosts,\n ";
725 else
726 OS << "nullptr,\n ";
727 OS << NumCostEntries << ", // Number of register cost entries.\n";
728}
729
730unsigned
731SubtargetEmitter::EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
732 raw_ostream &OS) {
733 if (llvm::all_of(Range: ProcModel.RegisterFiles, P: [](const CodeGenRegisterFile &RF) {
734 return RF.hasDefaultCosts();
735 }))
736 return 0;
737
738 // Print the RegisterCost table first.
739 OS << "\n// {RegisterClassID, Register Cost, AllowMoveElimination }\n";
740 OS << "static const llvm::MCRegisterCostEntry " << ProcModel.ModelName
741 << "RegisterCosts"
742 << "[] = {\n";
743
744 for (const CodeGenRegisterFile &RF : ProcModel.RegisterFiles) {
745 // Skip register files with a default cost table.
746 if (RF.hasDefaultCosts())
747 continue;
748 // Add entries to the cost table.
749 for (const CodeGenRegisterCost &RC : RF.Costs) {
750 OS << " { ";
751 Record *Rec = RC.RCDef;
752 if (Rec->getValue(Name: "Namespace"))
753 OS << Rec->getValueAsString(FieldName: "Namespace") << "::";
754 OS << Rec->getName() << "RegClassID, " << RC.Cost << ", "
755 << RC.AllowMoveElimination << "},\n";
756 }
757 }
758 OS << "};\n";
759
760 // Now generate a table with register file info.
761 OS << "\n // {Name, #PhysRegs, #CostEntries, IndexToCostTbl, "
762 << "MaxMovesEliminatedPerCycle, AllowZeroMoveEliminationOnly }\n";
763 OS << "static const llvm::MCRegisterFileDesc " << ProcModel.ModelName
764 << "RegisterFiles"
765 << "[] = {\n"
766 << " { \"InvalidRegisterFile\", 0, 0, 0, 0, 0 },\n";
767 unsigned CostTblIndex = 0;
768
769 for (const CodeGenRegisterFile &RD : ProcModel.RegisterFiles) {
770 OS << " { ";
771 OS << '"' << RD.Name << '"' << ", " << RD.NumPhysRegs << ", ";
772 unsigned NumCostEntries = RD.Costs.size();
773 OS << NumCostEntries << ", " << CostTblIndex << ", "
774 << RD.MaxMovesEliminatedPerCycle << ", "
775 << RD.AllowZeroMoveEliminationOnly << "},\n";
776 CostTblIndex += NumCostEntries;
777 }
778 OS << "};\n";
779
780 return CostTblIndex;
781}
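
// Illustrative sketch: the two tables emitted above end up roughly like
//
//   static const llvm::MCRegisterCostEntry FooModelRegisterCosts[] = {
//     { Foo::GPR64RegClassID, 1, 0},
//   };
//   static const llvm::MCRegisterFileDesc FooModelRegisterFiles[] = {
//     { "InvalidRegisterFile", 0, 0, 0, 0, 0 },
//     { "FooIntegerPRF", 160, 1, 0, 0, 0},
//   };
//
// with made-up names and numbers; each file's IndexToCostTbl is the running
// CostTblIndex accumulated above.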
782
783void SubtargetEmitter::EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
784 raw_ostream &OS) {
785 unsigned QueueID = 0;
786 if (ProcModel.LoadQueue) {
787 const Record *Queue = ProcModel.LoadQueue->getValueAsDef(FieldName: "QueueDescriptor");
788 QueueID = 1 + std::distance(first: ProcModel.ProcResourceDefs.begin(),
789 last: find(Range: ProcModel.ProcResourceDefs, Val: Queue));
790 }
791 OS << " " << QueueID << ", // Resource Descriptor for the Load Queue\n";
792
793 QueueID = 0;
794 if (ProcModel.StoreQueue) {
795 const Record *Queue =
796 ProcModel.StoreQueue->getValueAsDef(FieldName: "QueueDescriptor");
797 QueueID = 1 + std::distance(first: ProcModel.ProcResourceDefs.begin(),
798 last: find(Range: ProcModel.ProcResourceDefs, Val: Queue));
799 }
800 OS << " " << QueueID << ", // Resource Descriptor for the Store Queue\n";
801}
802
803void SubtargetEmitter::EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
804 raw_ostream &OS) {
805 // Generate a table of register file descriptors (one entry per each user
806 // defined register file), and a table of register costs.
807 unsigned NumCostEntries = EmitRegisterFileTables(ProcModel, OS);
808
809 // Now generate a table for the extra processor info.
810 OS << "\nstatic const llvm::MCExtraProcessorInfo " << ProcModel.ModelName
811 << "ExtraInfo = {\n ";
812
813 // Add information related to the retire control unit.
814 EmitRetireControlUnitInfo(ProcModel, OS);
815
816 // Add information related to the register files (i.e. where to find register
817 // file descriptors and register costs).
818 EmitRegisterFileInfo(ProcModel, NumRegisterFiles: ProcModel.RegisterFiles.size(),
819 NumCostEntries, OS);
820
821 // Add information about load/store queues.
822 EmitLoadStoreQueueInfo(ProcModel, OS);
823
824 OS << "};\n";
825}
826
827void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel,
828 raw_ostream &OS) {
829 EmitProcessorResourceSubUnits(ProcModel, OS);
830
831 OS << "\n// {Name, NumUnits, SuperIdx, BufferSize, SubUnitsIdxBegin}\n";
832 OS << "static const llvm::MCProcResourceDesc " << ProcModel.ModelName
833 << "ProcResources"
834 << "[] = {\n"
835 << " {\"InvalidUnit\", 0, 0, 0, 0},\n";
836
837 unsigned SubUnitsOffset = 1;
838 for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
839 Record *PRDef = ProcModel.ProcResourceDefs[i];
840
841 Record *SuperDef = nullptr;
842 unsigned SuperIdx = 0;
843 unsigned NumUnits = 0;
844 const unsigned SubUnitsBeginOffset = SubUnitsOffset;
845 int BufferSize = PRDef->getValueAsInt(FieldName: "BufferSize");
846 if (PRDef->isSubClassOf(Name: "ProcResGroup")) {
847 RecVec ResUnits = PRDef->getValueAsListOfDefs(FieldName: "Resources");
848 for (Record *RU : ResUnits) {
849 NumUnits += RU->getValueAsInt(FieldName: "NumUnits");
850 SubUnitsOffset += RU->getValueAsInt(FieldName: "NumUnits");
851 }
852 } else {
853 // Find the SuperIdx
854 if (PRDef->getValueInit(FieldName: "Super")->isComplete()) {
855 SuperDef = SchedModels.findProcResUnits(ProcResKind: PRDef->getValueAsDef(FieldName: "Super"),
856 PM: ProcModel, Loc: PRDef->getLoc());
857 SuperIdx = ProcModel.getProcResourceIdx(PRDef: SuperDef);
858 }
859 NumUnits = PRDef->getValueAsInt(FieldName: "NumUnits");
860 }
861 // Emit the ProcResourceDesc
862 OS << " {\"" << PRDef->getName() << "\", ";
863 if (PRDef->getName().size() < 15)
864 OS.indent(NumSpaces: 15 - PRDef->getName().size());
865 OS << NumUnits << ", " << SuperIdx << ", " << BufferSize << ", ";
866 if (SubUnitsBeginOffset != SubUnitsOffset) {
867 OS << ProcModel.ModelName << "ProcResourceSubUnits + "
868 << SubUnitsBeginOffset;
869 } else {
870 OS << "nullptr";
871 }
872 OS << "}, // #" << i + 1;
873 if (SuperDef)
874 OS << ", Super=" << SuperDef->getName();
875 OS << "\n";
876 }
877 OS << "};\n";
878}
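
// Illustrative sketch: a generated MCProcResourceDesc table looks roughly like
//
//   static const llvm::MCProcResourceDesc FooModelProcResources[] = {
//     {"InvalidUnit", 0, 0, 0, 0},
//     {"FooALU",      2, 0, 0, nullptr}, // #1
//     {"FooPortAny",  3, 0, 0, FooModelProcResourceSubUnits + 1}, // #2
//   };
//
// Each row carries NumUnits, SuperIdx, BufferSize and an optional pointer into
// the sub-unit table; all names here are hypothetical.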
879
880// Find the WriteRes Record that defines processor resources for this
881// SchedWrite.
882Record *
883SubtargetEmitter::FindWriteResources(const CodeGenSchedRW &SchedWrite,
884 const CodeGenProcModel &ProcModel) {
885
886 // Check if the SchedWrite is already subtarget-specific and directly
887 // specifies a set of processor resources.
888 if (SchedWrite.TheDef->isSubClassOf(Name: "SchedWriteRes"))
889 return SchedWrite.TheDef;
890
891 Record *AliasDef = nullptr;
892 for (Record *A : SchedWrite.Aliases) {
893 const CodeGenSchedRW &AliasRW =
894 SchedModels.getSchedRW(Def: A->getValueAsDef(FieldName: "AliasRW"));
895 if (AliasRW.TheDef->getValueInit(FieldName: "SchedModel")->isComplete()) {
896 Record *ModelDef = AliasRW.TheDef->getValueAsDef(FieldName: "SchedModel");
897 if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
898 continue;
899 }
900 if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(),
                      "Multiple aliases defined for processor " +
                          ProcModel.ModelName +
                          ". Ensure only one SchedAlias exists per RW.");
906 AliasDef = AliasRW.TheDef;
907 }
908 if (AliasDef && AliasDef->isSubClassOf(Name: "SchedWriteRes"))
909 return AliasDef;
910
911 // Check this processor's list of write resources.
912 Record *ResDef = nullptr;
913 for (Record *WR : ProcModel.WriteResDefs) {
914 if (!WR->isSubClassOf(Name: "WriteRes"))
915 continue;
916 Record *WRDef = WR->getValueAsDef(FieldName: "WriteType");
917 if (AliasDef == WRDef || SchedWrite.TheDef == WRDef) {
918 if (ResDef) {
919 PrintFatalError(ErrorLoc: WR->getLoc(), Msg: "Resources are defined for both "
920 "SchedWrite and its alias on processor " +
921 ProcModel.ModelName);
922 }
923 ResDef = WR;
924 // If there is no AliasDef and we find a match, we can early exit since
925 // there is no need to verify whether there are resources defined for both
926 // SchedWrite and its alias.
927 if (!AliasDef)
928 break;
929 }
930 }
931 // TODO: If ProcModel has a base model (previous generation processor),
932 // then call FindWriteResources recursively with that model here.
933 if (!ResDef) {
934 PrintFatalError(ErrorLoc: ProcModel.ModelDef->getLoc(),
935 Msg: Twine("Processor does not define resources for ") +
936 SchedWrite.TheDef->getName());
937 }
938 return ResDef;
939}
940
941/// Find the ReadAdvance record for the given SchedRead on this processor or
942/// return NULL.
943Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead,
944 const CodeGenProcModel &ProcModel) {
945 // Check for SchedReads that directly specify a ReadAdvance.
946 if (SchedRead.TheDef->isSubClassOf(Name: "SchedReadAdvance"))
947 return SchedRead.TheDef;
948
949 // Check this processor's list of aliases for SchedRead.
950 Record *AliasDef = nullptr;
951 for (Record *A : SchedRead.Aliases) {
952 const CodeGenSchedRW &AliasRW =
953 SchedModels.getSchedRW(Def: A->getValueAsDef(FieldName: "AliasRW"));
954 if (AliasRW.TheDef->getValueInit(FieldName: "SchedModel")->isComplete()) {
955 Record *ModelDef = AliasRW.TheDef->getValueAsDef(FieldName: "SchedModel");
956 if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
957 continue;
958 }
959 if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(),
                      "Multiple aliases defined for processor " +
                          ProcModel.ModelName +
                          ". Ensure only one SchedAlias exists per RW.");
965 AliasDef = AliasRW.TheDef;
966 }
967 if (AliasDef && AliasDef->isSubClassOf(Name: "SchedReadAdvance"))
968 return AliasDef;
969
970 // Check this processor's ReadAdvanceList.
971 Record *ResDef = nullptr;
972 for (Record *RA : ProcModel.ReadAdvanceDefs) {
973 if (!RA->isSubClassOf(Name: "ReadAdvance"))
974 continue;
975 Record *RADef = RA->getValueAsDef(FieldName: "ReadType");
976 if (AliasDef == RADef || SchedRead.TheDef == RADef) {
977 if (ResDef) {
978 PrintFatalError(ErrorLoc: RA->getLoc(), Msg: "Resources are defined for both "
979 "SchedRead and its alias on processor " +
980 ProcModel.ModelName);
981 }
982 ResDef = RA;
983 // If there is no AliasDef and we find a match, we can early exit since
984 // there is no need to verify whether there are resources defined for both
985 // SchedRead and its alias.
986 if (!AliasDef)
987 break;
988 }
989 }
990 // TODO: If ProcModel has a base model (previous generation processor),
991 // then call FindReadAdvance recursively with that model here.
992 if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
993 PrintFatalError(ErrorLoc: ProcModel.ModelDef->getLoc(),
994 Msg: Twine("Processor does not define resources for ") +
995 SchedRead.TheDef->getName());
996 }
997 return ResDef;
998}
999
1000// Expand an explicit list of processor resources into a full list of implied
1001// resource groups and super resources that cover them.
1002void SubtargetEmitter::ExpandProcResources(
1003 RecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles,
1004 std::vector<int64_t> &AcquireAtCycles, const CodeGenProcModel &PM) {
1005 assert(PRVec.size() == ReleaseAtCycles.size() && "failed precondition");
1006 for (unsigned i = 0, e = PRVec.size(); i != e; ++i) {
1007 Record *PRDef = PRVec[i];
1008 RecVec SubResources;
1009 if (PRDef->isSubClassOf(Name: "ProcResGroup"))
1010 SubResources = PRDef->getValueAsListOfDefs(FieldName: "Resources");
1011 else {
1012 SubResources.push_back(x: PRDef);
1013 PRDef = SchedModels.findProcResUnits(ProcResKind: PRDef, PM, Loc: PRDef->getLoc());
1014 for (Record *SubDef = PRDef;
1015 SubDef->getValueInit(FieldName: "Super")->isComplete();) {
        if (SubDef->isSubClassOf("ProcResGroup")) {
          // Disallow this for simplicity.
          PrintFatalError(SubDef->getLoc(),
                          "A processor resource group cannot be a super "
                          "resource.");
1020 }
1021 Record *SuperDef = SchedModels.findProcResUnits(
1022 ProcResKind: SubDef->getValueAsDef(FieldName: "Super"), PM, Loc: SubDef->getLoc());
1023 PRVec.push_back(x: SuperDef);
1024 ReleaseAtCycles.push_back(x: ReleaseAtCycles[i]);
1025 AcquireAtCycles.push_back(x: AcquireAtCycles[i]);
1026 SubDef = SuperDef;
1027 }
1028 }
1029 for (Record *PR : PM.ProcResourceDefs) {
1030 if (PR == PRDef || !PR->isSubClassOf(Name: "ProcResGroup"))
1031 continue;
1032 RecVec SuperResources = PR->getValueAsListOfDefs(FieldName: "Resources");
1033 RecIter SubI = SubResources.begin(), SubE = SubResources.end();
1034 for (; SubI != SubE; ++SubI) {
1035 if (!is_contained(Range&: SuperResources, Element: *SubI)) {
1036 break;
1037 }
1038 }
1039 if (SubI == SubE) {
1040 PRVec.push_back(x: PR);
1041 ReleaseAtCycles.push_back(x: ReleaseAtCycles[i]);
1042 AcquireAtCycles.push_back(x: AcquireAtCycles[i]);
1043 }
1044 }
1045 }
1046}
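
// Worked example (hypothetical names): if a write consumes unit FooP0, and the
// model defines groups FooP01 = {FooP0, FooP1} and FooP015 = {FooP0, FooP1,
// FooP5}, the loop above appends both groups to PRVec with the same
// release/acquire cycles as FooP0, so every group containing a consumed unit
// is also marked busy for those cycles.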
1047
1048// Generate the SchedClass table for this processor and update global
1049// tables. Must be called for each processor in order.
1050void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
1051 SchedClassTables &SchedTables) {
1052 std::vector<MCSchedClassDesc> &SCTab =
1053 SchedTables.ProcSchedClasses.emplace_back();
1054 if (!ProcModel.hasInstrSchedModel())
1055 return;
1056
1057 LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
1058 for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
1059 LLVM_DEBUG(SC.dump(&SchedModels));
1060
1061 MCSchedClassDesc &SCDesc = SCTab.emplace_back();
1062 // SCDesc.Name is guarded by NDEBUG
1063 SCDesc.NumMicroOps = 0;
1064 SCDesc.BeginGroup = false;
1065 SCDesc.EndGroup = false;
1066 SCDesc.RetireOOO = false;
1067 SCDesc.WriteProcResIdx = 0;
1068 SCDesc.WriteLatencyIdx = 0;
1069 SCDesc.ReadAdvanceIdx = 0;
1070
1071 // A Variant SchedClass has no resources of its own.
1072 bool HasVariants = false;
1073 for (const CodeGenSchedTransition &CGT :
1074 make_range(x: SC.Transitions.begin(), y: SC.Transitions.end())) {
1075 if (CGT.ProcIndex == ProcModel.Index) {
1076 HasVariants = true;
1077 break;
1078 }
1079 }
1080 if (HasVariants) {
1081 SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
1082 continue;
1083 }
1084
    // Determine if the SchedClass is actually reachable on this processor. If
    // not, don't try to locate the processor resources; it will fail.
    // If ProcIndices contains 0, this class applies to all processors.
1088 assert(!SC.ProcIndices.empty() && "expect at least one procidx");
1089 if (SC.ProcIndices[0] != 0) {
1090 if (!is_contained(Range: SC.ProcIndices, Element: ProcModel.Index))
1091 continue;
1092 }
1093 IdxVec Writes = SC.Writes;
1094 IdxVec Reads = SC.Reads;
1095 if (!SC.InstRWs.empty()) {
1096 // This class has a default ReadWrite list which can be overridden by
1097 // InstRW definitions.
1098 Record *RWDef = nullptr;
1099 for (Record *RW : SC.InstRWs) {
1100 Record *RWModelDef = RW->getValueAsDef(FieldName: "SchedModel");
1101 if (&ProcModel == &SchedModels.getProcModel(ModelDef: RWModelDef)) {
1102 RWDef = RW;
1103 break;
1104 }
1105 }
1106 if (RWDef) {
1107 Writes.clear();
1108 Reads.clear();
1109 SchedModels.findRWs(RWDefs: RWDef->getValueAsListOfDefs(FieldName: "OperandReadWrites"),
1110 Writes, Reads);
1111 }
1112 }
1113 if (Writes.empty()) {
1114 // Check this processor's itinerary class resources.
1115 for (Record *I : ProcModel.ItinRWDefs) {
1116 RecVec Matched = I->getValueAsListOfDefs(FieldName: "MatchedItinClasses");
1117 if (is_contained(Range&: Matched, Element: SC.ItinClassDef)) {
1118 SchedModels.findRWs(RWDefs: I->getValueAsListOfDefs(FieldName: "OperandReadWrites"),
1119 Writes, Reads);
1120 break;
1121 }
1122 }
1123 if (Writes.empty()) {
1124 LLVM_DEBUG(dbgs() << ProcModel.ModelName
1125 << " does not have resources for class " << SC.Name
1126 << '\n');
1127 SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
1128 }
1129 }
1130 // Sum resources across all operand writes.
1131 std::vector<MCWriteProcResEntry> WriteProcResources;
1132 std::vector<MCWriteLatencyEntry> WriteLatencies;
1133 std::vector<std::string> WriterNames;
1134 std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
1135 for (unsigned W : Writes) {
1136 IdxVec WriteSeq;
1137 SchedModels.expandRWSeqForProc(RWIdx: W, RWSeq&: WriteSeq, /*IsRead=*/false, ProcModel);
1138
1139 // For each operand, create a latency entry.
1140 MCWriteLatencyEntry WLEntry;
1141 WLEntry.Cycles = 0;
1142 unsigned WriteID = WriteSeq.back();
1143 WriterNames.push_back(x: SchedModels.getSchedWrite(Idx: WriteID).Name);
1144 // If this Write is not referenced by a ReadAdvance, don't distinguish it
1145 // from other WriteLatency entries.
1146 if (!ProcModel.hasReadOfWrite(WriteDef: SchedModels.getSchedWrite(Idx: WriteID).TheDef))
1147 WriteID = 0;
1148 WLEntry.WriteResourceID = WriteID;
1149
1150 for (unsigned WS : WriteSeq) {
1151
1152 Record *WriteRes =
1153 FindWriteResources(SchedWrite: SchedModels.getSchedWrite(Idx: WS), ProcModel);
1154
1155 // Mark the parent class as invalid for unsupported write types.
1156 if (WriteRes->getValueAsBit(FieldName: "Unsupported")) {
1157 SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
1158 break;
1159 }
1160 WLEntry.Cycles += WriteRes->getValueAsInt(FieldName: "Latency");
1161 SCDesc.NumMicroOps += WriteRes->getValueAsInt(FieldName: "NumMicroOps");
1162 SCDesc.BeginGroup |= WriteRes->getValueAsBit(FieldName: "BeginGroup");
1163 SCDesc.EndGroup |= WriteRes->getValueAsBit(FieldName: "EndGroup");
1164 SCDesc.BeginGroup |= WriteRes->getValueAsBit(FieldName: "SingleIssue");
1165 SCDesc.EndGroup |= WriteRes->getValueAsBit(FieldName: "SingleIssue");
1166 SCDesc.RetireOOO |= WriteRes->getValueAsBit(FieldName: "RetireOOO");
1167
1168 // Create an entry for each ProcResource listed in WriteRes.
1169 RecVec PRVec = WriteRes->getValueAsListOfDefs(FieldName: "ProcResources");
1170 std::vector<int64_t> ReleaseAtCycles =
1171 WriteRes->getValueAsListOfInts(FieldName: "ReleaseAtCycles");
1172
1173 std::vector<int64_t> AcquireAtCycles =
1174 WriteRes->getValueAsListOfInts(FieldName: "AcquireAtCycles");
1175
1176 // Check consistency of the two vectors carrying the start and
1177 // stop cycles of the resources.
1178 if (!ReleaseAtCycles.empty() &&
1179 ReleaseAtCycles.size() != PRVec.size()) {
1180 // If ReleaseAtCycles is provided, check consistency.
1181 PrintFatalError(
1182 ErrorLoc: WriteRes->getLoc(),
1183 Msg: Twine("Inconsistent release at cycles: size(ReleaseAtCycles) != "
1184 "size(ProcResources): ")
1185 .concat(Suffix: Twine(PRVec.size()))
1186 .concat(Suffix: " vs ")
1187 .concat(Suffix: Twine(ReleaseAtCycles.size())));
1188 }
1189
1190 if (!AcquireAtCycles.empty() &&
1191 AcquireAtCycles.size() != PRVec.size()) {
1192 PrintFatalError(
1193 ErrorLoc: WriteRes->getLoc(),
1194 Msg: Twine("Inconsistent resource cycles: size(AcquireAtCycles) != "
1195 "size(ProcResources): ")
1196 .concat(Suffix: Twine(AcquireAtCycles.size()))
1197 .concat(Suffix: " vs ")
1198 .concat(Suffix: Twine(PRVec.size())));
1199 }
1200
1201 if (ReleaseAtCycles.empty()) {
1202 // If ReleaseAtCycles is not provided, default to one cycle
1203 // per resource.
1204 ReleaseAtCycles.resize(new_size: PRVec.size(), x: 1);
1205 }
1206
1207 if (AcquireAtCycles.empty()) {
1208 // If AcquireAtCycles is not provided, reserve the resource
1209 // starting from cycle 0.
1210 AcquireAtCycles.resize(new_size: PRVec.size(), x: 0);
1211 }
1212
1213 assert(AcquireAtCycles.size() == ReleaseAtCycles.size());
1214
1215 ExpandProcResources(PRVec, ReleaseAtCycles, AcquireAtCycles, PM: ProcModel);
1216 assert(AcquireAtCycles.size() == ReleaseAtCycles.size());
1217
1218 for (unsigned PRIdx = 0, PREnd = PRVec.size(); PRIdx != PREnd;
1219 ++PRIdx) {
1220 MCWriteProcResEntry WPREntry;
1221 WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRDef: PRVec[PRIdx]);
1222 assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
1223 WPREntry.ReleaseAtCycle = ReleaseAtCycles[PRIdx];
1224 WPREntry.AcquireAtCycle = AcquireAtCycles[PRIdx];
1225 if (AcquireAtCycles[PRIdx] > ReleaseAtCycles[PRIdx]) {
1226 PrintFatalError(
1227 ErrorLoc: WriteRes->getLoc(),
1228 Msg: Twine("Inconsistent resource cycles: AcquireAtCycles "
1229 "< ReleaseAtCycles must hold."));
1230 }
1231 if (AcquireAtCycles[PRIdx] < 0) {
1232 PrintFatalError(ErrorLoc: WriteRes->getLoc(),
1233 Msg: Twine("Invalid value: AcquireAtCycle "
1234 "must be a non-negative value."));
1235 }
          // If this resource is already used in this sequence, add the current
          // entry's cycles so that the same resource appears to be used
          // serially, rather than in multiple parallel uses. This is important
          // for in-order machines, where resource consumption is a hazard.
1240 unsigned WPRIdx = 0, WPREnd = WriteProcResources.size();
1241 for (; WPRIdx != WPREnd; ++WPRIdx) {
1242 if (WriteProcResources[WPRIdx].ProcResourceIdx ==
1243 WPREntry.ProcResourceIdx) {
1244 // TODO: multiple use of the same resources would
1245 // require either 1. thinking of how to handle multiple
1246 // intervals for the same resource in
1247 // `<Target>WriteProcResTable` (see
1248 // `SubtargetEmitter::EmitSchedClassTables`), or
1249 // 2. thinking how to merge multiple intervals into a
1250 // single interval.
              assert(WPREntry.AcquireAtCycle == 0 &&
                     "multiple uses of the same resource are not yet handled");
1253 WriteProcResources[WPRIdx].ReleaseAtCycle +=
1254 WPREntry.ReleaseAtCycle;
1255 break;
1256 }
1257 }
1258 if (WPRIdx == WPREnd)
1259 WriteProcResources.push_back(x: WPREntry);
1260 }
1261 }
1262 WriteLatencies.push_back(x: WLEntry);
1263 }
1264 // Create an entry for each operand Read in this SchedClass.
1265 // Entries must be sorted first by UseIdx then by WriteResourceID.
1266 for (unsigned UseIdx = 0, EndIdx = Reads.size(); UseIdx != EndIdx;
1267 ++UseIdx) {
1268 Record *ReadAdvance =
1269 FindReadAdvance(SchedRead: SchedModels.getSchedRead(Idx: Reads[UseIdx]), ProcModel);
1270 if (!ReadAdvance)
1271 continue;
1272
1273 // Mark the parent class as invalid for unsupported write types.
1274 if (ReadAdvance->getValueAsBit(FieldName: "Unsupported")) {
1275 SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
1276 break;
1277 }
1278 RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs(FieldName: "ValidWrites");
1279 IdxVec WriteIDs;
1280 if (ValidWrites.empty())
1281 WriteIDs.push_back(x: 0);
1282 else {
1283 for (Record *VW : ValidWrites) {
1284 unsigned WriteID = SchedModels.getSchedRWIdx(Def: VW, /*IsRead=*/false);
1285 assert(WriteID != 0 &&
1286 "Expected a valid SchedRW in the list of ValidWrites");
1287 WriteIDs.push_back(x: WriteID);
1288 }
1289 }
1290 llvm::sort(C&: WriteIDs);
1291 for (unsigned W : WriteIDs) {
1292 MCReadAdvanceEntry RAEntry;
1293 RAEntry.UseIdx = UseIdx;
1294 RAEntry.WriteResourceID = W;
1295 RAEntry.Cycles = ReadAdvance->getValueAsInt(FieldName: "Cycles");
1296 ReadAdvanceEntries.push_back(x: RAEntry);
1297 }
1298 }
1299 if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
1300 WriteProcResources.clear();
1301 WriteLatencies.clear();
1302 ReadAdvanceEntries.clear();
1303 }
1304 // Add the information for this SchedClass to the global tables using basic
1305 // compression.
1306 //
1307 // WritePrecRes entries are sorted by ProcResIdx.
1308 llvm::sort(C&: WriteProcResources, Comp: LessWriteProcResources());
1309
1310 SCDesc.NumWriteProcResEntries = WriteProcResources.size();
1311 std::vector<MCWriteProcResEntry>::iterator WPRPos =
1312 std::search(first1: SchedTables.WriteProcResources.begin(),
1313 last1: SchedTables.WriteProcResources.end(),
1314 first2: WriteProcResources.begin(), last2: WriteProcResources.end());
1315 if (WPRPos != SchedTables.WriteProcResources.end())
1316 SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
1317 else {
1318 SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
1319 SchedTables.WriteProcResources.insert(position: WPRPos, first: WriteProcResources.begin(),
1320 last: WriteProcResources.end());
1321 }
1322 // Latency entries must remain in operand order.
1323 SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
1324 std::vector<MCWriteLatencyEntry>::iterator WLPos = std::search(
1325 first1: SchedTables.WriteLatencies.begin(), last1: SchedTables.WriteLatencies.end(),
1326 first2: WriteLatencies.begin(), last2: WriteLatencies.end());
1327 if (WLPos != SchedTables.WriteLatencies.end()) {
1328 unsigned idx = WLPos - SchedTables.WriteLatencies.begin();
1329 SCDesc.WriteLatencyIdx = idx;
1330 for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i)
1331 if (SchedTables.WriterNames[idx + i].find(str: WriterNames[i]) ==
1332 std::string::npos) {
1333 SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i];
1334 }
1335 } else {
1336 SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
1337 llvm::append_range(C&: SchedTables.WriteLatencies, R&: WriteLatencies);
1338 llvm::append_range(C&: SchedTables.WriterNames, R&: WriterNames);
1339 }
1340 // ReadAdvanceEntries must remain in operand order.
1341 SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
1342 std::vector<MCReadAdvanceEntry>::iterator RAPos =
1343 std::search(first1: SchedTables.ReadAdvanceEntries.begin(),
1344 last1: SchedTables.ReadAdvanceEntries.end(),
1345 first2: ReadAdvanceEntries.begin(), last2: ReadAdvanceEntries.end());
1346 if (RAPos != SchedTables.ReadAdvanceEntries.end())
1347 SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
1348 else {
1349 SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
1350 llvm::append_range(C&: SchedTables.ReadAdvanceEntries, R&: ReadAdvanceEntries);
1351 }
1352 }
1353}
1354
1355// Emit SchedClass tables for all processors and associated global tables.
1356void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables,
1357 raw_ostream &OS) {
1358 // Emit global WriteProcResTable.
1359 OS << "\n// {ProcResourceIdx, ReleaseAtCycle, AcquireAtCycle}\n"
1360 << "extern const llvm::MCWriteProcResEntry " << Target
1361 << "WriteProcResTable[] = {\n"
1362 << " { 0, 0, 0 }, // Invalid\n";
1363 for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
1364 WPRIdx != WPREnd; ++WPRIdx) {
1365 MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
1366 OS << " {" << format(Fmt: "%2d", Vals: WPREntry.ProcResourceIdx) << ", "
1367 << format(Fmt: "%2d", Vals: WPREntry.ReleaseAtCycle) << ", "
1368 << format(Fmt: "%2d", Vals: WPREntry.AcquireAtCycle) << "}";
1369 if (WPRIdx + 1 < WPREnd)
1370 OS << ',';
1371 OS << " // #" << WPRIdx << '\n';
1372 }
1373 OS << "}; // " << Target << "WriteProcResTable\n";
1374
1375 // Emit global WriteLatencyTable.
1376 OS << "\n// {Cycles, WriteResourceID}\n"
1377 << "extern const llvm::MCWriteLatencyEntry " << Target
1378 << "WriteLatencyTable[] = {\n"
1379 << " { 0, 0}, // Invalid\n";
1380 for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
1381 WLIdx != WLEnd; ++WLIdx) {
1382 MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
1383 OS << " {" << format(Fmt: "%2d", Vals: WLEntry.Cycles) << ", "
1384 << format(Fmt: "%2d", Vals: WLEntry.WriteResourceID) << "}";
1385 if (WLIdx + 1 < WLEnd)
1386 OS << ',';
1387 OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
1388 }
1389 OS << "}; // " << Target << "WriteLatencyTable\n";
1390
1391 // Emit global ReadAdvanceTable.
1392 OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
1393 << "extern const llvm::MCReadAdvanceEntry " << Target
1394 << "ReadAdvanceTable[] = {\n"
1395 << " {0, 0, 0}, // Invalid\n";
1396 for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
1397 RAIdx != RAEnd; ++RAIdx) {
1398 MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
1399 OS << " {" << RAEntry.UseIdx << ", "
1400 << format(Fmt: "%2d", Vals: RAEntry.WriteResourceID) << ", "
1401 << format(Fmt: "%2d", Vals: RAEntry.Cycles) << "}";
1402 if (RAIdx + 1 < RAEnd)
1403 OS << ',';
1404 OS << " // #" << RAIdx << '\n';
1405 }
1406 OS << "}; // " << Target << "ReadAdvanceTable\n";
1407
1408 // Emit a SchedClass table for each processor.
1409 for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
1410 PE = SchedModels.procModelEnd();
1411 PI != PE; ++PI) {
1412 if (!PI->hasInstrSchedModel())
1413 continue;
1414
1415 std::vector<MCSchedClassDesc> &SCTab =
1416 SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];
1417
1418 OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup, RetireOOO,"
1419 << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
1420 OS << "static const llvm::MCSchedClassDesc " << PI->ModelName
1421 << "SchedClasses[] = {\n";
1422
    // The first class is always invalid. We have no way to distinguish it
    // except by name and position.
1425 assert(SchedModels.getSchedClass(0).Name == "NoInstrModel" &&
1426 "invalid class not first");
1427 OS << " {DBGFIELD(\"InvalidSchedClass\") "
1428 << MCSchedClassDesc::InvalidNumMicroOps
1429 << ", false, false, false, 0, 0, 0, 0, 0, 0},\n";
1430
1431 for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
1432 MCSchedClassDesc &MCDesc = SCTab[SCIdx];
1433 const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(Idx: SCIdx);
1434 OS << " {DBGFIELD(\"" << SchedClass.Name << "\") ";
1435 if (SchedClass.Name.size() < 18)
1436 OS.indent(NumSpaces: 18 - SchedClass.Name.size());
1437 OS << MCDesc.NumMicroOps << ", " << (MCDesc.BeginGroup ? "true" : "false")
1438 << ", " << (MCDesc.EndGroup ? "true" : "false") << ", "
1439 << (MCDesc.RetireOOO ? "true" : "false") << ", "
1440 << format(Fmt: "%2d", Vals: MCDesc.WriteProcResIdx) << ", "
1441 << MCDesc.NumWriteProcResEntries << ", "
1442 << format(Fmt: "%2d", Vals: MCDesc.WriteLatencyIdx) << ", "
1443 << MCDesc.NumWriteLatencyEntries << ", "
1444 << format(Fmt: "%2d", Vals: MCDesc.ReadAdvanceIdx) << ", "
1445 << MCDesc.NumReadAdvanceEntries << "}, // #" << SCIdx << '\n';
1446 }
1447 OS << "}; // " << PI->ModelName << "SchedClasses\n";
1448 }
1449}
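
// Illustrative sketch: one row of a per-processor SchedClasses table emitted
// above looks roughly like
//
//   {DBGFIELD("WriteALU")  1, false, false, false,  1, 1,  1, 1,  0, 0}, // #1
//
// i.e. NumMicroOps, the BeginGroup/EndGroup/RetireOOO flags, then {index,
// count} pairs into the shared WriteProcRes, WriteLatency and ReadAdvance
// tables. The class name and numbers are made up.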
1450
1451void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
1452 // For each processor model.
1453 for (const CodeGenProcModel &PM : SchedModels.procModels()) {
1454 // Emit extra processor info if available.
1455 if (PM.hasExtraProcessorInfo())
1456 EmitExtraProcessorInfo(ProcModel: PM, OS);
1457 // Emit processor resource table.
1458 if (PM.hasInstrSchedModel())
1459 EmitProcessorResources(ProcModel: PM, OS);
    else if (!PM.ProcResourceDefs.empty())
      PrintFatalError(PM.ModelDef->getLoc(),
                      "SchedMachineModel defines ProcResources without "
                      "defining WriteRes or SchedWriteRes");
1464
1465 // Begin processor itinerary properties
1466 OS << "\n";
1467 OS << "static const llvm::MCSchedModel " << PM.ModelName << " = {\n";
1468 EmitProcessorProp(OS, R: PM.ModelDef, Name: "IssueWidth", Separator: ',');
1469 EmitProcessorProp(OS, R: PM.ModelDef, Name: "MicroOpBufferSize", Separator: ',');
1470 EmitProcessorProp(OS, R: PM.ModelDef, Name: "LoopMicroOpBufferSize", Separator: ',');
1471 EmitProcessorProp(OS, R: PM.ModelDef, Name: "LoadLatency", Separator: ',');
1472 EmitProcessorProp(OS, R: PM.ModelDef, Name: "HighLatency", Separator: ',');
1473 EmitProcessorProp(OS, R: PM.ModelDef, Name: "MispredictPenalty", Separator: ',');
1474
1475 bool PostRAScheduler =
1476 (PM.ModelDef ? PM.ModelDef->getValueAsBit(FieldName: "PostRAScheduler") : false);
1477
1478 OS << " " << (PostRAScheduler ? "true" : "false") << ", // "
1479 << "PostRAScheduler\n";
1480
1481 bool CompleteModel =
1482 (PM.ModelDef ? PM.ModelDef->getValueAsBit(FieldName: "CompleteModel") : false);
1483
1484 OS << " " << (CompleteModel ? "true" : "false") << ", // "
1485 << "CompleteModel\n";
1486
1487 bool EnableIntervals =
1488 (PM.ModelDef ? PM.ModelDef->getValueAsBit(FieldName: "EnableIntervals") : false);
1489
1490 OS << " " << (EnableIntervals ? "true" : "false") << ", // "
1491 << "EnableIntervals\n";
1492
1493 OS << " " << PM.Index << ", // Processor ID\n";
1494 if (PM.hasInstrSchedModel())
1495 OS << " " << PM.ModelName << "ProcResources"
1496 << ",\n"
1497 << " " << PM.ModelName << "SchedClasses"
1498 << ",\n"
1499 << " " << PM.ProcResourceDefs.size() + 1 << ",\n"
1500 << " "
1501 << (SchedModels.schedClassEnd() - SchedModels.schedClassBegin())
1502 << ",\n";
1503 else
1504 OS << " nullptr, nullptr, 0, 0,"
1505 << " // No instruction-level machine model.\n";
1506 if (PM.hasItineraries())
1507 OS << " " << PM.ItinsDef->getName() << ",\n";
1508 else
1509 OS << " nullptr, // No Itinerary\n";
1510 if (PM.hasExtraProcessorInfo())
1511 OS << " &" << PM.ModelName << "ExtraInfo,\n";
1512 else
1513 OS << " nullptr // No extra processor descriptor\n";
1514 OS << "};\n";
1515 }
1516}
1517
1518//
1519// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
1520//
1521void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
1522 OS << "#ifdef DBGFIELD\n"
1523 << "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
1524 << "#endif\n"
1525 << "#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)\n"
1526 << "#define DBGFIELD(x) x,\n"
1527 << "#else\n"
1528 << "#define DBGFIELD(x)\n"
1529 << "#endif\n";
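  // In asserts or LLVM_ENABLE_DUMP builds the emitted DBGFIELD("Name") expands
  // to the quoted name, so sched class descriptors keep their names for
  // dumping; otherwise it expands to nothing and the field is omitted.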

  if (SchedModels.hasItineraries()) {
    std::vector<std::vector<InstrItinerary>> ProcItinLists;
    // Emit the stage data
    EmitStageAndOperandCycleData(OS, ProcItinLists);
    EmitItineraries(OS, ProcItinLists);
  }
  OS << "\n// ===============================================================\n"
     << "// Data tables for the new per-operand machine model.\n";

  SchedClassTables SchedTables;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    GenSchedClassTables(ProcModel, SchedTables);
  }
  EmitSchedClassTables(SchedTables, OS);

  OS << "\n#undef DBGFIELD\n";

  // Emit the processor machine model
  EmitProcessorModels(OS);
}

static void emitPredicateProlog(const RecordKeeper &Records, raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream Stream(Buffer);

  // Collect all the PredicateProlog records and print them to the output
  // stream.
  std::vector<Record *> Prologs =
      Records.getAllDerivedDefinitions("PredicateProlog");
  llvm::sort(Prologs, LessRecord());
  for (Record *P : Prologs)
    Stream << P->getValueAsString("Code") << '\n';

  OS << Buffer;
}

static bool isTruePredicate(const Record *Rec) {
  return Rec->isSubClassOf("MCSchedPredicate") &&
         Rec->getValueAsDef("Pred")->isSubClassOf("MCTrue");
}

static void emitPredicates(const CodeGenSchedTransition &T,
                           const CodeGenSchedClass &SC, PredicateExpander &PE,
                           raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream SS(Buffer);

  // If not all predicates are MCTrue, then we need an if-stmt.
  unsigned NumNonTruePreds =
      T.PredTerm.size() - count_if(T.PredTerm, isTruePredicate);

  SS.indent(PE.getIndentLevel() * 2);

  if (NumNonTruePreds) {
    bool FirstNonTruePredicate = true;
    SS << "if (";

    PE.setIndentLevel(PE.getIndentLevel() + 2);

    for (const Record *Rec : T.PredTerm) {
      // Skip predicates that evaluate to "true".
      if (isTruePredicate(Rec))
        continue;

      if (FirstNonTruePredicate) {
        FirstNonTruePredicate = false;
      } else {
        SS << "\n";
        SS.indent(PE.getIndentLevel() * 2);
        SS << "&& ";
      }

      if (Rec->isSubClassOf("MCSchedPredicate")) {
        PE.expandPredicate(SS, Rec->getValueAsDef("Pred"));
        continue;
      }

      // Expand this legacy predicate and wrap it in parentheses if there is
      // more than one predicate to expand.
      SS << ((NumNonTruePreds > 1) ? "(" : "")
         << Rec->getValueAsString("Predicate")
         << ((NumNonTruePreds > 1) ? ")" : "");
    }

    SS << ")\n"; // end of if-stmt
    PE.decreaseIndentLevel();
    SS.indent(PE.getIndentLevel() * 2);
    PE.decreaseIndentLevel();
  }

  SS << "return " << T.ToClassIdx << "; // " << SC.Name << '\n';
  OS << Buffer;
}
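
// For a transition guarded by two non-true predicates, the snippet produced by
// emitPredicates() looks roughly like this (predicate text, class index, and
// class name are illustrative):
//
//   if (SomePredicate(MI)
//       && (OtherLegacyPredicate))
//     return 9; // SomeSchedClass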

// Used by method `SubtargetEmitter::emitSchedModelHelpersImpl()` to generate
// epilogue code for the auto-generated helper.
static void emitSchedModelHelperEpilogue(raw_ostream &OS,
                                         bool ShouldReturnZero) {
  if (ShouldReturnZero) {
    OS << " // Don't know how to resolve this scheduling class.\n"
       << " return 0;\n";
    return;
  }

  OS << " report_fatal_error(\"Expected a variant SchedClass\");\n";
}

static bool hasMCSchedPredicates(const CodeGenSchedTransition &T) {
  return all_of(T.PredTerm, [](const Record *Rec) {
    return Rec->isSubClassOf("MCSchedPredicate");
  });
}

static void collectVariantClasses(const CodeGenSchedModels &SchedModels,
                                  IdxVec &VariantClasses,
                                  bool OnlyExpandMCInstPredicates) {
  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
    // Ignore non-variant scheduling classes.
    if (SC.Transitions.empty())
      continue;

    if (OnlyExpandMCInstPredicates) {
      // Ignore this variant scheduling class if no transitions use any
      // meaningful MCSchedPredicate definitions.
      if (llvm::none_of(SC.Transitions, hasMCSchedPredicates))
        continue;
    }

    VariantClasses.push_back(SC.Index);
  }
}

static void collectProcessorIndices(const CodeGenSchedClass &SC,
                                    IdxVec &ProcIndices) {
  // A variant scheduling class may define transitions for multiple
  // processors. This function identifies which processors are associated with
  // transition rules specified by variant class `SC`.
  for (const CodeGenSchedTransition &T : SC.Transitions) {
    IdxVec PI;
    std::set_union(&T.ProcIndex, &T.ProcIndex + 1, ProcIndices.begin(),
                   ProcIndices.end(), std::back_inserter(PI));
    ProcIndices = std::move(PI);
  }
}

static bool isAlwaysTrue(const CodeGenSchedTransition &T) {
  return llvm::all_of(T.PredTerm, isTruePredicate);
}

void SubtargetEmitter::emitSchedModelHelpersImpl(
    raw_ostream &OS, bool OnlyExpandMCInstPredicates) {
  IdxVec VariantClasses;
  collectVariantClasses(SchedModels, VariantClasses,
                        OnlyExpandMCInstPredicates);

  if (VariantClasses.empty()) {
    emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
    return;
  }

  // Construct a switch statement where the condition is a check on the
  // scheduling class identifier. There is a `case` for every variant class
  // defined by the processor models of this target.
  // Each `case` implements a number of rules to resolve (i.e. to transition
  // from) a variant scheduling class to another scheduling class. Rules are
  // described by instances of CodeGenSchedTransition. Note that transitions
  // may not be valid for all processors.
  OS << " switch (SchedClass) {\n";
  for (unsigned VC : VariantClasses) {
    IdxVec ProcIndices;
    const CodeGenSchedClass &SC = SchedModels.getSchedClass(VC);
    collectProcessorIndices(SC, ProcIndices);

    OS << " case " << VC << ": // " << SC.Name << '\n';

    PredicateExpander PE(Target);
    PE.setByRef(false);
    PE.setExpandForMC(OnlyExpandMCInstPredicates);
    for (unsigned PI : ProcIndices) {
      OS << " ";

      // Emit a guard on the processor ID.
      if (PI != 0) {
        OS << (OnlyExpandMCInstPredicates
                   ? "if (CPUID == "
                   : "if (SchedModel->getProcessorID() == ");
        OS << PI << ") ";
        OS << "{ // " << (SchedModels.procModelBegin() + PI)->ModelName << '\n';
      }

      // Now emit transitions associated with processor PI.
      const CodeGenSchedTransition *FinalT = nullptr;
      for (const CodeGenSchedTransition &T : SC.Transitions) {
        if (PI != 0 && T.ProcIndex != PI)
          continue;

        // Emit only transitions based on MCSchedPredicate, if it's the case.
        // At least the transition specified by NoSchedPred is emitted,
        // which becomes the default transition for those variants otherwise
        // not based on MCSchedPredicate.
        // FIXME: preferably, llvm-mca should instead assume a reasonable
        // default when a variant transition is not based on MCSchedPredicate
        // for a given processor.
        if (OnlyExpandMCInstPredicates && !hasMCSchedPredicates(T))
          continue;

        // If the transition is folded into a 'return X', it should be the
        // last one emitted.
        if (isAlwaysTrue(T)) {
          FinalT = &T;
          continue;
        }
        PE.setIndentLevel(3);
        emitPredicates(T, SchedModels.getSchedClass(T.ToClassIdx), PE, OS);
      }
      if (FinalT)
        emitPredicates(*FinalT, SchedModels.getSchedClass(FinalT->ToClassIdx),
                       PE, OS);

      OS << " }\n";

      if (PI == 0)
        break;
    }

    if (SC.isInferred())
      OS << " return " << SC.Index << ";\n";
    OS << " break;\n";
  }

  OS << " };\n";

  emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
}

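// The switch emitted by emitSchedModelHelpersImpl() has roughly this shape
// (class indices, model and class names, and the predicate are illustrative):
//
//   switch (SchedClass) {
//   case 12: // WriteIMulVariant
//     if (SchedModel->getProcessorID() == 2) { // MyProcModel
//       if (SomePredicate(MI))
//         return 13; // WriteIMul
//       return 14; // WriteIMulDefault
//     }
//     break;
//   };
//
// EmitSchedModelHelpers() below wraps that switch in the target's
// resolveSchedClass() override, preceded by any PredicateProlog code.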
void SubtargetEmitter::EmitSchedModelHelpers(const std::string &ClassName,
                                             raw_ostream &OS) {
  OS << "unsigned " << ClassName
     << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
     << " const TargetSchedModel *SchedModel) const {\n";

  // Emit the predicate prolog code.
  emitPredicateProlog(Records, OS);

  // Emit target predicates.
  emitSchedModelHelpersImpl(OS);

  OS << "} // " << ClassName << "::resolveSchedClass\n\n";

  OS << "unsigned " << ClassName
     << "\n::resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,"
     << " const MCInstrInfo *MCII, unsigned CPUID) const {\n"
     << " return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n"
     << "} // " << ClassName << "::resolveVariantSchedClass\n\n";

  STIPredicateExpander PE(Target);
  PE.setClassPrefix(ClassName);
  PE.setExpandDefinition(true);
  PE.setByRef(false);
  PE.setIndentLevel(0);

  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);
}

void SubtargetEmitter::EmitHwModeCheck(const std::string &ClassName,
                                       raw_ostream &OS) {
  const CodeGenHwModes &CGH = TGT.getHwModes();
  assert(CGH.getNumModeIds() > 0);
  if (CGH.getNumModeIds() == 1)
    return;

  // Collect all HwModes and related features defined in the TD files,
  // and store them as a bit set.
  unsigned ValueTypeModes = 0;
  unsigned RegInfoModes = 0;
  unsigned EncodingInfoModes = 0;
  for (const auto &MS : CGH.getHwModeSelects()) {
    for (const HwModeSelect::PairType &P : MS.second.Items) {
      if (P.first == DefaultMode)
        continue;
      if (P.second->isSubClassOf("ValueType")) {
        ValueTypeModes |= (1 << (P.first - 1));
      } else if (P.second->isSubClassOf("RegInfo") ||
                 P.second->isSubClassOf("SubRegRange")) {
        RegInfoModes |= (1 << (P.first - 1));
      } else if (P.second->isSubClassOf("InstructionEncoding")) {
        EncodingInfoModes |= (1 << (P.first - 1));
      }
    }
  }
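  // Note the bias: HwMode IDs are stored at bit (ID - 1) because DefaultMode
  // (ID 0) is never recorded; the generated getHwMode() undoes the bias with
  // "llvm::countr_zero(Modes) + 1".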

  // Start emitting for getHwModeSet().
  OS << "unsigned " << ClassName << "::getHwModeSet() const {\n";
  OS << " // Collect HwModes and store them as a bit set.\n";
  OS << " unsigned Modes = 0;\n";
  for (unsigned M = 1, NumModes = CGH.getNumModeIds(); M != NumModes; ++M) {
    const HwMode &HM = CGH.getMode(M);
    OS << " if (checkFeatures(\"" << HM.Features << "\")) Modes |= (1 << "
       << (M - 1) << ");\n";
  }
  OS << " return Modes;\n}\n";
  // End emitting for getHwModeSet().

  auto handlePerMode = [&](std::string ModeType, unsigned ModeInBitSet) {
    OS << " case HwMode_" << ModeType << ":\n"
       << " Modes &= " << ModeInBitSet << ";\n"
       << " if (!Modes)\n return Modes;\n"
       << " if (!llvm::has_single_bit<unsigned>(Modes))\n"
       << " llvm_unreachable(\"Two or more HwModes for " << ModeType
       << " were found!\");\n"
       << " return llvm::countr_zero(Modes) + 1;\n";
  };

  // Start emitting for getHwMode().
  OS << "unsigned " << ClassName
     << "::getHwMode(enum HwModeType type) const {\n";
  OS << " unsigned Modes = getHwModeSet();\n\n";
  OS << " if (!Modes)\n return Modes;\n\n";
  OS << " switch (type) {\n";
  OS << " case HwMode_Default:\n return llvm::countr_zero(Modes) + 1;\n";
  handlePerMode("ValueType", ValueTypeModes);
  handlePerMode("RegInfo", RegInfoModes);
  handlePerMode("EncodingInfo", EncodingInfoModes);
  OS << " }\n";
  OS << " llvm_unreachable(\"unexpected HwModeType\");\n"
     << " return 0; // should not get here\n}\n";
  // End emitting for getHwMode().
}

void SubtargetEmitter::emitGetMacroFusions(const std::string &ClassName,
                                           raw_ostream &OS) {
  if (!TGT.hasMacroFusion())
    return;

  OS << "std::vector<MacroFusionPredTy> " << ClassName
     << "::getMacroFusions() const {\n";
  OS.indent(2) << "std::vector<MacroFusionPredTy> Fusions;\n";
  for (auto *Fusion : TGT.getMacroFusions()) {
    std::string Name = Fusion->getNameInitAsString();
    OS.indent(2) << "if (hasFeature(" << Target << "::" << Name
                 << ")) Fusions.push_back(llvm::is" << Name << ");\n";
  }

  OS.indent(2) << "return Fusions;\n";
  OS << "}\n";
}
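// For a target with macro fusions the generated method looks like this
// ("MyTarget" and the fusion record name are illustrative):
//
//   std::vector<MacroFusionPredTy> MyTargetGenSubtargetInfo::getMacroFusions() const {
//     std::vector<MacroFusionPredTy> Fusions;
//     if (hasFeature(MyTarget::FuseCmpBranch)) Fusions.push_back(llvm::isFuseCmpBranch);
//     return Fusions;
//   }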

// Produces a subtarget-specific function for parsing
// the subtarget features string.
void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS) {
  std::vector<Record *> Features =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(Features, LessRecord());

  OS << "// ParseSubtargetFeatures - Parses features string setting specified\n"
     << "// subtarget options.\n"
     << "void llvm::";
  OS << Target;
  OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, "
     << "StringRef FS) {\n"
     << " LLVM_DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
     << " LLVM_DEBUG(dbgs() << \"\\nCPU:\" << CPU);\n"
     << " LLVM_DEBUG(dbgs() << \"\\nTuneCPU:\" << TuneCPU << \"\\n\\n\");\n";

  if (Features.empty()) {
    OS << "}\n";
    return;
  }

  if (Target == "AArch64")
    OS << " CPU = AArch64::resolveCPUAlias(CPU);\n"
       << " TuneCPU = AArch64::resolveCPUAlias(TuneCPU);\n";

  OS << " InitMCProcessorInfo(CPU, TuneCPU, FS);\n"
     << " const FeatureBitset &Bits = getFeatureBits();\n";

  for (Record *R : Features) {
    // Next record
    StringRef Instance = R->getName();
    StringRef Value = R->getValueAsString("Value");
    StringRef FieldName = R->getValueAsString("FieldName");

    if (Value == "true" || Value == "false")
      OS << " if (Bits[" << Target << "::" << Instance << "]) " << FieldName
         << " = " << Value << ";\n";
    else
      OS << " if (Bits[" << Target << "::" << Instance << "] && " << FieldName
         << " < " << Value << ") " << FieldName << " = " << Value << ";\n";
  }

  OS << "}\n";
}
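// The feature loop above emits one line per SubtargetFeature record, e.g.
// (feature, field, and value names are illustrative):
//
//   if (Bits[MyTarget::FeatureFastSqrt]) HasFastSqrt = true;
//   if (Bits[MyTarget::FeatureISAv3] && ISALevel < 3) ISALevel = 3;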

void SubtargetEmitter::emitGenMCSubtargetInfo(raw_ostream &OS) {
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,\n"
     << " const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID) {\n";
  emitSchedModelHelpersImpl(OS, /* OnlyExpandMCPredicates */ true);
  OS << "}\n";
  OS << "} // end namespace " << Target << "_MC\n\n";

  OS << "struct " << Target
     << "GenMCSubtargetInfo : public MCSubtargetInfo {\n";
  OS << " " << Target << "GenMCSubtargetInfo(const Triple &TT,\n"
     << " StringRef CPU, StringRef TuneCPU, StringRef FS,\n"
     << " ArrayRef<SubtargetFeatureKV> PF,\n"
     << " ArrayRef<SubtargetSubTypeKV> PD,\n"
     << " const MCWriteProcResEntry *WPR,\n"
     << " const MCWriteLatencyEntry *WL,\n"
     << " const MCReadAdvanceEntry *RA, const InstrStage *IS,\n"
     << " const unsigned *OC, const unsigned *FP) :\n"
     << " MCSubtargetInfo(TT, CPU, TuneCPU, FS, PF, PD,\n"
     << " WPR, WL, RA, IS, OC, FP) { }\n\n"
     << " unsigned resolveVariantSchedClass(unsigned SchedClass,\n"
     << " const MCInst *MI, const MCInstrInfo *MCII,\n"
     << " unsigned CPUID) const override {\n"
     << " return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n";
  OS << " }\n";
  if (TGT.getHwModes().getNumModeIds() > 1) {
    OS << " unsigned getHwModeSet() const override;\n";
    OS << " unsigned getHwMode(enum HwModeType type = HwMode_Default) const "
          "override;\n";
  }
  if (Target == "AArch64")
    OS << " bool isCPUStringValid(StringRef CPU) const override {\n"
       << " CPU = AArch64::resolveCPUAlias(CPU);\n"
       << " return MCSubtargetInfo::isCPUStringValid(CPU);\n"
       << " }\n";
  OS << "};\n";
  EmitHwModeCheck(Target + "GenMCSubtargetInfo", OS);
}
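// The generated <Target>GenMCSubtargetInfo forwards resolveVariantSchedClass()
// to <Target>_MC::resolveVariantSchedClassImpl(), so MC-layer clients (such as
// llvm-mca) can resolve variant scheduling classes from an MCInst and a CPUID
// without needing a TargetSubtargetInfo.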

void SubtargetEmitter::EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS) {
  OS << "\n#ifdef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  STIPredicateExpander PE(Target);
  PE.setExpandForMC(true);
  PE.setByRef(true);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  OS << "\n#ifdef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";

  std::string ClassPrefix = Target + "MCInstrAnalysis";
  PE.setExpandDefinition(true);
  PE.setClassPrefix(ClassPrefix);
  PE.setIndentLevel(0);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";
}

//
// SubtargetEmitter::run - Main subtarget enumeration emitter.
//
void SubtargetEmitter::run(raw_ostream &OS) {
  emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);

  OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
  OS << "#undef GET_SUBTARGETINFO_ENUM\n\n";

  DenseMap<Record *, unsigned> FeatureMap;

  OS << "namespace llvm {\n";
  Enumeration(OS, FeatureMap);
  OS << "} // end namespace llvm\n\n";
  OS << "#endif // GET_SUBTARGETINFO_ENUM\n\n";

  EmitSubtargetInfoMacroCalls(OS);

  OS << "namespace llvm {\n";
#if 0
  OS << "namespace {\n";
#endif
  unsigned NumFeatures = FeatureKeyValues(OS, FeatureMap);
  OS << "\n";
  EmitSchedModel(OS);
  OS << "\n";
  unsigned NumProcs = CPUKeyValues(OS, FeatureMap);
  OS << "\n";
#if 0
  OS << "} // end anonymous namespace\n\n";
#endif

  // MCSubtargetInfo initialization routine.
  emitGenMCSubtargetInfo(OS);

  OS << "\nstatic inline MCSubtargetInfo *create" << Target
     << "MCSubtargetInfoImpl("
     << "const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS) {\n";
  if (Target == "AArch64")
    OS << " CPU = AArch64::resolveCPUAlias(CPU);\n"
       << " TuneCPU = AArch64::resolveCPUAlias(TuneCPU);\n";
  OS << " return new " << Target
     << "GenMCSubtargetInfo(TT, CPU, TuneCPU, FS, ";
  if (NumFeatures)
    OS << Target << "FeatureKV, ";
  else
    OS << "std::nullopt, ";
  if (NumProcs)
    OS << Target << "SubTypeKV, ";
  else
    OS << "std::nullopt, ";
  OS << '\n';
  OS.indent(22);
  OS << Target << "WriteProcResTable, " << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  OS << '\n';
  OS.indent(22);
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, " << Target << "OperandCycles, " << Target
       << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";
  OS << ");\n}\n\n";

  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_MC_DESC\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_TARGET_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_TARGET_DESC\n\n";

  OS << "#include \"llvm/Support/Debug.h\"\n";
  OS << "#include \"llvm/Support/raw_ostream.h\"\n\n";
  if (Target == "AArch64")
    OS << "#include \"llvm/TargetParser/AArch64TargetParser.h\"\n\n";
  ParseFeaturesFunction(OS);

  OS << "#endif // GET_SUBTARGETINFO_TARGET_DESC\n\n";

  // Create a TargetSubtargetInfo subclass to hide the MC layer initialization.
  OS << "\n#ifdef GET_SUBTARGETINFO_HEADER\n";
  OS << "#undef GET_SUBTARGETINFO_HEADER\n\n";

  std::string ClassName = Target + "GenSubtargetInfo";
  OS << "namespace llvm {\n";
  OS << "class DFAPacketizer;\n";
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID);\n"
     << "} // end namespace " << Target << "_MC\n\n";
  OS << "struct " << ClassName << " : public TargetSubtargetInfo {\n"
     << " explicit " << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS);\n"
     << "public:\n"
     << " unsigned resolveSchedClass(unsigned SchedClass, "
     << " const MachineInstr *DefMI,"
     << " const TargetSchedModel *SchedModel) const override;\n"
     << " unsigned resolveVariantSchedClass(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII,"
     << " unsigned CPUID) const override;\n"
     << " DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
     << " const;\n";
  if (TGT.getHwModes().getNumModeIds() > 1) {
    OS << " unsigned getHwModeSet() const override;\n";
    OS << " unsigned getHwMode(enum HwModeType type = HwMode_Default) const "
          "override;\n";
  }
  if (TGT.hasMacroFusion())
    OS << " std::vector<MacroFusionPredTy> getMacroFusions() const "
          "override;\n";

  STIPredicateExpander PE(Target);
  PE.setByRef(false);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "};\n"
     << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_HEADER\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
  OS << "#undef GET_SUBTARGETINFO_CTOR\n\n";

  OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n\n";
  OS << "namespace llvm {\n";
  OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
  OS << "extern const llvm::SubtargetSubTypeKV " << Target << "SubTypeKV[];\n";
  OS << "extern const llvm::MCWriteProcResEntry " << Target
     << "WriteProcResTable[];\n";
  OS << "extern const llvm::MCWriteLatencyEntry " << Target
     << "WriteLatencyTable[];\n";
  OS << "extern const llvm::MCReadAdvanceEntry " << Target
     << "ReadAdvanceTable[];\n";

  if (SchedModels.hasItineraries()) {
    OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
    OS << "extern const unsigned " << Target << "OperandCycles[];\n";
    OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
  }

  OS << ClassName << "::" << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS)\n";

  if (Target == "AArch64")
    OS << " : TargetSubtargetInfo(TT, AArch64::resolveCPUAlias(CPU),\n"
       << " AArch64::resolveCPUAlias(TuneCPU), FS, ";
  else
    OS << " : TargetSubtargetInfo(TT, CPU, TuneCPU, FS, ";
  if (NumFeatures)
    OS << "ArrayRef(" << Target << "FeatureKV, " << NumFeatures << "), ";
  else
    OS << "std::nullopt, ";
  if (NumProcs)
    OS << "ArrayRef(" << Target << "SubTypeKV, " << NumProcs << "), ";
  else
    OS << "std::nullopt, ";
  OS << '\n';
  OS.indent(24);
  OS << Target << "WriteProcResTable, " << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  OS << '\n';
  OS.indent(24);
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, " << Target << "OperandCycles, " << Target
       << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";
  OS << ") {}\n\n";

  EmitSchedModelHelpers(ClassName, OS);
  EmitHwModeCheck(ClassName, OS);
  emitGetMacroFusions(ClassName, OS);

  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";

  EmitMCInstrAnalysisPredicateFunctions(OS);
}

static TableGen::Emitter::OptClass<SubtargetEmitter>
    X("gen-subtarget", "Generate subtarget enumerations");