//===- MLRegAllocPriorityAdvisor.cpp - ML priority advisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the ML priority advisor and reward injection pass.
//
//===----------------------------------------------------------------------===//

#include "AllocationOrder.h"
#include "RegAllocGreedy.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocPriorityAdvisor.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/Support/CommandLine.h"

#if defined(LLVM_HAVE_TFLITE)
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/Utils/TrainingLogger.h"
#include "llvm/IR/Module.h"
#endif

using namespace llvm;

static cl::opt<std::string> InteractiveChannelBaseName(
    "regalloc-priority-interactive-channel-base", cl::Hidden,
    cl::desc(
        "Base file path for the interactive mode. The incoming filename should "
        "have the name <regalloc-priority-interactive-channel-base>.in, while "
        "the outgoing name should be "
        "<regalloc-priority-interactive-channel-base>.out"));

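// Release mode would normally use a model compiled ahead of time (AOT);
// NoopSavedModelImpl is the stand-in used when no such model is linked in, in
// which case the release-mode analysis is only created if the interactive
// channel is requested (see createReleaseModePriorityAdvisorAnalysis below).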
using CompiledModelType = NoopSavedModelImpl;

// Options that only make sense in development mode.
#ifdef LLVM_HAVE_TFLITE
#include "RegAllocScore.h"
#include "llvm/Analysis/Utils/TFUtils.h"

static cl::opt<std::string> TrainingLog(
    "regalloc-priority-training-log", cl::Hidden,
    cl::desc("Training log for the register allocator priority model"));

static cl::opt<std::string> ModelUnderTraining(
    "regalloc-priority-model", cl::Hidden,
    cl::desc("The model being trained for register allocation priority"));

#endif // #ifdef LLVM_HAVE_TFLITE

namespace llvm {

static const std::vector<int64_t> PerLiveRangeShape{1};

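// X-macro listing the model's input features, one scalar tensor per live
// range: M(type, name, shape, doc). It is expanded below into the FeatureIDs
// enum, InputFeatures, and (in development mode) TrainingInputFeatures.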
#define RA_PRIORITY_FEATURES_LIST(M)                                           \
  M(int64_t, li_size, PerLiveRangeShape, "size")                               \
  M(int64_t, stage, PerLiveRangeShape, "stage")                                \
  M(float, weight, PerLiveRangeShape, "weight")

#define DecisionName "priority"
static const TensorSpec DecisionSpec =
    TensorSpec::createSpec<float>(DecisionName, {1});

// Named features index.
enum FeatureIDs {
#define _FEATURE_IDX(_, name, __, ___) name,
  RA_PRIORITY_FEATURES_LIST(_FEATURE_IDX)
#undef _FEATURE_IDX
  FeatureCount
};

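// The ML advisor shared by release and development modes: it populates the
// model runner's input tensors from a LiveInterval and returns the model's
// output as the priority. A DefaultPriorityAdvisor is kept around so derived
// advisors can fall back to the existing heuristic.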
class MLPriorityAdvisor : public RegAllocPriorityAdvisor {
public:
  MLPriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
                    SlotIndexes *const Indexes, MLModelRunner *Runner);

protected:
  const RegAllocPriorityAdvisor &getDefaultAdvisor() const {
    return static_cast<const RegAllocPriorityAdvisor &>(DefaultAdvisor);
  }

  // The assumption is that if the Runner could not be constructed, we already
  // emitted an error, so we should not be asked for it here.
  const MLModelRunner &getRunner() const { return *Runner; }
  float getPriorityImpl(const LiveInterval &LI) const;
  unsigned getPriority(const LiveInterval &LI) const override;

private:
  const DefaultPriorityAdvisor DefaultAdvisor;
  MLModelRunner *const Runner;
};

#define _DECL_FEATURES(type, name, shape, _)                                   \
  TensorSpec::createSpec<type>(#name, shape),

static const std::vector<TensorSpec> InputFeatures{
    {RA_PRIORITY_FEATURES_LIST(_DECL_FEATURES)},
};
#undef _DECL_FEATURES

// ===================================
// Release (AOT) - specifics
// ===================================
class ReleaseModePriorityAdvisorProvider final
    : public RegAllocPriorityAdvisorProvider {
public:
  ReleaseModePriorityAdvisorProvider()
      : RegAllocPriorityAdvisorProvider(AdvisorMode::Release) {}
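  // The model runner is created lazily on first use and then shared across
  // functions. If -regalloc-priority-interactive-channel-base is set, an
  // InteractiveModelRunner talks to an external process over the <base>.out /
  // <base>.in channel instead of using the compiled-in model.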
  std::unique_ptr<RegAllocPriorityAdvisor>
  getAdvisor(const MachineFunction &MF, const RAGreedy &RA,
             SlotIndexes &SI) override {
    if (!Runner) {
      if (InteractiveChannelBaseName.empty())
        Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
            MF.getFunction().getContext(), InputFeatures, DecisionName);
      else
        Runner = std::make_unique<InteractiveModelRunner>(
            MF.getFunction().getContext(), InputFeatures, DecisionSpec,
            InteractiveChannelBaseName + ".out",
            InteractiveChannelBaseName + ".in");
    }
    return std::make_unique<MLPriorityAdvisor>(MF, RA, &SI, Runner.get());
  }

private:
  std::unique_ptr<MLModelRunner> Runner;
};

class ReleaseModePriorityAdvisorAnalysisLegacy final
    : public RegAllocPriorityAdvisorAnalysisLegacy {
public:
  ReleaseModePriorityAdvisorAnalysisLegacy()
      : RegAllocPriorityAdvisorAnalysisLegacy(AdvisorMode::Release) {}
  // support for isa<> and dyn_cast.
  static bool classof(const RegAllocPriorityAdvisorAnalysisLegacy *R) {
    return R->getAdvisorMode() == AdvisorMode::Release;
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<SlotIndexesWrapperPass>();
    RegAllocPriorityAdvisorAnalysisLegacy::getAnalysisUsage(AU);
  }

  bool doInitialization(Module &M) override {
    Provider = std::make_unique<ReleaseModePriorityAdvisorProvider>();
    return false;
  }
};

// ===================================
// Development mode - specifics
// ===================================
//
// Features we log
#ifdef LLVM_HAVE_TFLITE
static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});

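// The training log mirrors the input features under an "action_" prefix and
// adds the discount, step_type, and reward columns the training pipeline
// expects alongside each observation.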
#define _DECL_TRAIN_FEATURES(type, name, shape, _)                             \
  TensorSpec::createSpec<type>(std::string("action_") + #name, shape),

static const std::vector<TensorSpec> TrainingInputFeatures{
    {RA_PRIORITY_FEATURES_LIST(_DECL_TRAIN_FEATURES)
     TensorSpec::createSpec<float>("action_discount", {1}),
     TensorSpec::createSpec<int32_t>("action_step_type", {1}),
     TensorSpec::createSpec<float>("action_reward", {1})}};
#undef _DECL_TRAIN_FEATURES

class DevelopmentModePriorityAdvisor : public MLPriorityAdvisor {
public:
  DevelopmentModePriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
                                 SlotIndexes *const Indexes,
                                 MLModelRunner *Runner, Logger *Log)
      : MLPriorityAdvisor(MF, RA, Indexes, Runner), Log(Log) {}

private:
  unsigned getPriority(const LiveInterval &LI) const override;
  Logger *const Log;
};

class DevelopmentModePriorityAdvisorProvider final
    : public RegAllocPriorityAdvisorProvider {

public:
  // Save all the logs (when requested).
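  // Valid configurations: log only (NoInferenceModelRunner), evaluate a model
  // under training (ModelUnderTrainingRunner), or both; requesting development
  // mode with neither is an error.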
  DevelopmentModePriorityAdvisorProvider(LLVMContext &Ctx)
      : RegAllocPriorityAdvisorProvider(AdvisorMode::Development) {
    if (ModelUnderTraining.empty() && TrainingLog.empty()) {
      Ctx.emitError("Regalloc development mode should be requested with at "
                    "least logging enabled and/or a training model");
      return;
    }
    if (ModelUnderTraining.empty())
      Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
    else
      Runner = ModelUnderTrainingRunner::createAndEnsureValid(
          Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
    if (!Runner) {
      Ctx.emitError("Regalloc: could not set up the model runner");
      return;
    }
    if (TrainingLog.empty())
      return;
    std::error_code EC;
    auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
    if (EC) {
      Ctx.emitError(EC.message() + ":" + TrainingLog);
      return;
    }
    std::vector<TensorSpec> LFS = InputFeatures;
    if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
      append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
    // We always log the output; in particular, if we're not evaluating, we
    // don't have an output spec json file. That's why we handle the
    // 'normal' output separately.
    LFS.push_back(DecisionSpec);

    Log = std::make_unique<Logger>(std::move(OS), LFS, Reward,
                                   /*IncludeReward*/ true);
  }

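  // Called after allocation for each function; if anything was logged for this
  // function, append the final reward to the observation still in progress.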
  void logRewardIfNeeded(const MachineFunction &MF,
                         llvm::function_ref<float()> GetReward) override {
    if (!Log || !Log->hasAnyObservationForContext(MF.getName()))
      return;
    // The function pass manager would run all the function passes for a
    // function, so we assume the last context belongs to this function. If
    // this invariant ever changes, we can implement switching contexts at that
    // time. At this point, it would be an error.
    if (Log->currentContext() != MF.getName()) {
      MF.getFunction().getContext().emitError(
          "The training log context shouldn't have changed.");
    }
    if (Log->hasObservationInProgress())
      Log->logReward<float>(GetReward());
  }

  std::unique_ptr<RegAllocPriorityAdvisor>
  getAdvisor(const MachineFunction &MF, const RAGreedy &RA,
             SlotIndexes &SI) override {
    if (!Runner)
      return nullptr;
    if (Log) {
      Log->switchContext(MF.getName());
    }
    return std::make_unique<DevelopmentModePriorityAdvisor>(
        MF, RA, &SI, Runner.get(), Log.get());
  }

  std::unique_ptr<MLModelRunner> Runner;
  std::unique_ptr<Logger> Log;
};

class DevelopmentModePriorityAdvisorAnalysisLegacy final
    : public RegAllocPriorityAdvisorAnalysisLegacy {
public:
  DevelopmentModePriorityAdvisorAnalysisLegacy()
      : RegAllocPriorityAdvisorAnalysisLegacy(AdvisorMode::Development) {}

  // support for isa<> and dyn_cast.
  static bool classof(const RegAllocPriorityAdvisorAnalysisLegacy *R) {
    return R->getAdvisorMode() == AdvisorMode::Development;
  }

  void logRewardIfNeeded(const MachineFunction &MF,
                         llvm::function_ref<float()> GetReward) override {
    Provider->logRewardIfNeeded(MF, GetReward);
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<SlotIndexesWrapperPass>();
    RegAllocPriorityAdvisorAnalysisLegacy::getAnalysisUsage(AU);
  }

  bool doInitialization(Module &M) override {
    Provider = std::make_unique<DevelopmentModePriorityAdvisorProvider>(
        M.getContext());
    return false;
  }
};
#endif // #ifdef LLVM_HAVE_TFLITE

} // namespace llvm

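// Only create the release-mode analysis if it can actually produce advice:
// either an embedded (AOT-compiled) model is valid, or an interactive channel
// was requested on the command line.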
RegAllocPriorityAdvisorAnalysisLegacy *
llvm::createReleaseModePriorityAdvisorAnalysis() {
  return llvm::isEmbeddedModelEvaluatorValid<CompiledModelType>() ||
                 !InteractiveChannelBaseName.empty()
             ? new ReleaseModePriorityAdvisorAnalysisLegacy()
             : nullptr;
}

MLPriorityAdvisor::MLPriorityAdvisor(const MachineFunction &MF,
                                     const RAGreedy &RA,
                                     SlotIndexes *const Indexes,
                                     MLModelRunner *Runner)
    : RegAllocPriorityAdvisor(MF, RA, Indexes), DefaultAdvisor(MF, RA, Indexes),
      Runner(std::move(Runner)) {
  assert(this->Runner);
  Runner->switchContext(MF.getName());
}

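// Fill the input tensors in FeatureIDs order (li_size, stage, weight) and let
// the model produce the priority as a raw float.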
float MLPriorityAdvisor::getPriorityImpl(const LiveInterval &LI) const {
  const unsigned Size = LI.getSize();
  LiveRangeStage Stage = RA.getExtraInfo().getStage(LI);

  *Runner->getTensor<int64_t>(0) = static_cast<int64_t>(Size);
  *Runner->getTensor<int64_t>(1) = static_cast<int64_t>(Stage);
  *Runner->getTensor<float>(2) = static_cast<float>(LI.weight());

  return Runner->evaluate<float>();
}

unsigned MLPriorityAdvisor::getPriority(const LiveInterval &LI) const {
  return static_cast<unsigned>(getPriorityImpl(LI));
}

#ifdef LLVM_HAVE_TFLITE
RegAllocPriorityAdvisorAnalysisLegacy *
llvm::createDevelopmentModePriorityAdvisorAnalysis() {
  return new DevelopmentModePriorityAdvisorAnalysisLegacy();
}

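// In development mode, use the model under training if one was provided,
// otherwise fall back to the default heuristic; then, if a training log was
// requested, record one observation (the input features plus the chosen
// priority) per query.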
unsigned
DevelopmentModePriorityAdvisor::getPriority(const LiveInterval &LI) const {
  double Prio = 0;

  if (isa<ModelUnderTrainingRunner>(getRunner())) {
    Prio = MLPriorityAdvisor::getPriorityImpl(LI);
  } else {
    Prio = getDefaultAdvisor().getPriority(LI);
  }

  if (TrainingLog.empty())
    return Prio;

  // TODO(mtrofin): when we support optional rewards, this can go away. In the
  // meantime, we log the "pretend" reward (0) for the previous observation
  // before starting a new one.
  if (Log->hasObservationInProgress())
    Log->logReward<float>(0.0);

  Log->startObservation();
  size_t CurrentFeature = 0;
  for (; CurrentFeature < InputFeatures.size(); ++CurrentFeature) {
    Log->logTensorValue(CurrentFeature,
                        reinterpret_cast<const char *>(
                            getRunner().getTensorUntyped(CurrentFeature)));
  }

  if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner())) {
    for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
         ++I, ++CurrentFeature)
      Log->logTensorValue(
          CurrentFeature,
          reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
  }

  float Ret = static_cast<float>(Prio);
  Log->logTensorValue(CurrentFeature, reinterpret_cast<const char *>(&Ret));
  Log->endObservation();

  return static_cast<unsigned>(Prio);
}

RegAllocPriorityAdvisorProvider *
llvm::createDevelopmentModePriorityAdvisorProvider(LLVMContext &Ctx) {
  return new DevelopmentModePriorityAdvisorProvider(Ctx);
}

#endif // #ifdef LLVM_HAVE_TFLITE

RegAllocPriorityAdvisorProvider *
llvm::createReleaseModePriorityAdvisorProvider() {
  return new ReleaseModePriorityAdvisorProvider();
}