//===- MLRegAllocPriorityAdvisor.cpp - ML priority advisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the ML priority advisor and reward injection pass
//
//===----------------------------------------------------------------------===//

#include "AllocationOrder.h"
#include "RegAllocGreedy.h"
#include "RegAllocPriorityAdvisor.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/Support/CommandLine.h"

#if defined(LLVM_HAVE_TFLITE)
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/Utils/TrainingLogger.h"
#include "llvm/IR/Module.h"
#endif

using namespace llvm;
static cl::opt<std::string> InteractiveChannelBaseName(
    "regalloc-priority-interactive-channel-base", cl::Hidden,
    cl::desc(
        "Base file path for the interactive mode. The incoming filename should "
        "have the name <regalloc-priority-interactive-channel-base>.in, while "
        "the outgoing name should be "
        "<regalloc-priority-interactive-channel-base>.out"));
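// Example (illustrative): with a channel base of /tmp/rachannel, the compiler
// writes the input feature tensors to /tmp/rachannel.out and reads each
// priority decision back from /tmp/rachannel.in; an external process (e.g. a
// training harness) is expected to serve both ends of the channel.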

using CompiledModelType = NoopSavedModelImpl;

// Options that only make sense in development mode
#ifdef LLVM_HAVE_TFLITE
#include "RegAllocScore.h"
#include "llvm/Analysis/Utils/TFUtils.h"

static cl::opt<std::string> TrainingLog(
    "regalloc-priority-training-log", cl::Hidden,
    cl::desc("Training log for the register allocator priority model"));

static cl::opt<std::string> ModelUnderTraining(
    "regalloc-priority-model", cl::Hidden,
    cl::desc("The model being trained for register allocation priority"));

#endif // #ifdef LLVM_HAVE_TFLITE

namespace llvm {

static const std::vector<int64_t> PerLiveRangeShape{1};

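// X-macro describing the model's input features: each entry is
// (type, name, shape, doc string). The order of the entries defines both the
// FeatureIDs enum below and the tensor indices filled in getPriorityImpl().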
#define RA_PRIORITY_FEATURES_LIST(M)                                           \
  M(int64_t, li_size, PerLiveRangeShape, "size")                               \
  M(int64_t, stage, PerLiveRangeShape, "stage")                                \
  M(float, weight, PerLiveRangeShape, "weight")

#define DecisionName "priority"
static const TensorSpec DecisionSpec =
    TensorSpec::createSpec<float>(DecisionName, {1});

// Named features index.
enum FeatureIDs {
#define _FEATURE_IDX(_, name, __, ___) name,
  RA_PRIORITY_FEATURES_LIST(_FEATURE_IDX)
#undef _FEATURE_IDX
  FeatureCount
};

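// Common ML advisor: it fills the input tensors for a live interval and asks
// the underlying MLModelRunner (AOT-compiled, interactive, or under training)
// for a priority. The DefaultAdvisor member is kept so that development mode
// can fall back to the heuristic priority when no model is being evaluated.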
class MLPriorityAdvisor : public RegAllocPriorityAdvisor {
public:
  MLPriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
                    SlotIndexes *const Indexes, MLModelRunner *Runner);

protected:
  const RegAllocPriorityAdvisor &getDefaultAdvisor() const {
    return static_cast<const RegAllocPriorityAdvisor &>(DefaultAdvisor);
  }

  // The assumption is that if the Runner could not be constructed, we emitted
  // an error, so we shouldn't be asking for it here.
  const MLModelRunner &getRunner() const { return *Runner; }
  float getPriorityImpl(const LiveInterval &LI) const;
  unsigned getPriority(const LiveInterval &LI) const override;

private:
  const DefaultPriorityAdvisor DefaultAdvisor;
  MLModelRunner *const Runner;
};

#define _DECL_FEATURES(type, name, shape, _)                                   \
  TensorSpec::createSpec<type>(#name, shape),

static const std::vector<TensorSpec> InputFeatures{
    {RA_PRIORITY_FEATURES_LIST(_DECL_FEATURES)},
};
#undef _DECL_FEATURES

// ===================================
// Release (AOT) - specifics
// ===================================
class ReleaseModePriorityAdvisorAnalysis final
    : public RegAllocPriorityAdvisorAnalysis {
public:
  ReleaseModePriorityAdvisorAnalysis()
      : RegAllocPriorityAdvisorAnalysis(AdvisorMode::Release) {}
  // support for isa<> and dyn_cast.
  static bool classof(const RegAllocPriorityAdvisorAnalysis *R) {
    return R->getAdvisorMode() == AdvisorMode::Release;
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<SlotIndexesWrapperPass>();
    RegAllocPriorityAdvisorAnalysis::getAnalysisUsage(AU);
  }

  std::unique_ptr<RegAllocPriorityAdvisor>
  getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
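    // Lazily construct the model runner on first use: either the embedded
    // (AOT-compiled) model, or, when a channel base name was given, an
    // InteractiveModelRunner that talks to an external process through the
    // <base>.out / <base>.in channel files.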
    if (!Runner) {
      if (InteractiveChannelBaseName.empty())
        Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
            MF.getFunction().getContext(), InputFeatures, DecisionName);
      else
        Runner = std::make_unique<InteractiveModelRunner>(
            MF.getFunction().getContext(), InputFeatures, DecisionSpec,
            InteractiveChannelBaseName + ".out",
            InteractiveChannelBaseName + ".in");
    }
    return std::make_unique<MLPriorityAdvisor>(
        MF, RA, &getAnalysis<SlotIndexesWrapperPass>().getSI(), Runner.get());
  }
  std::unique_ptr<MLModelRunner> Runner;
};

// ===================================
// Development mode-specifics
// ===================================
//
// Features we log
#ifdef LLVM_HAVE_TFLITE
static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});

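// For training we log the same input features under an "action_" prefix,
// together with the discount / step_type / reward columns the training
// pipeline expects for each decision.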
#define _DECL_TRAIN_FEATURES(type, name, shape, _)                             \
  TensorSpec::createSpec<type>(std::string("action_") + #name, shape),

static const std::vector<TensorSpec> TrainingInputFeatures{
    {RA_PRIORITY_FEATURES_LIST(_DECL_TRAIN_FEATURES)
     TensorSpec::createSpec<float>("action_discount", {1}),
     TensorSpec::createSpec<int32_t>("action_step_type", {1}),
     TensorSpec::createSpec<float>("action_reward", {1})}};
#undef _DECL_TRAIN_FEATURES

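// Development-mode advisor: asks the model under training for a priority when
// one is loaded, otherwise falls back to the default heuristic, and, when a
// training log was requested, records every decision together with a
// placeholder reward.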
class DevelopmentModePriorityAdvisor : public MLPriorityAdvisor {
public:
  DevelopmentModePriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
                                 SlotIndexes *const Indexes,
                                 MLModelRunner *Runner, Logger *Log)
      : MLPriorityAdvisor(MF, RA, Indexes, Runner), Log(Log) {}

private:
  unsigned getPriority(const LiveInterval &LI) const override;
  Logger *const Log;
};

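// Development-mode analysis: owns either a NoInferenceModelRunner (logging
// only) or a ModelUnderTrainingRunner (TFLite), plus the Logger that records
// per-function observations and rewards.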
class DevelopmentModePriorityAdvisorAnalysis final
    : public RegAllocPriorityAdvisorAnalysis {
public:
  DevelopmentModePriorityAdvisorAnalysis()
      : RegAllocPriorityAdvisorAnalysis(AdvisorMode::Development) {}
  // support for isa<> and dyn_cast.
  static bool classof(const RegAllocPriorityAdvisorAnalysis *R) {
    return R->getAdvisorMode() == AdvisorMode::Development;
  }

  void logRewardIfNeeded(const MachineFunction &MF,
                         llvm::function_ref<float()> GetReward) override {
    if (!Log || !Log->hasAnyObservationForContext(MF.getName()))
      return;
    // The function pass manager runs all the function passes for a function
    // before moving on, so we assume the last context belongs to this
    // function. If that invariant ever changes, we can implement context
    // switching then; for now, a mismatch is an error.
    if (Log->currentContext() != MF.getName()) {
      MF.getFunction().getContext().emitError(
          "The training log context shouldn't have changed.");
    }
    if (Log->hasObservationInProgress())
      Log->logReward<float>(GetReward());
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<SlotIndexesWrapperPass>();
    RegAllocPriorityAdvisorAnalysis::getAnalysisUsage(AU);
  }

  // Save all the logs (when requested).
  bool doInitialization(Module &M) override {
    LLVMContext &Ctx = M.getContext();
    if (ModelUnderTraining.empty() && TrainingLog.empty()) {
      Ctx.emitError("Regalloc development mode should be requested with at "
                    "least logging enabled and/or a training model");
      return false;
    }
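    // Without a model under training, the NoInferenceModelRunner only provides
    // the feature buffers that get logged; no inference is run.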
    if (ModelUnderTraining.empty())
      Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
    else
      Runner = ModelUnderTrainingRunner::createAndEnsureValid(
          Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
    if (!Runner) {
      Ctx.emitError("Regalloc: could not set up the model runner");
      return false;
    }
    if (TrainingLog.empty())
      return false;
    std::error_code EC;
    auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
    if (EC) {
      M.getContext().emitError(EC.message() + ":" + TrainingLog);
      return false;
    }
    std::vector<TensorSpec> LFS = InputFeatures;
    if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
      append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
    // We always log the output; in particular, if we're not evaluating, we
    // don't have an output spec json file. That's why we handle the
    // 'normal' output separately.
    LFS.push_back(DecisionSpec);

    Log = std::make_unique<Logger>(std::move(OS), LFS, Reward,
                                   /*IncludeReward*/ true);
    return false;
  }

  std::unique_ptr<RegAllocPriorityAdvisor>
  getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
    if (!Runner)
      return nullptr;
    if (Log) {
      Log->switchContext(MF.getName());
    }

    return std::make_unique<DevelopmentModePriorityAdvisor>(
        MF, RA, &getAnalysis<SlotIndexesWrapperPass>().getSI(), Runner.get(),
        Log.get());
  }

  std::unique_ptr<MLModelRunner> Runner;
  std::unique_ptr<Logger> Log;
};
#endif // #ifdef LLVM_HAVE_TFLITE

} // namespace llvm

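// With CompiledModelType aliased to NoopSavedModelImpl above, there is no
// embedded (AOT) evaluator, so the release-mode analysis is only created when
// an interactive channel was requested; otherwise this returns nullptr.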
RegAllocPriorityAdvisorAnalysis *llvm::createReleaseModePriorityAdvisor() {
  return llvm::isEmbeddedModelEvaluatorValid<CompiledModelType>() ||
                 !InteractiveChannelBaseName.empty()
             ? new ReleaseModePriorityAdvisorAnalysis()
             : nullptr;
}

MLPriorityAdvisor::MLPriorityAdvisor(const MachineFunction &MF,
                                     const RAGreedy &RA,
                                     SlotIndexes *const Indexes,
                                     MLModelRunner *Runner)
    : RegAllocPriorityAdvisor(MF, RA, Indexes), DefaultAdvisor(MF, RA, Indexes),
      Runner(std::move(Runner)) {
  assert(this->Runner);
  Runner->switchContext(MF.getName());
}

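// Feature extraction: the tensor indices below follow the order of
// RA_PRIORITY_FEATURES_LIST (0 = li_size, 1 = stage, 2 = weight).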
float MLPriorityAdvisor::getPriorityImpl(const LiveInterval &LI) const {
  const unsigned Size = LI.getSize();
  LiveRangeStage Stage = RA.getExtraInfo().getStage(LI);

  *Runner->getTensor<int64_t>(0) = static_cast<int64_t>(Size);
  *Runner->getTensor<int64_t>(1) = static_cast<int64_t>(Stage);
  *Runner->getTensor<float>(2) = static_cast<float>(LI.weight());

  return Runner->evaluate<float>();
}

unsigned MLPriorityAdvisor::getPriority(const LiveInterval &LI) const {
  return static_cast<unsigned>(getPriorityImpl(LI));
}

#ifdef LLVM_HAVE_TFLITE
RegAllocPriorityAdvisorAnalysis *llvm::createDevelopmentModePriorityAdvisor() {
  return new DevelopmentModePriorityAdvisorAnalysis();
}

unsigned
DevelopmentModePriorityAdvisor::getPriority(const LiveInterval &LI) const {
  double Prio = 0;

  if (isa<ModelUnderTrainingRunner>(getRunner())) {
    Prio = MLPriorityAdvisor::getPriorityImpl(LI);
  } else {
    Prio = getDefaultAdvisor().getPriority(LI);
  }

  if (TrainingLog.empty())
    return static_cast<unsigned>(Prio);

  // TODO(mtrofin): when we support optional rewards, this can go away. In the
  // meantime, we log the "pretend" reward (0) for the previous observation
  // before starting a new one.
  if (Log->hasObservationInProgress())
    Log->logReward<float>(0.0);

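  // Log one observation: every input feature, any extra outputs exposed by the
  // model under training, and finally the decision itself.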
  Log->startObservation();
  size_t CurrentFeature = 0;
  for (; CurrentFeature < InputFeatures.size(); ++CurrentFeature) {
    Log->logTensorValue(CurrentFeature,
                        reinterpret_cast<const char *>(
                            getRunner().getTensorUntyped(CurrentFeature)));
  }

  if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner())) {
    for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
         ++I, ++CurrentFeature)
      Log->logTensorValue(
          CurrentFeature,
          reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
  }

  float Ret = static_cast<float>(Prio);
  Log->logTensorValue(CurrentFeature, reinterpret_cast<const char *>(&Ret));
  Log->endObservation();

  return static_cast<unsigned>(Prio);
}

#endif // #ifdef LLVM_HAVE_TFLITE