//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

9#include "llvm/CodeGen/CalcSpillWeights.h"
10#include "llvm/ADT/SmallPtrSet.h"
11#include "llvm/CodeGen/LiveInterval.h"
12#include "llvm/CodeGen/LiveIntervals.h"
13#include "llvm/CodeGen/MachineFunction.h"
14#include "llvm/CodeGen/MachineInstr.h"
15#include "llvm/CodeGen/MachineLoopInfo.h"
16#include "llvm/CodeGen/MachineOperand.h"
17#include "llvm/CodeGen/MachineRegisterInfo.h"
18#include "llvm/CodeGen/StackMaps.h"
19#include "llvm/CodeGen/TargetInstrInfo.h"
20#include "llvm/CodeGen/TargetRegisterInfo.h"
21#include "llvm/CodeGen/TargetSubtargetInfo.h"
22#include "llvm/CodeGen/VirtRegMap.h"
23#include "llvm/Support/Debug.h"
24#include "llvm/Support/MathExtras.h"
25#include "llvm/Support/raw_ostream.h"
26#include <cassert>
27#include <tuple>
28
29using namespace llvm;
30
31#define DEBUG_TYPE "calcspillweights"
32
33void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
34 LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
35 << "********** Function: " << MF.getName() << '\n');
36
37 MachineRegisterInfo &MRI = MF.getRegInfo();
38 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
39 Register Reg = Register::index2VirtReg(Index: I);
40 if (MRI.reg_nodbg_empty(RegNo: Reg))
41 continue;
42 calculateSpillWeightAndHint(LI&: LIS.getInterval(Reg));
43 }
44}
45
46// Return the preferred allocation register for reg, given a COPY instruction.
47Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, Register Reg,
48 const TargetRegisterInfo &TRI,
49 const MachineRegisterInfo &MRI) {
50 unsigned Sub, HSub;
51 Register HReg;
52 if (MI->getOperand(i: 0).getReg() == Reg) {
53 Sub = MI->getOperand(i: 0).getSubReg();
54 HReg = MI->getOperand(i: 1).getReg();
55 HSub = MI->getOperand(i: 1).getSubReg();
56 } else {
57 Sub = MI->getOperand(i: 1).getSubReg();
58 HReg = MI->getOperand(i: 0).getReg();
59 HSub = MI->getOperand(i: 0).getSubReg();
60 }
61
62 if (!HReg)
63 return 0;
64
65 if (HReg.isVirtual())
66 return Sub == HSub ? HReg : Register();
67
68 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
69 MCRegister CopiedPReg = HSub ? TRI.getSubReg(Reg: HReg, Idx: HSub) : HReg.asMCReg();
70 if (RC->contains(Reg: CopiedPReg))
71 return CopiedPReg;
72
73 // Check if reg:sub matches so that a super register could be hinted.
74 if (Sub)
75 return TRI.getMatchingSuperReg(Reg: CopiedPReg, SubIdx: Sub, RC);
76
77 return Register();
78}
79
/// Check if all values in \p LI are rematerializable.
///
/// Walks every value number of the interval; each must be defined by a
/// non-PHI instruction that the target reports as rematerializable
/// (possibly reached by tracing through full copies introduced by live
/// range splitting). Finally verifies that at every use of the interval,
/// the operands of the corresponding def are still available, so the def
/// could actually be re-executed there.
bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
                                        const LiveIntervals &LIS,
                                        const VirtRegMap &VRM,
                                        const MachineRegisterInfo &MRI,
                                        const TargetInstrInfo &TII) {
  Register Reg = LI.reg();
  Register Original = VRM.getOriginal(Reg);
  // Maps each original value number id to its (copy-traced) defining
  // instruction, for the use-availability check below.
  SmallDenseMap<unsigned, MachineInstr *> VNIDefs;
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    const VNInfo *OrigVNI = VNI;
    if (VNI->isUnused())
      continue;
    // PHI-defined values have no single defining instruction to remat from.
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    while (TII.isFullCopyInstr(*MI)) {
      // The copy destination must match the interval register.
      if (MI->getOperand(0).getReg() != Reg)
        return false;

      // Get the source register.
      Reg = MI->getOperand(1).getReg();

      // If the original (pre-splitting) registers match this
      // copy came from a split.
      if (!Reg.isVirtual() || VRM.getOriginal(Reg) != Original)
        return false;

      // Follow the copy live-in value.
      const LiveInterval &SrcLI = LIS.getInterval(Reg);
      LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
      VNI = SrcQ.valueIn();
      assert(VNI && "Copy from non-existing value");
      if (VNI->isPHIDef())
        return false;
      MI = LIS.getInstructionFromIndex(VNI->def);
      assert(MI && "Dead valno in interval");
    }

    // The (copy-traced) def itself must be rematerializable.
    if (!TII.isReMaterializable(*MI))
      return false;

    VNIDefs[OrigVNI->id] = MI;
  }

  // If MI has register uses, it will only be rematerializable if its uses are
  // also live at the indices it will be rematerialized at.
  for (MachineOperand &MO : MRI.reg_nodbg_operands(LI.reg())) {
    if (!MO.readsReg())
      continue;
    SlotIndex UseIdx = LIS.getInstructionIndex(*MO.getParent());
    MachineInstr *Def = VNIDefs[LI.getVNInfoAt(UseIdx)->id];
    assert(Def && "Use with no def");
    if (!allUsesAvailableAt(Def, UseIdx, LIS, MRI, TII))
      return false;
  }

  return true;
}
148
/// Return true if every register \p MI reads holds the same value at
/// \p UseIdx as it did at MI's original position, i.e. MI could be
/// rematerialized at \p UseIdx without changing its result.
bool VirtRegAuxInfo::allUsesAvailableAt(const MachineInstr *MI,
                                        SlotIndex UseIdx,
                                        const LiveIntervals &LIS,
                                        const MachineRegisterInfo &MRI,
                                        const TargetInstrInfo &TII) {
  // Compare live values at the early-clobber register slots of both
  // positions so redefinitions at the same instruction are visible.
  SlotIndex OrigIdx = LIS.getInstructionIndex(*MI).getRegSlot(true);
  UseIdx = std::max(UseIdx, UseIdx.getRegSlot(true));
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
      continue;

    // We can't remat physreg uses, unless it is a constant or target wants
    // to ignore this use.
    if (MO.getReg().isPhysical()) {
      if (MRI.isConstantPhysReg(MO.getReg()) || TII.isIgnorableUse(MO))
        continue;
      return false;
    }

    const LiveInterval &li = LIS.getInterval(MO.getReg());
    const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
    // Operand not live at the original def: nothing to compare against.
    if (!OVNI)
      continue;

    // Don't allow rematerialization immediately after the original def.
    // It would be incorrect if OrigMI redefines the register.
    // See PR14098.
    if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
      return false;

    // The operand must carry the exact same value number at both points.
    if (OVNI != li.getVNInfoAt(UseIdx))
      return false;

    // Check that subrange is live at UseIdx.
    if (li.hasSubRanges()) {
      const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
      unsigned SubReg = MO.getSubReg();
      // Lanes actually read by this operand; all of them must be live.
      LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
                              : MRI.getMaxLaneMaskForVReg(MO.getReg());
      for (const LiveInterval::SubRange &SR : li.subranges()) {
        if ((SR.LaneMask & LM).none())
          continue;
        if (!SR.liveAt(UseIdx))
          return false;
        // Early exit if all used lanes are checked. No need to continue.
        LM &= ~SR.LaneMask;
        if (LM.none())
          break;
      }
    }
  }
  return true;
}
202
203bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
204 return any_of(Range: VRM.getRegInfo().reg_operands(Reg: LI.reg()),
205 P: [](MachineOperand &MO) {
206 MachineInstr *MI = MO.getParent();
207 if (MI->getOpcode() != TargetOpcode::STATEPOINT)
208 return false;
209 return StatepointOpers(MI).getVarIdx() <= MO.getOperandNo();
210 });
211}
212
213void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
214 float Weight = weightCalcHelper(LI);
215 // Check if unspillable.
216 if (Weight < 0)
217 return;
218 LI.setWeight(Weight);
219}
220
221static bool canMemFoldInlineAsm(LiveInterval &LI,
222 const MachineRegisterInfo &MRI) {
223 for (const MachineOperand &MO : MRI.reg_operands(Reg: LI.reg())) {
224 const MachineInstr *MI = MO.getParent();
225 if (MI->isInlineAsm() && MI->mayFoldInlineAsmRegOp(OpId: MI->getOperandNo(I: &MO)))
226 return true;
227 }
228
229 return false;
230}
231
/// Compute the spill weight of \p LI and record allocation hints on its
/// register. Returns the normalized weight, or a negative value when the
/// interval is (or becomes) unspillable; in the latter case the interval is
/// also marked not-spillable as a side effect.
float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = nullptr;
  float TotalWeight = 0;
  unsigned NumInstr = 0; // Number of instructions using LI
  SmallPtrSet<MachineInstr *, 8> Visited;

  // Existing target-provided hint, if any: (hint type, preferred register).
  std::pair<unsigned, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());

  if (LI.isSpillable()) {
    Register Reg = LI.reg();
    Register Original = VRM.getOriginal(Reg);
    const LiveInterval &OrigInt = LIS.getInterval(Original);
    // li comes from a split of OrigInt. If OrigInt was marked
    // as not spillable, make sure the new interval is marked
    // as not spillable as well.
    if (!OrigInt.isSpillable())
      LI.markNotSpillable();
  }

  // Don't recompute spill weight for an unspillable register.
  bool IsSpillable = LI.isSpillable();

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    Register Reg;
    float Weight;
    bool IsCSR;
    CopyHint(Register R, float W, bool IsCSR)
        : Reg(R), Weight(W), IsCSR(IsCSR) {}
    bool operator<(const CopyHint &Rhs) const {
      // Always prefer any physreg hint.
      if (Reg.isPhysical() != Rhs.Reg.isPhysical())
        return Reg.isPhysical();
      if (Weight != Rhs.Weight)
        return (Weight > Rhs.Weight);
      // Prefer non-CSR to CSR.
      if (Reg.isPhysical() && IsCSR != Rhs.IsCSR)
        return !IsCSR;
      return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
    }
  };

  bool IsExiting = false;
  // Accumulated hint weight per candidate register.
  SmallDenseMap<Register, float, 8> Hint;
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = MRI.reg_instr_nodbg_begin(LI.reg()),
           E = MRI.reg_instr_nodbg_end();
       I != E;) {
    MachineInstr *MI = &*(I++);

    NumInstr++;
    // Identity copies (same register and subregister on both sides)
    // contribute neither weight nor hints.
    bool identityCopy = false;
    auto DestSrc = TII.isCopyInstr(*MI);
    if (DestSrc) {
      const MachineOperand *DestRegOp = DestSrc->Destination;
      const MachineOperand *SrcRegOp = DestSrc->Source;
      identityCopy = DestRegOp->getReg() == SrcRegOp->getReg() &&
                     DestRegOp->getSubReg() == SrcRegOp->getSubReg();
    }

    if (identityCopy || MI->isImplicitDef())
      continue;
    // Count each instruction once even if it uses LI's register several
    // times.
    if (!Visited.insert(MI).second)
      continue;

    // For terminators that produce values, ask the backend if the register is
    // not spillable.
    if (TII.isUnspillableTerminator(MI) &&
        MI->definesRegister(LI.reg(), /*TRI=*/nullptr)) {
      LI.markNotSpillable();
      return -1.0f;
    }

    // Force Weight onto the stack so that x86 doesn't add hidden precision.
    stack_float_t Weight = 1.0f;
    if (IsSpillable) {
      // Get loop info for mi.
      if (MI->getParent() != MBB) {
        MBB = MI->getParent();
        const MachineLoop *Loop = Loops.getLoopFor(MBB);
        IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
      }

      // Calculate instr weight.
      bool Reads, Writes;
      std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
      Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI, PSI);

      // Give extra weight to what looks like a loop induction variable update.
      if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
        Weight *= 3;

      TotalWeight += Weight;
    }

    // Get allocation hints from copies.
    if (!TII.isCopyInstr(*MI))
      continue;
    Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
    if (HintReg && (HintReg.isVirtual() || MRI.isAllocatable(HintReg)))
      Hint[HintReg] += Weight;
  }

  // Pass all the sorted copy hints to mri.
  if (Hint.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      MRI.clearSimpleHint(LI.reg());

    // Don't add the target-type hint again.
    Register SkipReg = TargetHint.first != 0 ? TargetHint.second : Register();
    SmallVector<CopyHint, 8> RegHints;
    for (const auto &[Reg, Weight] : Hint) {
      if (Reg != SkipReg)
        RegHints.emplace_back(
            Reg, Weight,
            Reg.isPhysical() ? TRI.isCalleeSavedPhysReg(Reg, MF) : false);
    }
    // Sort by CopyHint::operator< so the best hints are added first.
    sort(RegHints);
    for (const auto &[Reg, _, __] : RegHints)
      MRI.addRegAllocationHint(LI.reg(), Reg);

    // Weakly boost the spill weight of hinted registers.
    TotalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!IsSpillable)
    return -1.0;

  // Mark li as unspillable if all live ranges are tiny and the interval
  // is not live at any reg mask. If the interval is live at a reg mask
  // spilling may be required. If li is live as use in statepoint instruction
  // spilling may be required due to if we mark interval with use in statepoint
  // as not spillable we are risky to end up with no register to allocate.
  // At the same time STATEPOINT instruction is perfectly fine to have this
  // operand on stack, so spilling such interval and folding its load from stack
  // into instruction itself makes perfect sense.
  if (LI.isZeroLength(LIS.getSlotIndexes()) &&
      !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
      !isLiveAtStatepointVarArg(LI) && !canMemFoldInlineAsm(LI, MRI)) {
    LI.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(LI, LIS, VRM, MRI, *MF.getSubtarget().getInstrInfo()))
    TotalWeight *= 0.5F;

  // Finally, we scale the weight by the scale factor of register class.
  const TargetRegisterClass *RC = MRI.getRegClass(LI.reg());
  TotalWeight *= TRI.getSpillWeightScaleFactor(RC);

  return normalize(TotalWeight, LI.getSize(), NumInstr);
}
393