1 | //===- InstrRefBasedImpl.cpp - Tracking Debug Value MIs -------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | /// \file InstrRefBasedImpl.cpp |
9 | /// |
10 | /// This is a separate implementation of LiveDebugValues, see |
11 | /// LiveDebugValues.cpp and VarLocBasedImpl.cpp for more information. |
12 | /// |
13 | /// This pass propagates variable locations between basic blocks, resolving |
14 | /// control flow conflicts between them. The problem is SSA construction, where |
15 | /// each debug instruction assigns the *value* that a variable has, and every |
16 | /// instruction where the variable is in scope uses that variable. The resulting |
17 | /// map of instruction-to-value is then translated into a register (or spill) |
18 | /// location for each variable over each instruction. |
19 | /// |
20 | /// The primary difference from normal SSA construction is that we cannot |
21 | /// _create_ PHI values that contain variable values. CodeGen has already |
22 | /// completed, and we can't alter it just to make debug-info complete. Thus: |
23 | /// we can identify function positions where we would like a PHI value for a |
24 | /// variable, but must search the MachineFunction to see whether such a PHI is |
25 | /// available. If no such PHI exists, the variable location must be dropped. |
26 | /// |
27 | /// To achieve this, we perform two kinds of analysis. First, we identify |
28 | /// every value defined by every instruction (ignoring those that only move |
29 | /// another value), then re-compute an SSA-form representation of the |
30 | /// MachineFunction, using value propagation to eliminate any un-necessary |
31 | /// PHI values. This gives us a map of every value computed in the function, |
32 | /// and its location within the register file / stack. |
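/// As an illustrative sketch (the register names and numbering here are
/// invented, not taken from any real function), values are identified by the
/// position of their def, so in
///
///   bb.1:
///     $rax = MOV64ri 0     ; defines value (bb 1, inst 1, loc $rax)
///     $rbx = COPY $rax     ; a move: $rbx now also contains (1, 1, $rax)
///
/// both registers end up containing the same value, while block live-ins and
/// live-throughs are modelled as PHI values with an instruction number of
/// zero.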
33 | /// |
34 | /// Secondly, for each variable we perform the same analysis, where each debug |
35 | /// instruction is considered a def, and every instruction where the variable |
36 | /// is in lexical scope as a use. Value propagation is used again to eliminate |
37 | /// any un-necessary PHIs. This gives us a map of each variable to the value |
38 | /// it should have in a block. |
39 | /// |
40 | /// Once both are complete, we have two maps for each block: |
41 | /// * Variables to the values they should have, |
42 | /// * Values to the register / spill slot they are located in. |
43 | /// After which we can marry-up variable values with a location, and emit |
44 | /// DBG_VALUE instructions specifying those locations. Variable locations may |
45 | /// be dropped in this process due to the desired variable value not being |
46 | /// resident in any machine location, or because there is no PHI value in any |
47 | /// location that accurately represents the desired value. The building of |
48 | /// location lists for each block is left to DbgEntityHistoryCalculator. |
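/// For example (a hypothetical sketch rather than output from any real
/// function): if the variable-value map says variable "x" should hold the
/// value defined at (bb 3, inst 2) on entry to bb.5, and the machine-value
/// map says that value is live-in to bb.5 in $rbx, then a DBG_VALUE of $rbx
/// for "x" is emitted at the top of bb.5. If no machine location holds that
/// value, nothing is emitted and the location is dropped.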
49 | /// |
50 | /// This pass is kept efficient because the size of the first SSA problem |
51 | /// is proportional to the working-set size of the function, which the compiler |
52 | /// tries to keep small. (It's also proportional to the number of blocks). |
53 | /// Additionally, we repeatedly perform the second SSA problem analysis with |
54 | /// only the variables and blocks in a single lexical scope, exploiting their |
55 | /// locality. |
56 | /// |
57 | /// ### Terminology |
58 | /// |
59 | /// A machine location is a register or spill slot, a value is something that's |
60 | /// defined by an instruction or PHI node, while a variable value is the value |
/// assigned to a variable. A variable location is a machine location that must
62 | /// contain the appropriate variable value. A value that is a PHI node is |
63 | /// occasionally called an mphi. |
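/// To illustrate the terms with a made-up snippet: after "$rax = ADD64rr ...;
/// $rbx = COPY $rax", the machine locations $rax and $rbx both contain the
/// value defined by the ADD; if a DBG_VALUE assigns that value to "x", the
/// variable value of "x" is the ADD's value, and either $rax or $rbx can be
/// chosen as the variable location.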
64 | /// |
65 | /// The first SSA problem is the "machine value location" problem, |
66 | /// because we're determining which machine locations contain which values. |
67 | /// The "locations" are constant: what's unknown is what value they contain. |
68 | /// |
69 | /// The second SSA problem (the one for variables) is the "variable value |
70 | /// problem", because it's determining what values a variable has, rather than |
71 | /// what location those values are placed in. |
72 | /// |
73 | /// TODO: |
74 | /// Overlapping fragments |
75 | /// Entry values |
76 | /// Add back DEBUG statements for debugging this |
77 | /// Collect statistics |
78 | /// |
79 | //===----------------------------------------------------------------------===// |
80 | |
81 | #include "llvm/ADT/DenseMap.h" |
82 | #include "llvm/ADT/PostOrderIterator.h" |
83 | #include "llvm/ADT/STLExtras.h" |
84 | #include "llvm/ADT/SmallPtrSet.h" |
85 | #include "llvm/ADT/SmallSet.h" |
86 | #include "llvm/ADT/SmallVector.h" |
87 | #include "llvm/BinaryFormat/Dwarf.h" |
88 | #include "llvm/CodeGen/LexicalScopes.h" |
89 | #include "llvm/CodeGen/MachineBasicBlock.h" |
90 | #include "llvm/CodeGen/MachineDominators.h" |
91 | #include "llvm/CodeGen/MachineFrameInfo.h" |
92 | #include "llvm/CodeGen/MachineFunction.h" |
93 | #include "llvm/CodeGen/MachineInstr.h" |
94 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
95 | #include "llvm/CodeGen/MachineInstrBundle.h" |
96 | #include "llvm/CodeGen/MachineMemOperand.h" |
97 | #include "llvm/CodeGen/MachineOperand.h" |
98 | #include "llvm/CodeGen/PseudoSourceValue.h" |
99 | #include "llvm/CodeGen/TargetFrameLowering.h" |
100 | #include "llvm/CodeGen/TargetInstrInfo.h" |
101 | #include "llvm/CodeGen/TargetLowering.h" |
102 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
103 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
104 | #include "llvm/Config/llvm-config.h" |
105 | #include "llvm/IR/DebugInfoMetadata.h" |
106 | #include "llvm/IR/DebugLoc.h" |
107 | #include "llvm/IR/Function.h" |
108 | #include "llvm/MC/MCRegisterInfo.h" |
109 | #include "llvm/Support/Casting.h" |
110 | #include "llvm/Support/Compiler.h" |
111 | #include "llvm/Support/Debug.h" |
112 | #include "llvm/Support/GenericIteratedDominanceFrontier.h" |
113 | #include "llvm/Support/TypeSize.h" |
114 | #include "llvm/Support/raw_ostream.h" |
115 | #include "llvm/Target/TargetMachine.h" |
116 | #include "llvm/Transforms/Utils/SSAUpdaterImpl.h" |
117 | #include <algorithm> |
118 | #include <cassert> |
119 | #include <climits> |
120 | #include <cstdint> |
121 | #include <functional> |
122 | #include <queue> |
123 | #include <tuple> |
124 | #include <utility> |
125 | #include <vector> |
126 | |
127 | #include "InstrRefBasedImpl.h" |
128 | #include "LiveDebugValues.h" |
129 | #include <optional> |
130 | |
131 | using namespace llvm; |
132 | using namespace LiveDebugValues; |
133 | |
// SSAUpdaterImpl sets DEBUG_TYPE, change it.
135 | #undef DEBUG_TYPE |
136 | #define DEBUG_TYPE "livedebugvalues" |
137 | |
138 | // Act more like the VarLoc implementation, by propagating some locations too |
139 | // far and ignoring some transfers. |
static cl::opt<bool> EmulateOldLDV("emulate-old-livedebugvalues", cl::Hidden,
                                   cl::desc("Act like old LiveDebugValues did"),
                                   cl::init(false));
143 | |
144 | // Limit for the maximum number of stack slots we should track, past which we |
145 | // will ignore any spills. InstrRefBasedLDV gathers detailed information on all |
146 | // stack slots which leads to high memory consumption, and in some scenarios |
147 | // (such as asan with very many locals) the working set of the function can be |
148 | // very large, causing many spills. In these scenarios, it is very unlikely that |
149 | // the developer has hundreds of variables live at the same time that they're |
150 | // carefully thinking about -- instead, they probably autogenerated the code. |
151 | // When this happens, gracefully stop tracking excess spill slots, rather than |
152 | // consuming all the developer's memory. |
static cl::opt<unsigned>
    StackWorkingSetLimit("livedebugvalues-max-stack-slots", cl::Hidden,
                         cl::desc("livedebugvalues-stack-ws-limit"),
                         cl::init(250));
157 | |
158 | DbgOpID DbgOpID::UndefID = DbgOpID(0xffffffff); |
159 | |
160 | /// Tracker for converting machine value locations and variable values into |
161 | /// variable locations (the output of LiveDebugValues), recorded as DBG_VALUEs |
162 | /// specifying block live-in locations and transfers within blocks. |
163 | /// |
164 | /// Operating on a per-block basis, this class takes a (pre-loaded) MLocTracker |
165 | /// and must be initialized with the set of variable values that are live-in to |
166 | /// the block. The caller then repeatedly calls process(). TransferTracker picks |
167 | /// out variable locations for the live-in variable values (if there _is_ a |
168 | /// location) and creates the corresponding DBG_VALUEs. Then, as the block is |
169 | /// stepped through, transfers of values between machine locations are |
170 | /// identified and if profitable, a DBG_VALUE created. |
171 | /// |
172 | /// This is where debug use-before-defs would be resolved: a variable with an |
173 | /// unavailable value could materialize in the middle of a block, when the |
174 | /// value becomes available. Or, we could detect clobbers and re-specify the |
175 | /// variable in a backup location. (XXX these are unimplemented). |
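/// A rough usage sketch (simplified; the variable names are illustrative and
/// the real driver lives elsewhere in this file):
///
///   TransferTracker TTracker(TII, MTracker, MF, DVMap, TRI, CalleeSavedRegs,
///                            ShouldEmitEntryValues);
///   TTracker.loadInlocs(MBB, MLocLiveIns, DbgOpStore, VLocLiveIns, NumLocs);
///   // ... then, while stepping through MBB, call redefVar() on DBG_VALUEs,
///   // clobberMloc() on clobbers and transferMlocs() on copies/spills, and
///   // finally insert the accumulated TTracker.Transfers.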
176 | class TransferTracker { |
177 | public: |
178 | const TargetInstrInfo *TII; |
179 | const TargetLowering *TLI; |
180 | /// This machine location tracker is assumed to always contain the up-to-date |
181 | /// value mapping for all machine locations. TransferTracker only reads |
182 | /// information from it. (XXX make it const?) |
183 | MLocTracker *MTracker; |
184 | MachineFunction &MF; |
185 | const DebugVariableMap &DVMap; |
186 | bool ShouldEmitDebugEntryValues; |
187 | |
188 | /// Record of all changes in variable locations at a block position. Awkwardly |
189 | /// we allow inserting either before or after the point: MBB != nullptr |
190 | /// indicates it's before, otherwise after. |
191 | struct Transfer { |
    MachineBasicBlock::instr_iterator Pos; /// Position to insert DBG_VALUEs.
    MachineBasicBlock *MBB; /// Non-null if we insert before Pos, else after.
194 | /// Vector of DBG_VALUEs to insert. Store with their DebugVariableID so that |
195 | /// they can be sorted into a stable order for emission at a later time. |
196 | SmallVector<std::pair<DebugVariableID, MachineInstr *>, 4> Insts; |
197 | }; |
198 | |
199 | /// Stores the resolved operands (machine locations and constants) and |
200 | /// qualifying meta-information needed to construct a concrete DBG_VALUE-like |
201 | /// instruction. |
202 | struct ResolvedDbgValue { |
203 | SmallVector<ResolvedDbgOp> Ops; |
204 | DbgValueProperties Properties; |
205 | |
206 | ResolvedDbgValue(SmallVectorImpl<ResolvedDbgOp> &Ops, |
207 | DbgValueProperties Properties) |
208 | : Ops(Ops.begin(), Ops.end()), Properties(Properties) {} |
209 | |
210 | /// Returns all the LocIdx values used in this struct, in the order in which |
211 | /// they appear as operands in the debug value; may contain duplicates. |
212 | auto loc_indices() const { |
213 | return map_range( |
214 | C: make_filter_range( |
215 | Range: Ops, Pred: [](const ResolvedDbgOp &Op) { return !Op.IsConst; }), |
216 | F: [](const ResolvedDbgOp &Op) { return Op.Loc; }); |
217 | } |
218 | }; |
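  // Example use of loc_indices() (a sketch; "RDV" and "VarID" are
  // placeholders): iterate only the machine locations a resolved value
  // depends on, skipping constants, e.g.
  //   for (LocIdx L : RDV.loc_indices())
  //     ActiveMLocs[L].insert(VarID);
  // which is the pattern used to maintain ActiveMLocs below.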
219 | |
220 | /// Collection of transfers (DBG_VALUEs) to be inserted. |
221 | SmallVector<Transfer, 32> Transfers; |
222 | |
223 | /// Local cache of what-value-is-in-what-LocIdx. Used to identify differences |
  /// between TransferTracker's view of variable locations and MLocTracker's. For
225 | /// example, MLocTracker observes all clobbers, but TransferTracker lazily |
226 | /// does not. |
227 | SmallVector<ValueIDNum, 32> VarLocs; |
228 | |
  /// Map from LocIdxes to the DebugVariables that are based in that location.
  /// Maintained while stepping through the block. Not accurate if
231 | /// VarLocs[Idx] != MTracker->LocIdxToIDNum[Idx]. |
232 | DenseMap<LocIdx, SmallSet<DebugVariableID, 4>> ActiveMLocs; |
233 | |
  /// Map from DebugVariable to its current location and qualifying meta
235 | /// information. To be used in conjunction with ActiveMLocs to construct |
236 | /// enough information for the DBG_VALUEs for a particular LocIdx. |
237 | DenseMap<DebugVariableID, ResolvedDbgValue> ActiveVLocs; |
238 | |
239 | /// Temporary cache of DBG_VALUEs to be entered into the Transfers collection. |
240 | SmallVector<std::pair<DebugVariableID, MachineInstr *>, 4> PendingDbgValues; |
241 | |
242 | /// Record of a use-before-def: created when a value that's live-in to the |
243 | /// current block isn't available in any machine location, but it will be |
244 | /// defined in this block. |
245 | struct UseBeforeDef { |
246 | /// Value of this variable, def'd in block. |
247 | SmallVector<DbgOp> Values; |
248 | /// Identity of this variable. |
249 | DebugVariableID VarID; |
250 | /// Additional variable properties. |
251 | DbgValueProperties Properties; |
252 | UseBeforeDef(ArrayRef<DbgOp> Values, DebugVariableID VarID, |
253 | const DbgValueProperties &Properties) |
254 | : Values(Values), VarID(VarID), Properties(Properties) {} |
255 | }; |
256 | |
257 | /// Map from instruction index (within the block) to the set of UseBeforeDefs |
258 | /// that become defined at that instruction. |
259 | DenseMap<unsigned, SmallVector<UseBeforeDef, 1>> UseBeforeDefs; |
260 | |
261 | /// The set of variables that are in UseBeforeDefs and can become a location |
262 | /// once the relevant value is defined. An element being erased from this |
263 | /// collection prevents the use-before-def materializing. |
264 | DenseSet<DebugVariableID> UseBeforeDefVariables; |
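  // Use-before-def, illustrated (block contents invented): a live-in variable
  // value may be one this block has not computed yet, e.g.
  //   bb.2:
  //     ; "x" should have the value defined by instruction 4 below
  //     ...
  //     $rax = ...            ; instruction 4: the value becomes available
  // No DBG_VALUE can be emitted at the block entry; instead an entry is made
  // in UseBeforeDefs, and checkInstForNewValues() emits the DBG_VALUE once
  // instruction 4 has been processed (unless the variable was redefined or
  // the location clobbered in the meantime).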
265 | |
266 | const TargetRegisterInfo &TRI; |
267 | const BitVector &CalleeSavedRegs; |
268 | |
269 | TransferTracker(const TargetInstrInfo *TII, MLocTracker *MTracker, |
270 | MachineFunction &MF, const DebugVariableMap &DVMap, |
271 | const TargetRegisterInfo &TRI, |
272 | const BitVector &CalleeSavedRegs, |
273 | bool ShouldEmitDebugEntryValues) |
274 | : TII(TII), MTracker(MTracker), MF(MF), DVMap(DVMap), TRI(TRI), |
275 | CalleeSavedRegs(CalleeSavedRegs) { |
276 | TLI = MF.getSubtarget().getTargetLowering(); |
277 | this->ShouldEmitDebugEntryValues = ShouldEmitDebugEntryValues; |
278 | } |
279 | |
280 | bool isCalleeSaved(LocIdx L) const { |
281 | unsigned Reg = MTracker->LocIdxToLocID[L]; |
282 | if (Reg >= MTracker->NumRegs) |
283 | return false; |
284 | for (MCRegAliasIterator RAI(Reg, &TRI, true); RAI.isValid(); ++RAI) |
285 | if (CalleeSavedRegs.test(Idx: (*RAI).id())) |
286 | return true; |
287 | return false; |
288 | }; |
289 | |
290 | // An estimate of the expected lifespan of values at a machine location, with |
291 | // a greater value corresponding to a longer expected lifespan, i.e. spill |
292 | // slots generally live longer than callee-saved registers which generally |
293 | // live longer than non-callee-saved registers. The minimum value of 0 |
294 | // corresponds to an illegal location that cannot have a "lifespan" at all. |
295 | enum class LocationQuality : unsigned char { |
296 | Illegal = 0, |
297 | Register, |
298 | CalleeSavedRegister, |
299 | SpillSlot, |
300 | Best = SpillSlot |
301 | }; |
302 | |
303 | class LocationAndQuality { |
304 | unsigned Location : 24; |
305 | unsigned Quality : 8; |
306 | |
307 | public: |
308 | LocationAndQuality() : Location(0), Quality(0) {} |
309 | LocationAndQuality(LocIdx L, LocationQuality Q) |
310 | : Location(L.asU64()), Quality(static_cast<unsigned>(Q)) {} |
311 | LocIdx getLoc() const { |
312 | if (!Quality) |
313 | return LocIdx::MakeIllegalLoc(); |
314 | return LocIdx(Location); |
315 | } |
316 | LocationQuality getQuality() const { return LocationQuality(Quality); } |
317 | bool isIllegal() const { return !Quality; } |
318 | bool isBest() const { return getQuality() == LocationQuality::Best; } |
319 | }; |
320 | |
321 | using ValueLocPair = std::pair<ValueIDNum, LocationAndQuality>; |
322 | |
323 | static inline bool ValueToLocSort(const ValueLocPair &A, |
324 | const ValueLocPair &B) { |
325 | return A.first < B.first; |
326 | }; |
327 | |
  // Returns the LocationQuality for the location L iff the quality of L is
  // strictly greater than the provided minimum quality.
330 | std::optional<LocationQuality> |
331 | getLocQualityIfBetter(LocIdx L, LocationQuality Min) const { |
332 | if (L.isIllegal()) |
333 | return std::nullopt; |
334 | if (Min >= LocationQuality::SpillSlot) |
335 | return std::nullopt; |
336 | if (MTracker->isSpill(Idx: L)) |
337 | return LocationQuality::SpillSlot; |
338 | if (Min >= LocationQuality::CalleeSavedRegister) |
339 | return std::nullopt; |
340 | if (isCalleeSaved(L)) |
341 | return LocationQuality::CalleeSavedRegister; |
342 | if (Min >= LocationQuality::Register) |
343 | return std::nullopt; |
344 | return LocationQuality::Register; |
345 | } |
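  // How the ordering is used when scanning locations for a value (a sketch
  // with an illustrative variable name): start from an Illegal placeholder
  // and keep upgrading to longer-lived locations as they are found:
  //   LocationAndQuality Best;                    // starts Illegal
  //   if (auto Q = getLocQualityIfBetter(L, Best.getQuality()))
  //     Best = LocationAndQuality(L, *Q);         // register < CSR < spill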
346 | |
347 | /// For a variable \p Var with the live-in value \p Value, attempts to resolve |
348 | /// the DbgValue to a concrete DBG_VALUE, emitting that value and loading the |
349 | /// tracking information to track Var throughout the block. |
350 | /// \p ValueToLoc is a map containing the best known location for every |
351 | /// ValueIDNum that Value may use. |
352 | /// \p MBB is the basic block that we are loading the live-in value for. |
353 | /// \p DbgOpStore is the map containing the DbgOpID->DbgOp mapping needed to |
354 | /// determine the values used by Value. |
355 | void loadVarInloc(MachineBasicBlock &MBB, DbgOpIDMap &DbgOpStore, |
356 | const SmallVectorImpl<ValueLocPair> &ValueToLoc, |
357 | DebugVariableID VarID, DbgValue Value) { |
358 | SmallVector<DbgOp> DbgOps; |
359 | SmallVector<ResolvedDbgOp> ResolvedDbgOps; |
360 | bool IsValueValid = true; |
361 | unsigned LastUseBeforeDef = 0; |
362 | bool DbgLocAvailableAndIsEntryVal = false; |
363 | |
364 | // If every value used by the incoming DbgValue is available at block |
365 | // entry, ResolvedDbgOps will contain the machine locations/constants for |
366 | // those values and will be used to emit a debug location. |
367 | // If one or more values are not yet available, but will all be defined in |
368 | // this block, then LastUseBeforeDef will track the instruction index in |
369 | // this BB at which the last of those values is defined, DbgOps will |
370 | // contain the values that we will emit when we reach that instruction. |
371 | // If one or more values are undef or not available throughout this block, |
372 | // and we can't recover as an entry value, we set IsValueValid=false and |
373 | // skip this variable. |
374 | for (DbgOpID ID : Value.getDbgOpIDs()) { |
375 | DbgOp Op = DbgOpStore.find(ID); |
376 | DbgOps.push_back(Elt: Op); |
377 | if (ID.isUndef()) { |
378 | IsValueValid = false; |
379 | break; |
380 | } |
381 | if (ID.isConst()) { |
382 | ResolvedDbgOps.push_back(Elt: Op.MO); |
383 | continue; |
384 | } |
385 | |
386 | // Search for the desired ValueIDNum, to examine the best location found |
387 | // for it. Use an empty ValueLocPair to search for an entry in ValueToLoc. |
388 | const ValueIDNum &Num = Op.ID; |
389 | ValueLocPair Probe(Num, LocationAndQuality()); |
390 | auto ValuesPreferredLoc = |
391 | llvm::lower_bound(Range: ValueToLoc, Value&: Probe, C: ValueToLocSort); |
392 | |
393 | // There must be a legitimate entry found for Num. |
394 | assert(ValuesPreferredLoc != ValueToLoc.end() && |
395 | ValuesPreferredLoc->first == Num); |
396 | |
397 | if (ValuesPreferredLoc->second.isIllegal()) { |
398 | // If it's a def that occurs in this block, register it as a |
399 | // use-before-def to be resolved as we step through the block. |
400 | // Continue processing values so that we add any other UseBeforeDef |
401 | // entries needed for later. |
402 | if (Num.getBlock() == (unsigned)MBB.getNumber() && !Num.isPHI()) { |
403 | LastUseBeforeDef = std::max(a: LastUseBeforeDef, |
404 | b: static_cast<unsigned>(Num.getInst())); |
405 | continue; |
406 | } |
407 | recoverAsEntryValue(VarID, Prop: Value.Properties, Num); |
408 | IsValueValid = false; |
409 | break; |
410 | } |
411 | |
412 | // Defer modifying ActiveVLocs until after we've confirmed we have a |
413 | // live range. |
414 | LocIdx M = ValuesPreferredLoc->second.getLoc(); |
415 | ResolvedDbgOps.push_back(Elt: M); |
416 | if (Value.Properties.DIExpr->isEntryValue()) |
417 | DbgLocAvailableAndIsEntryVal = true; |
418 | } |
419 | |
420 | // If we cannot produce a valid value for the LiveIn value within this |
421 | // block, skip this variable. |
422 | if (!IsValueValid) |
423 | return; |
424 | |
425 | // Add UseBeforeDef entry for the last value to be defined in this block. |
426 | if (LastUseBeforeDef) { |
427 | addUseBeforeDef(VarID, Properties: Value.Properties, DbgOps, Inst: LastUseBeforeDef); |
428 | return; |
429 | } |
430 | |
431 | auto &[Var, DILoc] = DVMap.lookupDVID(ID: VarID); |
432 | PendingDbgValues.push_back( |
433 | Elt: std::make_pair(x&: VarID, y: &*MTracker->emitLoc(DbgOps: ResolvedDbgOps, Var, DILoc, |
434 | Properties: Value.Properties))); |
435 | |
436 | // If the location is available at block entry and is an entry value, skip |
    // tracking and recording the transfer.
438 | if (DbgLocAvailableAndIsEntryVal) |
439 | return; |
440 | |
441 | // The LiveIn value is available at block entry, begin tracking and record |
442 | // the transfer. |
443 | for (const ResolvedDbgOp &Op : ResolvedDbgOps) |
444 | if (!Op.IsConst) |
445 | ActiveMLocs[Op.Loc].insert(V: VarID); |
446 | auto NewValue = ResolvedDbgValue{ResolvedDbgOps, Value.Properties}; |
447 | auto Result = ActiveVLocs.insert(KV: std::make_pair(x&: VarID, y&: NewValue)); |
448 | if (!Result.second) |
449 | Result.first->second = NewValue; |
450 | } |
451 | |
452 | /// Load object with live-in variable values. \p mlocs contains the live-in |
  /// values in each machine location, while \p vlocs contains the live-in
  /// variable values. This method picks locations for the live-in variables,
455 | /// creates DBG_VALUEs and puts them in #Transfers, then prepares the other |
456 | /// object fields to track variable locations as we step through the block. |
457 | /// FIXME: could just examine mloctracker instead of passing in \p mlocs? |
458 | void |
459 | loadInlocs(MachineBasicBlock &MBB, ValueTable &MLocs, DbgOpIDMap &DbgOpStore, |
460 | const SmallVectorImpl<std::pair<DebugVariableID, DbgValue>> &VLocs, |
461 | unsigned NumLocs) { |
462 | ActiveMLocs.clear(); |
463 | ActiveVLocs.clear(); |
464 | VarLocs.clear(); |
465 | VarLocs.reserve(N: NumLocs); |
466 | UseBeforeDefs.clear(); |
467 | UseBeforeDefVariables.clear(); |
468 | |
469 | // Mapping of the preferred locations for each value. Collected into this |
470 | // vector then sorted for easy searching. |
471 | SmallVector<ValueLocPair, 16> ValueToLoc; |
472 | |
    // Initialize the preferred-location map with illegal locations, to be
474 | // filled in later. |
475 | for (const auto &VLoc : VLocs) |
476 | if (VLoc.second.Kind == DbgValue::Def) |
477 | for (DbgOpID OpID : VLoc.second.getDbgOpIDs()) |
478 | if (!OpID.ID.IsConst) |
479 | ValueToLoc.push_back( |
480 | Elt: {DbgOpStore.find(ID: OpID).ID, LocationAndQuality()}); |
481 | |
482 | llvm::sort(C&: ValueToLoc, Comp: ValueToLocSort); |
483 | ActiveMLocs.reserve(NumEntries: VLocs.size()); |
484 | ActiveVLocs.reserve(NumEntries: VLocs.size()); |
485 | |
486 | // Produce a map of value numbers to the current machine locs they live |
487 | // in. When emulating VarLocBasedImpl, there should only be one |
488 | // location; when not, we get to pick. |
489 | for (auto Location : MTracker->locations()) { |
490 | LocIdx Idx = Location.Idx; |
491 | ValueIDNum &VNum = MLocs[Idx.asU64()]; |
492 | if (VNum == ValueIDNum::EmptyValue) |
493 | continue; |
494 | VarLocs.push_back(Elt: VNum); |
495 | |
496 | // Is there a variable that wants a location for this value? If not, skip. |
497 | ValueLocPair Probe(VNum, LocationAndQuality()); |
498 | auto VIt = llvm::lower_bound(Range&: ValueToLoc, Value&: Probe, C: ValueToLocSort); |
499 | if (VIt == ValueToLoc.end() || VIt->first != VNum) |
500 | continue; |
501 | |
502 | auto &Previous = VIt->second; |
503 | // If this is the first location with that value, pick it. Otherwise, |
504 | // consider whether it's a "longer term" location. |
505 | std::optional<LocationQuality> ReplacementQuality = |
506 | getLocQualityIfBetter(L: Idx, Min: Previous.getQuality()); |
507 | if (ReplacementQuality) |
508 | Previous = LocationAndQuality(Idx, *ReplacementQuality); |
509 | } |
510 | |
511 | // Now map variables to their picked LocIdxes. |
512 | for (const auto &Var : VLocs) { |
513 | loadVarInloc(MBB, DbgOpStore, ValueToLoc, VarID: Var.first, Value: Var.second); |
514 | } |
515 | flushDbgValues(Pos: MBB.begin(), MBB: &MBB); |
516 | } |
517 | |
518 | /// Record that \p Var has value \p ID, a value that becomes available |
519 | /// later in the function. |
520 | void addUseBeforeDef(DebugVariableID VarID, |
521 | const DbgValueProperties &Properties, |
522 | const SmallVectorImpl<DbgOp> &DbgOps, unsigned Inst) { |
523 | UseBeforeDefs[Inst].emplace_back(Args: DbgOps, Args&: VarID, Args: Properties); |
524 | UseBeforeDefVariables.insert(V: VarID); |
525 | } |
526 | |
527 | /// After the instruction at index \p Inst and position \p pos has been |
528 | /// processed, check whether it defines a variable value in a use-before-def. |
529 | /// If so, and the variable value hasn't changed since the start of the |
530 | /// block, create a DBG_VALUE. |
531 | void checkInstForNewValues(unsigned Inst, MachineBasicBlock::iterator pos) { |
532 | auto MIt = UseBeforeDefs.find(Val: Inst); |
533 | if (MIt == UseBeforeDefs.end()) |
534 | return; |
535 | |
536 | // Map of values to the locations that store them for every value used by |
537 | // the variables that may have become available. |
538 | SmallDenseMap<ValueIDNum, LocationAndQuality> ValueToLoc; |
539 | |
540 | // Populate ValueToLoc with illegal default mappings for every value used by |
541 | // any UseBeforeDef variables for this instruction. |
542 | for (auto &Use : MIt->second) { |
543 | if (!UseBeforeDefVariables.count(V: Use.VarID)) |
544 | continue; |
545 | |
546 | for (DbgOp &Op : Use.Values) { |
547 | assert(!Op.isUndef() && "UseBeforeDef erroneously created for a " |
548 | "DbgValue with undef values." ); |
549 | if (Op.IsConst) |
550 | continue; |
551 | |
552 | ValueToLoc.insert(KV: {Op.ID, LocationAndQuality()}); |
553 | } |
554 | } |
555 | |
556 | // Exit early if we have no DbgValues to produce. |
557 | if (ValueToLoc.empty()) |
558 | return; |
559 | |
560 | // Determine the best location for each desired value. |
561 | for (auto Location : MTracker->locations()) { |
562 | LocIdx Idx = Location.Idx; |
563 | ValueIDNum &LocValueID = Location.Value; |
564 | |
565 | // Is there a variable that wants a location for this value? If not, skip. |
566 | auto VIt = ValueToLoc.find(Val: LocValueID); |
567 | if (VIt == ValueToLoc.end()) |
568 | continue; |
569 | |
570 | auto &Previous = VIt->second; |
571 | // If this is the first location with that value, pick it. Otherwise, |
572 | // consider whether it's a "longer term" location. |
573 | std::optional<LocationQuality> ReplacementQuality = |
574 | getLocQualityIfBetter(L: Idx, Min: Previous.getQuality()); |
575 | if (ReplacementQuality) |
576 | Previous = LocationAndQuality(Idx, *ReplacementQuality); |
577 | } |
578 | |
579 | // Using the map of values to locations, produce a final set of values for |
580 | // this variable. |
581 | for (auto &Use : MIt->second) { |
582 | if (!UseBeforeDefVariables.count(V: Use.VarID)) |
583 | continue; |
584 | |
585 | SmallVector<ResolvedDbgOp> DbgOps; |
586 | |
587 | for (DbgOp &Op : Use.Values) { |
588 | if (Op.IsConst) { |
589 | DbgOps.push_back(Elt: Op.MO); |
590 | continue; |
591 | } |
592 | LocIdx NewLoc = ValueToLoc.find(Val: Op.ID)->second.getLoc(); |
593 | if (NewLoc.isIllegal()) |
594 | break; |
595 | DbgOps.push_back(Elt: NewLoc); |
596 | } |
597 | |
598 | // If at least one value used by this debug value is no longer available, |
599 | // i.e. one of the values was killed before we finished defining all of |
600 | // the values used by this variable, discard. |
601 | if (DbgOps.size() != Use.Values.size()) |
602 | continue; |
603 | |
604 | // Otherwise, we're good to go. |
605 | auto &[Var, DILoc] = DVMap.lookupDVID(ID: Use.VarID); |
606 | PendingDbgValues.push_back(Elt: std::make_pair( |
607 | x&: Use.VarID, y: MTracker->emitLoc(DbgOps, Var, DILoc, Properties: Use.Properties))); |
608 | } |
609 | flushDbgValues(Pos: pos, MBB: nullptr); |
610 | } |
611 | |
612 | /// Helper to move created DBG_VALUEs into Transfers collection. |
613 | void flushDbgValues(MachineBasicBlock::iterator Pos, MachineBasicBlock *MBB) { |
614 | if (PendingDbgValues.size() == 0) |
615 | return; |
616 | |
617 | // Pick out the instruction start position. |
618 | MachineBasicBlock::instr_iterator BundleStart; |
619 | if (MBB && Pos == MBB->begin()) |
620 | BundleStart = MBB->instr_begin(); |
621 | else |
622 | BundleStart = getBundleStart(I: Pos->getIterator()); |
623 | |
624 | Transfers.push_back(Elt: {.Pos: BundleStart, .MBB: MBB, .Insts: PendingDbgValues}); |
625 | PendingDbgValues.clear(); |
626 | } |
627 | |
628 | bool isEntryValueVariable(const DebugVariable &Var, |
629 | const DIExpression *Expr) const { |
630 | if (!Var.getVariable()->isParameter()) |
631 | return false; |
632 | |
633 | if (Var.getInlinedAt()) |
634 | return false; |
635 | |
636 | if (Expr->getNumElements() > 0 && !Expr->isDeref()) |
637 | return false; |
638 | |
639 | return true; |
640 | } |
641 | |
642 | bool isEntryValueValue(const ValueIDNum &Val) const { |
643 | // Must be in entry block (block number zero), and be a PHI / live-in value. |
644 | if (Val.getBlock() || !Val.isPHI()) |
645 | return false; |
646 | |
647 | // Entry values must enter in a register. |
648 | if (MTracker->isSpill(Idx: Val.getLoc())) |
649 | return false; |
650 | |
651 | Register SP = TLI->getStackPointerRegisterToSaveRestore(); |
652 | Register FP = TRI.getFrameRegister(MF); |
653 | Register Reg = MTracker->LocIdxToLocID[Val.getLoc()]; |
654 | return Reg != SP && Reg != FP; |
655 | } |
656 | |
657 | bool recoverAsEntryValue(DebugVariableID VarID, |
658 | const DbgValueProperties &Prop, |
659 | const ValueIDNum &Num) { |
660 | // Is this variable location a candidate to be an entry value. First, |
661 | // should we be trying this at all? |
662 | if (!ShouldEmitDebugEntryValues) |
663 | return false; |
664 | |
665 | const DIExpression *DIExpr = Prop.DIExpr; |
666 | |
667 | // We don't currently emit entry values for DBG_VALUE_LISTs. |
668 | if (Prop.IsVariadic) { |
669 | // If this debug value can be converted to be non-variadic, then do so; |
670 | // otherwise give up. |
671 | auto NonVariadicExpression = |
672 | DIExpression::convertToNonVariadicExpression(Expr: DIExpr); |
673 | if (!NonVariadicExpression) |
674 | return false; |
675 | DIExpr = *NonVariadicExpression; |
676 | } |
677 | |
678 | auto &[Var, DILoc] = DVMap.lookupDVID(ID: VarID); |
679 | |
680 | // If the expression is a DW_OP_entry_value, emit the variable location |
681 | // as-is. |
682 | if (DIExpr->isEntryValue()) { |
683 | Register Reg = MTracker->LocIdxToLocID[Num.getLoc()]; |
684 | MachineOperand MO = MachineOperand::CreateReg(Reg, isDef: false); |
685 | PendingDbgValues.push_back(Elt: std::make_pair( |
686 | x&: VarID, y: &*emitMOLoc(MO, Var, Properties: {DIExpr, Prop.Indirect, false}))); |
687 | return true; |
688 | } |
689 | |
690 | // Is the variable appropriate for entry values (i.e., is a parameter). |
691 | if (!isEntryValueVariable(Var, Expr: DIExpr)) |
692 | return false; |
693 | |
694 | // Is the value assigned to this variable still the entry value? |
695 | if (!isEntryValueValue(Val: Num)) |
696 | return false; |
697 | |
698 | // Emit a variable location using an entry value expression. |
699 | DIExpression *NewExpr = |
700 | DIExpression::prepend(Expr: DIExpr, Flags: DIExpression::EntryValue); |
701 | Register Reg = MTracker->LocIdxToLocID[Num.getLoc()]; |
702 | MachineOperand MO = MachineOperand::CreateReg(Reg, isDef: false); |
703 | PendingDbgValues.push_back(Elt: std::make_pair( |
704 | x&: VarID, y: &*emitMOLoc(MO, Var, Properties: {NewExpr, Prop.Indirect, false}))); |
705 | return true; |
706 | } |
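  // For illustration (operands invented): recovering the value of a parameter
  // that entered the function in $rdi and has since been clobbered turns a
  // plain register location into one qualified by an entry-value expression,
  // roughly
  //   DBG_VALUE $rdi, $noreg, !"param", !DIExpression()
  //     -->
  //   DBG_VALUE $rdi, $noreg, !"param",
  //             !DIExpression(DW_OP_LLVM_entry_value, 1)
  // with the rest of the original expression's operations following.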
707 | |
708 | /// Change a variable value after encountering a DBG_VALUE inside a block. |
709 | void redefVar(const MachineInstr &MI) { |
710 | DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(), |
711 | MI.getDebugLoc()->getInlinedAt()); |
712 | DbgValueProperties Properties(MI); |
713 | DebugVariableID VarID = DVMap.getDVID(Var); |
714 | |
715 | // Ignore non-register locations, we don't transfer those. |
716 | if (MI.isUndefDebugValue() || MI.getDebugExpression()->isEntryValue() || |
717 | all_of(Range: MI.debug_operands(), |
718 | P: [](const MachineOperand &MO) { return !MO.isReg(); })) { |
719 | auto It = ActiveVLocs.find(Val: VarID); |
720 | if (It != ActiveVLocs.end()) { |
721 | for (LocIdx Loc : It->second.loc_indices()) |
722 | ActiveMLocs[Loc].erase(V: VarID); |
723 | ActiveVLocs.erase(I: It); |
724 | } |
725 | // Any use-before-defs no longer apply. |
726 | UseBeforeDefVariables.erase(V: VarID); |
727 | return; |
728 | } |
729 | |
730 | SmallVector<ResolvedDbgOp> NewLocs; |
731 | for (const MachineOperand &MO : MI.debug_operands()) { |
732 | if (MO.isReg()) { |
733 | // Any undef regs have already been filtered out above. |
734 | Register Reg = MO.getReg(); |
735 | LocIdx NewLoc = MTracker->getRegMLoc(R: Reg); |
736 | NewLocs.push_back(Elt: NewLoc); |
737 | } else { |
738 | NewLocs.push_back(Elt: MO); |
739 | } |
740 | } |
741 | |
742 | redefVar(MI, Properties, NewLocs); |
743 | } |
744 | |
745 | /// Handle a change in variable location within a block. Terminate the |
  /// variable's current location, and record the value it now refers to, so
747 | /// that we can detect location transfers later on. |
748 | void redefVar(const MachineInstr &MI, const DbgValueProperties &Properties, |
749 | SmallVectorImpl<ResolvedDbgOp> &NewLocs) { |
750 | DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(), |
751 | MI.getDebugLoc()->getInlinedAt()); |
752 | DebugVariableID VarID = DVMap.getDVID(Var); |
753 | // Any use-before-defs no longer apply. |
754 | UseBeforeDefVariables.erase(V: VarID); |
755 | |
756 | // Erase any previous location. |
757 | auto It = ActiveVLocs.find(Val: VarID); |
758 | if (It != ActiveVLocs.end()) { |
759 | for (LocIdx Loc : It->second.loc_indices()) |
760 | ActiveMLocs[Loc].erase(V: VarID); |
761 | } |
762 | |
763 | // If there _is_ no new location, all we had to do was erase. |
764 | if (NewLocs.empty()) { |
765 | if (It != ActiveVLocs.end()) |
766 | ActiveVLocs.erase(I: It); |
767 | return; |
768 | } |
769 | |
770 | SmallVector<std::pair<LocIdx, DebugVariableID>> LostMLocs; |
771 | for (ResolvedDbgOp &Op : NewLocs) { |
772 | if (Op.IsConst) |
773 | continue; |
774 | |
775 | LocIdx NewLoc = Op.Loc; |
776 | |
777 | // Check whether our local copy of values-by-location in #VarLocs is out |
778 | // of date. Wipe old tracking data for the location if it's been clobbered |
779 | // in the meantime. |
780 | if (MTracker->readMLoc(L: NewLoc) != VarLocs[NewLoc.asU64()]) { |
781 | for (const auto &P : ActiveMLocs[NewLoc]) { |
782 | auto LostVLocIt = ActiveVLocs.find(Val: P); |
783 | if (LostVLocIt != ActiveVLocs.end()) { |
784 | for (LocIdx Loc : LostVLocIt->second.loc_indices()) { |
785 | // Every active variable mapping for NewLoc will be cleared, no |
786 | // need to track individual variables. |
787 | if (Loc == NewLoc) |
788 | continue; |
789 | LostMLocs.emplace_back(Args&: Loc, Args: P); |
790 | } |
791 | } |
792 | ActiveVLocs.erase(Val: P); |
793 | } |
794 | for (const auto &LostMLoc : LostMLocs) |
795 | ActiveMLocs[LostMLoc.first].erase(V: LostMLoc.second); |
796 | LostMLocs.clear(); |
797 | It = ActiveVLocs.find(Val: VarID); |
798 | ActiveMLocs[NewLoc.asU64()].clear(); |
799 | VarLocs[NewLoc.asU64()] = MTracker->readMLoc(L: NewLoc); |
800 | } |
801 | |
802 | ActiveMLocs[NewLoc].insert(V: VarID); |
803 | } |
804 | |
805 | if (It == ActiveVLocs.end()) { |
806 | ActiveVLocs.insert( |
807 | KV: std::make_pair(x&: VarID, y: ResolvedDbgValue(NewLocs, Properties))); |
808 | } else { |
809 | It->second.Ops.assign(RHS: NewLocs); |
810 | It->second.Properties = Properties; |
811 | } |
812 | } |
813 | |
814 | /// Account for a location \p mloc being clobbered. Examine the variable |
  /// locations that will be terminated, and try to recover them by using
816 | /// another location. Optionally, given \p MakeUndef, emit a DBG_VALUE to |
817 | /// explicitly terminate a location if it can't be recovered. |
818 | void clobberMloc(LocIdx MLoc, MachineBasicBlock::iterator Pos, |
819 | bool MakeUndef = true) { |
820 | auto ActiveMLocIt = ActiveMLocs.find(Val: MLoc); |
821 | if (ActiveMLocIt == ActiveMLocs.end()) |
822 | return; |
823 | |
824 | // What was the old variable value? |
825 | ValueIDNum OldValue = VarLocs[MLoc.asU64()]; |
826 | clobberMloc(MLoc, OldValue, Pos, MakeUndef); |
827 | } |
828 | /// Overload that takes an explicit value \p OldValue for when the value in |
829 | /// \p MLoc has changed and the TransferTracker's locations have not been |
830 | /// updated yet. |
831 | void clobberMloc(LocIdx MLoc, ValueIDNum OldValue, |
832 | MachineBasicBlock::iterator Pos, bool MakeUndef = true) { |
833 | auto ActiveMLocIt = ActiveMLocs.find(Val: MLoc); |
834 | if (ActiveMLocIt == ActiveMLocs.end()) |
835 | return; |
836 | |
837 | VarLocs[MLoc.asU64()] = ValueIDNum::EmptyValue; |
838 | |
839 | // Examine the remaining variable locations: if we can find the same value |
840 | // again, we can recover the location. |
841 | std::optional<LocIdx> NewLoc; |
842 | for (auto Loc : MTracker->locations()) |
843 | if (Loc.Value == OldValue) |
844 | NewLoc = Loc.Idx; |
845 | |
846 | // If there is no location, and we weren't asked to make the variable |
847 | // explicitly undef, then stop here. |
848 | if (!NewLoc && !MakeUndef) { |
849 | // Try and recover a few more locations with entry values. |
850 | for (DebugVariableID VarID : ActiveMLocIt->second) { |
851 | auto &Prop = ActiveVLocs.find(Val: VarID)->second.Properties; |
852 | recoverAsEntryValue(VarID, Prop, Num: OldValue); |
853 | } |
854 | flushDbgValues(Pos, MBB: nullptr); |
855 | return; |
856 | } |
857 | |
858 | // Examine all the variables based on this location. |
859 | DenseSet<DebugVariableID> NewMLocs; |
860 | // If no new location has been found, every variable that depends on this |
861 | // MLoc is dead, so end their existing MLoc->Var mappings as well. |
862 | SmallVector<std::pair<LocIdx, DebugVariableID>> LostMLocs; |
863 | for (DebugVariableID VarID : ActiveMLocIt->second) { |
864 | auto ActiveVLocIt = ActiveVLocs.find(Val: VarID); |
865 | // Re-state the variable location: if there's no replacement then NewLoc |
866 | // is std::nullopt and a $noreg DBG_VALUE will be created. Otherwise, a |
867 | // DBG_VALUE identifying the alternative location will be emitted. |
868 | const DbgValueProperties &Properties = ActiveVLocIt->second.Properties; |
869 | |
870 | // Produce the new list of debug ops - an empty list if no new location |
871 | // was found, or the existing list with the substitution MLoc -> NewLoc |
872 | // otherwise. |
873 | SmallVector<ResolvedDbgOp> DbgOps; |
874 | if (NewLoc) { |
875 | ResolvedDbgOp OldOp(MLoc); |
876 | ResolvedDbgOp NewOp(*NewLoc); |
877 | // Insert illegal ops to overwrite afterwards. |
878 | DbgOps.insert(I: DbgOps.begin(), NumToInsert: ActiveVLocIt->second.Ops.size(), |
879 | Elt: ResolvedDbgOp(LocIdx::MakeIllegalLoc())); |
880 | replace_copy(Range&: ActiveVLocIt->second.Ops, Out: DbgOps.begin(), OldValue: OldOp, NewValue: NewOp); |
881 | } |
882 | |
883 | auto &[Var, DILoc] = DVMap.lookupDVID(ID: VarID); |
884 | PendingDbgValues.push_back(Elt: std::make_pair( |
885 | x&: VarID, y: &*MTracker->emitLoc(DbgOps, Var, DILoc, Properties))); |
886 | |
887 | // Update machine locations <=> variable locations maps. Defer updating |
888 | // ActiveMLocs to avoid invalidating the ActiveMLocIt iterator. |
889 | if (!NewLoc) { |
890 | for (LocIdx Loc : ActiveVLocIt->second.loc_indices()) { |
891 | if (Loc != MLoc) |
892 | LostMLocs.emplace_back(Args&: Loc, Args&: VarID); |
893 | } |
894 | ActiveVLocs.erase(I: ActiveVLocIt); |
895 | } else { |
896 | ActiveVLocIt->second.Ops = DbgOps; |
897 | NewMLocs.insert(V: VarID); |
898 | } |
899 | } |
900 | |
901 | // Remove variables from ActiveMLocs if they no longer use any other MLocs |
902 | // due to being killed by this clobber. |
903 | for (auto &LocVarIt : LostMLocs) { |
904 | auto LostMLocIt = ActiveMLocs.find(Val: LocVarIt.first); |
905 | assert(LostMLocIt != ActiveMLocs.end() && |
906 | "Variable was using this MLoc, but ActiveMLocs[MLoc] has no " |
907 | "entries?" ); |
908 | LostMLocIt->second.erase(V: LocVarIt.second); |
909 | } |
910 | |
911 | // We lazily track what locations have which values; if we've found a new |
912 | // location for the clobbered value, remember it. |
913 | if (NewLoc) |
914 | VarLocs[NewLoc->asU64()] = OldValue; |
915 | |
916 | flushDbgValues(Pos, MBB: nullptr); |
917 | |
918 | // Commit ActiveMLoc changes. |
919 | ActiveMLocIt->second.clear(); |
920 | if (!NewMLocs.empty()) |
921 | ActiveMLocs[*NewLoc].insert_range(R&: NewMLocs); |
922 | } |
923 | |
924 | /// Transfer variables based on \p Src to be based on \p Dst. This handles |
925 | /// both register copies as well as spills and restores. Creates DBG_VALUEs |
926 | /// describing the movement. |
927 | void transferMlocs(LocIdx Src, LocIdx Dst, MachineBasicBlock::iterator Pos) { |
928 | // Does Src still contain the value num we expect? If not, it's been |
929 | // clobbered in the meantime, and our variable locations are stale. |
930 | if (VarLocs[Src.asU64()] != MTracker->readMLoc(L: Src)) |
931 | return; |
932 | |
933 | // assert(ActiveMLocs[Dst].size() == 0); |
934 | //^^^ Legitimate scenario on account of un-clobbered slot being assigned to? |
935 | |
936 | // Move set of active variables from one location to another. |
937 | auto MovingVars = ActiveMLocs[Src]; |
938 | ActiveMLocs[Dst].insert_range(R&: MovingVars); |
939 | VarLocs[Dst.asU64()] = VarLocs[Src.asU64()]; |
940 | |
    // For each variable based on Src, create a location at Dst.
942 | ResolvedDbgOp SrcOp(Src); |
943 | ResolvedDbgOp DstOp(Dst); |
944 | for (DebugVariableID VarID : MovingVars) { |
945 | auto ActiveVLocIt = ActiveVLocs.find(Val: VarID); |
946 | assert(ActiveVLocIt != ActiveVLocs.end()); |
947 | |
948 | // Update all instances of Src in the variable's tracked values to Dst. |
949 | llvm::replace(Range&: ActiveVLocIt->second.Ops, OldValue: SrcOp, NewValue: DstOp); |
950 | |
951 | auto &[Var, DILoc] = DVMap.lookupDVID(ID: VarID); |
952 | MachineInstr *MI = MTracker->emitLoc(DbgOps: ActiveVLocIt->second.Ops, Var, DILoc, |
953 | Properties: ActiveVLocIt->second.Properties); |
954 | PendingDbgValues.push_back(Elt: std::make_pair(x&: VarID, y&: MI)); |
955 | } |
956 | ActiveMLocs[Src].clear(); |
957 | flushDbgValues(Pos, MBB: nullptr); |
958 | |
959 | // XXX XXX XXX "pretend to be old LDV" means dropping all tracking data |
960 | // about the old location. |
961 | if (EmulateOldLDV) |
962 | VarLocs[Src.asU64()] = ValueIDNum::EmptyValue; |
963 | } |
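  // Illustrative example (operands invented): on encountering a spill such as
  //   MOV64mr %stack.0, 1, $noreg, 0, $noreg, killed $rbx
  // the value tracked in $rbx is transferred to the LocIdx of the stack slot,
  // and for each variable that was based on $rbx a DBG_VALUE describing the
  // slot is emitted after the store; a later restore transfers it back the
  // same way.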
964 | |
965 | MachineInstrBuilder emitMOLoc(const MachineOperand &MO, |
966 | const DebugVariable &Var, |
967 | const DbgValueProperties &Properties) { |
968 | DebugLoc DL = DILocation::get(Context&: Var.getVariable()->getContext(), Line: 0, Column: 0, |
969 | Scope: Var.getVariable()->getScope(), |
970 | InlinedAt: const_cast<DILocation *>(Var.getInlinedAt())); |
971 | auto MIB = BuildMI(MF, MIMD: DL, MCID: TII->get(Opcode: TargetOpcode::DBG_VALUE)); |
972 | MIB.add(MO); |
973 | if (Properties.Indirect) |
974 | MIB.addImm(Val: 0); |
975 | else |
976 | MIB.addReg(RegNo: 0); |
977 | MIB.addMetadata(MD: Var.getVariable()); |
978 | MIB.addMetadata(MD: Properties.DIExpr); |
979 | return MIB; |
980 | } |
981 | }; |
982 | |
983 | //===----------------------------------------------------------------------===// |
984 | // Implementation |
985 | //===----------------------------------------------------------------------===// |
986 | |
987 | ValueIDNum ValueIDNum::EmptyValue = {UINT_MAX, UINT_MAX, UINT_MAX}; |
988 | ValueIDNum ValueIDNum::TombstoneValue = {UINT_MAX, UINT_MAX, UINT_MAX - 1}; |
989 | |
990 | #ifndef NDEBUG |
991 | void ResolvedDbgOp::dump(const MLocTracker *MTrack) const { |
992 | if (IsConst) { |
993 | dbgs() << MO; |
994 | } else { |
995 | dbgs() << MTrack->LocIdxToName(Loc); |
996 | } |
997 | } |
998 | void DbgOp::dump(const MLocTracker *MTrack) const { |
999 | if (IsConst) { |
1000 | dbgs() << MO; |
1001 | } else if (!isUndef()) { |
1002 | dbgs() << MTrack->IDAsString(ID); |
1003 | } |
1004 | } |
1005 | void DbgOpID::dump(const MLocTracker *MTrack, const DbgOpIDMap *OpStore) const { |
1006 | if (!OpStore) { |
1007 | dbgs() << "ID(" << asU32() << ")" ; |
1008 | } else { |
1009 | OpStore->find(*this).dump(MTrack); |
1010 | } |
1011 | } |
1012 | void DbgValue::dump(const MLocTracker *MTrack, |
1013 | const DbgOpIDMap *OpStore) const { |
1014 | if (Kind == NoVal) { |
1015 | dbgs() << "NoVal(" << BlockNo << ")" ; |
1016 | } else if (Kind == VPHI || Kind == Def) { |
1017 | if (Kind == VPHI) |
1018 | dbgs() << "VPHI(" << BlockNo << "," ; |
1019 | else |
1020 | dbgs() << "Def(" ; |
1021 | for (unsigned Idx = 0; Idx < getDbgOpIDs().size(); ++Idx) { |
1022 | getDbgOpID(Idx).dump(MTrack, OpStore); |
1023 | if (Idx != 0) |
1024 | dbgs() << "," ; |
1025 | } |
1026 | dbgs() << ")" ; |
1027 | } |
1028 | if (Properties.Indirect) |
1029 | dbgs() << " indir" ; |
1030 | if (Properties.DIExpr) |
1031 | dbgs() << " " << *Properties.DIExpr; |
1032 | } |
1033 | #endif |
1034 | |
1035 | MLocTracker::MLocTracker(MachineFunction &MF, const TargetInstrInfo &TII, |
1036 | const TargetRegisterInfo &TRI, |
1037 | const TargetLowering &TLI) |
1038 | : MF(MF), TII(TII), TRI(TRI), TLI(TLI), |
1039 | LocIdxToIDNum(ValueIDNum::EmptyValue), LocIdxToLocID(0) { |
1040 | NumRegs = TRI.getNumRegs(); |
1041 | reset(); |
1042 | LocIDToLocIdx.resize(new_size: NumRegs, x: LocIdx::MakeIllegalLoc()); |
1043 | assert(NumRegs < (1u << NUM_LOC_BITS)); // Detect bit packing failure |
1044 | |
1045 | // Always track SP. This avoids the implicit clobbering caused by regmasks |
  // from affecting its values. (LiveDebugValues disbelieves calls and
1047 | // regmasks that claim to clobber SP). |
1048 | Register SP = TLI.getStackPointerRegisterToSaveRestore(); |
1049 | if (SP) { |
1050 | unsigned ID = getLocID(Reg: SP); |
1051 | (void)lookupOrTrackRegister(ID); |
1052 | |
1053 | for (MCRegAliasIterator RAI(SP, &TRI, true); RAI.isValid(); ++RAI) |
1054 | SPAliases.insert(V: *RAI); |
1055 | } |
1056 | |
1057 | // Build some common stack positions -- full registers being spilt to the |
1058 | // stack. |
1059 | StackSlotIdxes.insert(KV: {{8, 0}, 0}); |
1060 | StackSlotIdxes.insert(KV: {{16, 0}, 1}); |
1061 | StackSlotIdxes.insert(KV: {{32, 0}, 2}); |
1062 | StackSlotIdxes.insert(KV: {{64, 0}, 3}); |
1063 | StackSlotIdxes.insert(KV: {{128, 0}, 4}); |
1064 | StackSlotIdxes.insert(KV: {{256, 0}, 5}); |
1065 | StackSlotIdxes.insert(KV: {{512, 0}, 6}); |
1066 | |
1067 | // Traverse all the subregister idxes, and ensure there's an index for them. |
1068 | // Duplicates are no problem: we're interested in their position in the |
1069 | // stack slot, we don't want to type the slot. |
1070 | for (unsigned int I = 1; I < TRI.getNumSubRegIndices(); ++I) { |
1071 | unsigned Size = TRI.getSubRegIdxSize(Idx: I); |
1072 | unsigned Offs = TRI.getSubRegIdxOffset(Idx: I); |
1073 | unsigned Idx = StackSlotIdxes.size(); |
1074 | |
1075 | // Some subregs have -1, -2 and so forth fed into their fields, to mean |
1076 | // special backend things. Ignore those. |
1077 | if (Size > 60000 || Offs > 60000) |
1078 | continue; |
1079 | |
1080 | StackSlotIdxes.insert(KV: {{Size, Offs}, Idx}); |
1081 | } |
1082 | |
1083 | // There may also be strange register class sizes (think x86 fp80s). |
1084 | for (const TargetRegisterClass *RC : TRI.regclasses()) { |
1085 | unsigned Size = TRI.getRegSizeInBits(RC: *RC); |
1086 | |
1087 | // We might see special reserved values as sizes, and classes for other |
1088 | // stuff the machine tries to model. If it's more than 512 bits, then it |
    // is very unlikely to be a register that can be spilt.
1090 | if (Size > 512) |
1091 | continue; |
1092 | |
1093 | unsigned Idx = StackSlotIdxes.size(); |
1094 | StackSlotIdxes.insert(KV: {{Size, 0}, Idx}); |
1095 | } |
1096 | |
1097 | for (auto &Idx : StackSlotIdxes) |
1098 | StackIdxesToPos[Idx.second] = Idx.first; |
1099 | |
1100 | NumSlotIdxes = StackSlotIdxes.size(); |
1101 | } |
1102 | |
1103 | LocIdx MLocTracker::trackRegister(unsigned ID) { |
1104 | assert(ID != 0); |
1105 | LocIdx NewIdx = LocIdx(LocIdxToIDNum.size()); |
1106 | LocIdxToIDNum.grow(n: NewIdx); |
1107 | LocIdxToLocID.grow(n: NewIdx); |
1108 | |
1109 | // Default: it's an mphi. |
1110 | ValueIDNum ValNum = {CurBB, 0, NewIdx}; |
1111 | // Was this reg ever touched by a regmask? |
1112 | for (const auto &MaskPair : reverse(C&: Masks)) { |
1113 | if (MaskPair.first->clobbersPhysReg(PhysReg: ID)) { |
1114 | // There was an earlier def we skipped. |
1115 | ValNum = {CurBB, MaskPair.second, NewIdx}; |
1116 | break; |
1117 | } |
1118 | } |
1119 | |
1120 | LocIdxToIDNum[NewIdx] = ValNum; |
1121 | LocIdxToLocID[NewIdx] = ID; |
1122 | return NewIdx; |
1123 | } |
1124 | |
1125 | void MLocTracker::writeRegMask(const MachineOperand *MO, unsigned CurBB, |
1126 | unsigned InstID) { |
  // Def any register we track that isn't preserved. The regmask
1128 | // terminates the liveness of a register, meaning its value can't be |
1129 | // relied upon -- we represent this by giving it a new value. |
1130 | for (auto Location : locations()) { |
1131 | unsigned ID = LocIdxToLocID[Location.Idx]; |
1132 | // Don't clobber SP, even if the mask says it's clobbered. |
1133 | if (ID < NumRegs && !SPAliases.count(V: ID) && MO->clobbersPhysReg(PhysReg: ID)) |
1134 | defReg(R: ID, BB: CurBB, Inst: InstID); |
1135 | } |
1136 | Masks.push_back(Elt: std::make_pair(x&: MO, y&: InstID)); |
1137 | } |
1138 | |
1139 | std::optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) { |
1140 | SpillLocationNo SpillID(SpillLocs.idFor(Entry: L)); |
1141 | |
1142 | if (SpillID.id() == 0) { |
1143 | // If there is no location, and we have reached the limit of how many stack |
1144 | // slots to track, then don't track this one. |
1145 | if (SpillLocs.size() >= StackWorkingSetLimit) |
1146 | return std::nullopt; |
1147 | |
1148 | // Spill location is untracked: create record for this one, and all |
1149 | // subregister slots too. |
1150 | SpillID = SpillLocationNo(SpillLocs.insert(Entry: L)); |
1151 | for (unsigned StackIdx = 0; StackIdx < NumSlotIdxes; ++StackIdx) { |
1152 | unsigned L = getSpillIDWithIdx(Spill: SpillID, Idx: StackIdx); |
1153 | LocIdx Idx = LocIdx(LocIdxToIDNum.size()); // New idx |
1154 | LocIdxToIDNum.grow(n: Idx); |
1155 | LocIdxToLocID.grow(n: Idx); |
1156 | LocIDToLocIdx.push_back(x: Idx); |
1157 | LocIdxToLocID[Idx] = L; |
1158 | // Initialize to PHI value; corresponds to the location's live-in value |
1159 | // during transfer function construction. |
1160 | LocIdxToIDNum[Idx] = ValueIDNum(CurBB, 0, Idx); |
1161 | } |
1162 | } |
1163 | return SpillID; |
1164 | } |
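// Illustrative example: the first time an 8-byte spill slot is seen, a LocIdx
// is created for every (size, offset) position registered in the constructor
// -- the {64, 0} position for full-width accesses as well as {32, 0}, {16, 0}
// and so on -- so that a later narrower store to the slot can be given its own
// value without disturbing the values tracked in the other positions.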
1165 | |
1166 | std::string MLocTracker::LocIdxToName(LocIdx Idx) const { |
1167 | unsigned ID = LocIdxToLocID[Idx]; |
1168 | if (ID >= NumRegs) { |
1169 | StackSlotPos Pos = locIDToSpillIdx(ID); |
1170 | ID -= NumRegs; |
1171 | unsigned Slot = ID / NumSlotIdxes; |
1172 | return Twine("slot " ) |
1173 | .concat(Suffix: Twine(Slot).concat(Suffix: Twine(" sz " ).concat(Suffix: Twine(Pos.first) |
1174 | .concat(Suffix: Twine(" offs " ).concat(Suffix: Twine(Pos.second)))))) |
1175 | .str(); |
1176 | } else { |
1177 | return TRI.getRegAsmName(Reg: ID).str(); |
1178 | } |
1179 | } |
1180 | |
1181 | std::string MLocTracker::IDAsString(const ValueIDNum &Num) const { |
1182 | std::string DefName = LocIdxToName(Idx: Num.getLoc()); |
1183 | return Num.asString(mlocname: DefName); |
1184 | } |
1185 | |
1186 | #ifndef NDEBUG |
1187 | LLVM_DUMP_METHOD void MLocTracker::dump() { |
1188 | for (auto Location : locations()) { |
1189 | std::string MLocName = LocIdxToName(Location.Value.getLoc()); |
1190 | std::string DefName = Location.Value.asString(MLocName); |
1191 | dbgs() << LocIdxToName(Location.Idx) << " --> " << DefName << "\n" ; |
1192 | } |
1193 | } |
1194 | |
1195 | LLVM_DUMP_METHOD void MLocTracker::dump_mloc_map() { |
1196 | for (auto Location : locations()) { |
1197 | std::string foo = LocIdxToName(Location.Idx); |
1198 | dbgs() << "Idx " << Location.Idx.asU64() << " " << foo << "\n" ; |
1199 | } |
1200 | } |
1201 | #endif |
1202 | |
1203 | MachineInstrBuilder |
1204 | MLocTracker::emitLoc(const SmallVectorImpl<ResolvedDbgOp> &DbgOps, |
1205 | const DebugVariable &Var, const DILocation *DILoc, |
1206 | const DbgValueProperties &Properties) { |
1207 | DebugLoc DL = DebugLoc(DILoc); |
1208 | |
1209 | const MCInstrDesc &Desc = Properties.IsVariadic |
1210 | ? TII.get(Opcode: TargetOpcode::DBG_VALUE_LIST) |
1211 | : TII.get(Opcode: TargetOpcode::DBG_VALUE); |
1212 | |
1213 | #ifdef EXPENSIVE_CHECKS |
1214 | assert(all_of(DbgOps, |
1215 | [](const ResolvedDbgOp &Op) { |
1216 | return Op.IsConst || !Op.Loc.isIllegal(); |
1217 | }) && |
1218 | "Did not expect illegal ops in DbgOps." ); |
1219 | assert((DbgOps.size() == 0 || |
1220 | DbgOps.size() == Properties.getLocationOpCount()) && |
1221 | "Expected to have either one DbgOp per MI LocationOp, or none." ); |
1222 | #endif |
1223 | |
1224 | auto GetRegOp = [](unsigned Reg) -> MachineOperand { |
1225 | return MachineOperand::CreateReg( |
1226 | /* Reg */ Reg, /* isDef */ false, /* isImp */ false, |
1227 | /* isKill */ false, /* isDead */ false, |
1228 | /* isUndef */ false, /* isEarlyClobber */ false, |
1229 | /* SubReg */ 0, /* isDebug */ true); |
1230 | }; |
1231 | |
1232 | SmallVector<MachineOperand> MOs; |
1233 | |
1234 | auto EmitUndef = [&]() { |
1235 | MOs.clear(); |
1236 | MOs.assign(NumElts: Properties.getLocationOpCount(), Elt: GetRegOp(0)); |
1237 | return BuildMI(MF, DL, MCID: Desc, IsIndirect: false, MOs, Variable: Var.getVariable(), |
1238 | Expr: Properties.DIExpr); |
1239 | }; |
1240 | |
1241 | // Don't bother passing any real operands to BuildMI if any of them would be |
1242 | // $noreg. |
1243 | if (DbgOps.empty()) |
1244 | return EmitUndef(); |
1245 | |
1246 | bool Indirect = Properties.Indirect; |
1247 | |
1248 | const DIExpression *Expr = Properties.DIExpr; |
1249 | |
1250 | assert(DbgOps.size() == Properties.getLocationOpCount()); |
1251 | |
1252 | // If all locations are valid, accumulate them into our list of |
1253 | // MachineOperands. For any spilled locations, either update the indirectness |
1254 | // register or apply the appropriate transformations in the DIExpression. |
1255 | for (size_t Idx = 0; Idx < Properties.getLocationOpCount(); ++Idx) { |
1256 | const ResolvedDbgOp &Op = DbgOps[Idx]; |
1257 | |
1258 | if (Op.IsConst) { |
1259 | MOs.push_back(Elt: Op.MO); |
1260 | continue; |
1261 | } |
1262 | |
1263 | LocIdx MLoc = Op.Loc; |
1264 | unsigned LocID = LocIdxToLocID[MLoc]; |
1265 | if (LocID >= NumRegs) { |
1266 | SpillLocationNo SpillID = locIDToSpill(ID: LocID); |
1267 | StackSlotPos StackIdx = locIDToSpillIdx(ID: LocID); |
1268 | unsigned short Offset = StackIdx.second; |
1269 | |
1270 | // TODO: support variables that are located in spill slots, with non-zero |
1271 | // offsets from the start of the spill slot. It would require some more |
1272 | // complex DIExpression calculations. This doesn't seem to be produced by |
1273 | // LLVM right now, so don't try and support it. |
1274 | // Accept no-subregister slots and subregisters where the offset is zero. |
1275 | // The consumer should already have type information to work out how large |
1276 | // the variable is. |
1277 | if (Offset == 0) { |
1278 | const SpillLoc &Spill = SpillLocs[SpillID.id()]; |
1279 | unsigned Base = Spill.SpillBase; |
1280 | |
1281 | // There are several ways we can dereference things, and several inputs |
1282 | // to consider: |
1283 | // * NRVO variables will appear with IsIndirect set, but should have |
1284 | // nothing else in their DIExpressions, |
1285 | // * Variables with DW_OP_stack_value in their expr already need an |
1286 | // explicit dereference of the stack location, |
1287 | // * Values that don't match the variable size need DW_OP_deref_size, |
1288 | // * Everything else can just become a simple location expression. |
1289 | |
1290 | // We need to use deref_size whenever there's a mismatch between the |
1291 | // size of value and the size of variable portion being read. |
1292 | // Additionally, we should use it whenever dealing with stack_value |
1293 | // fragments, to avoid the consumer having to determine the deref size |
1294 | // from DW_OP_piece. |
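        // For example, a 32-bit value backing a 64-bit variable portion would
        // be read back with a DW_OP_deref_size of four bytes rather than a
        // plain DW_OP_deref.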
1295 | bool UseDerefSize = false; |
1296 | unsigned ValueSizeInBits = getLocSizeInBits(L: MLoc); |
1297 | unsigned DerefSizeInBytes = ValueSizeInBits / 8; |
1298 | if (auto Fragment = Var.getFragment()) { |
1299 | unsigned VariableSizeInBits = Fragment->SizeInBits; |
1300 | if (VariableSizeInBits != ValueSizeInBits || Expr->isComplex()) |
1301 | UseDerefSize = true; |
1302 | } else if (auto Size = Var.getVariable()->getSizeInBits()) { |
1303 | if (*Size != ValueSizeInBits) { |
1304 | UseDerefSize = true; |
1305 | } |
1306 | } |
1307 | |
        // See https://github.com/llvm/llvm-project/issues/64093, in
        // particular #issuecomment-2531264124. We use variable locations
1310 | // such as DBG_VALUE $xmm0 as shorthand to refer to "the low lane of |
1311 | // $xmm0", and this is reflected in how DWARF is interpreted too. |
1312 | // However InstrRefBasedLDV tries to be smart and interprets such a |
1313 | // DBG_VALUE as a 128-bit reference. We then issue a DW_OP_deref_size |
1314 | // of 128 bits to the stack, which isn't permitted by DWARF (it's |
1315 | // larger than a pointer). |
1316 | // |
1317 | // Solve this for now by not using DW_OP_deref_size if it would be |
1318 | // illegal. Instead we'll use DW_OP_deref, and the consumer will load |
1319 | // the variable type from the stack, which should be correct. |
1320 | // |
1321 | // There's still a risk of imprecision when LLVM decides to use |
1322 | // smaller or larger value types than the source-variable type, which |
1323 | // manifests as too-little or too-much memory being read from the stack. |
1324 | // However we can't solve that without putting more type information in |
1325 | // debug-info. |
1326 | if (ValueSizeInBits > MF.getTarget().getPointerSizeInBits(AS: 0)) |
1327 | UseDerefSize = false; |
1328 | |
1329 | SmallVector<uint64_t, 5> OffsetOps; |
1330 | TRI.getOffsetOpcodes(Offset: Spill.SpillOffset, Ops&: OffsetOps); |
1331 | bool StackValue = false; |
1332 | |
1333 | if (Properties.Indirect) { |
1334 | // This is something like an NRVO variable, where the pointer has been |
1335 | // spilt to the stack. It should end up being a memory location, with |
1336 | // the pointer to the variable loaded off the stack with a deref: |
1337 | assert(!Expr->isImplicit()); |
1338 | OffsetOps.push_back(Elt: dwarf::DW_OP_deref); |
1339 | } else if (UseDerefSize && Expr->isSingleLocationExpression()) { |
1340 | // TODO: Figure out how to handle deref size issues for variadic |
1341 | // values. |
1342 | // We're loading a value off the stack that's not the same size as the |
1343 | // variable. Add / subtract stack offset, explicitly deref with a |
1344 | // size, and add DW_OP_stack_value if not already present. |
1345 | OffsetOps.push_back(Elt: dwarf::DW_OP_deref_size); |
1346 | OffsetOps.push_back(Elt: DerefSizeInBytes); |
1347 | StackValue = true; |
1348 | } else if (Expr->isComplex() || Properties.IsVariadic) { |
          // A variable with no size ambiguity, but with extra elements in its
1350 | // expression. Manually dereference the stack location. |
1351 | OffsetOps.push_back(Elt: dwarf::DW_OP_deref); |
1352 | } else { |
1353 | // A plain value that has been spilt to the stack, with no further |
1354 | // context. Request a location expression, marking the DBG_VALUE as |
1355 | // IsIndirect. |
1356 | Indirect = true; |
1357 | } |
1358 | |
1359 | Expr = DIExpression::appendOpsToArg(Expr, Ops: OffsetOps, ArgNo: Idx, StackValue); |
1360 | MOs.push_back(Elt: GetRegOp(Base)); |
1361 | } else { |
1362 | // This is a stack location with a weird subregister offset: emit an |
1363 | // undef DBG_VALUE instead. |
1364 | return EmitUndef(); |
1365 | } |
1366 | } else { |
1367 | // Non-empty, non-stack slot, must be a plain register. |
1368 | MOs.push_back(Elt: GetRegOp(LocID)); |
1369 | } |
1370 | } |
1371 | |
1372 | return BuildMI(MF, DL, MCID: Desc, IsIndirect: Indirect, MOs, Variable: Var.getVariable(), Expr); |
1373 | } |
1374 | |
1375 | /// Default construct and initialize the pass. |
1376 | InstrRefBasedLDV::InstrRefBasedLDV() = default; |
1377 | |
1378 | bool InstrRefBasedLDV::isCalleeSaved(LocIdx L) const { |
1379 | unsigned Reg = MTracker->LocIdxToLocID[L]; |
1380 | return isCalleeSavedReg(R: Reg); |
1381 | } |
1382 | bool InstrRefBasedLDV::isCalleeSavedReg(Register R) const { |
1383 | for (MCRegAliasIterator RAI(R, TRI, true); RAI.isValid(); ++RAI) |
1384 | if (CalleeSavedRegs.test(Idx: (*RAI).id())) |
1385 | return true; |
1386 | return false; |
1387 | } |
1388 | |
1389 | //===----------------------------------------------------------------------===// |
1390 | // Debug Range Extension Implementation |
1391 | //===----------------------------------------------------------------------===// |
1392 | |
1393 | #ifndef NDEBUG |
1394 | // Something to restore in the future. |
1395 | // void InstrRefBasedLDV::printVarLocInMBB(..) |
1396 | #endif |
1397 | |
1398 | std::optional<SpillLocationNo> |
1399 | InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { |
1400 | assert(MI.hasOneMemOperand() && |
1401 | "Spill instruction does not have exactly one memory operand?" ); |
1402 | auto MMOI = MI.memoperands_begin(); |
1403 | const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); |
1404 | assert(PVal->kind() == PseudoSourceValue::FixedStack && |
1405 | "Inconsistent memory operand in spill instruction" ); |
1406 | int FI = cast<FixedStackPseudoSourceValue>(Val: PVal)->getFrameIndex(); |
1407 | const MachineBasicBlock *MBB = MI.getParent(); |
1408 | Register Reg; |
1409 | StackOffset Offset = TFI->getFrameIndexReference(MF: *MBB->getParent(), FI, FrameReg&: Reg); |
1410 | return MTracker->getOrTrackSpillLoc(L: {.SpillBase: Reg, .SpillOffset: Offset}); |
1411 | } |
1412 | |
1413 | std::optional<LocIdx> |
1414 | InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) { |
1415 | std::optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI); |
1416 | if (!SpillLoc) |
1417 | return std::nullopt; |
1418 | |
1419 | // Where in the stack slot is this value defined -- i.e., what size of value |
1420 | // is this? An important question, because it could be loaded into a register |
1421 | // from the stack at some point. Happily the memory operand will tell us |
1422 | // the size written to the stack. |
1423 | auto *MemOperand = *MI.memoperands_begin(); |
1424 | LocationSize SizeInBits = MemOperand->getSizeInBits(); |
1425 | assert(SizeInBits.hasValue() && "Expected to find a valid size!" ); |
1426 | |
1427 | // Find that position in the stack indexes we're tracking. |
1428 | auto IdxIt = MTracker->StackSlotIdxes.find(Val: {SizeInBits.getValue(), 0}); |
1429 | if (IdxIt == MTracker->StackSlotIdxes.end()) |
    // That index is not tracked. This is surprising, and unlikely to ever
1431 | // occur, but the safe action is to indicate the variable is optimised out. |
1432 | return std::nullopt; |
1433 | |
1434 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillLoc, Idx: IdxIt->second); |
1435 | return MTracker->getSpillMLoc(SpillID); |
1436 | } |
1437 | |
1438 | /// End all previous ranges related to @MI and start a new range from @MI |
1439 | /// if it is a DBG_VALUE instr. |
1440 | bool InstrRefBasedLDV::transferDebugValue(const MachineInstr &MI) { |
1441 | if (!MI.isDebugValue()) |
1442 | return false; |
1443 | |
1444 | assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) && |
1445 | "Expected inlined-at fields to agree" ); |
1446 | |
  // If there are no instructions in this lexical scope, do no location
  // tracking at all; this variable shouldn't get a legitimate location range.
1449 | auto *Scope = LS.findLexicalScope(DL: MI.getDebugLoc().get()); |
1450 | if (Scope == nullptr) |
    return true; // Handled by doing nothing.
1452 | |
1453 | // MLocTracker needs to know that this register is read, even if it's only |
1454 | // read by a debug inst. |
1455 | for (const MachineOperand &MO : MI.debug_operands()) |
1456 | if (MO.isReg() && MO.getReg() != 0) |
1457 | (void)MTracker->readReg(R: MO.getReg()); |
1458 | |
  // If we're preparing for the second analysis (variables), the machine value
  // locations are already solved, and we report this DBG_VALUE and the value
  // it refers to, to VLocTracker.
1462 | if (VTracker) { |
1463 | SmallVector<DbgOpID> DebugOps; |
1464 | // Feed defVar the new variable location, or if this is a DBG_VALUE $noreg, |
1465 | // feed defVar None. |
1466 | if (!MI.isUndefDebugValue()) { |
1467 | for (const MachineOperand &MO : MI.debug_operands()) { |
1468 | // There should be no undef registers here, as we've screened for undef |
1469 | // debug values. |
1470 | if (MO.isReg()) { |
1471 | DebugOps.push_back(Elt: DbgOpStore.insert(Op: MTracker->readReg(R: MO.getReg()))); |
1472 | } else if (MO.isImm() || MO.isFPImm() || MO.isCImm()) { |
1473 | DebugOps.push_back(Elt: DbgOpStore.insert(Op: MO)); |
1474 | } else { |
1475 | llvm_unreachable("Unexpected debug operand type." ); |
1476 | } |
1477 | } |
1478 | } |
1479 | VTracker->defVar(MI, Properties: DbgValueProperties(MI), DebugOps); |
1480 | } |
1481 | |
1482 | // If performing final tracking of transfers, report this variable definition |
1483 | // to the TransferTracker too. |
1484 | if (TTracker) |
1485 | TTracker->redefVar(MI); |
1486 | return true; |
1487 | } |
1488 | |
1489 | std::optional<ValueIDNum> InstrRefBasedLDV::getValueForInstrRef( |
1490 | unsigned InstNo, unsigned OpNo, MachineInstr &MI, |
1491 | const FuncValueTable *MLiveOuts, const FuncValueTable *MLiveIns) { |
1492 | // Various optimizations may have happened to the value during codegen, |
1493 | // recorded in the value substitution table. Apply any substitutions to |
1494 | // the instruction / operand number in this DBG_INSTR_REF, and collect |
1495 | // any subregister extractions performed during optimization. |
1496 | const MachineFunction &MF = *MI.getParent()->getParent(); |
1497 | |
1498 | // Create dummy substitution with Src set, for lookup. |
1499 | auto SoughtSub = |
1500 | MachineFunction::DebugSubstitution({InstNo, OpNo}, {0, 0}, 0); |
1501 | |
1502 | SmallVector<unsigned, 4> SeenSubregs; |
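  // Each substitution maps an (instr, operand) pair to the pair it was
  // replaced by during optimisation; that pair may itself have been
  // substituted again. Follow the chain to its end, collecting any
  // subregister indices seen along the way.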
1503 | auto LowerBoundIt = llvm::lower_bound(Range: MF.DebugValueSubstitutions, Value&: SoughtSub); |
1504 | while (LowerBoundIt != MF.DebugValueSubstitutions.end() && |
1505 | LowerBoundIt->Src == SoughtSub.Src) { |
1506 | std::tie(args&: InstNo, args&: OpNo) = LowerBoundIt->Dest; |
1507 | SoughtSub.Src = LowerBoundIt->Dest; |
1508 | if (unsigned Subreg = LowerBoundIt->Subreg) |
1509 | SeenSubregs.push_back(Elt: Subreg); |
1510 | LowerBoundIt = llvm::lower_bound(Range: MF.DebugValueSubstitutions, Value&: SoughtSub); |
1511 | } |
1512 | |
1513 | // Default machine value number is <None> -- if no instruction defines |
1514 | // the corresponding value, it must have been optimized out. |
1515 | std::optional<ValueIDNum> NewID; |
1516 | |
1517 | // Try to lookup the instruction number, and find the machine value number |
1518 | // that it defines. It could be an instruction, or a PHI. |
1519 | auto InstrIt = DebugInstrNumToInstr.find(x: InstNo); |
1520 | auto PHIIt = llvm::lower_bound(Range&: DebugPHINumToValue, Value&: InstNo); |
1521 | if (InstrIt != DebugInstrNumToInstr.end()) { |
1522 | const MachineInstr &TargetInstr = *InstrIt->second.first; |
1523 | uint64_t BlockNo = TargetInstr.getParent()->getNumber(); |
1524 | |
1525 | // Pick out the designated operand. It might be a memory reference, if |
1526 | // a register def was folded into a stack store. |
1527 | if (OpNo == MachineFunction::DebugOperandMemNumber && |
1528 | TargetInstr.hasOneMemOperand()) { |
1529 | std::optional<LocIdx> L = findLocationForMemOperand(MI: TargetInstr); |
1530 | if (L) |
1531 | NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L); |
1532 | } else if (OpNo != MachineFunction::DebugOperandMemNumber) { |
      // Permit the debug-info to be completely wrong: identifying a nonexistent
1534 | // operand, or one that is not a register definition, means something |
1535 | // unexpected happened during optimisation. Broken debug-info, however, |
1536 | // shouldn't crash the compiler -- instead leave the variable value as |
1537 | // None, which will make it appear "optimised out". |
1538 | if (OpNo < TargetInstr.getNumOperands()) { |
1539 | const MachineOperand &MO = TargetInstr.getOperand(i: OpNo); |
1540 | |
1541 | if (MO.isReg() && MO.isDef() && MO.getReg()) { |
1542 | unsigned LocID = MTracker->getLocID(Reg: MO.getReg()); |
1543 | LocIdx L = MTracker->LocIDToLocIdx[LocID]; |
1544 | NewID = ValueIDNum(BlockNo, InstrIt->second.second, L); |
1545 | } |
1546 | } |
1547 | |
1548 | if (!NewID) { |
1549 | LLVM_DEBUG( |
1550 | { dbgs() << "Seen instruction reference to illegal operand\n" ; }); |
1551 | } |
1552 | } |
1553 | // else: NewID is left as None. |
1554 | } else if (PHIIt != DebugPHINumToValue.end() && PHIIt->InstrNum == InstNo) { |
1555 | // It's actually a PHI value. Which value it is might not be obvious, use |
1556 | // the resolver helper to find out. |
1557 | assert(MLiveOuts && MLiveIns); |
1558 | NewID = resolveDbgPHIs(MF&: *MI.getParent()->getParent(), MLiveOuts: *MLiveOuts, MLiveIns: *MLiveIns, |
1559 | Here&: MI, InstrNum: InstNo); |
1560 | } |
1561 | |
1562 | // Apply any subregister extractions, in reverse. We might have seen code |
1563 | // like this: |
1564 | // CALL64 @foo, implicit-def $rax |
1565 | // %0:gr64 = COPY $rax |
1566 | // %1:gr32 = COPY %0.sub_32bit |
1567 | // %2:gr16 = COPY %1.sub_16bit |
1568 | // %3:gr8 = COPY %2.sub_8bit |
1569 | // In which case each copy would have been recorded as a substitution with |
1570 | // a subregister qualifier. Apply those qualifiers now. |
1571 | if (NewID && !SeenSubregs.empty()) { |
1572 | unsigned Offset = 0; |
1573 | unsigned Size = 0; |
1574 | |
1575 | // Look at each subregister that we passed through, and progressively |
1576 | // narrow in, accumulating any offsets that occur. Substitutions should |
1577 | // only ever be the same or narrower width than what they read from; |
1578 | // iterate in reverse order so that we go from wide to small. |
1579 | for (unsigned Subreg : reverse(C&: SeenSubregs)) { |
1580 | unsigned ThisSize = TRI->getSubRegIdxSize(Idx: Subreg); |
1581 | unsigned ThisOffset = TRI->getSubRegIdxOffset(Idx: Subreg); |
1582 | Offset += ThisOffset; |
1583 | Size = (Size == 0) ? ThisSize : std::min(a: Size, b: ThisSize); |
1584 | } |
1585 | |
1586 | // If that worked, look for an appropriate subregister with the register |
1587 | // where the define happens. Don't look at values that were defined during |
1588 | // a stack write: we can't currently express register locations within |
1589 | // spills. |
1590 | LocIdx L = NewID->getLoc(); |
1591 | if (NewID && !MTracker->isSpill(Idx: L)) { |
1592 | // Find the register class for the register where this def happened. |
1593 | // FIXME: no index for this? |
1594 | Register Reg = MTracker->LocIdxToLocID[L]; |
1595 | const TargetRegisterClass *TRC = nullptr; |
1596 | for (const auto *TRCI : TRI->regclasses()) |
1597 | if (TRCI->contains(Reg)) |
1598 | TRC = TRCI; |
1599 | assert(TRC && "Couldn't find target register class?" ); |
1600 | |
1601 | // If the register we have isn't the right size or in the right place, |
1602 | // Try to find a subregister inside it. |
1603 | unsigned MainRegSize = TRI->getRegSizeInBits(RC: *TRC); |
1604 | if (Size != MainRegSize || Offset) { |
1605 | // Enumerate all subregisters, searching. |
1606 | Register NewReg = 0; |
1607 | for (MCPhysReg SR : TRI->subregs(Reg)) { |
1608 | unsigned Subreg = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR); |
1609 | unsigned SubregSize = TRI->getSubRegIdxSize(Idx: Subreg); |
1610 | unsigned SubregOffset = TRI->getSubRegIdxOffset(Idx: Subreg); |
1611 | if (SubregSize == Size && SubregOffset == Offset) { |
1612 | NewReg = SR; |
1613 | break; |
1614 | } |
1615 | } |
1616 | |
1617 | // If we didn't find anything: there's no way to express our value. |
1618 | if (!NewReg) { |
1619 | NewID = std::nullopt; |
1620 | } else { |
1621 | // Re-state the value as being defined within the subregister |
1622 | // that we found. |
1623 | LocIdx NewLoc = MTracker->lookupOrTrackRegister(ID: NewReg); |
1624 | NewID = ValueIDNum(NewID->getBlock(), NewID->getInst(), NewLoc); |
1625 | } |
1626 | } |
1627 | } else { |
1628 | // If we can't handle subregisters, unset the new value. |
1629 | NewID = std::nullopt; |
1630 | } |
1631 | } |
1632 | |
1633 | return NewID; |
1634 | } |
1635 | |
1636 | bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, |
1637 | const FuncValueTable *MLiveOuts, |
1638 | const FuncValueTable *MLiveIns) { |
1639 | if (!MI.isDebugRef()) |
1640 | return false; |
1641 | |
1642 | // Only handle this instruction when we are building the variable value |
1643 | // transfer function. |
1644 | if (!VTracker && !TTracker) |
1645 | return false; |
1646 | |
1647 | const DILocalVariable *Var = MI.getDebugVariable(); |
1648 | const DIExpression *Expr = MI.getDebugExpression(); |
1649 | const DILocation *DebugLoc = MI.getDebugLoc(); |
1650 | const DILocation *InlinedAt = DebugLoc->getInlinedAt(); |
1651 | assert(Var->isValidLocationForIntrinsic(DebugLoc) && |
1652 | "Expected inlined-at fields to agree" ); |
1653 | |
1654 | DebugVariable V(Var, Expr, InlinedAt); |
1655 | |
1656 | auto *Scope = LS.findLexicalScope(DL: MI.getDebugLoc().get()); |
1657 | if (Scope == nullptr) |
1658 | return true; // Handled by doing nothing. This variable is never in scope. |
1659 | |
1660 | SmallVector<DbgOpID> DbgOpIDs; |
1661 | for (const MachineOperand &MO : MI.debug_operands()) { |
1662 | if (!MO.isDbgInstrRef()) { |
1663 | assert(!MO.isReg() && "DBG_INSTR_REF should not contain registers" ); |
1664 | DbgOpID ConstOpID = DbgOpStore.insert(Op: DbgOp(MO)); |
1665 | DbgOpIDs.push_back(Elt: ConstOpID); |
1666 | continue; |
1667 | } |
1668 | |
1669 | unsigned InstNo = MO.getInstrRefInstrIndex(); |
1670 | unsigned OpNo = MO.getInstrRefOpIndex(); |
1671 | |
1672 | // Default machine value number is <None> -- if no instruction defines |
1673 | // the corresponding value, it must have been optimized out. |
1674 | std::optional<ValueIDNum> NewID = |
1675 | getValueForInstrRef(InstNo, OpNo, MI, MLiveOuts, MLiveIns); |
1676 | // We have a value number or std::nullopt. If the latter, then kill the |
1677 | // entire debug value. |
1678 | if (NewID) { |
1679 | DbgOpIDs.push_back(Elt: DbgOpStore.insert(Op: *NewID)); |
1680 | } else { |
1681 | DbgOpIDs.clear(); |
1682 | break; |
1683 | } |
1684 | } |
1685 | |
1686 | // We have a DbgOpID for every value or for none. Tell the variable value |
1687 | // tracker about it. The rest of this LiveDebugValues implementation acts |
1688 | // exactly the same for DBG_INSTR_REFs as DBG_VALUEs (just, the former can |
1689 | // refer to values that aren't immediately available). |
1690 | DbgValueProperties Properties(Expr, false, true); |
1691 | if (VTracker) |
1692 | VTracker->defVar(MI, Properties, DebugOps: DbgOpIDs); |
1693 | |
1694 | // If we're on the final pass through the function, decompose this INSTR_REF |
1695 | // into a plain DBG_VALUE. |
1696 | if (!TTracker) |
1697 | return true; |
1698 | |
1699 | // Fetch the concrete DbgOps now, as we will need them later. |
1700 | SmallVector<DbgOp> DbgOps; |
1701 | for (DbgOpID OpID : DbgOpIDs) { |
1702 | DbgOps.push_back(Elt: DbgOpStore.find(ID: OpID)); |
1703 | } |
1704 | |
1705 | // Pick a location for the machine value number, if such a location exists. |
1706 | // (This information could be stored in TransferTracker to make it faster). |
1707 | SmallDenseMap<ValueIDNum, TransferTracker::LocationAndQuality> FoundLocs; |
1708 | SmallVector<ValueIDNum> ValuesToFind; |
  // Initialize the preferred-location map with illegal locations, to be
  // filled in later.
1711 | for (const DbgOp &Op : DbgOps) { |
1712 | if (!Op.IsConst) |
1713 | if (FoundLocs.try_emplace(Key: Op.ID).second) |
1714 | ValuesToFind.push_back(Elt: Op.ID); |
1715 | } |
1716 | |
1717 | for (auto Location : MTracker->locations()) { |
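    // For each tracked location, check whether it currently holds one of the
    // values we're searching for, and if so whether it's a better quality
    // (typically longer-lived) location than anything found so far.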
1718 | LocIdx CurL = Location.Idx; |
1719 | ValueIDNum ID = MTracker->readMLoc(L: CurL); |
1720 | auto ValueToFindIt = find(Range&: ValuesToFind, Val: ID); |
1721 | if (ValueToFindIt == ValuesToFind.end()) |
1722 | continue; |
1723 | auto &Previous = FoundLocs.find(Val: ID)->second; |
1724 | // If this is the first location with that value, pick it. Otherwise, |
1725 | // consider whether it's a "longer term" location. |
1726 | std::optional<TransferTracker::LocationQuality> ReplacementQuality = |
1727 | TTracker->getLocQualityIfBetter(L: CurL, Min: Previous.getQuality()); |
1728 | if (ReplacementQuality) { |
1729 | Previous = TransferTracker::LocationAndQuality(CurL, *ReplacementQuality); |
1730 | if (Previous.isBest()) { |
1731 | ValuesToFind.erase(CI: ValueToFindIt); |
1732 | if (ValuesToFind.empty()) |
1733 | break; |
1734 | } |
1735 | } |
1736 | } |
1737 | |
1738 | SmallVector<ResolvedDbgOp> NewLocs; |
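  // Rebuild the operand list: constants pass through unchanged, and each value
  // is replaced with the best location found above. If any value has no
  // location at all, clear the list, which leads to an undef DBG_VALUE below.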
1739 | for (const DbgOp &DbgOp : DbgOps) { |
1740 | if (DbgOp.IsConst) { |
1741 | NewLocs.push_back(Elt: DbgOp.MO); |
1742 | continue; |
1743 | } |
1744 | LocIdx FoundLoc = FoundLocs.find(Val: DbgOp.ID)->second.getLoc(); |
1745 | if (FoundLoc.isIllegal()) { |
1746 | NewLocs.clear(); |
1747 | break; |
1748 | } |
1749 | NewLocs.push_back(Elt: FoundLoc); |
1750 | } |
1751 | // Tell transfer tracker that the variable value has changed. |
1752 | TTracker->redefVar(MI, Properties, NewLocs); |
1753 | |
1754 | // If there were values with no location, but all such values are defined in |
1755 | // later instructions in this block, this is a block-local use-before-def. |
1756 | if (!DbgOps.empty() && NewLocs.empty()) { |
1757 | bool IsValidUseBeforeDef = true; |
1758 | uint64_t LastUseBeforeDef = 0; |
1759 | for (auto ValueLoc : FoundLocs) { |
1760 | ValueIDNum NewID = ValueLoc.first; |
1761 | LocIdx FoundLoc = ValueLoc.second.getLoc(); |
1762 | if (!FoundLoc.isIllegal()) |
1763 | continue; |
      // If we have a value with no location that is not defined in this block,
      // then it has no location in this block, leaving this value undefined.
1766 | if (NewID.getBlock() != CurBB || NewID.getInst() <= CurInst) { |
1767 | IsValidUseBeforeDef = false; |
1768 | break; |
1769 | } |
1770 | LastUseBeforeDef = std::max(a: LastUseBeforeDef, b: NewID.getInst()); |
1771 | } |
1772 | if (IsValidUseBeforeDef) { |
1773 | DebugVariableID VID = DVMap.insertDVID(Var&: V, Loc: MI.getDebugLoc().get()); |
1774 | TTracker->addUseBeforeDef(VarID: VID, Properties: {MI.getDebugExpression(), false, true}, |
1775 | DbgOps, Inst: LastUseBeforeDef); |
1776 | } |
1777 | } |
1778 | |
1779 | // Produce a DBG_VALUE representing what this DBG_INSTR_REF meant. |
1780 | // This DBG_VALUE is potentially a $noreg / undefined location, if |
1781 | // FoundLoc is illegal. |
1782 | // (XXX -- could morph the DBG_INSTR_REF in the future). |
1783 | MachineInstr *DbgMI = |
1784 | MTracker->emitLoc(DbgOps: NewLocs, Var: V, DILoc: MI.getDebugLoc().get(), Properties); |
1785 | DebugVariableID ID = DVMap.getDVID(Var: V); |
1786 | |
1787 | TTracker->PendingDbgValues.push_back(Elt: std::make_pair(x&: ID, y&: DbgMI)); |
1788 | TTracker->flushDbgValues(Pos: MI.getIterator(), MBB: nullptr); |
1789 | return true; |
1790 | } |
1791 | |
1792 | bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) { |
1793 | if (!MI.isDebugPHI()) |
1794 | return false; |
1795 | |
1796 | // Analyse these only when solving the machine value location problem. |
1797 | if (VTracker || TTracker) |
1798 | return true; |
1799 | |
1800 | // First operand is the value location, either a stack slot or register. |
1801 | // Second is the debug instruction number of the original PHI. |
1802 | const MachineOperand &MO = MI.getOperand(i: 0); |
1803 | unsigned InstrNum = MI.getOperand(i: 1).getImm(); |
1804 | |
1805 | auto EmitBadPHI = [this, &MI, InstrNum]() -> bool { |
1806 | // Helper lambda to do any accounting when we fail to find a location for |
1807 | // a DBG_PHI. This can happen if DBG_PHIs are malformed, or refer to a |
1808 | // dead stack slot, for example. |
1809 | // Record a DebugPHIRecord with an empty value + location. |
1810 | DebugPHINumToValue.push_back( |
1811 | Elt: {.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: std::nullopt, .ReadLoc: std::nullopt}); |
1812 | return true; |
1813 | }; |
1814 | |
1815 | if (MO.isReg() && MO.getReg()) { |
1816 | // The value is whatever's currently in the register. Read and record it, |
1817 | // to be analysed later. |
1818 | Register Reg = MO.getReg(); |
1819 | ValueIDNum Num = MTracker->readReg(R: Reg); |
1820 | auto PHIRec = DebugPHIRecord( |
1821 | {.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: Num, .ReadLoc: MTracker->lookupOrTrackRegister(ID: Reg)}); |
1822 | DebugPHINumToValue.push_back(Elt: PHIRec); |
1823 | |
1824 | // Ensure this register is tracked. |
1825 | for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI) |
1826 | MTracker->lookupOrTrackRegister(ID: *RAI); |
1827 | } else if (MO.isFI()) { |
1828 | // The value is whatever's in this stack slot. |
1829 | unsigned FI = MO.getIndex(); |
1830 | |
1831 | // If the stack slot is dead, then this was optimized away. |
1832 | // FIXME: stack slot colouring should account for slots that get merged. |
1833 | if (MFI->isDeadObjectIndex(ObjectIdx: FI)) |
1834 | return EmitBadPHI(); |
1835 | |
1836 | // Identify this spill slot, ensure it's tracked. |
1837 | Register Base; |
1838 | StackOffset Offs = TFI->getFrameIndexReference(MF: *MI.getMF(), FI, FrameReg&: Base); |
1839 | SpillLoc SL = {.SpillBase: Base, .SpillOffset: Offs}; |
1840 | std::optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(L: SL); |
1841 | |
1842 | // We might be able to find a value, but have chosen not to, to avoid |
1843 | // tracking too much stack information. |
1844 | if (!SpillNo) |
1845 | return EmitBadPHI(); |
1846 | |
    // Any stack location DBG_PHI should have an associated bit-size.
    assert(MI.getNumOperands() == 3 && "Stack DBG_PHI with no size?");
1849 | unsigned slotBitSize = MI.getOperand(i: 2).getImm(); |
1850 | |
1851 | unsigned SpillID = MTracker->getLocID(Spill: *SpillNo, Idx: {slotBitSize, 0}); |
1852 | LocIdx SpillLoc = MTracker->getSpillMLoc(SpillID); |
1853 | ValueIDNum Result = MTracker->readMLoc(L: SpillLoc); |
1854 | |
1855 | // Record this DBG_PHI for later analysis. |
1856 | auto DbgPHI = DebugPHIRecord({.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: Result, .ReadLoc: SpillLoc}); |
1857 | DebugPHINumToValue.push_back(Elt: DbgPHI); |
1858 | } else { |
    // Else: if the operand is neither a legal register nor a stack slot, then
1860 | // we're being fed illegal debug-info. Record an empty PHI, so that any |
1861 | // debug users trying to read this number will be put off trying to |
1862 | // interpret the value. |
1863 | LLVM_DEBUG( |
1864 | { dbgs() << "Seen DBG_PHI with unrecognised operand format\n" ; }); |
1865 | return EmitBadPHI(); |
1866 | } |
1867 | |
1868 | return true; |
1869 | } |
1870 | |
1871 | void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { |
1872 | // Meta Instructions do not affect the debug liveness of any register they |
1873 | // define. |
1874 | if (MI.isImplicitDef()) { |
    // Except when there's an implicit def, and the location it's defining has
    // no value number. The whole point of an implicit def is to announce that
    // the register is live, without being specific about its value. So define
    // a value if there isn't one already.
1879 | ValueIDNum Num = MTracker->readReg(R: MI.getOperand(i: 0).getReg()); |
1880 | // Has a legitimate value -> ignore the implicit def. |
1881 | if (Num.getLoc() != 0) |
1882 | return; |
1883 | // Otherwise, def it here. |
1884 | } else if (MI.isMetaInstruction()) |
1885 | return; |
1886 | |
1887 | // We always ignore SP defines on call instructions, they don't actually |
1888 | // change the value of the stack pointer... except for win32's _chkstk. This |
1889 | // is rare: filter quickly for the common case (no stack adjustments, not a |
1890 | // call, etc). If it is a call that modifies SP, recognise the SP register |
1891 | // defs. |
1892 | bool CallChangesSP = false; |
1893 | if (AdjustsStackInCalls && MI.isCall() && MI.getOperand(i: 0).isSymbol() && |
1894 | !strcmp(s1: MI.getOperand(i: 0).getSymbolName(), s2: StackProbeSymbolName.data())) |
1895 | CallChangesSP = true; |
1896 | |
1897 | // Test whether we should ignore a def of this register due to it being part |
1898 | // of the stack pointer. |
1899 | auto IgnoreSPAlias = [this, &MI, CallChangesSP](Register R) -> bool { |
1900 | if (CallChangesSP) |
1901 | return false; |
1902 | return MI.isCall() && MTracker->SPAliases.count(V: R); |
1903 | }; |
1904 | |
1905 | // Find the regs killed by MI, and find regmasks of preserved regs. |
1906 | // Max out the number of statically allocated elements in `DeadRegs`, as this |
1907 | // prevents fallback to std::set::count() operations. |
1908 | SmallSet<uint32_t, 32> DeadRegs; |
1909 | SmallVector<const uint32_t *, 4> RegMasks; |
1910 | SmallVector<const MachineOperand *, 4> RegMaskPtrs; |
1911 | for (const MachineOperand &MO : MI.operands()) { |
1912 | // Determine whether the operand is a register def. |
1913 | if (MO.isReg() && MO.isDef() && MO.getReg() && MO.getReg().isPhysical() && |
1914 | !IgnoreSPAlias(MO.getReg())) { |
1915 | // Remove ranges of all aliased registers. |
1916 | for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI) |
1917 | // FIXME: Can we break out of this loop early if no insertion occurs? |
1918 | DeadRegs.insert(V: (*RAI).id()); |
1919 | } else if (MO.isRegMask()) { |
1920 | RegMasks.push_back(Elt: MO.getRegMask()); |
1921 | RegMaskPtrs.push_back(Elt: &MO); |
1922 | } |
1923 | } |
1924 | |
1925 | // Tell MLocTracker about all definitions, of regmasks and otherwise. |
1926 | for (uint32_t DeadReg : DeadRegs) |
1927 | MTracker->defReg(R: DeadReg, BB: CurBB, Inst: CurInst); |
1928 | |
1929 | for (const auto *MO : RegMaskPtrs) |
1930 | MTracker->writeRegMask(MO, CurBB, InstID: CurInst); |
1931 | |
1932 | // If this instruction writes to a spill slot, def that slot. |
1933 | if (hasFoldedStackStore(MI)) { |
1934 | if (std::optional<SpillLocationNo> SpillNo = |
1935 | extractSpillBaseRegAndOffset(MI)) { |
1936 | for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { |
1937 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillNo, Idx: I); |
1938 | LocIdx L = MTracker->getSpillMLoc(SpillID); |
1939 | MTracker->setMLoc(L, Num: ValueIDNum(CurBB, CurInst, L)); |
1940 | } |
1941 | } |
1942 | } |
1943 | |
1944 | if (!TTracker) |
1945 | return; |
1946 | |
1947 | // When committing variable values to locations: tell transfer tracker that |
1948 | // we've clobbered things. It may be able to recover the variable from a |
1949 | // different location. |
1950 | |
1951 | // Inform TTracker about any direct clobbers. |
1952 | for (uint32_t DeadReg : DeadRegs) { |
1953 | LocIdx Loc = MTracker->lookupOrTrackRegister(ID: DeadReg); |
1954 | TTracker->clobberMloc(MLoc: Loc, Pos: MI.getIterator(), MakeUndef: false); |
1955 | } |
1956 | |
1957 | // Look for any clobbers performed by a register mask. Only test locations |
1958 | // that are actually being tracked. |
1959 | if (!RegMaskPtrs.empty()) { |
1960 | for (auto L : MTracker->locations()) { |
1961 | // Stack locations can't be clobbered by regmasks. |
1962 | if (MTracker->isSpill(Idx: L.Idx)) |
1963 | continue; |
1964 | |
1965 | Register Reg = MTracker->LocIdxToLocID[L.Idx]; |
1966 | if (IgnoreSPAlias(Reg)) |
1967 | continue; |
1968 | |
1969 | for (const auto *MO : RegMaskPtrs) |
1970 | if (MO->clobbersPhysReg(PhysReg: Reg)) |
1971 | TTracker->clobberMloc(MLoc: L.Idx, Pos: MI.getIterator(), MakeUndef: false); |
1972 | } |
1973 | } |
1974 | |
1975 | // Tell TTracker about any folded stack store. |
1976 | if (hasFoldedStackStore(MI)) { |
1977 | if (std::optional<SpillLocationNo> SpillNo = |
1978 | extractSpillBaseRegAndOffset(MI)) { |
1979 | for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { |
1980 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillNo, Idx: I); |
1981 | LocIdx L = MTracker->getSpillMLoc(SpillID); |
1982 | TTracker->clobberMloc(MLoc: L, Pos: MI.getIterator(), MakeUndef: true); |
1983 | } |
1984 | } |
1985 | } |
1986 | } |
1987 | |
1988 | void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) { |
1989 | // In all circumstances, re-def all aliases. It's definitely a new value now. |
1990 | for (MCRegAliasIterator RAI(DstRegNum, TRI, true); RAI.isValid(); ++RAI) |
1991 | MTracker->defReg(R: *RAI, BB: CurBB, Inst: CurInst); |
1992 | |
1993 | ValueIDNum SrcValue = MTracker->readReg(R: SrcRegNum); |
1994 | MTracker->setReg(R: DstRegNum, ValueID: SrcValue); |
1995 | |
1996 | // Copy subregisters from one location to another. |
1997 | for (MCSubRegIndexIterator SRI(SrcRegNum, TRI); SRI.isValid(); ++SRI) { |
1998 | unsigned SrcSubReg = SRI.getSubReg(); |
1999 | unsigned SubRegIdx = SRI.getSubRegIndex(); |
2000 | unsigned DstSubReg = TRI->getSubReg(Reg: DstRegNum, Idx: SubRegIdx); |
2001 | if (!DstSubReg) |
2002 | continue; |
2003 | |
    // Do the copy. There are two matching subregisters: the source value
    // should have been def'd when the super-reg was, but the latter might not
    // be tracked yet.
2007 | // This will force SrcSubReg to be tracked, if it isn't yet. Will read |
2008 | // mphi values if it wasn't tracked. |
2009 | LocIdx SrcL = MTracker->lookupOrTrackRegister(ID: SrcSubReg); |
2010 | LocIdx DstL = MTracker->lookupOrTrackRegister(ID: DstSubReg); |
2011 | (void)SrcL; |
2012 | (void)DstL; |
2013 | ValueIDNum CpyValue = MTracker->readReg(R: SrcSubReg); |
2014 | |
2015 | MTracker->setReg(R: DstSubReg, ValueID: CpyValue); |
2016 | } |
2017 | } |
2018 | |
2019 | std::optional<SpillLocationNo> |
2020 | InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, |
2021 | MachineFunction *MF) { |
2022 | // TODO: Handle multiple stores folded into one. |
2023 | if (!MI.hasOneMemOperand()) |
2024 | return std::nullopt; |
2025 | |
2026 | // Reject any memory operand that's aliased -- we can't guarantee its value. |
2027 | auto MMOI = MI.memoperands_begin(); |
2028 | const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); |
2029 | if (PVal->isAliased(MFI)) |
2030 | return std::nullopt; |
2031 | |
2032 | if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII)) |
2033 | return std::nullopt; // This is not a spill instruction, since no valid size |
2034 | // was returned from either function. |
2035 | |
2036 | return extractSpillBaseRegAndOffset(MI); |
2037 | } |
2038 | |
2039 | bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI, |
2040 | MachineFunction *MF, unsigned &Reg) { |
2041 | if (!isSpillInstruction(MI, MF)) |
2042 | return false; |
2043 | |
2044 | int FI; |
2045 | Reg = TII->isStoreToStackSlotPostFE(MI, FrameIndex&: FI); |
2046 | return Reg != 0; |
2047 | } |
2048 | |
2049 | std::optional<SpillLocationNo> |
2050 | InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, |
2051 | MachineFunction *MF, unsigned &Reg) { |
2052 | if (!MI.hasOneMemOperand()) |
2053 | return std::nullopt; |
2054 | |
2055 | // FIXME: Handle folded restore instructions with more than one memory |
2056 | // operand. |
2057 | if (MI.getRestoreSize(TII)) { |
2058 | Reg = MI.getOperand(i: 0).getReg(); |
2059 | return extractSpillBaseRegAndOffset(MI); |
2060 | } |
2061 | return std::nullopt; |
2062 | } |
2063 | |
2064 | bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { |
2065 | // XXX -- it's too difficult to implement VarLocBasedImpl's stack location |
2066 | // limitations under the new model. Therefore, when comparing them, compare |
2067 | // versions that don't attempt spills or restores at all. |
2068 | if (EmulateOldLDV) |
2069 | return false; |
2070 | |
2071 | // Strictly limit ourselves to plain loads and stores, not all instructions |
2072 | // that can access the stack. |
2073 | int DummyFI = -1; |
2074 | if (!TII->isStoreToStackSlotPostFE(MI, FrameIndex&: DummyFI) && |
2075 | !TII->isLoadFromStackSlotPostFE(MI, FrameIndex&: DummyFI)) |
2076 | return false; |
2077 | |
2078 | MachineFunction *MF = MI.getMF(); |
2079 | unsigned Reg; |
2080 | |
2081 | LLVM_DEBUG(dbgs() << "Examining instruction: " ; MI.dump();); |
2082 | |
2090 | // First, if there are any DBG_VALUEs pointing at a spill slot that is |
2091 | // written to, terminate that variable location. The value in memory |
2092 | // will have changed. DbgEntityHistoryCalculator doesn't try to detect this. |
2093 | if (std::optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) { |
2094 | // Un-set this location and clobber, so that earlier locations don't |
2095 | // continue past this store. |
2096 | for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) { |
2097 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *Loc, Idx: SlotIdx); |
2098 | std::optional<LocIdx> MLoc = MTracker->getSpillMLoc(SpillID); |
2099 | if (!MLoc) |
2100 | continue; |
2101 | |
2102 | // We need to over-write the stack slot with something (here, a def at |
2103 | // this instruction) to ensure no values are preserved in this stack slot |
2104 | // after the spill. It also prevents TTracker from trying to recover the |
2105 | // location and re-installing it in the same place. |
2106 | ValueIDNum Def(CurBB, CurInst, *MLoc); |
2107 | MTracker->setMLoc(L: *MLoc, Num: Def); |
2108 | if (TTracker) |
2109 | TTracker->clobberMloc(MLoc: *MLoc, Pos: MI.getIterator()); |
2110 | } |
2111 | } |
2112 | |
2113 | // Try to recognise spill and restore instructions that may transfer a value. |
2114 | if (isLocationSpill(MI, MF, Reg)) { |
2115 | // isLocationSpill returning true should guarantee we can extract a |
2116 | // location. |
2117 | SpillLocationNo Loc = *extractSpillBaseRegAndOffset(MI); |
2118 | |
2119 | auto DoTransfer = [&](Register SrcReg, unsigned SpillID) { |
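      // Record that the value currently in SrcReg now also lives in the given
      // stack-slot index; on the final pass, also let TTracker move any
      // variable locations from the register over to the slot.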
2120 | auto ReadValue = MTracker->readReg(R: SrcReg); |
2121 | LocIdx DstLoc = MTracker->getSpillMLoc(SpillID); |
2122 | MTracker->setMLoc(L: DstLoc, Num: ReadValue); |
2123 | |
2124 | if (TTracker) { |
2125 | LocIdx SrcLoc = MTracker->getRegMLoc(R: SrcReg); |
2126 | TTracker->transferMlocs(Src: SrcLoc, Dst: DstLoc, Pos: MI.getIterator()); |
2127 | } |
2128 | }; |
2129 | |
2130 | // Then, transfer subreg bits. |
2131 | for (MCPhysReg SR : TRI->subregs(Reg)) { |
2132 | // Ensure this reg is tracked, |
2133 | (void)MTracker->lookupOrTrackRegister(ID: SR); |
2134 | unsigned SubregIdx = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR); |
2135 | unsigned SpillID = MTracker->getLocID(Spill: Loc, SpillSubReg: SubregIdx); |
2136 | DoTransfer(SR, SpillID); |
2137 | } |
2138 | |
2139 | // Directly lookup size of main source reg, and transfer. |
2140 | unsigned Size = TRI->getRegSizeInBits(Reg, MRI: *MRI); |
2141 | unsigned SpillID = MTracker->getLocID(Spill: Loc, Idx: {Size, 0}); |
2142 | DoTransfer(Reg, SpillID); |
2143 | } else { |
2144 | std::optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg); |
2145 | if (!Loc) |
2146 | return false; |
2147 | |
2148 | // Assumption: we're reading from the base of the stack slot, not some |
2149 | // offset into it. It seems very unlikely LLVM would ever generate |
2150 | // restores where this wasn't true. This then becomes a question of what |
2151 | // subregisters in the destination register line up with positions in the |
2152 | // stack slot. |
2153 | |
2154 | // Def all registers that alias the destination. |
2155 | for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI) |
2156 | MTracker->defReg(R: *RAI, BB: CurBB, Inst: CurInst); |
2157 | |
2158 | // Now find subregisters within the destination register, and load values |
2159 | // from stack slot positions. |
2160 | auto DoTransfer = [&](Register DestReg, unsigned SpillID) { |
2161 | LocIdx SrcIdx = MTracker->getSpillMLoc(SpillID); |
2162 | auto ReadValue = MTracker->readMLoc(L: SrcIdx); |
2163 | MTracker->setReg(R: DestReg, ValueID: ReadValue); |
2164 | }; |
2165 | |
2166 | for (MCPhysReg SR : TRI->subregs(Reg)) { |
2167 | unsigned Subreg = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR); |
2168 | unsigned SpillID = MTracker->getLocID(Spill: *Loc, SpillSubReg: Subreg); |
2169 | DoTransfer(SR, SpillID); |
2170 | } |
2171 | |
    // Directly look up this register's slot idx by size, and transfer.
2173 | unsigned Size = TRI->getRegSizeInBits(Reg, MRI: *MRI); |
2174 | unsigned SpillID = MTracker->getLocID(Spill: *Loc, Idx: {Size, 0}); |
2175 | DoTransfer(Reg, SpillID); |
2176 | } |
2177 | return true; |
2178 | } |
2179 | |
2180 | bool InstrRefBasedLDV::transferRegisterCopy(MachineInstr &MI) { |
2181 | auto DestSrc = TII->isCopyLikeInstr(MI); |
2182 | if (!DestSrc) |
2183 | return false; |
2184 | |
2185 | const MachineOperand *DestRegOp = DestSrc->Destination; |
2186 | const MachineOperand *SrcRegOp = DestSrc->Source; |
2187 | |
2188 | Register SrcReg = SrcRegOp->getReg(); |
2189 | Register DestReg = DestRegOp->getReg(); |
2190 | |
2191 | // Ignore identity copies. Yep, these make it as far as LiveDebugValues. |
2192 | if (SrcReg == DestReg) |
2193 | return true; |
2194 | |
2195 | // For emulating VarLocBasedImpl: |
  // We want to recognize instructions where the destination register is a
  // callee-saved register. If a register that could be clobbered by the call
  // were included, there would be a good chance that it is going to be
  // clobbered soon. It is more likely that the previous register, which is
  // callee saved, will stay unclobbered for longer, even if it is killed.
2201 | // |
2202 | // For InstrRefBasedImpl, we can track multiple locations per value, so |
2203 | // ignore this condition. |
2204 | if (EmulateOldLDV && !isCalleeSavedReg(R: DestReg)) |
2205 | return false; |
2206 | |
  // VarLocBasedImpl only followed killing copies.
2208 | if (EmulateOldLDV && !SrcRegOp->isKill()) |
2209 | return false; |
2210 | |
2211 | // Before we update MTracker, remember which values were present in each of |
2212 | // the locations about to be overwritten, so that we can recover any |
2213 | // potentially clobbered variables. |
2214 | DenseMap<LocIdx, ValueIDNum> ClobberedLocs; |
2215 | if (TTracker) { |
2216 | for (MCRegAliasIterator RAI(DestReg, TRI, true); RAI.isValid(); ++RAI) { |
2217 | LocIdx ClobberedLoc = MTracker->getRegMLoc(R: *RAI); |
2218 | auto MLocIt = TTracker->ActiveMLocs.find(Val: ClobberedLoc); |
2219 | // If ActiveMLocs isn't tracking this location or there are no variables |
2220 | // using it, don't bother remembering. |
2221 | if (MLocIt == TTracker->ActiveMLocs.end() || MLocIt->second.empty()) |
2222 | continue; |
2223 | ValueIDNum Value = MTracker->readReg(R: *RAI); |
2224 | ClobberedLocs[ClobberedLoc] = Value; |
2225 | } |
2226 | } |
2227 | |
2228 | // Copy MTracker info, including subregs if available. |
2229 | InstrRefBasedLDV::performCopy(SrcRegNum: SrcReg, DstRegNum: DestReg); |
2230 | |
2231 | // The copy might have clobbered variables based on the destination register. |
2232 | // Tell TTracker about it, passing the old ValueIDNum to search for |
2233 | // alternative locations (or else terminating those variables). |
2234 | if (TTracker) { |
2235 | for (auto LocVal : ClobberedLocs) { |
2236 | TTracker->clobberMloc(MLoc: LocVal.first, OldValue: LocVal.second, Pos: MI.getIterator(), MakeUndef: false); |
2237 | } |
2238 | } |
2239 | |
2240 | // Only produce a transfer of DBG_VALUE within a block where old LDV |
2241 | // would have. We might make use of the additional value tracking in some |
2242 | // other way, later. |
2243 | if (TTracker && isCalleeSavedReg(R: DestReg) && SrcRegOp->isKill()) |
2244 | TTracker->transferMlocs(Src: MTracker->getRegMLoc(R: SrcReg), |
2245 | Dst: MTracker->getRegMLoc(R: DestReg), Pos: MI.getIterator()); |
2246 | |
2247 | // VarLocBasedImpl would quit tracking the old location after copying. |
2248 | if (EmulateOldLDV && SrcReg != DestReg) |
2249 | MTracker->defReg(R: SrcReg, BB: CurBB, Inst: CurInst); |
2250 | |
2251 | return true; |
2252 | } |
2253 | |
2254 | /// Accumulate a mapping between each DILocalVariable fragment and other |
2255 | /// fragments of that DILocalVariable which overlap. This reduces work during |
2256 | /// the data-flow stage from "Find any overlapping fragments" to "Check if the |
2257 | /// known-to-overlap fragments are present". |
2258 | /// \param MI A previously unprocessed debug instruction to analyze for |
2259 | /// fragment usage. |
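/// For example, two fragments of the same variable covering bits [0, 32) and
/// [16, 48) overlap, so a location given for one partially invalidates the
/// other.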
2260 | void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) { |
2261 | assert(MI.isDebugValueLike()); |
2262 | DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(), |
2263 | MI.getDebugLoc()->getInlinedAt()); |
2264 | FragmentInfo ThisFragment = MIVar.getFragmentOrDefault(); |
2265 | |
2266 | // If this is the first sighting of this variable, then we are guaranteed |
2267 | // there are currently no overlapping fragments either. Initialize the set |
2268 | // of seen fragments, record no overlaps for the current one, and return. |
2269 | auto [SeenIt, Inserted] = SeenFragments.try_emplace(Key: MIVar.getVariable()); |
2270 | if (Inserted) { |
2271 | SeenIt->second.insert(V: ThisFragment); |
2272 | |
2273 | OverlapFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}}); |
2274 | return; |
2275 | } |
2276 | |
2277 | // If this particular Variable/Fragment pair already exists in the overlap |
2278 | // map, it has already been accounted for. |
2279 | auto IsInOLapMap = |
2280 | OverlapFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}}); |
2281 | if (!IsInOLapMap.second) |
2282 | return; |
2283 | |
2284 | auto &ThisFragmentsOverlaps = IsInOLapMap.first->second; |
2285 | auto &AllSeenFragments = SeenIt->second; |
2286 | |
2287 | // Otherwise, examine all other seen fragments for this variable, with "this" |
2288 | // fragment being a previously unseen fragment. Record any pair of |
2289 | // overlapping fragments. |
2290 | for (const auto &ASeenFragment : AllSeenFragments) { |
2291 | // Does this previously seen fragment overlap? |
2292 | if (DIExpression::fragmentsOverlap(A: ThisFragment, B: ASeenFragment)) { |
2293 | // Yes: Mark the current fragment as being overlapped. |
2294 | ThisFragmentsOverlaps.push_back(Elt: ASeenFragment); |
2295 | // Mark the previously seen fragment as being overlapped by the current |
2296 | // one. |
2297 | auto ASeenFragmentsOverlaps = |
2298 | OverlapFragments.find(Val: {MIVar.getVariable(), ASeenFragment}); |
2299 | assert(ASeenFragmentsOverlaps != OverlapFragments.end() && |
2300 | "Previously seen var fragment has no vector of overlaps" ); |
2301 | ASeenFragmentsOverlaps->second.push_back(Elt: ThisFragment); |
2302 | } |
2303 | } |
2304 | |
2305 | AllSeenFragments.insert(V: ThisFragment); |
2306 | } |
2307 | |
2308 | void InstrRefBasedLDV::process(MachineInstr &MI, |
2309 | const FuncValueTable *MLiveOuts, |
2310 | const FuncValueTable *MLiveIns) { |
  // Try to interpret an MI as a debug or transfer instruction. Only if it's
  // none of these should we interpret its register defs as new value
  // definitions.
2314 | if (transferDebugValue(MI)) |
2315 | return; |
2316 | if (transferDebugInstrRef(MI, MLiveOuts, MLiveIns)) |
2317 | return; |
2318 | if (transferDebugPHI(MI)) |
2319 | return; |
2320 | if (transferRegisterCopy(MI)) |
2321 | return; |
2322 | if (transferSpillOrRestoreInst(MI)) |
2323 | return; |
2324 | transferRegisterDef(MI); |
2325 | } |
2326 | |
2327 | void InstrRefBasedLDV::produceMLocTransferFunction( |
2328 | MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer, |
2329 | unsigned MaxNumBlocks) { |
  // Because we try to optimize around register mask operands by ignoring regs
  // that aren't currently tracked, we set up something ugly for later: RegMask
  // operands that are seen earlier than the first use of a register still need
  // to clobber that register in the transfer function. But this information
  // isn't actively recorded. Instead, we track each RegMask used in each block,
  // and accumulate the clobbered but untracked registers in each block into
  // the following bitvector. Later, if new values are tracked, we can add
  // appropriate clobbers.
2338 | SmallVector<BitVector, 32> BlockMasks; |
2339 | BlockMasks.resize(N: MaxNumBlocks); |
2340 | |
2341 | // Reserve one bit per register for the masks described above. |
2342 | unsigned BVWords = MachineOperand::getRegMaskSize(NumRegs: TRI->getNumRegs()); |
2343 | for (auto &BV : BlockMasks) |
2344 | BV.resize(N: TRI->getNumRegs(), t: true); |
2345 | |
2346 | // Step through all instructions and inhale the transfer function. |
2347 | for (auto &MBB : MF) { |
2348 | // Object fields that are read by trackers to know where we are in the |
2349 | // function. |
2350 | CurBB = MBB.getNumber(); |
2351 | CurInst = 1; |
2352 | |
2353 | // Set all machine locations to a PHI value. For transfer function |
2354 | // production only, this signifies the live-in value to the block. |
2355 | MTracker->reset(); |
2356 | MTracker->setMPhis(CurBB); |
2357 | |
2358 | // Step through each instruction in this block. |
2359 | for (auto &MI : MBB) { |
      // Pass nullptr for the value tables when accumulating the machine
      // transfer function; they aren't needed at this stage.
2362 | process(MI, MLiveOuts: nullptr, MLiveIns: nullptr); |
2363 | |
2364 | // Also accumulate fragment map. |
2365 | if (MI.isDebugValueLike()) |
2366 | accumulateFragmentMap(MI); |
2367 | |
2368 | // Create a map from the instruction number (if present) to the |
2369 | // MachineInstr and its position. |
2370 | if (uint64_t InstrNo = MI.peekDebugInstrNum()) { |
2371 | auto InstrAndPos = std::make_pair(x: &MI, y&: CurInst); |
2372 | auto InsertResult = |
2373 | DebugInstrNumToInstr.insert(x: std::make_pair(x&: InstrNo, y&: InstrAndPos)); |
2374 | |
2375 | // There should never be duplicate instruction numbers. |
2376 | assert(InsertResult.second); |
2377 | (void)InsertResult; |
2378 | } |
2379 | |
2380 | ++CurInst; |
2381 | } |
2382 | |
2383 | // Produce the transfer function, a map of machine location to new value. If |
2384 | // any machine location has the live-in phi value from the start of the |
2385 | // block, it's live-through and doesn't need recording in the transfer |
2386 | // function. |
2387 | for (auto Location : MTracker->locations()) { |
2388 | LocIdx Idx = Location.Idx; |
2389 | ValueIDNum &P = Location.Value; |
2390 | if (P.isPHI() && P.getLoc() == Idx.asU64()) |
2391 | continue; |
2392 | |
2393 | // Insert-or-update. |
2394 | auto &TransferMap = MLocTransfer[CurBB]; |
2395 | auto Result = TransferMap.insert(KV: std::make_pair(x: Idx.asU64(), y&: P)); |
2396 | if (!Result.second) |
2397 | Result.first->second = P; |
2398 | } |
2399 | |
2400 | // Accumulate any bitmask operands into the clobbered reg mask for this |
2401 | // block. |
2402 | for (auto &P : MTracker->Masks) { |
2403 | BlockMasks[CurBB].clearBitsNotInMask(Mask: P.first->getRegMask(), MaskWords: BVWords); |
2404 | } |
2405 | } |
2406 | |
2407 | // Compute a bitvector of all the registers that are tracked in this block. |
2408 | BitVector UsedRegs(TRI->getNumRegs()); |
2409 | for (auto Location : MTracker->locations()) { |
2410 | unsigned ID = MTracker->LocIdxToLocID[Location.Idx]; |
2411 | // Ignore stack slots, and aliases of the stack pointer. |
2412 | if (ID >= TRI->getNumRegs() || MTracker->SPAliases.count(V: ID)) |
2413 | continue; |
2414 | UsedRegs.set(ID); |
2415 | } |
2416 | |
2417 | // Check that any regmask-clobber of a register that gets tracked, is not |
2418 | // live-through in the transfer function. It needs to be clobbered at the |
2419 | // very least. |
2420 | for (unsigned int I = 0; I < MaxNumBlocks; ++I) { |
2421 | BitVector &BV = BlockMasks[I]; |
2422 | BV.flip(); |
2423 | BV &= UsedRegs; |
2424 | // This produces all the bits that we clobber, but also use. Check that |
2425 | // they're all clobbered or at least set in the designated transfer |
    // element.
2427 | for (unsigned Bit : BV.set_bits()) { |
2428 | unsigned ID = MTracker->getLocID(Reg: Bit); |
2429 | LocIdx Idx = MTracker->LocIDToLocIdx[ID]; |
2430 | auto &TransferMap = MLocTransfer[I]; |
2431 | |
2432 | // Install a value representing the fact that this location is effectively |
2433 | // written to in this block. As there's no reserved value, instead use |
2434 | // a value number that is never generated. Pick the value number for the |
2435 | // first instruction in the block, def'ing this location, which we know |
2436 | // this block never used anyway. |
2437 | ValueIDNum NotGeneratedNum = ValueIDNum(I, 1, Idx); |
2438 | auto Result = |
2439 | TransferMap.insert(KV: std::make_pair(x: Idx.asU64(), y&: NotGeneratedNum)); |
2440 | if (!Result.second) { |
2441 | ValueIDNum &ValueID = Result.first->second; |
2442 | if (ValueID.getBlock() == I && ValueID.isPHI()) |
2443 | // It was left as live-through. Set it to clobbered. |
2444 | ValueID = NotGeneratedNum; |
2445 | } |
2446 | } |
2447 | } |
2448 | } |
2449 | |
2450 | bool InstrRefBasedLDV::mlocJoin( |
2451 | MachineBasicBlock &MBB, SmallPtrSet<const MachineBasicBlock *, 16> &Visited, |
2452 | FuncValueTable &OutLocs, ValueTable &InLocs) { |
2453 | LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n" ); |
2454 | bool Changed = false; |
2455 | |
2456 | // Handle value-propagation when control flow merges on entry to a block. For |
2457 | // any location without a PHI already placed, the location has the same value |
2458 | // as its predecessors. If a PHI is placed, test to see whether it's now a |
2459 | // redundant PHI that we can eliminate. |
2460 | |
2461 | SmallVector<const MachineBasicBlock *, 8> BlockOrders(MBB.predecessors()); |
2462 | |
2463 | // Visit predecessors in RPOT order. |
2464 | auto Cmp = [&](const MachineBasicBlock *A, const MachineBasicBlock *B) { |
2465 | return BBToOrder.find(Val: A)->second < BBToOrder.find(Val: B)->second; |
2466 | }; |
2467 | llvm::sort(C&: BlockOrders, Comp: Cmp); |
2468 | |
2469 | // Skip entry block. |
2470 | if (BlockOrders.size() == 0) { |
2471 | // FIXME: We don't use assert here to prevent instr-ref-unreachable.mir |
2472 | // failing. |
    LLVM_DEBUG(if (!MBB.isEntryBlock()) dbgs()
               << "Found unreachable block " << MBB.getFullName()
               << " from entry, which may lead to an out-of-bounds "
                  "access to VarLocs\n");
2477 | return false; |
2478 | } |
2479 | |
2480 | // Step through all machine locations, look at each predecessor and test |
2481 | // whether we can eliminate redundant PHIs. |
2482 | for (auto Location : MTracker->locations()) { |
2483 | LocIdx Idx = Location.Idx; |
2484 | |
    // Pick out the first predecessor's live-out value for this location. The
    // edge from that predecessor is guaranteed not to be a backedge, as we
    // visit predecessors in RPO order.
2487 | ValueIDNum FirstVal = OutLocs[*BlockOrders[0]][Idx.asU64()]; |
2488 | |
    // If we've already eliminated a PHI here, do no further checking; just
    // propagate the first predecessor's live-out value into this block.
2491 | if (InLocs[Idx.asU64()] != ValueIDNum(MBB.getNumber(), 0, Idx)) { |
2492 | if (InLocs[Idx.asU64()] != FirstVal) { |
2493 | InLocs[Idx.asU64()] = FirstVal; |
2494 | Changed |= true; |
2495 | } |
2496 | continue; |
2497 | } |
2498 | |
2499 | // We're now examining a PHI to see whether it's un-necessary. Loop around |
2500 | // the other live-in values and test whether they're all the same. |
2501 | bool Disagree = false; |
2502 | for (unsigned int I = 1; I < BlockOrders.size(); ++I) { |
2503 | const MachineBasicBlock *PredMBB = BlockOrders[I]; |
2504 | const ValueIDNum &PredLiveOut = OutLocs[*PredMBB][Idx.asU64()]; |
2505 | |
2506 | // Incoming values agree, continue trying to eliminate this PHI. |
2507 | if (FirstVal == PredLiveOut) |
2508 | continue; |
2509 | |
2510 | // We can also accept a PHI value that feeds back into itself. |
2511 | if (PredLiveOut == ValueIDNum(MBB.getNumber(), 0, Idx)) |
2512 | continue; |
2513 | |
2514 | // Live-out of a predecessor disagrees with the first predecessor. |
2515 | Disagree = true; |
2516 | } |
2517 | |
2518 | // No disagreement? No PHI. Otherwise, leave the PHI in live-ins. |
2519 | if (!Disagree) { |
2520 | InLocs[Idx.asU64()] = FirstVal; |
2521 | Changed |= true; |
2522 | } |
2523 | } |
2524 | |
2525 | // TODO: Reimplement NumInserted and NumRemoved. |
2526 | return Changed; |
2527 | } |
2528 | |
2529 | void InstrRefBasedLDV::findStackIndexInterference( |
2530 | SmallVectorImpl<unsigned> &Slots) { |
2531 | // We could spend a bit of time finding the exact, minimal, set of stack |
2532 | // indexes that interfere with each other, much like reg units. Or, we can |
2533 | // rely on the fact that: |
2534 | // * The smallest / lowest index will interfere with everything at zero |
2535 | // offset, which will be the largest set of registers, |
2536 | // * Most indexes with non-zero offset will end up being interference units |
2537 | // anyway. |
2538 | // So just pick those out and return them. |
2539 | |
2540 | // We can rely on a single-byte stack index existing already, because we |
2541 | // initialize them in MLocTracker. |
2542 | auto It = MTracker->StackSlotIdxes.find(Val: {8, 0}); |
2543 | assert(It != MTracker->StackSlotIdxes.end()); |
2544 | Slots.push_back(Elt: It->second); |
2545 | |
2546 | // Find anything that has a non-zero offset and add that too. |
2547 | for (auto &Pair : MTracker->StackSlotIdxes) { |
2548 | // Is offset zero? If so, ignore. |
2549 | if (!Pair.first.second) |
2550 | continue; |
2551 | Slots.push_back(Elt: Pair.second); |
2552 | } |
2553 | } |
2554 | |
2555 | void InstrRefBasedLDV::placeMLocPHIs( |
2556 | MachineFunction &MF, SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks, |
2557 | FuncValueTable &MInLocs, SmallVectorImpl<MLocTransferMap> &MLocTransfer) { |
2558 | SmallVector<unsigned, 4> StackUnits; |
2559 | findStackIndexInterference(Slots&: StackUnits); |
2560 | |
2561 |   // To avoid repeatedly running the PHI placement algorithm, leverage the |
2562 |   // fact that a def of a register MUST also def its register units. Find the |
2563 |   // units for registers, place PHIs for them, and then replicate them for |
2564 |   // aliasing registers. Some inputs that are never def'd (DBG_PHIs of |
2565 |   // arguments) don't lead to register units being tracked; just place PHIs |
2566 |   // for those registers directly. Stack slots have their own form of "unit": |
2567 |   // store them to one side. |
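     |   // (Illustrative example on x86: a def of $eax also defs the register units it |
     |   // shares with $al, $ax and $rax, so PHIs are computed once per unit and then |
     |   // copied to each aliasing register rather than re-running IDF per register.) |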
2568 | SmallSet<Register, 32> RegUnitsToPHIUp; |
2569 | SmallSet<LocIdx, 32> NormalLocsToPHI; |
2570 | SmallSet<SpillLocationNo, 32> StackSlots; |
2571 | for (auto Location : MTracker->locations()) { |
2572 | LocIdx L = Location.Idx; |
2573 | if (MTracker->isSpill(Idx: L)) { |
2574 | StackSlots.insert(V: MTracker->locIDToSpill(ID: MTracker->LocIdxToLocID[L])); |
2575 | continue; |
2576 | } |
2577 | |
2578 | Register R = MTracker->LocIdxToLocID[L]; |
2579 | SmallSet<Register, 8> FoundRegUnits; |
2580 | bool AnyIllegal = false; |
2581 | for (MCRegUnit Unit : TRI->regunits(Reg: R.asMCReg())) { |
2582 | for (MCRegUnitRootIterator URoot(Unit, TRI); URoot.isValid(); ++URoot) { |
2583 | if (!MTracker->isRegisterTracked(R: *URoot)) { |
2584 |           // Not all roots were loaded into the tracking map: this register |
2585 |           // isn't actually def'd anywhere; we only read from it. Generate PHIs |
2586 |           // for this reg, but don't iterate units. |
2587 | AnyIllegal = true; |
2588 | } else { |
2589 | FoundRegUnits.insert(V: *URoot); |
2590 | } |
2591 | } |
2592 | } |
2593 | |
2594 | if (AnyIllegal) { |
2595 | NormalLocsToPHI.insert(V: L); |
2596 | continue; |
2597 | } |
2598 | |
2599 | RegUnitsToPHIUp.insert_range(R&: FoundRegUnits); |
2600 | } |
2601 | |
2602 | // Lambda to fetch PHIs for a given location, and write into the PHIBlocks |
2603 | // collection. |
2604 | SmallVector<MachineBasicBlock *, 32> PHIBlocks; |
2605 | auto CollectPHIsForLoc = [&](LocIdx L) { |
2606 | // Collect the set of defs. |
2607 | SmallPtrSet<MachineBasicBlock *, 32> DefBlocks; |
2608 | for (MachineBasicBlock *MBB : OrderToBB) { |
2609 | const auto &TransferFunc = MLocTransfer[MBB->getNumber()]; |
2610 | if (TransferFunc.contains(Val: L)) |
2611 | DefBlocks.insert(Ptr: MBB); |
2612 | } |
2613 | |
2614 | // The entry block defs the location too: it's the live-in / argument value. |
2615 | // Only insert if there are other defs though; everything is trivially live |
2616 | // through otherwise. |
2617 | if (!DefBlocks.empty()) |
2618 | DefBlocks.insert(Ptr: &*MF.begin()); |
2619 | |
2620 | // Ask the SSA construction algorithm where we should put PHIs. Clear |
2621 | // anything that might have been hanging around from earlier. |
2622 | PHIBlocks.clear(); |
2623 | BlockPHIPlacement(AllBlocks, DefBlocks, PHIBlocks); |
2624 | }; |
2625 | |
2626 | auto InstallPHIsAtLoc = [&PHIBlocks, &MInLocs](LocIdx L) { |
2627 | for (const MachineBasicBlock *MBB : PHIBlocks) |
2628 | MInLocs[*MBB][L.asU64()] = ValueIDNum(MBB->getNumber(), 0, L); |
2629 | }; |
2630 | |
2631 | // For locations with no reg units, just place PHIs. |
2632 | for (LocIdx L : NormalLocsToPHI) { |
2633 | CollectPHIsForLoc(L); |
2634 | // Install those PHI values into the live-in value array. |
2635 | InstallPHIsAtLoc(L); |
2636 | } |
2637 | |
2638 | // For stack slots, calculate PHIs for the equivalent of the units, then |
2639 | // install for each index. |
2640 | for (SpillLocationNo Slot : StackSlots) { |
2641 | for (unsigned Idx : StackUnits) { |
2642 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: Slot, Idx); |
2643 | LocIdx L = MTracker->getSpillMLoc(SpillID); |
2644 | CollectPHIsForLoc(L); |
2645 | InstallPHIsAtLoc(L); |
2646 | |
2647 | // Find anything that aliases this stack index, install PHIs for it too. |
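     |       // (Two (size, offset) slices of a slot alias when their ranges overlap; |
     |       // the test below skips a pair when one slice ends at or before the |
     |       // position where the other begins.) |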
2648 | unsigned Size, Offset; |
2649 | std::tie(args&: Size, args&: Offset) = MTracker->StackIdxesToPos[Idx]; |
2650 | for (auto &Pair : MTracker->StackSlotIdxes) { |
2651 | unsigned ThisSize, ThisOffset; |
2652 | std::tie(args&: ThisSize, args&: ThisOffset) = Pair.first; |
2653 | if (ThisSize + ThisOffset <= Offset || Size + Offset <= ThisOffset) |
2654 | continue; |
2655 | |
2656 | unsigned ThisID = MTracker->getSpillIDWithIdx(Spill: Slot, Idx: Pair.second); |
2657 | LocIdx ThisL = MTracker->getSpillMLoc(SpillID: ThisID); |
2658 | InstallPHIsAtLoc(ThisL); |
2659 | } |
2660 | } |
2661 | } |
2662 | |
2663 | // For reg units, place PHIs, and then place them for any aliasing registers. |
2664 | for (Register R : RegUnitsToPHIUp) { |
2665 | LocIdx L = MTracker->lookupOrTrackRegister(ID: R); |
2666 | CollectPHIsForLoc(L); |
2667 | |
2668 | // Install those PHI values into the live-in value array. |
2669 | InstallPHIsAtLoc(L); |
2670 | |
2671 | // Now find aliases and install PHIs for those. |
2672 | for (MCRegAliasIterator RAI(R, TRI, true); RAI.isValid(); ++RAI) { |
2673 | // Super-registers that are "above" the largest register read/written by |
2674 | // the function will alias, but will not be tracked. |
2675 | if (!MTracker->isRegisterTracked(R: *RAI)) |
2676 | continue; |
2677 | |
2678 | LocIdx AliasLoc = MTracker->lookupOrTrackRegister(ID: *RAI); |
2679 | InstallPHIsAtLoc(AliasLoc); |
2680 | } |
2681 | } |
2682 | } |
2683 | |
2684 | void InstrRefBasedLDV::buildMLocValueMap( |
2685 | MachineFunction &MF, FuncValueTable &MInLocs, FuncValueTable &MOutLocs, |
2686 | SmallVectorImpl<MLocTransferMap> &MLocTransfer) { |
2687 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
2688 | std::greater<unsigned int>> |
2689 | Worklist, Pending; |
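     |   // (The queues hold block RPO numbers ordered with std::greater, i.e. they are |
     |   // min-heaps: the earliest unvisited block in RPO order is processed next.) |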
2690 | |
2691 | // We track what is on the current and pending worklist to avoid inserting |
2692 | // the same thing twice. We could avoid this with a custom priority queue, |
2693 | // but this is probably not worth it. |
2694 | SmallPtrSet<MachineBasicBlock *, 16> OnPending, OnWorklist; |
2695 | |
2696 | // Initialize worklist with every block to be visited. Also produce list of |
2697 | // all blocks. |
2698 | SmallPtrSet<MachineBasicBlock *, 32> AllBlocks; |
2699 | for (unsigned int I = 0; I < BBToOrder.size(); ++I) { |
2700 | Worklist.push(x: I); |
2701 | OnWorklist.insert(Ptr: OrderToBB[I]); |
2702 | AllBlocks.insert(Ptr: OrderToBB[I]); |
2703 | } |
2704 | |
2705 | // Initialize entry block to PHIs. These represent arguments. |
2706 | for (auto Location : MTracker->locations()) |
2707 | MInLocs.tableForEntryMBB()[Location.Idx.asU64()] = |
2708 | ValueIDNum(0, 0, Location.Idx); |
2709 | |
2710 | MTracker->reset(); |
2711 | |
2712 |   // Start by placing PHIs, using the usual SSA construction algorithm. Consider |
2713 | // any machine-location that isn't live-through a block to be def'd in that |
2714 | // block. |
2715 | placeMLocPHIs(MF, AllBlocks, MInLocs, MLocTransfer); |
2716 | |
2717 | // Propagate values to eliminate redundant PHIs. At the same time, this |
2718 | // produces the table of Block x Location => Value for the entry to each |
2719 | // block. |
2720 | // The kind of PHIs we can eliminate are, for example, where one path in a |
2721 | // conditional spills and restores a register, and the register still has |
2722 |   // the same value once control flow joins, unbeknownst to the PHI placement |
2723 | // code. Propagating values allows us to identify such un-necessary PHIs and |
2724 | // remove them. |
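     |   // Illustrative example: if one arm of a diamond spills and restores $rbx |
     |   // while the other leaves it alone, both arms' live-out for $rbx is the same |
     |   // def, so the PHI placed at the join is redundant and is replaced by that |
     |   // def during propagation. |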
2725 | SmallPtrSet<const MachineBasicBlock *, 16> Visited; |
2726 | while (!Worklist.empty() || !Pending.empty()) { |
2727 | // Vector for storing the evaluated block transfer function. |
2728 | SmallVector<std::pair<LocIdx, ValueIDNum>, 32> ToRemap; |
2729 | |
2730 | while (!Worklist.empty()) { |
2731 | MachineBasicBlock *MBB = OrderToBB[Worklist.top()]; |
2732 | CurBB = MBB->getNumber(); |
2733 | Worklist.pop(); |
2734 | |
2735 | // Join the values in all predecessor blocks. |
2736 | bool InLocsChanged; |
2737 | InLocsChanged = mlocJoin(MBB&: *MBB, Visited, OutLocs&: MOutLocs, InLocs&: MInLocs[*MBB]); |
2738 | InLocsChanged |= Visited.insert(Ptr: MBB).second; |
2739 | |
2740 |       // Don't examine the transfer function if we've visited this block at |
2741 |       // least once and the inlocs haven't changed. |
2742 | if (!InLocsChanged) |
2743 | continue; |
2744 | |
2745 | // Load the current set of live-ins into MLocTracker. |
2746 | MTracker->loadFromArray(Locs&: MInLocs[*MBB], NewCurBB: CurBB); |
2747 | |
2748 | // Each element of the transfer function can be a new def, or a read of |
2749 | // a live-in value. Evaluate each element, and store to "ToRemap". |
2750 | ToRemap.clear(); |
2751 | for (auto &P : MLocTransfer[CurBB]) { |
2752 | if (P.second.getBlock() == CurBB && P.second.isPHI()) { |
2753 | // This is a movement of whatever was live in. Read it. |
2754 | ValueIDNum NewID = MTracker->readMLoc(L: P.second.getLoc()); |
2755 | ToRemap.push_back(Elt: std::make_pair(x&: P.first, y&: NewID)); |
2756 | } else { |
2757 | // It's a def. Just set it. |
2758 | assert(P.second.getBlock() == CurBB); |
2759 | ToRemap.push_back(Elt: std::make_pair(x&: P.first, y&: P.second)); |
2760 | } |
2761 | } |
2762 | |
2763 | // Commit the transfer function changes into mloc tracker, which |
2764 | // transforms the contents of the MLocTracker into the live-outs. |
2765 | for (auto &P : ToRemap) |
2766 | MTracker->setMLoc(L: P.first, Num: P.second); |
2767 | |
2768 | // Now copy out-locs from mloc tracker into out-loc vector, checking |
2769 | // whether changes have occurred. These changes can have come from both |
2770 | // the transfer function, and mlocJoin. |
2771 | bool OLChanged = false; |
2772 | for (auto Location : MTracker->locations()) { |
2773 | OLChanged |= MOutLocs[*MBB][Location.Idx.asU64()] != Location.Value; |
2774 | MOutLocs[*MBB][Location.Idx.asU64()] = Location.Value; |
2775 | } |
2776 | |
2777 | MTracker->reset(); |
2778 | |
2779 | // No need to examine successors again if out-locs didn't change. |
2780 | if (!OLChanged) |
2781 | continue; |
2782 | |
2783 | // All successors should be visited: put any back-edges on the pending |
2784 | // list for the next pass-through, and any other successors to be |
2785 | // visited this pass, if they're not going to be already. |
2786 | for (auto *s : MBB->successors()) { |
2787 | // Does branching to this successor represent a back-edge? |
2788 | unsigned Order = BBToOrder[s]; |
2789 | if (Order > BBToOrder[MBB]) { |
2790 | // No: visit it during this dataflow iteration. |
2791 | if (OnWorklist.insert(Ptr: s).second) |
2792 | Worklist.push(x: Order); |
2793 | } else { |
2794 | // Yes: visit it on the next iteration. |
2795 | if (OnPending.insert(Ptr: s).second) |
2796 | Pending.push(x: Order); |
2797 | } |
2798 | } |
2799 | } |
2800 | |
2801 | Worklist.swap(pq&: Pending); |
2802 | std::swap(LHS&: OnPending, RHS&: OnWorklist); |
2803 | OnPending.clear(); |
2804 |     // At this point, Pending must be empty: the swap above handed it the |
2805 |     // Worklist that the inner loop had just drained. |
2806 | assert(Pending.empty() && "Pending should be empty" ); |
2807 | } |
2808 | |
2809 | // Once all the live-ins don't change on mlocJoin(), we've eliminated all |
2810 | // redundant PHIs. |
2811 | } |
2812 | |
2813 | void InstrRefBasedLDV::BlockPHIPlacement( |
2814 | const SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks, |
2815 | const SmallPtrSetImpl<MachineBasicBlock *> &DefBlocks, |
2816 | SmallVectorImpl<MachineBasicBlock *> &PHIBlocks) { |
2817 | // Apply IDF calculator to the designated set of location defs, storing |
2818 | // required PHIs into PHIBlocks. Uses the dominator tree stored in the |
2819 | // InstrRefBasedLDV object. |
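     |   // (IDF here is the iterated dominance frontier of the def blocks -- the |
     |   // standard set of blocks where SSA construction would place PHI nodes.) |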
2820 | IDFCalculatorBase<MachineBasicBlock, false> IDF(*DomTree); |
2821 | |
2822 | IDF.setLiveInBlocks(AllBlocks); |
2823 | IDF.setDefiningBlocks(DefBlocks); |
2824 | IDF.calculate(IDFBlocks&: PHIBlocks); |
2825 | } |
2826 | |
2827 | bool InstrRefBasedLDV::pickVPHILoc( |
2828 | SmallVectorImpl<DbgOpID> &OutValues, const MachineBasicBlock &MBB, |
2829 | const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, |
2830 | const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) { |
2831 | |
2832 | // No predecessors means no PHIs. |
2833 | if (BlockOrders.empty()) |
2834 | return false; |
2835 | |
2836 |   // All the location operands that do not already agree need to be joined; |
2837 |   // track the indices of each such location operand here. |
2838 | SmallDenseSet<unsigned> LocOpsToJoin; |
2839 | |
2840 | auto FirstValueIt = LiveOuts.find(Val: BlockOrders[0]); |
2841 | if (FirstValueIt == LiveOuts.end()) |
2842 | return false; |
2843 | const DbgValue &FirstValue = *FirstValueIt->second; |
2844 | |
2845 | for (const auto p : BlockOrders) { |
2846 | auto OutValIt = LiveOuts.find(Val: p); |
2847 | if (OutValIt == LiveOuts.end()) |
2848 | // If we have a predecessor not in scope, we'll never find a PHI position. |
2849 | return false; |
2850 | const DbgValue &OutVal = *OutValIt->second; |
2851 | |
2852 | // No-values cannot have locations we can join on. |
2853 | if (OutVal.Kind == DbgValue::NoVal) |
2854 | return false; |
2855 | |
2856 |     // For unjoined VPHIs where we don't know the location, we definitely can't |
2857 |     // find a join loc unless the VPHI is this block's own, fed back via a backedge. |
2858 | if (OutVal.isUnjoinedPHI() && OutVal.BlockNo != MBB.getNumber()) |
2859 | return false; |
2860 | |
2861 | if (!FirstValue.Properties.isJoinable(Other: OutVal.Properties)) |
2862 | return false; |
2863 | |
2864 | for (unsigned Idx = 0; Idx < FirstValue.getLocationOpCount(); ++Idx) { |
2865 | // An unjoined PHI has no defined locations, and so a shared location must |
2866 | // be found for every operand. |
2867 | if (OutVal.isUnjoinedPHI()) { |
2868 | LocOpsToJoin.insert(V: Idx); |
2869 | continue; |
2870 | } |
2871 | DbgOpID FirstValOp = FirstValue.getDbgOpID(Index: Idx); |
2872 | DbgOpID OutValOp = OutVal.getDbgOpID(Index: Idx); |
2873 | if (FirstValOp != OutValOp) { |
2874 | // We can never join constant ops - the ops must either both be equal |
2875 | // constant ops or non-const ops. |
2876 | if (FirstValOp.isConst() || OutValOp.isConst()) |
2877 | return false; |
2878 | else |
2879 | LocOpsToJoin.insert(V: Idx); |
2880 | } |
2881 | } |
2882 | } |
2883 | |
2884 | SmallVector<DbgOpID> NewDbgOps; |
2885 | |
2886 | for (unsigned Idx = 0; Idx < FirstValue.getLocationOpCount(); ++Idx) { |
2887 | // If this op doesn't need to be joined because the values agree, use that |
2888 | // already-agreed value. |
2889 | if (!LocOpsToJoin.contains(V: Idx)) { |
2890 | NewDbgOps.push_back(Elt: FirstValue.getDbgOpID(Index: Idx)); |
2891 | continue; |
2892 | } |
2893 | |
2894 | std::optional<ValueIDNum> JoinedOpLoc = |
2895 | pickOperandPHILoc(DbgOpIdx: Idx, MBB, LiveOuts, MOutLocs, BlockOrders); |
2896 | |
2897 | if (!JoinedOpLoc) |
2898 | return false; |
2899 | |
2900 | NewDbgOps.push_back(Elt: DbgOpStore.insert(Op: *JoinedOpLoc)); |
2901 | } |
2902 | |
2903 | OutValues.append(RHS: NewDbgOps); |
2904 | return true; |
2905 | } |
2906 | |
2907 | std::optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc( |
2908 | unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts, |
2909 | FuncValueTable &MOutLocs, |
2910 | const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) { |
2911 | |
2912 |   // Collect, for each predecessor, the set of locations where its live-out |
2913 |   // value can be found. |
2914 | SmallVector<SmallVector<LocIdx, 4>, 8> Locs; |
2915 | unsigned NumLocs = MTracker->getNumLocs(); |
2916 | |
2917 | for (const auto p : BlockOrders) { |
2918 | auto OutValIt = LiveOuts.find(Val: p); |
2919 | assert(OutValIt != LiveOuts.end()); |
2920 | const DbgValue &OutVal = *OutValIt->second; |
2921 | DbgOpID OutValOpID = OutVal.getDbgOpID(Index: DbgOpIdx); |
2922 | DbgOp OutValOp = DbgOpStore.find(ID: OutValOpID); |
2923 | assert(!OutValOp.IsConst); |
2924 | |
2925 | // Create new empty vector of locations. |
2926 | Locs.resize(N: Locs.size() + 1); |
2927 | |
2928 | // If the live-in value is a def, find the locations where that value is |
2929 | // present. Do the same for VPHIs where we know the VPHI value. |
2930 | if (OutVal.Kind == DbgValue::Def || |
2931 | (OutVal.Kind == DbgValue::VPHI && OutVal.BlockNo != MBB.getNumber() && |
2932 | !OutValOp.isUndef())) { |
2933 | ValueIDNum ValToLookFor = OutValOp.ID; |
2934 | // Search the live-outs of the predecessor for the specified value. |
2935 | for (unsigned int I = 0; I < NumLocs; ++I) { |
2936 | if (MOutLocs[*p][I] == ValToLookFor) |
2937 | Locs.back().push_back(Elt: LocIdx(I)); |
2938 | } |
2939 | } else { |
2940 | assert(OutVal.Kind == DbgValue::VPHI); |
2941 | // Otherwise: this is a VPHI on a backedge feeding back into itself, i.e. |
2942 | // a value that's live-through the whole loop. (It has to be a backedge, |
2943 | // because a block can't dominate itself). We can accept as a PHI location |
2944 | // any location where the other predecessors agree, _and_ the machine |
2945 | // locations feed back into themselves. Therefore, add all self-looping |
2946 | // machine-value PHI locations. |
2947 | for (unsigned int I = 0; I < NumLocs; ++I) { |
2948 | ValueIDNum MPHI(MBB.getNumber(), 0, LocIdx(I)); |
2949 | if (MOutLocs[*p][I] == MPHI) |
2950 | Locs.back().push_back(Elt: LocIdx(I)); |
2951 | } |
2952 | } |
2953 | } |
2954 | // We should have found locations for all predecessors, or returned. |
2955 | assert(Locs.size() == BlockOrders.size()); |
2956 | |
2957 | // Starting with the first set of locations, take the intersection with |
2958 | // subsequent sets. |
2959 | SmallVector<LocIdx, 4> CandidateLocs = Locs[0]; |
2960 | for (unsigned int I = 1; I < Locs.size(); ++I) { |
2961 | auto &LocVec = Locs[I]; |
2962 | SmallVector<LocIdx, 4> NewCandidates; |
2963 | std::set_intersection(first1: CandidateLocs.begin(), last1: CandidateLocs.end(), |
2964 | first2: LocVec.begin(), last2: LocVec.end(), result: std::inserter(x&: NewCandidates, i: NewCandidates.begin())); |
2965 | CandidateLocs = std::move(NewCandidates); |
2966 | } |
2967 | if (CandidateLocs.empty()) |
2968 | return std::nullopt; |
2969 | |
2970 | // We now have a set of LocIdxes that contain the right output value in |
2971 | // each of the predecessors. Pick the lowest; if there's a register loc, |
2972 | // that'll be it. |
2973 | LocIdx L = *CandidateLocs.begin(); |
2974 | |
2975 | // Return a PHI-value-number for the found location. |
2976 | ValueIDNum PHIVal = {(unsigned)MBB.getNumber(), 0, L}; |
2977 | return PHIVal; |
2978 | } |
2979 | |
2980 | bool InstrRefBasedLDV::vlocJoin( |
2981 | MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, |
2982 | SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore, |
2983 | DbgValue &LiveIn) { |
2984 | LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n" ); |
2985 | bool Changed = false; |
2986 | |
2987 | // Order predecessors by RPOT order, for exploring them in that order. |
2988 | SmallVector<MachineBasicBlock *, 8> BlockOrders(MBB.predecessors()); |
2989 | |
2990 | auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) { |
2991 | return BBToOrder[A] < BBToOrder[B]; |
2992 | }; |
2993 | |
2994 | llvm::sort(C&: BlockOrders, Comp: Cmp); |
2995 | |
2996 | unsigned CurBlockRPONum = BBToOrder[&MBB]; |
2997 | |
2998 | // Collect all the incoming DbgValues for this variable, from predecessor |
2999 | // live-out values. |
3000 | SmallVector<InValueT, 8> Values; |
3001 | bool Bail = false; |
3002 | int BackEdgesStart = 0; |
3003 | for (auto *p : BlockOrders) { |
3004 | // If the predecessor isn't in scope / to be explored, we'll never be |
3005 | // able to join any locations. |
3006 | if (!BlocksToExplore.contains(Ptr: p)) { |
3007 | Bail = true; |
3008 | break; |
3009 | } |
3010 | |
3011 |     // All live-outs will have been initialized. |
3012 | DbgValue &OutLoc = *VLOCOutLocs.find(Val: p)->second; |
3013 | |
3014 | // Keep track of where back-edges begin in the Values vector. Relies on |
3015 | // BlockOrders being sorted by RPO. |
3016 | unsigned ThisBBRPONum = BBToOrder[p]; |
3017 | if (ThisBBRPONum < CurBlockRPONum) |
3018 | ++BackEdgesStart; |
3019 | |
3020 | Values.push_back(Elt: std::make_pair(x&: p, y: &OutLoc)); |
3021 | } |
3022 | |
3023 | // If there were no values, or one of the predecessors couldn't have a |
3024 | // value, then give up immediately. It's not safe to produce a live-in |
3025 | // value. Leave as whatever it was before. |
3026 | if (Bail || Values.size() == 0) |
3027 | return false; |
3028 | |
3029 | // All (non-entry) blocks have at least one non-backedge predecessor. |
3030 | // Pick the variable value from the first of these, to compare against |
3031 | // all others. |
3032 | const DbgValue &FirstVal = *Values[0].second; |
3033 | |
3034 | // If the old live-in value is not a PHI then either a) no PHI is needed |
3035 | // here, or b) we eliminated the PHI that was here. If so, we can just |
3036 | // propagate in the first parent's incoming value. |
3037 | if (LiveIn.Kind != DbgValue::VPHI || LiveIn.BlockNo != MBB.getNumber()) { |
3038 | Changed = LiveIn != FirstVal; |
3039 | if (Changed) |
3040 | LiveIn = FirstVal; |
3041 | return Changed; |
3042 | } |
3043 | |
3044 | // Scan for variable values that can never be resolved: if they have |
3045 | // different DIExpressions, different indirectness, or are mixed constants / |
3046 | // non-constants. |
3047 | for (const auto &V : Values) { |
3048 | if (!V.second->Properties.isJoinable(Other: FirstVal.Properties)) |
3049 | return false; |
3050 | if (V.second->Kind == DbgValue::NoVal) |
3051 | return false; |
3052 | if (!V.second->hasJoinableLocOps(Other: FirstVal)) |
3053 | return false; |
3054 | } |
3055 | |
3056 | // Try to eliminate this PHI. Do the incoming values all agree? |
3057 | bool Disagree = false; |
3058 | for (auto &V : Values) { |
3059 | if (*V.second == FirstVal) |
3060 | continue; // No disagreement. |
3061 | |
3062 | // If both values are not equal but have equal non-empty IDs then they refer |
3063 | // to the same value from different sources (e.g. one is VPHI and the other |
3064 | // is Def), which does not cause disagreement. |
3065 | if (V.second->hasIdenticalValidLocOps(Other: FirstVal)) |
3066 | continue; |
3067 | |
3068 | // Eliminate if a backedge feeds a VPHI back into itself. |
3069 | if (V.second->Kind == DbgValue::VPHI && |
3070 | V.second->BlockNo == MBB.getNumber() && |
3071 | // Is this a backedge? |
3072 | std::distance(first: Values.begin(), last: &V) >= BackEdgesStart) |
3073 | continue; |
3074 | |
3075 | Disagree = true; |
3076 | } |
3077 | |
3078 | // No disagreement -> live-through value. |
3079 | if (!Disagree) { |
3080 | Changed = LiveIn != FirstVal; |
3081 | if (Changed) |
3082 | LiveIn = FirstVal; |
3083 | return Changed; |
3084 | } else { |
3085 | // Otherwise use a VPHI. |
3086 | DbgValue VPHI(MBB.getNumber(), FirstVal.Properties, DbgValue::VPHI); |
3087 | Changed = LiveIn != VPHI; |
3088 | if (Changed) |
3089 | LiveIn = VPHI; |
3090 | return Changed; |
3091 | } |
3092 | } |
3093 | |
3094 | void InstrRefBasedLDV::getBlocksForScope( |
3095 | const DILocation *DILoc, |
3096 | SmallPtrSetImpl<const MachineBasicBlock *> &BlocksToExplore, |
3097 | const SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks) { |
3098 | // Get the set of "normal" in-lexical-scope blocks. |
3099 | LS.getMachineBasicBlocks(DL: DILoc, MBBs&: BlocksToExplore); |
3100 | |
3101 | // VarLoc LiveDebugValues tracks variable locations that are defined in |
3102 | // blocks not in scope. This is something we could legitimately ignore, but |
3103 |   // let's allow it for now for the sake of coverage. |
3104 | BlocksToExplore.insert_range(R: AssignBlocks); |
3105 | |
3106 | // Storage for artificial blocks we intend to add to BlocksToExplore. |
3107 | DenseSet<const MachineBasicBlock *> ToAdd; |
3108 | |
3109 | // To avoid needlessly dropping large volumes of variable locations, propagate |
3110 |   // variables through artificial blocks, i.e. those that don't have any |
3111 | // instructions in scope at all. To accurately replicate VarLoc |
3112 | // LiveDebugValues, this means exploring all artificial successors too. |
3113 | // Perform a depth-first-search to enumerate those blocks. |
3114 | for (const auto *MBB : BlocksToExplore) { |
3115 | // Depth-first-search state: each node is a block and which successor |
3116 | // we're currently exploring. |
3117 | SmallVector<std::pair<const MachineBasicBlock *, |
3118 | MachineBasicBlock::const_succ_iterator>, |
3119 | 8> |
3120 | DFS; |
3121 | |
3122 | // Find any artificial successors not already tracked. |
3123 | for (auto *succ : MBB->successors()) { |
3124 | if (BlocksToExplore.count(Ptr: succ)) |
3125 | continue; |
3126 | if (!ArtificialBlocks.count(Ptr: succ)) |
3127 | continue; |
3128 | ToAdd.insert(V: succ); |
3129 | DFS.push_back(Elt: {succ, succ->succ_begin()}); |
3130 | } |
3131 | |
3132 | // Search all those blocks, depth first. |
3133 | while (!DFS.empty()) { |
3134 | const MachineBasicBlock *CurBB = DFS.back().first; |
3135 | MachineBasicBlock::const_succ_iterator &CurSucc = DFS.back().second; |
3136 |       // Walk back if we've explored this block's successors to the end. |
3137 | if (CurSucc == CurBB->succ_end()) { |
3138 | DFS.pop_back(); |
3139 | continue; |
3140 | } |
3141 | |
3142 | // If the current successor is artificial and unexplored, descend into |
3143 | // it. |
3144 | if (!ToAdd.count(V: *CurSucc) && ArtificialBlocks.count(Ptr: *CurSucc)) { |
3145 | ToAdd.insert(V: *CurSucc); |
3146 | DFS.push_back(Elt: {*CurSucc, (*CurSucc)->succ_begin()}); |
3147 | continue; |
3148 | } |
3149 | |
3150 | ++CurSucc; |
3151 | } |
3152 |   } |
3153 | |
3154 | BlocksToExplore.insert_range(R&: ToAdd); |
3155 | } |
3156 | |
3157 | void InstrRefBasedLDV::buildVLocValueMap( |
3158 | const DILocation *DILoc, |
3159 | const SmallSet<DebugVariableID, 4> &VarsWeCareAbout, |
3160 | SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks, LiveInsT &Output, |
3161 | FuncValueTable &MOutLocs, FuncValueTable &MInLocs, |
3162 | SmallVectorImpl<VLocTracker> &AllTheVLocs) { |
3163 | // This method is much like buildMLocValueMap: but focuses on a single |
3164 | // LexicalScope at a time. Pick out a set of blocks and variables that are |
3165 | // to have their value assignments solved, then run our dataflow algorithm |
3166 |   // until a fixed point is reached. |
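     |   // Illustrative shape of the problem: within this scope's blocks, each |
     |   // variable is its own small SSA problem -- assignments are defs, VPHIs are |
     |   // the candidate PHIs, and propagation either finds a machine location for |
     |   // each VPHI or discards it. |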
3167 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
3168 | std::greater<unsigned int>> |
3169 | Worklist, Pending; |
3170 | SmallPtrSet<MachineBasicBlock *, 16> OnWorklist, OnPending; |
3171 | |
3172 | // The set of blocks we'll be examining. |
3173 | SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; |
3174 | |
3175 | // The order in which to examine them (RPO). |
3176 | SmallVector<MachineBasicBlock *, 16> BlockOrders; |
3177 | SmallVector<unsigned, 32> BlockOrderNums; |
3178 | |
3179 | getBlocksForScope(DILoc, BlocksToExplore, AssignBlocks); |
3180 | |
3181 | // Single block scope: not interesting! No propagation at all. Note that |
3182 | // this could probably go above ArtificialBlocks without damage, but |
3183 | // that then produces output differences from original-live-debug-values, |
3184 | // which propagates from a single block into many artificial ones. |
3185 | if (BlocksToExplore.size() == 1) |
3186 | return; |
3187 | |
3188 | // Convert a const set to a non-const set. LexicalScopes |
3189 | // getMachineBasicBlocks returns const MBB pointers, IDF wants mutable ones. |
3190 | // (Neither of them mutate anything). |
3191 | SmallPtrSet<MachineBasicBlock *, 8> MutBlocksToExplore; |
3192 | for (const auto *MBB : BlocksToExplore) |
3193 | MutBlocksToExplore.insert(Ptr: const_cast<MachineBasicBlock *>(MBB)); |
3194 | |
3195 |   // Pick out the relevant blocks' RPO order numbers and sort them, then map |
3196 |   // back to MBB pointers later, to avoid repeated DenseMap queries during |
3197 |   // comparisons. |
3198 | for (const auto *MBB : BlocksToExplore) |
3199 | BlockOrderNums.push_back(Elt: BBToOrder[MBB]); |
3200 | |
3201 | llvm::sort(C&: BlockOrderNums); |
3202 | for (unsigned int I : BlockOrderNums) |
3203 | BlockOrders.push_back(Elt: OrderToBB[I]); |
3204 | BlockOrderNums.clear(); |
3205 | unsigned NumBlocks = BlockOrders.size(); |
3206 | |
3207 | // Allocate some vectors for storing the live ins and live outs. Large. |
3208 | SmallVector<DbgValue, 32> LiveIns, LiveOuts; |
3209 | LiveIns.reserve(N: NumBlocks); |
3210 | LiveOuts.reserve(N: NumBlocks); |
3211 | |
3212 | // Initialize all values to start as NoVals. This signifies "it's live |
3213 | // through, but we don't know what it is". |
3214 | DbgValueProperties EmptyProperties(EmptyExpr, false, false); |
3215 | for (unsigned int I = 0; I < NumBlocks; ++I) { |
3216 | DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal); |
3217 | LiveIns.push_back(Elt: EmptyDbgValue); |
3218 | LiveOuts.push_back(Elt: EmptyDbgValue); |
3219 | } |
3220 | |
3221 | // Produce by-MBB indexes of live-in/live-outs, to ease lookup within |
3222 | // vlocJoin. |
3223 | LiveIdxT LiveOutIdx, LiveInIdx; |
3224 | LiveOutIdx.reserve(NumEntries: NumBlocks); |
3225 | LiveInIdx.reserve(NumEntries: NumBlocks); |
3226 | for (unsigned I = 0; I < NumBlocks; ++I) { |
3227 | LiveOutIdx[BlockOrders[I]] = &LiveOuts[I]; |
3228 | LiveInIdx[BlockOrders[I]] = &LiveIns[I]; |
3229 | } |
3230 | |
3231 | // Loop over each variable and place PHIs for it, then propagate values |
3232 | // between blocks. This keeps the locality of working on one lexical scope at |
3233 | // at time, but avoids re-processing variable values because some other |
3234 |   // a time, but avoids re-processing variable values because some other |
3235 | for (DebugVariableID VarID : VarsWeCareAbout) { |
3236 |     // Re-initialize live-ins and live-outs, to clear the remains of the |
3237 |     // previous variable's live-ins / live-outs. |
3238 | for (unsigned int I = 0; I < NumBlocks; ++I) { |
3239 | DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal); |
3240 | LiveIns[I] = EmptyDbgValue; |
3241 | LiveOuts[I] = EmptyDbgValue; |
3242 | } |
3243 | |
3244 | // Place PHIs for variable values, using the LLVM IDF calculator. |
3245 | // Collect the set of blocks where variables are def'd. |
3246 | SmallPtrSet<MachineBasicBlock *, 32> DefBlocks; |
3247 | for (const MachineBasicBlock *ExpMBB : BlocksToExplore) { |
3248 | auto &TransferFunc = AllTheVLocs[ExpMBB->getNumber()].Vars; |
3249 | if (TransferFunc.contains(Key: VarID)) |
3250 | DefBlocks.insert(Ptr: const_cast<MachineBasicBlock *>(ExpMBB)); |
3251 | } |
3252 | |
3253 | SmallVector<MachineBasicBlock *, 32> PHIBlocks; |
3254 | |
3255 | // Request the set of PHIs we should insert for this variable. If there's |
3256 | // only one value definition, things are very simple. |
3257 | if (DefBlocks.size() == 1) { |
3258 | placePHIsForSingleVarDefinition(InScopeBlocks: MutBlocksToExplore, MBB: *DefBlocks.begin(), |
3259 | AllTheVLocs, Var: VarID, Output); |
3260 | continue; |
3261 | } |
3262 | |
3263 | // Otherwise: we need to place PHIs through SSA and propagate values. |
3264 | BlockPHIPlacement(AllBlocks: MutBlocksToExplore, DefBlocks, PHIBlocks); |
3265 | |
3266 | // Insert PHIs into the per-block live-in tables for this variable. |
3267 | for (MachineBasicBlock *PHIMBB : PHIBlocks) { |
3268 | unsigned BlockNo = PHIMBB->getNumber(); |
3269 | DbgValue *LiveIn = LiveInIdx[PHIMBB]; |
3270 | *LiveIn = DbgValue(BlockNo, EmptyProperties, DbgValue::VPHI); |
3271 | } |
3272 | |
3273 | for (auto *MBB : BlockOrders) { |
3274 | Worklist.push(x: BBToOrder[MBB]); |
3275 | OnWorklist.insert(Ptr: MBB); |
3276 | } |
3277 | |
3278 |     // Iterate over all the blocks we selected, propagating the variable's value. |
3279 |     // This loop does two things: |
3280 |     // * Eliminates un-necessary VPHIs in vlocJoin, |
3281 |     // * Evaluates the block's transfer function (i.e. variable assignments) and |
3282 |     //   stores the result to the block's live-outs. |
3283 | // Always evaluate the transfer function on the first iteration, and when |
3284 | // the live-ins change thereafter. |
3285 | bool FirstTrip = true; |
3286 | while (!Worklist.empty() || !Pending.empty()) { |
3287 | while (!Worklist.empty()) { |
3288 | auto *MBB = OrderToBB[Worklist.top()]; |
3289 | CurBB = MBB->getNumber(); |
3290 | Worklist.pop(); |
3291 | |
3292 | auto LiveInsIt = LiveInIdx.find(Val: MBB); |
3293 | assert(LiveInsIt != LiveInIdx.end()); |
3294 | DbgValue *LiveIn = LiveInsIt->second; |
3295 | |
3296 |         // Join values from predecessors. Updates LiveInIdx, and writes the |
3297 |         // joined value into *LiveIn. |
3298 | bool InLocsChanged = |
3299 | vlocJoin(MBB&: *MBB, VLOCOutLocs&: LiveOutIdx, BlocksToExplore, LiveIn&: *LiveIn); |
3300 | |
3301 | SmallVector<const MachineBasicBlock *, 8> Preds(MBB->predecessors()); |
3302 | |
3303 | // If this block's live-in value is a VPHI, try to pick a machine-value |
3304 | // for it. This makes the machine-value available and propagated |
3305 | // through all blocks by the time value propagation finishes. We can't |
3306 | // do this any earlier as it needs to read the block live-outs. |
3307 | if (LiveIn->Kind == DbgValue::VPHI && LiveIn->BlockNo == (int)CurBB) { |
3308 |           // There's a small possibility that on a preceding path, a VPHI is |
3309 | // eliminated and transitions from VPHI-with-location to |
3310 | // live-through-value. As a result, the selected location of any VPHI |
3311 | // might change, so we need to re-compute it on each iteration. |
3312 | SmallVector<DbgOpID> JoinedOps; |
3313 | |
3314 | if (pickVPHILoc(OutValues&: JoinedOps, MBB: *MBB, LiveOuts: LiveOutIdx, MOutLocs, BlockOrders: Preds)) { |
3315 | bool NewLocPicked = !equal(LRange: LiveIn->getDbgOpIDs(), RRange&: JoinedOps); |
3316 | InLocsChanged |= NewLocPicked; |
3317 | if (NewLocPicked) |
3318 | LiveIn->setDbgOpIDs(JoinedOps); |
3319 | } |
3320 | } |
3321 | |
3322 | if (!InLocsChanged && !FirstTrip) |
3323 | continue; |
3324 | |
3325 | DbgValue *LiveOut = LiveOutIdx[MBB]; |
3326 | bool OLChanged = false; |
3327 | |
3328 | // Do transfer function. |
3329 | auto &VTracker = AllTheVLocs[MBB->getNumber()]; |
3330 | auto TransferIt = VTracker.Vars.find(Key: VarID); |
3331 | if (TransferIt != VTracker.Vars.end()) { |
3332 | // Erase on empty transfer (DBG_VALUE $noreg). |
3333 | if (TransferIt->second.Kind == DbgValue::Undef) { |
3334 | DbgValue NewVal(MBB->getNumber(), EmptyProperties, DbgValue::NoVal); |
3335 | if (*LiveOut != NewVal) { |
3336 | *LiveOut = NewVal; |
3337 | OLChanged = true; |
3338 | } |
3339 | } else { |
3340 | // Insert new variable value; or overwrite. |
3341 | if (*LiveOut != TransferIt->second) { |
3342 | *LiveOut = TransferIt->second; |
3343 | OLChanged = true; |
3344 | } |
3345 | } |
3346 | } else { |
3347 | // Just copy live-ins to live-outs, for anything not transferred. |
3348 | if (*LiveOut != *LiveIn) { |
3349 | *LiveOut = *LiveIn; |
3350 | OLChanged = true; |
3351 | } |
3352 | } |
3353 | |
3354 | // If no live-out value changed, there's no need to explore further. |
3355 | if (!OLChanged) |
3356 | continue; |
3357 | |
3358 | // We should visit all successors. Ensure we'll visit any non-backedge |
3359 | // successors during this dataflow iteration; book backedge successors |
3360 | // to be visited next time around. |
3361 | for (auto *s : MBB->successors()) { |
3362 | // Ignore out of scope / not-to-be-explored successors. |
3363 | if (!LiveInIdx.contains(Val: s)) |
3364 | continue; |
3365 | |
3366 | unsigned Order = BBToOrder[s]; |
3367 | if (Order > BBToOrder[MBB]) { |
3368 | if (OnWorklist.insert(Ptr: s).second) |
3369 | Worklist.push(x: Order); |
3370 | } else if (OnPending.insert(Ptr: s).second && (FirstTrip || OLChanged)) { |
3371 | Pending.push(x: Order); |
3372 | } |
3373 | } |
3374 | } |
3375 | Worklist.swap(pq&: Pending); |
3376 | std::swap(LHS&: OnWorklist, RHS&: OnPending); |
3377 | OnPending.clear(); |
3378 | assert(Pending.empty()); |
3379 | FirstTrip = false; |
3380 | } |
3381 | |
3382 | // Save live-ins to output vector. Ignore any that are still marked as being |
3383 | // VPHIs with no location -- those are variables that we know the value of, |
3384 | // but are not actually available in the register file. |
3385 | for (auto *MBB : BlockOrders) { |
3386 | DbgValue *BlockLiveIn = LiveInIdx[MBB]; |
3387 | if (BlockLiveIn->Kind == DbgValue::NoVal) |
3388 | continue; |
3389 | if (BlockLiveIn->isUnjoinedPHI()) |
3390 | continue; |
3391 | if (BlockLiveIn->Kind == DbgValue::VPHI) |
3392 | BlockLiveIn->Kind = DbgValue::Def; |
3393 | [[maybe_unused]] auto &[Var, DILoc] = DVMap.lookupDVID(ID: VarID); |
3394 | assert(BlockLiveIn->Properties.DIExpr->getFragmentInfo() == |
3395 | Var.getFragment() && |
3396 | "Fragment info missing during value prop" ); |
3397 | Output[MBB->getNumber()].push_back(Elt: std::make_pair(x&: VarID, y&: *BlockLiveIn)); |
3398 | } |
3399 | } // Per-variable loop. |
3400 | |
3401 | BlockOrders.clear(); |
3402 | BlocksToExplore.clear(); |
3403 | } |
3404 | |
3405 | void InstrRefBasedLDV::placePHIsForSingleVarDefinition( |
3406 | const SmallPtrSetImpl<MachineBasicBlock *> &InScopeBlocks, |
3407 | MachineBasicBlock *AssignMBB, SmallVectorImpl<VLocTracker> &AllTheVLocs, |
3408 | DebugVariableID VarID, LiveInsT &Output) { |
3409 |   // If there is a single definition of the variable, then working out its |
3410 | // value everywhere is very simple: it's every block dominated by the |
3411 | // definition. At the dominance frontier, the usual algorithm would: |
3412 | // * Place PHIs, |
3413 | // * Propagate values into them, |
3414 | // * Find there's no incoming variable value from the other incoming branches |
3415 | // of the dominance frontier, |
3416 | // * Specify there's no variable value in blocks past the frontier. |
3417 | // This is a common case, hence it's worth special-casing it. |
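     |   // Illustrative example: a variable assigned exactly once near the top of a |
     |   // function is simply live-in to every in-scope block that the assignment |
     |   // block properly dominates; no PHIs or dataflow iterations are needed. |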
3418 | |
3419 | // Pick out the variables value from the block transfer function. |
3420 | VLocTracker &VLocs = AllTheVLocs[AssignMBB->getNumber()]; |
3421 | auto ValueIt = VLocs.Vars.find(Key: VarID); |
3422 | const DbgValue &Value = ValueIt->second; |
3423 | |
3424 | // If it's an explicit assignment of "undef", that means there is no location |
3425 | // anyway, anywhere. |
3426 | if (Value.Kind == DbgValue::Undef) |
3427 | return; |
3428 | |
3429 |   // Assign the variable value on entry to each dominated block that's in |
3430 |   // scope. Skip the definition block -- it's assigned the variable value in |
3431 |   // the middle of the block somewhere. |
3432 | for (auto *ScopeBlock : InScopeBlocks) { |
3433 | if (!DomTree->properlyDominates(A: AssignMBB, B: ScopeBlock)) |
3434 | continue; |
3435 | |
3436 | Output[ScopeBlock->getNumber()].push_back(Elt: {VarID, Value}); |
3437 | } |
3438 | |
3439 | // All blocks that aren't dominated have no live-in value, thus no variable |
3440 | // value will be given to them. |
3441 | } |
3442 | |
3443 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
3444 | void InstrRefBasedLDV::dump_mloc_transfer( |
3445 | const MLocTransferMap &mloc_transfer) const { |
3446 | for (const auto &P : mloc_transfer) { |
3447 | std::string foo = MTracker->LocIdxToName(P.first); |
3448 | std::string bar = MTracker->IDAsString(P.second); |
3449 | dbgs() << "Loc " << foo << " --> " << bar << "\n" ; |
3450 | } |
3451 | } |
3452 | #endif |
3453 | |
3454 | void InstrRefBasedLDV::initialSetup(MachineFunction &MF) { |
3455 | // Build some useful data structures. |
3456 | |
3457 | LLVMContext &Context = MF.getFunction().getContext(); |
3458 | EmptyExpr = DIExpression::get(Context, Elements: {}); |
3459 | |
3460 | auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool { |
3461 | if (const DebugLoc &DL = MI.getDebugLoc()) |
3462 | return DL.getLine() != 0; |
3463 | return false; |
3464 | }; |
3465 | |
3466 |   // Collect a set of all the artificial blocks. Collect the size too; ilist |
3467 |   // size calls are O(n). |
3468 | unsigned int Size = 0; |
3469 | for (auto &MBB : MF) { |
3470 | ++Size; |
3471 | if (none_of(Range: MBB.instrs(), P: hasNonArtificialLocation)) |
3472 | ArtificialBlocks.insert(Ptr: &MBB); |
3473 | } |
3474 | |
3475 | // Compute mappings of block <=> RPO order. |
3476 | ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); |
3477 | unsigned int RPONumber = 0; |
3478 | OrderToBB.reserve(N: Size); |
3479 | BBToOrder.reserve(NumEntries: Size); |
3480 | BBNumToRPO.reserve(NumEntries: Size); |
3481 | auto processMBB = [&](MachineBasicBlock *MBB) { |
3482 | OrderToBB.push_back(Elt: MBB); |
3483 | BBToOrder[MBB] = RPONumber; |
3484 | BBNumToRPO[MBB->getNumber()] = RPONumber; |
3485 | ++RPONumber; |
3486 | }; |
3487 | for (MachineBasicBlock *MBB : RPOT) |
3488 | processMBB(MBB); |
3489 | for (MachineBasicBlock &MBB : MF) |
3490 | if (!BBToOrder.contains(Val: &MBB)) |
3491 | processMBB(&MBB); |
3492 | |
3493 | // Order value substitutions by their "source" operand pair, for quick lookup. |
3494 | llvm::sort(C&: MF.DebugValueSubstitutions); |
3495 | |
3496 | #ifdef EXPENSIVE_CHECKS |
3497 | // As an expensive check, test whether there are any duplicate substitution |
3498 | // sources in the collection. |
3499 | if (MF.DebugValueSubstitutions.size() > 2) { |
3500 | for (auto It = MF.DebugValueSubstitutions.begin(); |
3501 | It != std::prev(MF.DebugValueSubstitutions.end()); ++It) { |
3502 | assert(It->Src != std::next(It)->Src && "Duplicate variable location " |
3503 | "substitution seen" ); |
3504 | } |
3505 | } |
3506 | #endif |
3507 | } |
3508 | |
3509 | // Produce an "ejection map" for blocks, i.e., for each block, the last lexical |
3510 | // scope (by DFS-out number) that uses it. Once the depth-first exploration |
3511 | // passes that scope, the block can be processed and any tracking info freed. |
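     | // (Illustrative use: depthFirstVLocAndEmit compares each scope's DFS-out number |
     | // against this map as it retreats out of the scope, and ejects -- emits |
     | // DBG_VALUEs for, then frees -- any block whose entry matches.) |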
3512 | void InstrRefBasedLDV::makeDepthFirstEjectionMap( |
3513 | SmallVectorImpl<unsigned> &EjectionMap, |
3514 | const ScopeToDILocT &ScopeToDILocation, |
3515 | ScopeToAssignBlocksT &ScopeToAssignBlocks) { |
3516 | SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; |
3517 | SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack; |
3518 | auto *TopScope = LS.getCurrentFunctionScope(); |
3519 | |
3520 | // Unlike lexical scope explorers, we explore in reverse order, to find the |
3521 | // "last" lexical scope used for each block early. |
3522 | WorkStack.push_back(Elt: {TopScope, TopScope->getChildren().size() - 1}); |
3523 | |
3524 | while (!WorkStack.empty()) { |
3525 | auto &ScopePosition = WorkStack.back(); |
3526 | LexicalScope *WS = ScopePosition.first; |
3527 | ssize_t ChildNum = ScopePosition.second--; |
3528 | |
3529 | const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren(); |
3530 | if (ChildNum >= 0) { |
3531 |       // If ChildNum is non-negative, there are remaining children to explore. |
3532 | // Push the child and its children-count onto the stack. |
3533 | auto &ChildScope = Children[ChildNum]; |
3534 | WorkStack.push_back( |
3535 | Elt: std::make_pair(x: ChildScope, y: ChildScope->getChildren().size() - 1)); |
3536 | } else { |
3537 | WorkStack.pop_back(); |
3538 | |
3539 | // We've explored all children and any later blocks: examine all blocks |
3540 | // in our scope. If they haven't yet had an ejection number set, then |
3541 | // this scope will be the last to use that block. |
3542 | auto DILocationIt = ScopeToDILocation.find(Val: WS); |
3543 | if (DILocationIt != ScopeToDILocation.end()) { |
3544 | getBlocksForScope(DILoc: DILocationIt->second, BlocksToExplore, |
3545 | AssignBlocks: ScopeToAssignBlocks.find(Val: WS)->second); |
3546 | for (const auto *MBB : BlocksToExplore) { |
3547 | unsigned BBNum = MBB->getNumber(); |
3548 | if (EjectionMap[BBNum] == 0) |
3549 | EjectionMap[BBNum] = WS->getDFSOut(); |
3550 | } |
3551 | |
3552 | BlocksToExplore.clear(); |
3553 | } |
3554 | } |
3555 | } |
3556 | } |
3557 | |
3558 | bool InstrRefBasedLDV::depthFirstVLocAndEmit( |
3559 | unsigned MaxNumBlocks, const ScopeToDILocT &ScopeToDILocation, |
3560 | const ScopeToVarsT &ScopeToVars, ScopeToAssignBlocksT &ScopeToAssignBlocks, |
3561 | LiveInsT &Output, FuncValueTable &MOutLocs, FuncValueTable &MInLocs, |
3562 | SmallVectorImpl<VLocTracker> &AllTheVLocs, MachineFunction &MF, |
3563 | bool ShouldEmitDebugEntryValues) { |
3564 | TTracker = new TransferTracker(TII, MTracker, MF, DVMap, *TRI, |
3565 | CalleeSavedRegs, ShouldEmitDebugEntryValues); |
3566 | unsigned NumLocs = MTracker->getNumLocs(); |
3567 | VTracker = nullptr; |
3568 | |
3569 | // No scopes? No variable locations. |
3570 | if (!LS.getCurrentFunctionScope()) |
3571 | return false; |
3572 | |
3573 | // Build map from block number to the last scope that uses the block. |
3574 | SmallVector<unsigned, 16> EjectionMap; |
3575 | EjectionMap.resize(N: MaxNumBlocks, NV: 0); |
3576 | makeDepthFirstEjectionMap(EjectionMap, ScopeToDILocation, |
3577 | ScopeToAssignBlocks); |
3578 | |
3579 | // Helper lambda for ejecting a block -- if nothing is going to use the block, |
3580 | // we can translate the variable location information into DBG_VALUEs and then |
3581 | // free all of InstrRefBasedLDV's data structures. |
3582 | auto EjectBlock = [&](MachineBasicBlock &MBB) -> void { |
3583 | unsigned BBNum = MBB.getNumber(); |
3584 | AllTheVLocs[BBNum].clear(); |
3585 | |
3586 | // Prime the transfer-tracker, and then step through all the block |
3587 | // instructions, installing transfers. |
3588 | MTracker->reset(); |
3589 | MTracker->loadFromArray(Locs&: MInLocs[MBB], NewCurBB: BBNum); |
3590 | TTracker->loadInlocs(MBB, MLocs&: MInLocs[MBB], DbgOpStore, VLocs: Output[BBNum], NumLocs); |
3591 | |
3592 | CurBB = BBNum; |
3593 | CurInst = 1; |
3594 | for (auto &MI : MBB) { |
3595 | process(MI, MLiveOuts: &MOutLocs, MLiveIns: &MInLocs); |
3596 | TTracker->checkInstForNewValues(Inst: CurInst, pos: MI.getIterator()); |
3597 | ++CurInst; |
3598 | } |
3599 | |
3600 | // Free machine-location tables for this block. |
3601 | MInLocs.ejectTableForBlock(MBB); |
3602 | MOutLocs.ejectTableForBlock(MBB); |
3603 | // We don't need live-in variable values for this block either. |
3604 | Output[BBNum].clear(); |
3605 | AllTheVLocs[BBNum].clear(); |
3606 | }; |
3607 | |
3608 | SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; |
3609 | SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack; |
3610 | WorkStack.push_back(Elt: {LS.getCurrentFunctionScope(), 0}); |
3611 | unsigned HighestDFSIn = 0; |
3612 | |
3613 | // Proceed to explore in depth first order. |
3614 | while (!WorkStack.empty()) { |
3615 | auto &ScopePosition = WorkStack.back(); |
3616 | LexicalScope *WS = ScopePosition.first; |
3617 | ssize_t ChildNum = ScopePosition.second++; |
3618 | |
3619 |     // We observe scopes with children twice here, once descending in, once |
3620 | // ascending out of the scope nest. Use HighestDFSIn as a ratchet to ensure |
3621 | // we don't process a scope twice. Additionally, ignore scopes that don't |
3622 | // have a DILocation -- by proxy, this means we never tracked any variable |
3623 | // assignments in that scope. |
3624 | auto DILocIt = ScopeToDILocation.find(Val: WS); |
3625 | if (HighestDFSIn <= WS->getDFSIn() && DILocIt != ScopeToDILocation.end()) { |
3626 | const DILocation *DILoc = DILocIt->second; |
3627 | auto &VarsWeCareAbout = ScopeToVars.find(Val: WS)->second; |
3628 | auto &BlocksInScope = ScopeToAssignBlocks.find(Val: WS)->second; |
3629 | |
3630 | buildVLocValueMap(DILoc, VarsWeCareAbout, AssignBlocks&: BlocksInScope, Output, MOutLocs, |
3631 | MInLocs, AllTheVLocs); |
3632 | } |
3633 | |
3634 | HighestDFSIn = std::max(a: HighestDFSIn, b: WS->getDFSIn()); |
3635 | |
3636 | // Descend into any scope nests. |
3637 | const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren(); |
3638 | if (ChildNum < (ssize_t)Children.size()) { |
3639 | // There are children to explore -- push onto stack and continue. |
3640 | auto &ChildScope = Children[ChildNum]; |
3641 | WorkStack.push_back(Elt: std::make_pair(x: ChildScope, y: 0)); |
3642 | } else { |
3643 | WorkStack.pop_back(); |
3644 | |
3645 | // We've explored a leaf, or have explored all the children of a scope. |
3646 | // Try to eject any blocks where this is the last scope it's relevant to. |
3647 | auto DILocationIt = ScopeToDILocation.find(Val: WS); |
3648 | if (DILocationIt == ScopeToDILocation.end()) |
3649 | continue; |
3650 | |
3651 | getBlocksForScope(DILoc: DILocationIt->second, BlocksToExplore, |
3652 | AssignBlocks: ScopeToAssignBlocks.find(Val: WS)->second); |
3653 | for (const auto *MBB : BlocksToExplore) |
3654 | if (WS->getDFSOut() == EjectionMap[MBB->getNumber()]) |
3655 | EjectBlock(const_cast<MachineBasicBlock &>(*MBB)); |
3656 | |
3657 | BlocksToExplore.clear(); |
3658 | } |
3659 | } |
3660 | |
3661 | // Some artificial blocks may not have been ejected, meaning they're not |
3662 | // connected to an actual legitimate scope. This can technically happen |
3663 | // with things like the entry block. In theory, we shouldn't need to do |
3664 | // anything for such out-of-scope blocks, but for the sake of being similar |
3665 | // to VarLocBasedLDV, eject these too. |
3666 | for (auto *MBB : ArtificialBlocks) |
3667 | if (MInLocs.hasTableFor(MBB&: *MBB)) |
3668 | EjectBlock(*MBB); |
3669 | |
3670 | return emitTransfers(); |
3671 | } |
3672 | |
3673 | bool InstrRefBasedLDV::emitTransfers() { |
3674 | // Go through all the transfers recorded in the TransferTracker -- this is |
3675 | // both the live-ins to a block, and any movements of values that happen |
3676 | // in the middle. |
3677 | for (auto &P : TTracker->Transfers) { |
3678 |     // We have to insert DBG_VALUEs in a consistent order; otherwise they |
3679 |     // appear in DWARF in different orders. Use the order that they appear |
3680 | // when walking through each block / each instruction, stored in |
3681 | // DVMap. |
3682 | llvm::sort(C&: P.Insts, Comp: llvm::less_first()); |
3683 | |
3684 | // Insert either before or after the designated point... |
3685 | if (P.MBB) { |
3686 | MachineBasicBlock &MBB = *P.MBB; |
3687 | for (const auto &Pair : P.Insts) |
3688 | MBB.insert(I: P.Pos, M: Pair.second); |
3689 | } else { |
3690 | // Terminators, like tail calls, can clobber things. Don't try and place |
3691 | // transfers after them. |
3692 | if (P.Pos->isTerminator()) |
3693 | continue; |
3694 | |
3695 | MachineBasicBlock &MBB = *P.Pos->getParent(); |
3696 | for (const auto &Pair : P.Insts) |
3697 | MBB.insertAfterBundle(I: P.Pos, MI: Pair.second); |
3698 | } |
3699 | } |
3700 | |
3701 | return TTracker->Transfers.size() != 0; |
3702 | } |
3703 | |
3704 | /// Calculate the liveness information for the given machine function and |
3705 | /// extend ranges across basic blocks. |
3706 | bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF, |
3707 | MachineDominatorTree *DomTree, |
3708 | bool ShouldEmitDebugEntryValues, |
3709 | unsigned InputBBLimit, |
3710 | unsigned InputDbgValLimit) { |
3711 | // No subprogram means this function contains no debuginfo. |
3712 | if (!MF.getFunction().getSubprogram()) |
3713 | return false; |
3714 | |
3715 | LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n" ); |
3716 | |
3717 | this->DomTree = DomTree; |
3718 | TRI = MF.getSubtarget().getRegisterInfo(); |
3719 | MRI = &MF.getRegInfo(); |
3720 | TII = MF.getSubtarget().getInstrInfo(); |
3721 | TFI = MF.getSubtarget().getFrameLowering(); |
3722 | TFI->getCalleeSaves(MF, SavedRegs&: CalleeSavedRegs); |
3723 | MFI = &MF.getFrameInfo(); |
3724 | LS.initialize(MF); |
3725 | |
3726 | const auto &STI = MF.getSubtarget(); |
3727 | AdjustsStackInCalls = MFI->adjustsStack() && |
3728 | STI.getFrameLowering()->stackProbeFunctionModifiesSP(); |
3729 | if (AdjustsStackInCalls) |
3730 | StackProbeSymbolName = STI.getTargetLowering()->getStackProbeSymbolName(MF); |
3731 | |
3732 | MTracker = |
3733 | new MLocTracker(MF, *TII, *TRI, *MF.getSubtarget().getTargetLowering()); |
3734 | VTracker = nullptr; |
3735 | TTracker = nullptr; |
3736 | |
3737 | SmallVector<MLocTransferMap, 32> MLocTransfer; |
3738 | SmallVector<VLocTracker, 8> vlocs; |
3739 | LiveInsT SavedLiveIns; |
3740 | |
3741 | int MaxNumBlocks = -1; |
3742 | for (auto &MBB : MF) |
3743 | MaxNumBlocks = std::max(a: MBB.getNumber(), b: MaxNumBlocks); |
3744 | assert(MaxNumBlocks >= 0); |
3745 | ++MaxNumBlocks; |
3746 | |
3747 | initialSetup(MF); |
3748 | |
3749 | MLocTransfer.resize(N: MaxNumBlocks); |
3750 | vlocs.resize(N: MaxNumBlocks, NV: VLocTracker(DVMap, OverlapFragments, EmptyExpr)); |
3751 | SavedLiveIns.resize(N: MaxNumBlocks); |
3752 | |
3753 | produceMLocTransferFunction(MF, MLocTransfer, MaxNumBlocks); |
3754 | |
3755 | // Allocate and initialize two array-of-arrays for the live-in and live-out |
3756 | // machine values. The outer dimension is the block number; while the inner |
3757 | // dimension is a LocIdx from MLocTracker. |
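     |   // (Illustrative reading: MInLocs[MBB][L] holds the ValueIDNum that is live |
     |   // into block MBB at machine location L; MOutLocs holds the corresponding |
     |   // live-out values.) |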
3758 | unsigned NumLocs = MTracker->getNumLocs(); |
3759 | FuncValueTable MOutLocs(MaxNumBlocks, NumLocs); |
3760 | FuncValueTable MInLocs(MaxNumBlocks, NumLocs); |
3761 | |
3762 | // Solve the machine value dataflow problem using the MLocTransfer function, |
3763 | // storing the computed live-ins / live-outs into the array-of-arrays. We use |
3764 | // both live-ins and live-outs for decision making in the variable value |
3765 | // dataflow problem. |
3766 | buildMLocValueMap(MF, MInLocs, MOutLocs, MLocTransfer); |
3767 | |
3768 | // Patch up debug phi numbers, turning unknown block-live-in values into |
3769 | // either live-through machine values, or PHIs. |
3770 | for (auto &DBG_PHI : DebugPHINumToValue) { |
3771 | // Identify unresolved block-live-ins. |
3772 | if (!DBG_PHI.ValueRead) |
3773 | continue; |
3774 | |
3775 | ValueIDNum &Num = *DBG_PHI.ValueRead; |
3776 | if (!Num.isPHI()) |
3777 | continue; |
3778 | |
3779 | unsigned BlockNo = Num.getBlock(); |
3780 | LocIdx LocNo = Num.getLoc(); |
3781 | ValueIDNum ResolvedValue = MInLocs[BlockNo][LocNo.asU64()]; |
3782 | // If there is no resolved value for this live-in then it is not directly |
3783 | // reachable from the entry block -- model it as a PHI on entry to this |
3784 | // block, which means we leave the ValueIDNum unchanged. |
3785 | if (ResolvedValue != ValueIDNum::EmptyValue) |
3786 | Num = ResolvedValue; |
3787 | } |
  // Later, we'll be looking up ranges of instruction numbers.
  llvm::sort(DebugPHINumToValue);

  // Walk back through each block / instruction, collecting DBG_VALUE
  // instructions and recording what machine value their operands refer to.
  for (MachineBasicBlock *MBB : OrderToBB) {
    CurBB = MBB->getNumber();
    VTracker = &vlocs[CurBB];
    VTracker->MBB = MBB;
    MTracker->loadFromArray(MInLocs[*MBB], CurBB);
    CurInst = 1;
    for (auto &MI : *MBB) {
      process(MI, &MOutLocs, &MInLocs);
      ++CurInst;
    }
    MTracker->reset();
  }

  // Map from one LexicalScope to all the variables in that scope.
  ScopeToVarsT ScopeToVars;

  // Map from one lexical scope to all blocks where assignments happen for
  // that scope.
  ScopeToAssignBlocksT ScopeToAssignBlocks;

  // Store map of DILocations that describe scopes.
  ScopeToDILocT ScopeToDILocation;

  // To mirror old LiveDebugValues, enumerate variables in RPOT order. Otherwise
  // the order is unimportant; it just has to be stable.
  unsigned VarAssignCount = 0;
  for (MachineBasicBlock *MBB : OrderToBB) {
    auto *VTracker = &vlocs[MBB->getNumber()];
    // Collect each variable with a DBG_VALUE in this block.
    for (auto &idx : VTracker->Vars) {
      DebugVariableID VarID = idx.first;
      const DILocation *ScopeLoc = VTracker->Scopes[VarID];
      assert(ScopeLoc != nullptr);
      auto *Scope = LS.findLexicalScope(ScopeLoc);

      // No insts in scope -> shouldn't have been recorded.
      assert(Scope != nullptr);

      ScopeToVars[Scope].insert(VarID);
      ScopeToAssignBlocks[Scope].insert(VTracker->MBB);
      ScopeToDILocation[Scope] = ScopeLoc;
      ++VarAssignCount;
    }
  }
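  // These three maps parameterise the per-scope variable-value problems that
  // depthFirstVLocAndEmit solves below; only scopes that actually contain
  // assignments appear in them.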

  bool Changed = false;

  // If we have an extremely large number of variable assignments and blocks,
  // bail out at this point. We've already burnt some time doing analysis,
  // but we should cut our losses.
  if ((unsigned)MaxNumBlocks > InputBBLimit &&
      VarAssignCount > InputDbgValLimit) {
    LLVM_DEBUG(dbgs() << "Disabling InstrRefBasedLDV: " << MF.getName()
                      << " has " << MaxNumBlocks << " basic blocks and "
                      << VarAssignCount
                      << " variable assignments, exceeding limits.\n");
  } else {
    // Solve the variable value problem and emit locations into blocks, scope
    // by scope, using a depth-first search over the lexical scope tree.
    Changed = depthFirstVLocAndEmit(
        MaxNumBlocks, ScopeToDILocation, ScopeToVars, ScopeToAssignBlocks,
        SavedLiveIns, MOutLocs, MInLocs, vlocs, MF, ShouldEmitDebugEntryValues);
  }

  delete MTracker;
  delete TTracker;
  MTracker = nullptr;
  VTracker = nullptr;
  TTracker = nullptr;

  ArtificialBlocks.clear();
  OrderToBB.clear();
  BBToOrder.clear();
  BBNumToRPO.clear();
  DebugInstrNumToInstr.clear();
  DebugPHINumToValue.clear();
  OverlapFragments.clear();
  SeenFragments.clear();
  SeenDbgPHIs.clear();
  DbgOpStore.clear();
  DVMap.clear();

  return Changed;
}

LDVImpl *llvm::makeInstrRefBasedLiveDebugValues() {
  return new InstrRefBasedLDV();
}

namespace {
class LDVSSABlock;
class LDVSSAUpdater;

// Pick a type to identify incoming block values as we construct SSA. We
// can't use anything more robust than an integer unfortunately, as SSAUpdater
// expects to zero-initialize the type.
typedef uint64_t BlockValueNum;
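// (These integers are ValueIDNums in their uint64_t encoding -- see the
// ValueIDNum::asU64 / ValueIDNum::fromU64 conversions used below.)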

/// Represents an SSA PHI node for the SSA updater class. Contains the block
/// this PHI is in, the value number it would have, and the expected incoming
/// values from parent blocks.
class LDVSSAPhi {
public:
  SmallVector<std::pair<LDVSSABlock *, BlockValueNum>, 4> IncomingValues;
  LDVSSABlock *ParentBlock;
  BlockValueNum PHIValNum;
  LDVSSAPhi(BlockValueNum PHIValNum, LDVSSABlock *ParentBlock)
      : ParentBlock(ParentBlock), PHIValNum(PHIValNum) {}

  LDVSSABlock *getParent() { return ParentBlock; }
};

/// Thin wrapper around a block predecessor iterator. The only difference from
/// a normal block iterator is that it dereferences to an LDVSSABlock.
class LDVSSABlockIterator {
public:
  MachineBasicBlock::pred_iterator PredIt;
  LDVSSAUpdater &Updater;

  LDVSSABlockIterator(MachineBasicBlock::pred_iterator PredIt,
                      LDVSSAUpdater &Updater)
      : PredIt(PredIt), Updater(Updater) {}

  bool operator!=(const LDVSSABlockIterator &OtherIt) const {
    return OtherIt.PredIt != PredIt;
  }

  LDVSSABlockIterator &operator++() {
    ++PredIt;
    return *this;
  }

  LDVSSABlock *operator*();
};

/// Thin wrapper around a block for SSA Updater interface. Necessary because
/// we need to track the PHI value(s) that we may have observed as necessary
/// in this block.
class LDVSSABlock {
public:
  MachineBasicBlock &BB;
  LDVSSAUpdater &Updater;
  using PHIListT = SmallVector<LDVSSAPhi, 1>;
  /// List of PHIs in this block. There should only ever be one.
  PHIListT PHIList;

  LDVSSABlock(MachineBasicBlock &BB, LDVSSAUpdater &Updater)
      : BB(BB), Updater(Updater) {}

  LDVSSABlockIterator succ_begin() {
    return LDVSSABlockIterator(BB.succ_begin(), Updater);
  }

  LDVSSABlockIterator succ_end() {
    return LDVSSABlockIterator(BB.succ_end(), Updater);
  }

  /// SSAUpdater has requested a PHI: create that within this block record.
  LDVSSAPhi *newPHI(BlockValueNum Value) {
    PHIList.emplace_back(Value, this);
    return &PHIList.back();
  }

  /// SSAUpdater wishes to know what PHIs already exist in this block.
  PHIListT &phis() { return PHIList; }
};

/// Utility class for the SSAUpdater interface: tracks blocks, PHIs and values
/// while SSAUpdater is exploring the CFG. It's passed as a handle / baton to
/// SSAUpdaterTraits<LDVSSAUpdater>.
class LDVSSAUpdater {
public:
  /// Map of value numbers to PHI records.
  DenseMap<BlockValueNum, LDVSSAPhi *> PHIs;
  /// Map of which blocks generate poison values -- blocks that are not
  /// dominated by any Def.
  DenseMap<MachineBasicBlock *, BlockValueNum> PoisonMap;
  /// Map of machine blocks to our own records of them.
  DenseMap<MachineBasicBlock *, LDVSSABlock *> BlockMap;
  /// Machine location where any PHI must occur.
  LocIdx Loc;
  /// Table of live-in machine value numbers for blocks / locations.
  const FuncValueTable &MLiveIns;

  LDVSSAUpdater(LocIdx L, const FuncValueTable &MLiveIns)
      : Loc(L), MLiveIns(MLiveIns) {}

  void reset() {
    for (auto &Block : BlockMap)
      delete Block.second;

    PHIs.clear();
    PoisonMap.clear();
    BlockMap.clear();
  }

  ~LDVSSAUpdater() { reset(); }

  /// For a given MBB, create a wrapper block for it. Stores it in the
  /// LDVSSAUpdater block map.
  LDVSSABlock *getSSALDVBlock(MachineBasicBlock *BB) {
    auto [It, Inserted] = BlockMap.try_emplace(BB);
    if (Inserted)
      It->second = new LDVSSABlock(*BB, *this);
    return It->second;
  }

  /// Find the live-in value number for the given block. Looks up the value at
  /// the PHI location on entry.
  BlockValueNum getValue(LDVSSABlock *LDVBB) {
    return MLiveIns[LDVBB->BB][Loc.asU64()].asU64();
  }
};
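
// One LDVSSAUpdater is created per resolveDbgPHIsImpl query below, with Loc
// fixed to the single location that all of that query's DBG_PHIs read.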

LDVSSABlock *LDVSSABlockIterator::operator*() {
  return Updater.getSSALDVBlock(*PredIt);
}

#ifndef NDEBUG

raw_ostream &operator<<(raw_ostream &out, const LDVSSAPhi &PHI) {
  out << "SSALDVPHI " << PHI.PHIValNum;
  return out;
}

#endif

} // namespace

namespace llvm {

/// Template specialization to give SSAUpdater access to CFG and value
/// information. SSAUpdater calls methods in these traits, passing in the
/// LDVSSAUpdater object, to learn about blocks and the values they define.
/// It also provides methods to create PHI nodes and track them.
template <> class SSAUpdaterTraits<LDVSSAUpdater> {
public:
  using BlkT = LDVSSABlock;
  using ValT = BlockValueNum;
  using PhiT = LDVSSAPhi;
  using BlkSucc_iterator = LDVSSABlockIterator;

  // Methods to access block successors -- dereferencing to our wrapper class.
  static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
  static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }

  /// Iterator for PHI operands.
  class PHI_iterator {
  private:
    LDVSSAPhi *PHI;
    unsigned Idx;

  public:
    explicit PHI_iterator(LDVSSAPhi *P) // begin iterator
        : PHI(P), Idx(0) {}
    PHI_iterator(LDVSSAPhi *P, bool) // end iterator
        : PHI(P), Idx(PHI->IncomingValues.size()) {}

    PHI_iterator &operator++() {
      Idx++;
      return *this;
    }
    bool operator==(const PHI_iterator &X) const { return Idx == X.Idx; }
    bool operator!=(const PHI_iterator &X) const { return !operator==(X); }

    BlockValueNum getIncomingValue() { return PHI->IncomingValues[Idx].second; }

    LDVSSABlock *getIncomingBlock() { return PHI->IncomingValues[Idx].first; }
  };

  static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }

  static inline PHI_iterator PHI_end(PhiT *PHI) {
    return PHI_iterator(PHI, true);
  }

  /// FindPredecessorBlocks - Put the predecessors of BB into the Preds
  /// vector.
  static void FindPredecessorBlocks(LDVSSABlock *BB,
                                    SmallVectorImpl<LDVSSABlock *> *Preds) {
    for (MachineBasicBlock *Pred : BB->BB.predecessors())
      Preds->push_back(BB->Updater.getSSALDVBlock(Pred));
  }

  /// GetPoisonVal - Normally creates an IMPLICIT_DEF instruction with a new
  /// register. For LiveDebugValues, represents a block identified as not having
  /// any DBG_PHI predecessors.
  static BlockValueNum GetPoisonVal(LDVSSABlock *BB, LDVSSAUpdater *Updater) {
    // Create a value number for this block -- it needs to be unique and in the
    // "poison" collection, so that we know it's not real. Use a number
    // representing a PHI into this block.
    BlockValueNum Num = ValueIDNum(BB->BB.getNumber(), 0, Updater->Loc).asU64();
    Updater->PoisonMap[&BB->BB] = Num;
    return Num;
  }
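  // (The number deliberately matches the block's live-in PHI value number at
  //  Loc; what marks it as poison is membership of PoisonMap, which the PHI
  //  validation in resolveDbgPHIsImpl checks.)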

  /// CreateEmptyPHI - Create a (representation of a) PHI in the given block.
  /// SSAUpdater will populate it with information about incoming values. The
  /// value number of this PHI is whatever the machine value number problem
  /// solution determined it to be. This includes non-phi values if SSAUpdater
  /// tries to create a PHI where the incoming values are identical.
  static BlockValueNum CreateEmptyPHI(LDVSSABlock *BB, unsigned NumPreds,
                                      LDVSSAUpdater *Updater) {
    BlockValueNum PHIValNum = Updater->getValue(BB);
    LDVSSAPhi *PHI = BB->newPHI(PHIValNum);
    Updater->PHIs[PHIValNum] = PHI;
    return PHIValNum;
  }

  /// AddPHIOperand - Add the specified value as an operand of the PHI for
  /// the specified predecessor block.
  static void AddPHIOperand(LDVSSAPhi *PHI, BlockValueNum Val,
                            LDVSSABlock *Pred) {
    PHI->IncomingValues.push_back(std::make_pair(Pred, Val));
  }

  /// ValueIsPHI - Check if the instruction that defines the specified value
  /// is a PHI instruction.
  static LDVSSAPhi *ValueIsPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
    return Updater->PHIs.lookup(Val);
  }

  /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
  /// operands, i.e., it was just added.
  static LDVSSAPhi *ValueIsNewPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
    LDVSSAPhi *PHI = ValueIsPHI(Val, Updater);
    if (PHI && PHI->IncomingValues.size() == 0)
      return PHI;
    return nullptr;
  }

  /// GetPHIValue - For the specified PHI instruction, return the value
  /// that it defines.
  static BlockValueNum GetPHIValue(LDVSSAPhi *PHI) { return PHI->PHIValNum; }
};

} // end namespace llvm

std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(
    MachineFunction &MF, const FuncValueTable &MLiveOuts,
    const FuncValueTable &MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
  // This function will be called twice per DBG_INSTR_REF, and might end up
  // computing lots of SSA information: memoize it.
  auto SeenDbgPHIIt = SeenDbgPHIs.find(std::make_pair(&Here, InstrNum));
  if (SeenDbgPHIIt != SeenDbgPHIs.end())
    return SeenDbgPHIIt->second;

  std::optional<ValueIDNum> Result =
      resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum);
  SeenDbgPHIs.insert({std::make_pair(&Here, InstrNum), Result});
  return Result;
}

std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl(
    MachineFunction &MF, const FuncValueTable &MLiveOuts,
    const FuncValueTable &MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
  // Pick out records of DBG_PHI instructions that have been observed. If there
  // are none, then we cannot compute a value number.
  auto RangePair = std::equal_range(DebugPHINumToValue.begin(),
                                    DebugPHINumToValue.end(), InstrNum);
  auto LowerIt = RangePair.first;
  auto UpperIt = RangePair.second;

  // No DBG_PHI means there can be no location.
  if (LowerIt == UpperIt)
    return std::nullopt;

  // If any DBG_PHIs referred to a location we didn't understand, don't try to
  // compute a value. There might be scenarios where we could recover a value
  // for some range of DBG_INSTR_REFs, but at this point we can have high
  // confidence that we've seen a bug.
  auto DBGPHIRange = make_range(LowerIt, UpperIt);
  for (const DebugPHIRecord &DBG_PHI : DBGPHIRange)
    if (!DBG_PHI.ValueRead)
      return std::nullopt;

  // If there's only one DBG_PHI, then that is our value number.
  if (std::distance(LowerIt, UpperIt) == 1)
    return *LowerIt->ValueRead;

  // Pick out the location (physreg, slot) where any PHIs must occur. It's
  // technically possible for us to merge values in different registers in each
  // block, but highly unlikely that LLVM will generate such code after register
  // allocation.
  LocIdx Loc = *LowerIt->ReadLoc;

  // We have several DBG_PHIs, and a use position (the Here inst). All that
  // each DBG_PHI does is identify a value at a program position. We can treat
  // each DBG_PHI like it's a Def of a value, and the use position is a Use of
  // a value, just like SSA. We use the standard LLVM SSA updater class to
  // determine which Def is used at the Use, and any PHIs that happen along
  // the way.
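  //
  // Schematic example (illustrative only, not real MIR): instruction number
  // N is read by a DBG_PHI in each predecessor of bb.3, and used by a
  // DBG_INSTR_REF in bb.3:
  //
  //        bb.1:                 bb.2:
  //          DBG_PHI $rax, N       DBG_PHI $rax, N
  //               \                   /
  //                \                 /
  //                 ----> bb.3: <----
  //                   (use of inst N)
  //
  // Each DBG_PHI read is a Def; SSAUpdater tells us which value (possibly a
  // machine PHI in $rax's location) reaches the use in bb.3, and we validate
  // that answer against the machine value-number solution further down.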
  // Adapted LLVM SSA Updater:
  LDVSSAUpdater Updater(Loc, MLiveIns);
  // Map of which Def or PHI is the current value in each block.
  DenseMap<LDVSSABlock *, BlockValueNum> AvailableValues;
  // Set of PHIs that we have created along the way.
  SmallVector<LDVSSAPhi *, 8> CreatedPHIs;

  // Each existing DBG_PHI is a Def'd value under this model. Record these Defs
  // for the SSAUpdater.
  for (const auto &DBG_PHI : DBGPHIRange) {
    LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB);
    const ValueIDNum &Num = *DBG_PHI.ValueRead;
    AvailableValues.insert(std::make_pair(Block, Num.asU64()));
  }

  LDVSSABlock *HereBlock = Updater.getSSALDVBlock(Here.getParent());
  const auto &AvailIt = AvailableValues.find(HereBlock);
  if (AvailIt != AvailableValues.end()) {
    // Actually, we already know what the value is -- the Use is in the same
    // block as the Def.
    return ValueIDNum::fromU64(AvailIt->second);
  }

  // Otherwise, we must use the SSA Updater. It will identify the value number
  // that we are to use, and the PHIs that must happen along the way.
  SSAUpdaterImpl<LDVSSAUpdater> Impl(&Updater, &AvailableValues, &CreatedPHIs);
  BlockValueNum ResultInt =
      Impl.GetValue(Updater.getSSALDVBlock(Here.getParent()));
  ValueIDNum Result = ValueIDNum::fromU64(ResultInt);

  // We have the number for a PHI, or possibly live-through value, to be used
  // at this Use. There are a number of things we have to check about it though:
  // * Does any PHI use an 'Undef' (like an IMPLICIT_DEF) value? If so, this
  //   Use was not completely dominated by DBG_PHIs and we should abort.
  // * Are the Defs or PHIs clobbered in a block? SSAUpdater isn't aware that
  //   we've left SSA form. Validate that the inputs to each PHI are the
  //   expected values.
  // * Is a PHI we've created actually a merging of values, or are all the
  //   predecessor values the same, leading to a non-PHI machine value number?
  //   (SSAUpdater doesn't know that either). Remap validated PHIs into the
  //   ValidatedValues collection below to sort this out.
  DenseMap<LDVSSABlock *, ValueIDNum> ValidatedValues;

  // Define all the input DBG_PHI values in ValidatedValues.
  for (const auto &DBG_PHI : DBGPHIRange) {
    LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB);
    const ValueIDNum &Num = *DBG_PHI.ValueRead;
    ValidatedValues.insert(std::make_pair(Block, Num));
  }

  // Sort PHIs to validate into RPO-order.
  SmallVector<LDVSSAPhi *, 8> SortedPHIs(CreatedPHIs);

  llvm::sort(SortedPHIs, [&](LDVSSAPhi *A, LDVSSAPhi *B) {
    return BBToOrder[&A->getParent()->BB] < BBToOrder[&B->getParent()->BB];
  });

  for (auto &PHI : SortedPHIs) {
    ValueIDNum ThisBlockValueNum = MLiveIns[PHI->ParentBlock->BB][Loc.asU64()];

    // Are all these things actually defined?
    for (auto &PHIIt : PHI->IncomingValues) {
      // Any undef input means DBG_PHIs didn't dominate the use point.
      if (Updater.PoisonMap.contains(&PHIIt.first->BB))
        return std::nullopt;

      ValueIDNum ValueToCheck;
      const ValueTable &BlockLiveOuts = MLiveOuts[PHIIt.first->BB];

      auto VVal = ValidatedValues.find(PHIIt.first);
      if (VVal == ValidatedValues.end()) {
        // We cross a loop, and this is a backedge. LLVM's tail duplication
        // happens so late that DBG_PHI instructions should not be able to
        // migrate into loops -- meaning we can only be live-through this
        // loop.
        ValueToCheck = ThisBlockValueNum;
      } else {
        // Does the block have as a live-out, in the location we're examining,
        // the value that we expect? If not, it's been moved or clobbered.
        ValueToCheck = VVal->second;
      }

      if (BlockLiveOuts[Loc.asU64()] != ValueToCheck)
        return std::nullopt;
    }

    // Record this value as validated.
    ValidatedValues.insert({PHI->ParentBlock, ThisBlockValueNum});
  }

  // All the PHIs are valid: we can return what the SSAUpdater said our value
  // number was.
  return Result;
}
