//===- InstrRefBasedImpl.cpp - Tracking Debug Value MIs -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file InstrRefBasedImpl.cpp
///
/// This is a separate implementation of LiveDebugValues, see
/// LiveDebugValues.cpp and VarLocBasedImpl.cpp for more information.
///
/// This pass propagates variable locations between basic blocks, resolving
/// control flow conflicts between them. The problem is SSA construction, where
/// each debug instruction assigns the *value* that a variable has, and every
/// instruction where the variable is in scope uses that variable. The resulting
/// map of instruction-to-value is then translated into a register (or spill)
/// location for each variable over each instruction.
///
/// The primary difference from normal SSA construction is that we cannot
/// _create_ PHI values that contain variable values. CodeGen has already
/// completed, and we can't alter it just to make debug-info complete. Thus:
/// we can identify function positions where we would like a PHI value for a
/// variable, but must search the MachineFunction to see whether such a PHI is
/// available. If no such PHI exists, the variable location must be dropped.
///
/// To achieve this, we perform two kinds of analysis. First, we identify
/// every value defined by every instruction (ignoring those that only move
/// another value), then re-compute an SSA-form representation of the
/// MachineFunction, using value propagation to eliminate any unnecessary
/// PHI values. This gives us a map of every value computed in the function,
/// and its location within the register file / stack.
///
/// Secondly, for each variable we perform the same analysis, where each debug
/// instruction is considered a def, and every instruction where the variable
/// is in lexical scope is considered a use. Value propagation is used again to
/// eliminate any unnecessary PHIs. This gives us a map of each variable to the
/// value it should have in a block.
///
/// Once both are complete, we have two maps for each block:
///  * Variables to the values they should have,
///  * Values to the register / spill slot they are located in.
/// After which we can marry-up variable values with a location, and emit
/// DBG_VALUE instructions specifying those locations. Variable locations may
/// be dropped in this process due to the desired variable value not being
/// resident in any machine location, or because there is no PHI value in any
/// location that accurately represents the desired value. The building of
/// location lists for each block is left to DbgEntityHistoryCalculator.
///
/// This pass is kept efficient because the size of the first SSA problem
/// is proportional to the working-set size of the function, which the compiler
/// tries to keep small. (It's also proportional to the number of blocks).
/// Additionally, we repeatedly perform the second SSA problem analysis with
/// only the variables and blocks in a single lexical scope, exploiting their
/// locality.
///
/// ### Terminology
///
/// A machine location is a register or spill slot, a value is something that's
/// defined by an instruction or PHI node, while a variable value is the value
/// assigned to a variable. A variable location is a machine location that must
/// contain the appropriate variable value. A value that is a PHI node is
/// occasionally called an mphi.
///
/// The first SSA problem is the "machine value location" problem,
/// because we're determining which machine locations contain which values.
/// The "locations" are constant: what's unknown is what value they contain.
///
/// The second SSA problem (the one for variables) is the "variable value
/// problem", because it's determining what values a variable has, rather than
/// what location those values are placed in.
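///
/// To illustrate (a hypothetical sketch, not output from the pass): if bb.1
/// contains
///   $rax = somedef ...      <-- instruction number 1
///   <debug instruction assigning the value of instruction 1 to "x">
/// then the machine value location problem records that $rax contains the
/// value <bb.1, inst 1>, and the variable value problem records that "x"
/// should have the value <bb.1, inst 1>. Marrying the two maps lets us emit
/// "DBG_VALUE $rax" for "x" here, and for as long as some machine location
/// still contains <bb.1, inst 1>.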
///
/// TODO:
///   Overlapping fragments
///   Entry values
///   Add back DEBUG statements for debugging this
///   Collect statistics
///
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericIteratedDominanceFrontier.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <functional>
#include <queue>
#include <tuple>
#include <utility>
#include <vector>

#include "InstrRefBasedImpl.h"
#include "LiveDebugValues.h"
#include <optional>

using namespace llvm;
using namespace LiveDebugValues;

// SSAUpdaterImpl sets DEBUG_TYPE, change it.
#undef DEBUG_TYPE
#define DEBUG_TYPE "livedebugvalues"

// Act more like the VarLoc implementation, by propagating some locations too
// far and ignoring some transfers.
static cl::opt<bool> EmulateOldLDV("emulate-old-livedebugvalues", cl::Hidden,
                                   cl::desc("Act like old LiveDebugValues did"),
                                   cl::init(false));

// Limit for the maximum number of stack slots we should track, past which we
// will ignore any spills. InstrRefBasedLDV gathers detailed information on all
// stack slots which leads to high memory consumption, and in some scenarios
// (such as asan with very many locals) the working set of the function can be
// very large, causing many spills. In these scenarios, it is very unlikely that
// the developer has hundreds of variables live at the same time that they're
// carefully thinking about -- instead, they probably autogenerated the code.
// When this happens, gracefully stop tracking excess spill slots, rather than
// consuming all the developer's memory.
static cl::opt<unsigned>
    StackWorkingSetLimit("livedebugvalues-max-stack-slots", cl::Hidden,
                         cl::desc("livedebugvalues-stack-ws-limit"),
                         cl::init(250));

DbgOpID DbgOpID::UndefID = DbgOpID(0xffffffff);

/// Tracker for converting machine value locations and variable values into
/// variable locations (the output of LiveDebugValues), recorded as DBG_VALUEs
/// specifying block live-in locations and transfers within blocks.
///
/// Operating on a per-block basis, this class takes a (pre-loaded) MLocTracker
/// and must be initialized with the set of variable values that are live-in to
/// the block. The caller then repeatedly calls process(). TransferTracker picks
/// out variable locations for the live-in variable values (if there _is_ a
/// location) and creates the corresponding DBG_VALUEs. Then, as the block is
/// stepped through, transfers of values between machine locations are
/// identified and, if profitable, a DBG_VALUE is created.
///
/// This is where debug use-before-defs would be resolved: a variable with an
/// unavailable value could materialize in the middle of a block, when the
/// value becomes available. Or, we could detect clobbers and re-specify the
/// variable in a backup location. (XXX these are unimplemented).
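///
/// An illustrative (hypothetical) scenario: if variable "x" is live-in with
/// value <bb.1, inst 1> and that value is currently in $rbx, loadInlocs emits
/// a DBG_VALUE of $rbx at the top of the block; if $rbx is later spilt,
/// transferMlocs records the move and emits a further DBG_VALUE referring to
/// the stack slot.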
class TransferTracker {
public:
  const TargetInstrInfo *TII;
  const TargetLowering *TLI;
  /// This machine location tracker is assumed to always contain the up-to-date
  /// value mapping for all machine locations. TransferTracker only reads
  /// information from it. (XXX make it const?)
  MLocTracker *MTracker;
  MachineFunction &MF;
  const DebugVariableMap &DVMap;
  bool ShouldEmitDebugEntryValues;

  /// Record of all changes in variable locations at a block position. Awkwardly
  /// we allow inserting either before or after the point: MBB != nullptr
  /// indicates it's before, otherwise after.
  struct Transfer {
    /// Position at which to insert DBG_VALUEs.
    MachineBasicBlock::instr_iterator Pos;
    /// Non-null if we should insert before Pos, otherwise insert after it.
    MachineBasicBlock *MBB;
    /// Vector of DBG_VALUEs to insert. Store with their DebugVariableID so that
    /// they can be sorted into a stable order for emission at a later time.
    SmallVector<std::pair<DebugVariableID, MachineInstr *>, 4> Insts;
  };

  /// Stores the resolved operands (machine locations and constants) and
  /// qualifying meta-information needed to construct a concrete DBG_VALUE-like
  /// instruction.
  struct ResolvedDbgValue {
    SmallVector<ResolvedDbgOp> Ops;
    DbgValueProperties Properties;

    ResolvedDbgValue(SmallVectorImpl<ResolvedDbgOp> &Ops,
                     DbgValueProperties Properties)
        : Ops(Ops.begin(), Ops.end()), Properties(Properties) {}

    /// Returns all the LocIdx values used in this struct, in the order in which
    /// they appear as operands in the debug value; may contain duplicates.
    auto loc_indices() const {
      return map_range(
          make_filter_range(
              Ops, [](const ResolvedDbgOp &Op) { return !Op.IsConst; }),
          [](const ResolvedDbgOp &Op) { return Op.Loc; });
    }
  };

  /// Collection of transfers (DBG_VALUEs) to be inserted.
  SmallVector<Transfer, 32> Transfers;

  /// Local cache of what-value-is-in-what-LocIdx. Used to identify differences
  /// between TransferTracker's view of variable locations and MLocTracker's.
  /// For example, MLocTracker observes all clobbers, but TransferTracker lazily
  /// does not.
  SmallVector<ValueIDNum, 32> VarLocs;

  /// Map from LocIdxes to which DebugVariables are based in that location.
  /// Maintained while stepping through the block. Not accurate if
  /// VarLocs[Idx] != MTracker->LocIdxToIDNum[Idx].
  DenseMap<LocIdx, SmallSet<DebugVariableID, 4>> ActiveMLocs;

  /// Map from DebugVariable to its current location and qualifying meta
  /// information. To be used in conjunction with ActiveMLocs to construct
  /// enough information for the DBG_VALUEs for a particular LocIdx.
  DenseMap<DebugVariableID, ResolvedDbgValue> ActiveVLocs;

  /// Temporary cache of DBG_VALUEs to be entered into the Transfers collection.
  SmallVector<std::pair<DebugVariableID, MachineInstr *>, 4> PendingDbgValues;

  /// Record of a use-before-def: created when a value that's live-in to the
  /// current block isn't available in any machine location, but it will be
  /// defined in this block.
  struct UseBeforeDef {
    /// Value of this variable, def'd in block.
    SmallVector<DbgOp> Values;
    /// Identity of this variable.
    DebugVariableID VarID;
    /// Additional variable properties.
    DbgValueProperties Properties;
    UseBeforeDef(ArrayRef<DbgOp> Values, DebugVariableID VarID,
                 const DbgValueProperties &Properties)
        : Values(Values.begin(), Values.end()), VarID(VarID),
          Properties(Properties) {}
  };

  /// Map from instruction index (within the block) to the set of UseBeforeDefs
  /// that become defined at that instruction.
  DenseMap<unsigned, SmallVector<UseBeforeDef, 1>> UseBeforeDefs;

  /// The set of variables that are in UseBeforeDefs and can become a location
  /// once the relevant value is defined. An element being erased from this
  /// collection prevents the use-before-def materializing.
  DenseSet<DebugVariableID> UseBeforeDefVariables;

  const TargetRegisterInfo &TRI;
  const BitVector &CalleeSavedRegs;

  TransferTracker(const TargetInstrInfo *TII, MLocTracker *MTracker,
                  MachineFunction &MF, const DebugVariableMap &DVMap,
                  const TargetRegisterInfo &TRI,
                  const BitVector &CalleeSavedRegs, const TargetPassConfig &TPC)
      : TII(TII), MTracker(MTracker), MF(MF), DVMap(DVMap), TRI(TRI),
        CalleeSavedRegs(CalleeSavedRegs) {
    TLI = MF.getSubtarget().getTargetLowering();
    auto &TM = TPC.getTM<TargetMachine>();
    ShouldEmitDebugEntryValues = TM.Options.ShouldEmitDebugEntryValues();
  }

  bool isCalleeSaved(LocIdx L) const {
    unsigned Reg = MTracker->LocIdxToLocID[L];
    if (Reg >= MTracker->NumRegs)
      return false;
    for (MCRegAliasIterator RAI(Reg, &TRI, true); RAI.isValid(); ++RAI)
      if (CalleeSavedRegs.test(*RAI))
        return true;
    return false;
  }

  // An estimate of the expected lifespan of values at a machine location, with
  // a greater value corresponding to a longer expected lifespan, i.e. spill
  // slots generally live longer than callee-saved registers which generally
  // live longer than non-callee-saved registers. The minimum value of 0
  // corresponds to an illegal location that cannot have a "lifespan" at all.
  enum class LocationQuality : unsigned char {
    Illegal = 0,
    Register,
    CalleeSavedRegister,
    SpillSlot,
    Best = SpillSlot
  };

  class LocationAndQuality {
    unsigned Location : 24;
    unsigned Quality : 8;

  public:
    LocationAndQuality() : Location(0), Quality(0) {}
    LocationAndQuality(LocIdx L, LocationQuality Q)
        : Location(L.asU64()), Quality(static_cast<unsigned>(Q)) {}
    LocIdx getLoc() const {
      if (!Quality)
        return LocIdx::MakeIllegalLoc();
      return LocIdx(Location);
    }
    LocationQuality getQuality() const { return LocationQuality(Quality); }
    bool isIllegal() const { return !Quality; }
    bool isBest() const { return getQuality() == LocationQuality::Best; }
  };

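  /// A value number paired with the best machine location found for it so far.
  /// These pairs are kept in a SmallVector sorted by value (see ValueToLocSort
  /// below) rather than in a map, so that loadVarInloc and loadInlocs can
  /// binary-search them with std::lower_bound.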
  using ValueLocPair = std::pair<ValueIDNum, LocationAndQuality>;

  static inline bool ValueToLocSort(const ValueLocPair &A,
                                    const ValueLocPair &B) {
    return A.first < B.first;
  }

  // Returns the LocationQuality for the location L iff the quality of L is
  // strictly greater than the provided minimum quality.
  std::optional<LocationQuality>
  getLocQualityIfBetter(LocIdx L, LocationQuality Min) const {
    if (L.isIllegal())
      return std::nullopt;
    if (Min >= LocationQuality::SpillSlot)
      return std::nullopt;
    if (MTracker->isSpill(L))
      return LocationQuality::SpillSlot;
    if (Min >= LocationQuality::CalleeSavedRegister)
      return std::nullopt;
    if (isCalleeSaved(L))
      return LocationQuality::CalleeSavedRegister;
    if (Min >= LocationQuality::Register)
      return std::nullopt;
    return LocationQuality::Register;
  }

  /// For a variable \p Var with the live-in value \p Value, attempts to resolve
  /// the DbgValue to a concrete DBG_VALUE, emitting that value and loading the
  /// tracking information to track Var throughout the block.
  /// \p ValueToLoc is a map containing the best known location for every
  /// ValueIDNum that Value may use.
  /// \p MBB is the basic block that we are loading the live-in value for.
  /// \p DbgOpStore is the map containing the DbgOpID->DbgOp mapping needed to
  /// determine the values used by Value.
  void loadVarInloc(MachineBasicBlock &MBB, DbgOpIDMap &DbgOpStore,
                    const SmallVectorImpl<ValueLocPair> &ValueToLoc,
                    DebugVariableID VarID, DbgValue Value) {
    SmallVector<DbgOp> DbgOps;
    SmallVector<ResolvedDbgOp> ResolvedDbgOps;
    bool IsValueValid = true;
    unsigned LastUseBeforeDef = 0;

    // If every value used by the incoming DbgValue is available at block
    // entry, ResolvedDbgOps will contain the machine locations/constants for
    // those values and will be used to emit a debug location.
    // If one or more values are not yet available, but will all be defined in
    // this block, then LastUseBeforeDef will track the instruction index in
    // this BB at which the last of those values is defined, DbgOps will
    // contain the values that we will emit when we reach that instruction.
    // If one or more values are undef or not available throughout this block,
    // and we can't recover as an entry value, we set IsValueValid=false and
    // skip this variable.
    for (DbgOpID ID : Value.getDbgOpIDs()) {
      DbgOp Op = DbgOpStore.find(ID);
      DbgOps.push_back(Op);
      if (ID.isUndef()) {
        IsValueValid = false;
        break;
      }
      if (ID.isConst()) {
        ResolvedDbgOps.push_back(Op.MO);
        continue;
      }

      // Search for the desired ValueIDNum, to examine the best location found
      // for it. Use an empty ValueLocPair to search for an entry in ValueToLoc.
      const ValueIDNum &Num = Op.ID;
      ValueLocPair Probe(Num, LocationAndQuality());
      auto ValuesPreferredLoc = std::lower_bound(
          ValueToLoc.begin(), ValueToLoc.end(), Probe, ValueToLocSort);

      // There must be a legitimate entry found for Num.
      assert(ValuesPreferredLoc != ValueToLoc.end() &&
             ValuesPreferredLoc->first == Num);

      if (ValuesPreferredLoc->second.isIllegal()) {
        // If it's a def that occurs in this block, register it as a
        // use-before-def to be resolved as we step through the block.
        // Continue processing values so that we add any other UseBeforeDef
        // entries needed for later.
        if (Num.getBlock() == (unsigned)MBB.getNumber() && !Num.isPHI()) {
          LastUseBeforeDef = std::max(LastUseBeforeDef,
                                      static_cast<unsigned>(Num.getInst()));
          continue;
        }
        recoverAsEntryValue(VarID, Value.Properties, Num);
        IsValueValid = false;
        break;
      }

      // Defer modifying ActiveVLocs until after we've confirmed we have a
      // live range.
      LocIdx M = ValuesPreferredLoc->second.getLoc();
      ResolvedDbgOps.push_back(M);
    }

    // If we cannot produce a valid value for the LiveIn value within this
    // block, skip this variable.
    if (!IsValueValid)
      return;

    // Add UseBeforeDef entry for the last value to be defined in this block.
    if (LastUseBeforeDef) {
      addUseBeforeDef(VarID, Value.Properties, DbgOps, LastUseBeforeDef);
      return;
    }

    // The LiveIn value is available at block entry, begin tracking and record
    // the transfer.
    for (const ResolvedDbgOp &Op : ResolvedDbgOps)
      if (!Op.IsConst)
        ActiveMLocs[Op.Loc].insert(VarID);
    auto NewValue = ResolvedDbgValue{ResolvedDbgOps, Value.Properties};
    auto Result = ActiveVLocs.insert(std::make_pair(VarID, NewValue));
    if (!Result.second)
      Result.first->second = NewValue;
    auto &[Var, DILoc] = DVMap.lookupDVID(VarID);
    PendingDbgValues.push_back(std::make_pair(
        VarID, &*MTracker->emitLoc(ResolvedDbgOps, Var, DILoc,
                                   Value.Properties)));
  }

  /// Load object with live-in variable values. \p mlocs contains the live-in
  /// values in each machine location, while \p vlocs the live-in variable
  /// values. This method picks variable locations for the live-in variables,
  /// creates DBG_VALUEs and puts them in #Transfers, then prepares the other
  /// object fields to track variable locations as we step through the block.
  /// FIXME: could just examine mloctracker instead of passing in \p mlocs?
  void
  loadInlocs(MachineBasicBlock &MBB, ValueTable &MLocs, DbgOpIDMap &DbgOpStore,
             const SmallVectorImpl<std::pair<DebugVariableID, DbgValue>> &VLocs,
             unsigned NumLocs) {
    ActiveMLocs.clear();
    ActiveVLocs.clear();
    VarLocs.clear();
    VarLocs.reserve(NumLocs);
    UseBeforeDefs.clear();
    UseBeforeDefVariables.clear();

    // Mapping of the preferred locations for each value. Collected into this
    // vector then sorted for easy searching.
    SmallVector<ValueLocPair, 16> ValueToLoc;

    // Initialize the preferred-location map with illegal locations, to be
    // filled in later.
    for (const auto &VLoc : VLocs)
      if (VLoc.second.Kind == DbgValue::Def)
        for (DbgOpID OpID : VLoc.second.getDbgOpIDs())
          if (!OpID.ID.IsConst)
            ValueToLoc.push_back(
                {DbgOpStore.find(OpID).ID, LocationAndQuality()});

    llvm::sort(ValueToLoc, ValueToLocSort);
    ActiveMLocs.reserve(VLocs.size());
    ActiveVLocs.reserve(VLocs.size());

    // Produce a map of value numbers to the current machine locs they live
    // in. When emulating VarLocBasedImpl, there should only be one
    // location; when not, we get to pick.
    for (auto Location : MTracker->locations()) {
      LocIdx Idx = Location.Idx;
      ValueIDNum &VNum = MLocs[Idx.asU64()];
      if (VNum == ValueIDNum::EmptyValue)
        continue;
      VarLocs.push_back(VNum);

      // Is there a variable that wants a location for this value? If not,
      // skip.
      ValueLocPair Probe(VNum, LocationAndQuality());
      auto VIt = std::lower_bound(ValueToLoc.begin(), ValueToLoc.end(), Probe,
                                  ValueToLocSort);
      if (VIt == ValueToLoc.end() || VIt->first != VNum)
        continue;

      auto &Previous = VIt->second;
      // If this is the first location with that value, pick it. Otherwise,
      // consider whether it's a "longer term" location.
      std::optional<LocationQuality> ReplacementQuality =
          getLocQualityIfBetter(Idx, Previous.getQuality());
      if (ReplacementQuality)
        Previous = LocationAndQuality(Idx, *ReplacementQuality);
    }

    // Now map variables to their picked LocIdxes.
    for (const auto &Var : VLocs) {
      loadVarInloc(MBB, DbgOpStore, ValueToLoc, Var.first, Var.second);
    }
    flushDbgValues(MBB.begin(), &MBB);
  }

  /// Record that \p Var has value \p ID, a value that becomes available
  /// later in the function.
  void addUseBeforeDef(DebugVariableID VarID,
                       const DbgValueProperties &Properties,
                       const SmallVectorImpl<DbgOp> &DbgOps, unsigned Inst) {
    UseBeforeDefs[Inst].emplace_back(DbgOps, VarID, Properties);
    UseBeforeDefVariables.insert(VarID);
  }

  /// After the instruction at index \p Inst and position \p pos has been
  /// processed, check whether it defines a variable value in a use-before-def.
  /// If so, and the variable value hasn't changed since the start of the
  /// block, create a DBG_VALUE.
  void checkInstForNewValues(unsigned Inst, MachineBasicBlock::iterator pos) {
    auto MIt = UseBeforeDefs.find(Inst);
    if (MIt == UseBeforeDefs.end())
      return;

    // Map of values to the locations that store them for every value used by
    // the variables that may have become available.
    SmallDenseMap<ValueIDNum, LocationAndQuality> ValueToLoc;

    // Populate ValueToLoc with illegal default mappings for every value used
    // by any UseBeforeDef variables for this instruction.
    for (auto &Use : MIt->second) {
      if (!UseBeforeDefVariables.count(Use.VarID))
        continue;

      for (DbgOp &Op : Use.Values) {
        assert(!Op.isUndef() && "UseBeforeDef erroneously created for a "
                                "DbgValue with undef values.");
        if (Op.IsConst)
          continue;

        ValueToLoc.insert({Op.ID, LocationAndQuality()});
      }
    }

    // Exit early if we have no DbgValues to produce.
    if (ValueToLoc.empty())
      return;

    // Determine the best location for each desired value.
    for (auto Location : MTracker->locations()) {
      LocIdx Idx = Location.Idx;
      ValueIDNum &LocValueID = Location.Value;

      // Is there a variable that wants a location for this value? If not,
      // skip.
      auto VIt = ValueToLoc.find(LocValueID);
      if (VIt == ValueToLoc.end())
        continue;

      auto &Previous = VIt->second;
      // If this is the first location with that value, pick it. Otherwise,
      // consider whether it's a "longer term" location.
      std::optional<LocationQuality> ReplacementQuality =
          getLocQualityIfBetter(Idx, Previous.getQuality());
      if (ReplacementQuality)
        Previous = LocationAndQuality(Idx, *ReplacementQuality);
    }

    // Using the map of values to locations, produce a final set of values for
    // this variable.
    for (auto &Use : MIt->second) {
      if (!UseBeforeDefVariables.count(Use.VarID))
        continue;

      SmallVector<ResolvedDbgOp> DbgOps;

      for (DbgOp &Op : Use.Values) {
        if (Op.IsConst) {
          DbgOps.push_back(Op.MO);
          continue;
        }
        LocIdx NewLoc = ValueToLoc.find(Op.ID)->second.getLoc();
        if (NewLoc.isIllegal())
          break;
        DbgOps.push_back(NewLoc);
      }

      // If at least one value used by this debug value is no longer available,
      // i.e. one of the values was killed before we finished defining all of
      // the values used by this variable, discard.
      if (DbgOps.size() != Use.Values.size())
        continue;

      // Otherwise, we're good to go.
      auto &[Var, DILoc] = DVMap.lookupDVID(Use.VarID);
      PendingDbgValues.push_back(std::make_pair(
          Use.VarID, MTracker->emitLoc(DbgOps, Var, DILoc, Use.Properties)));
    }
    flushDbgValues(pos, nullptr);
  }

  /// Helper to move created DBG_VALUEs into Transfers collection.
  void flushDbgValues(MachineBasicBlock::iterator Pos, MachineBasicBlock *MBB) {
    if (PendingDbgValues.size() == 0)
      return;

    // Pick out the instruction start position.
    MachineBasicBlock::instr_iterator BundleStart;
    if (MBB && Pos == MBB->begin())
      BundleStart = MBB->instr_begin();
    else
      BundleStart = getBundleStart(Pos->getIterator());

    Transfers.push_back({BundleStart, MBB, PendingDbgValues});
    PendingDbgValues.clear();
  }

  bool isEntryValueVariable(const DebugVariable &Var,
                            const DIExpression *Expr) const {
    if (!Var.getVariable()->isParameter())
      return false;

    if (Var.getInlinedAt())
      return false;

    if (Expr->getNumElements() > 0 && !Expr->isDeref())
      return false;

    return true;
  }

  bool isEntryValueValue(const ValueIDNum &Val) const {
    // Must be in entry block (block number zero), and be a PHI / live-in value.
    if (Val.getBlock() || !Val.isPHI())
      return false;

    // Entry values must enter in a register.
    if (MTracker->isSpill(Val.getLoc()))
      return false;

    Register SP = TLI->getStackPointerRegisterToSaveRestore();
    Register FP = TRI.getFrameRegister(MF);
    Register Reg = MTracker->LocIdxToLocID[Val.getLoc()];
    return Reg != SP && Reg != FP;
  }

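  /// If the value \p Num assigned to parameter variable \p VarID is still the
  /// unmodified entry (live-in) value of the function, emit a DBG_VALUE that
  /// uses a DIExpression::EntryValue expression, allowing the debugger to
  /// recover the value from the call site. Returns true if such a DBG_VALUE
  /// was created.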
  bool recoverAsEntryValue(DebugVariableID VarID,
                           const DbgValueProperties &Prop,
                           const ValueIDNum &Num) {
    // Is this variable location a candidate to be an entry value? First,
    // should we be trying this at all?
    if (!ShouldEmitDebugEntryValues)
      return false;

    const DIExpression *DIExpr = Prop.DIExpr;

    // We don't currently emit entry values for DBG_VALUE_LISTs.
    if (Prop.IsVariadic) {
      // If this debug value can be converted to be non-variadic, then do so;
      // otherwise give up.
      auto NonVariadicExpression =
          DIExpression::convertToNonVariadicExpression(DIExpr);
      if (!NonVariadicExpression)
        return false;
      DIExpr = *NonVariadicExpression;
    }

    auto &[Var, DILoc] = DVMap.lookupDVID(VarID);

    // Is the variable appropriate for entry values (i.e., is a parameter)?
    if (!isEntryValueVariable(Var, DIExpr))
      return false;

    // Is the value assigned to this variable still the entry value?
    if (!isEntryValueValue(Num))
      return false;

    // Emit a variable location using an entry value expression.
    DIExpression *NewExpr =
        DIExpression::prepend(DIExpr, DIExpression::EntryValue);
    Register Reg = MTracker->LocIdxToLocID[Num.getLoc()];
    MachineOperand MO = MachineOperand::CreateReg(Reg, /*isDef=*/false);
    PendingDbgValues.push_back(std::make_pair(
        VarID, &*emitMOLoc(MO, Var, {NewExpr, Prop.Indirect, false})));
    return true;
  }

  /// Change a variable value after encountering a DBG_VALUE inside a block.
  void redefVar(const MachineInstr &MI) {
    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
    DbgValueProperties Properties(MI);
    DebugVariableID VarID = DVMap.getDVID(Var);

    // Ignore non-register locations, we don't transfer those.
    if (MI.isUndefDebugValue() ||
        all_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return !MO.isReg(); })) {
      auto It = ActiveVLocs.find(VarID);
      if (It != ActiveVLocs.end()) {
        for (LocIdx Loc : It->second.loc_indices())
          ActiveMLocs[Loc].erase(VarID);
        ActiveVLocs.erase(It);
      }
      // Any use-before-defs no longer apply.
      UseBeforeDefVariables.erase(VarID);
      return;
    }

    SmallVector<ResolvedDbgOp> NewLocs;
    for (const MachineOperand &MO : MI.debug_operands()) {
      if (MO.isReg()) {
        // Any undef regs have already been filtered out above.
        Register Reg = MO.getReg();
        LocIdx NewLoc = MTracker->getRegMLoc(Reg);
        NewLocs.push_back(NewLoc);
      } else {
        NewLocs.push_back(MO);
      }
    }

    redefVar(MI, Properties, NewLocs);
  }

  /// Handle a change in variable location within a block. Terminate the
  /// variable's current location, and record the value it now refers to, so
  /// that we can detect location transfers later on.
  void redefVar(const MachineInstr &MI, const DbgValueProperties &Properties,
                SmallVectorImpl<ResolvedDbgOp> &NewLocs) {
    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
    DebugVariableID VarID = DVMap.getDVID(Var);
    // Any use-before-defs no longer apply.
    UseBeforeDefVariables.erase(VarID);

    // Erase any previous location.
    auto It = ActiveVLocs.find(VarID);
    if (It != ActiveVLocs.end()) {
      for (LocIdx Loc : It->second.loc_indices())
        ActiveMLocs[Loc].erase(VarID);
    }

    // If there _is_ no new location, all we had to do was erase.
    if (NewLocs.empty()) {
      if (It != ActiveVLocs.end())
        ActiveVLocs.erase(It);
      return;
    }

    SmallVector<std::pair<LocIdx, DebugVariableID>> LostMLocs;
    for (ResolvedDbgOp &Op : NewLocs) {
      if (Op.IsConst)
        continue;

      LocIdx NewLoc = Op.Loc;

      // Check whether our local copy of values-by-location in #VarLocs is out
      // of date. Wipe old tracking data for the location if it's been
      // clobbered in the meantime.
      if (MTracker->readMLoc(NewLoc) != VarLocs[NewLoc.asU64()]) {
        for (const auto &P : ActiveMLocs[NewLoc]) {
          auto LostVLocIt = ActiveVLocs.find(P);
          if (LostVLocIt != ActiveVLocs.end()) {
            for (LocIdx Loc : LostVLocIt->second.loc_indices()) {
              // Every active variable mapping for NewLoc will be cleared, no
              // need to track individual variables.
              if (Loc == NewLoc)
                continue;
              LostMLocs.emplace_back(Loc, P);
            }
          }
          ActiveVLocs.erase(P);
        }
        for (const auto &LostMLoc : LostMLocs)
          ActiveMLocs[LostMLoc.first].erase(LostMLoc.second);
        LostMLocs.clear();
        It = ActiveVLocs.find(VarID);
        ActiveMLocs[NewLoc.asU64()].clear();
        VarLocs[NewLoc.asU64()] = MTracker->readMLoc(NewLoc);
      }

      ActiveMLocs[NewLoc].insert(VarID);
    }

    if (It == ActiveVLocs.end()) {
      ActiveVLocs.insert(
          std::make_pair(VarID, ResolvedDbgValue(NewLocs, Properties)));
    } else {
      It->second.Ops.assign(NewLocs);
      It->second.Properties = Properties;
    }
  }

  /// Account for a location \p MLoc being clobbered. Examine the variable
  /// locations that will be terminated, and try to recover them by using
  /// another location. Optionally, given \p MakeUndef, emit a DBG_VALUE to
  /// explicitly terminate a location if it can't be recovered.
  void clobberMloc(LocIdx MLoc, MachineBasicBlock::iterator Pos,
                   bool MakeUndef = true) {
    auto ActiveMLocIt = ActiveMLocs.find(MLoc);
    if (ActiveMLocIt == ActiveMLocs.end())
      return;

    // What was the old variable value?
    ValueIDNum OldValue = VarLocs[MLoc.asU64()];
    clobberMloc(MLoc, OldValue, Pos, MakeUndef);
  }
  /// Overload that takes an explicit value \p OldValue for when the value in
  /// \p MLoc has changed and the TransferTracker's locations have not been
  /// updated yet.
  void clobberMloc(LocIdx MLoc, ValueIDNum OldValue,
                   MachineBasicBlock::iterator Pos, bool MakeUndef = true) {
    auto ActiveMLocIt = ActiveMLocs.find(MLoc);
    if (ActiveMLocIt == ActiveMLocs.end())
      return;

    VarLocs[MLoc.asU64()] = ValueIDNum::EmptyValue;

    // Examine the remaining variable locations: if we can find the same value
    // again, we can recover the location.
    std::optional<LocIdx> NewLoc;
    for (auto Loc : MTracker->locations())
      if (Loc.Value == OldValue)
        NewLoc = Loc.Idx;

    // If there is no location, and we weren't asked to make the variable
    // explicitly undef, then stop here.
    if (!NewLoc && !MakeUndef) {
      // Try and recover a few more locations with entry values.
      for (DebugVariableID VarID : ActiveMLocIt->second) {
        auto &Prop = ActiveVLocs.find(VarID)->second.Properties;
        recoverAsEntryValue(VarID, Prop, OldValue);
      }
      flushDbgValues(Pos, nullptr);
      return;
    }

    // Examine all the variables based on this location.
    DenseSet<DebugVariableID> NewMLocs;
    // If no new location has been found, every variable that depends on this
    // MLoc is dead, so end their existing MLoc->Var mappings as well.
    SmallVector<std::pair<LocIdx, DebugVariableID>> LostMLocs;
    for (DebugVariableID VarID : ActiveMLocIt->second) {
      auto ActiveVLocIt = ActiveVLocs.find(VarID);
      // Re-state the variable location: if there's no replacement then NewLoc
      // is std::nullopt and a $noreg DBG_VALUE will be created. Otherwise, a
      // DBG_VALUE identifying the alternative location will be emitted.
      const DbgValueProperties &Properties = ActiveVLocIt->second.Properties;

      // Produce the new list of debug ops - an empty list if no new location
      // was found, or the existing list with the substitution MLoc -> NewLoc
      // otherwise.
      SmallVector<ResolvedDbgOp> DbgOps;
      if (NewLoc) {
        ResolvedDbgOp OldOp(MLoc);
        ResolvedDbgOp NewOp(*NewLoc);
        // Insert illegal ops to overwrite afterwards.
        DbgOps.insert(DbgOps.begin(), ActiveVLocIt->second.Ops.size(),
                      ResolvedDbgOp(LocIdx::MakeIllegalLoc()));
        replace_copy(ActiveVLocIt->second.Ops, DbgOps.begin(), OldOp, NewOp);
      }

      auto &[Var, DILoc] = DVMap.lookupDVID(VarID);
      PendingDbgValues.push_back(std::make_pair(
          VarID, &*MTracker->emitLoc(DbgOps, Var, DILoc, Properties)));

      // Update machine locations <=> variable locations maps. Defer updating
      // ActiveMLocs to avoid invalidating the ActiveMLocIt iterator.
      if (!NewLoc) {
        for (LocIdx Loc : ActiveVLocIt->second.loc_indices()) {
          if (Loc != MLoc)
            LostMLocs.emplace_back(Loc, VarID);
        }
        ActiveVLocs.erase(ActiveVLocIt);
      } else {
        ActiveVLocIt->second.Ops = DbgOps;
        NewMLocs.insert(VarID);
      }
    }

    // Remove variables from ActiveMLocs if they no longer use any other MLocs
    // due to being killed by this clobber.
    for (auto &LocVarIt : LostMLocs) {
      auto LostMLocIt = ActiveMLocs.find(LocVarIt.first);
      assert(LostMLocIt != ActiveMLocs.end() &&
             "Variable was using this MLoc, but ActiveMLocs[MLoc] has no "
             "entries?");
      LostMLocIt->second.erase(LocVarIt.second);
    }

    // We lazily track what locations have which values; if we've found a new
    // location for the clobbered value, remember it.
    if (NewLoc)
      VarLocs[NewLoc->asU64()] = OldValue;

    flushDbgValues(Pos, nullptr);

    // Commit ActiveMLoc changes.
    ActiveMLocIt->second.clear();
    if (!NewMLocs.empty())
      for (DebugVariableID VarID : NewMLocs)
        ActiveMLocs[*NewLoc].insert(VarID);
  }

  /// Transfer variables based on \p Src to be based on \p Dst. This handles
  /// both register copies as well as spills and restores. Creates DBG_VALUEs
  /// describing the movement.
  void transferMlocs(LocIdx Src, LocIdx Dst, MachineBasicBlock::iterator Pos) {
    // Does Src still contain the value num we expect? If not, it's been
    // clobbered in the meantime, and our variable locations are stale.
    if (VarLocs[Src.asU64()] != MTracker->readMLoc(Src))
      return;

    // assert(ActiveMLocs[Dst].size() == 0);
    //^^^ Legitimate scenario on account of un-clobbered slot being assigned to?

    // Move set of active variables from one location to another.
    auto MovingVars = ActiveMLocs[Src];
    ActiveMLocs[Dst].insert(MovingVars.begin(), MovingVars.end());
    VarLocs[Dst.asU64()] = VarLocs[Src.asU64()];

    // For each variable based on Src, create a location at Dst.
    ResolvedDbgOp SrcOp(Src);
    ResolvedDbgOp DstOp(Dst);
    for (DebugVariableID VarID : MovingVars) {
      auto ActiveVLocIt = ActiveVLocs.find(VarID);
      assert(ActiveVLocIt != ActiveVLocs.end());

      // Update all instances of Src in the variable's tracked values to Dst.
      std::replace(ActiveVLocIt->second.Ops.begin(),
                   ActiveVLocIt->second.Ops.end(), SrcOp, DstOp);

      auto &[Var, DILoc] = DVMap.lookupDVID(VarID);
      MachineInstr *MI = MTracker->emitLoc(ActiveVLocIt->second.Ops, Var, DILoc,
                                           ActiveVLocIt->second.Properties);
      PendingDbgValues.push_back(std::make_pair(VarID, MI));
    }
    ActiveMLocs[Src].clear();
    flushDbgValues(Pos, nullptr);

    // XXX XXX XXX "pretend to be old LDV" means dropping all tracking data
    // about the old location.
    if (EmulateOldLDV)
      VarLocs[Src.asU64()] = ValueIDNum::EmptyValue;
  }

  MachineInstrBuilder emitMOLoc(const MachineOperand &MO,
                                const DebugVariable &Var,
                                const DbgValueProperties &Properties) {
    DebugLoc DL = DILocation::get(Var.getVariable()->getContext(), 0, 0,
                                  Var.getVariable()->getScope(),
                                  const_cast<DILocation *>(Var.getInlinedAt()));
    auto MIB = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE));
    MIB.add(MO);
    if (Properties.Indirect)
      MIB.addImm(0);
    else
      MIB.addReg(0);
    MIB.addMetadata(Var.getVariable());
    MIB.addMetadata(Properties.DIExpr);
    return MIB;
  }
};

//===----------------------------------------------------------------------===//
// Implementation
//===----------------------------------------------------------------------===//

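// Sentinel values for ValueIDNum: EmptyValue marks a machine location with no
// known value, and (together with TombstoneValue) provides the out-of-range
// keys needed when ValueIDNums are stored in DenseMaps.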
ValueIDNum ValueIDNum::EmptyValue = {UINT_MAX, UINT_MAX, UINT_MAX};
ValueIDNum ValueIDNum::TombstoneValue = {UINT_MAX, UINT_MAX, UINT_MAX - 1};

#ifndef NDEBUG
void ResolvedDbgOp::dump(const MLocTracker *MTrack) const {
  if (IsConst) {
    dbgs() << MO;
  } else {
    dbgs() << MTrack->LocIdxToName(Loc);
  }
}
void DbgOp::dump(const MLocTracker *MTrack) const {
  if (IsConst) {
    dbgs() << MO;
  } else if (!isUndef()) {
    dbgs() << MTrack->IDAsString(ID);
  }
}
void DbgOpID::dump(const MLocTracker *MTrack, const DbgOpIDMap *OpStore) const {
  if (!OpStore) {
    dbgs() << "ID(" << asU32() << ")";
  } else {
    OpStore->find(*this).dump(MTrack);
  }
}
void DbgValue::dump(const MLocTracker *MTrack,
                    const DbgOpIDMap *OpStore) const {
  if (Kind == NoVal) {
    dbgs() << "NoVal(" << BlockNo << ")";
  } else if (Kind == VPHI || Kind == Def) {
    if (Kind == VPHI)
      dbgs() << "VPHI(" << BlockNo << ",";
    else
      dbgs() << "Def(";
    for (unsigned Idx = 0; Idx < getDbgOpIDs().size(); ++Idx) {
      // Print a comma between operands, but not before the first one.
      if (Idx != 0)
        dbgs() << ",";
      getDbgOpID(Idx).dump(MTrack, OpStore);
    }
    dbgs() << ")";
  }
  if (Properties.Indirect)
    dbgs() << " indir";
  if (Properties.DIExpr)
    dbgs() << " " << *Properties.DIExpr;
}
#endif

MLocTracker::MLocTracker(MachineFunction &MF, const TargetInstrInfo &TII,
                         const TargetRegisterInfo &TRI,
                         const TargetLowering &TLI)
    : MF(MF), TII(TII), TRI(TRI), TLI(TLI),
      LocIdxToIDNum(ValueIDNum::EmptyValue), LocIdxToLocID(0) {
  NumRegs = TRI.getNumRegs();
  reset();
  LocIDToLocIdx.resize(NumRegs, LocIdx::MakeIllegalLoc());
  assert(NumRegs < (1u << NUM_LOC_BITS)); // Detect bit packing failure

  // Always track SP. This prevents the implicit clobbering performed by
  // regmasks from affecting its value. (LiveDebugValues disbelieves calls and
  // regmasks that claim to clobber SP).
  Register SP = TLI.getStackPointerRegisterToSaveRestore();
  if (SP) {
    unsigned ID = getLocID(SP);
    (void)lookupOrTrackRegister(ID);

    for (MCRegAliasIterator RAI(SP, &TRI, true); RAI.isValid(); ++RAI)
      SPAliases.insert(*RAI);
  }

  // Build some common stack positions -- full registers being spilt to the
  // stack.
  StackSlotIdxes.insert({{8, 0}, 0});
  StackSlotIdxes.insert({{16, 0}, 1});
  StackSlotIdxes.insert({{32, 0}, 2});
  StackSlotIdxes.insert({{64, 0}, 3});
  StackSlotIdxes.insert({{128, 0}, 4});
  StackSlotIdxes.insert({{256, 0}, 5});
  StackSlotIdxes.insert({{512, 0}, 6});

  // Traverse all the subregister idxes, and ensure there's an index for them.
  // Duplicates are no problem: we're interested in their position in the
  // stack slot; we don't want to type the slot.
  for (unsigned int I = 1; I < TRI.getNumSubRegIndices(); ++I) {
    unsigned Size = TRI.getSubRegIdxSize(I);
    unsigned Offs = TRI.getSubRegIdxOffset(I);
    unsigned Idx = StackSlotIdxes.size();

    // Some subregs have -1, -2 and so forth fed into their fields, to mean
    // special backend things. Ignore those.
    if (Size > 60000 || Offs > 60000)
      continue;

    StackSlotIdxes.insert({{Size, Offs}, Idx});
  }

  // There may also be strange register class sizes (think x86 fp80s).
  for (const TargetRegisterClass *RC : TRI.regclasses()) {
    unsigned Size = TRI.getRegSizeInBits(*RC);

    // We might see special reserved values as sizes, and classes for other
    // stuff the machine tries to model. If it's more than 512 bits, then it
    // is very unlikely to be a register that can be spilt.
    if (Size > 512)
      continue;

    unsigned Idx = StackSlotIdxes.size();
    StackSlotIdxes.insert({{Size, 0}, Idx});
  }

  for (auto &Idx : StackSlotIdxes)
    StackIdxesToPos[Idx.second] = Idx.first;

  NumSlotIdxes = StackSlotIdxes.size();
}

LocIdx MLocTracker::trackRegister(unsigned ID) {
  assert(ID != 0);
  LocIdx NewIdx = LocIdx(LocIdxToIDNum.size());
  LocIdxToIDNum.grow(NewIdx);
  LocIdxToLocID.grow(NewIdx);

  // Default: it's an mphi.
  ValueIDNum ValNum = {CurBB, 0, NewIdx};
  // Was this reg ever touched by a regmask?
  for (const auto &MaskPair : reverse(Masks)) {
    if (MaskPair.first->clobbersPhysReg(ID)) {
      // There was an earlier def we skipped.
      ValNum = {CurBB, MaskPair.second, NewIdx};
      break;
    }
  }

  LocIdxToIDNum[NewIdx] = ValNum;
  LocIdxToLocID[NewIdx] = ID;
  return NewIdx;
}

void MLocTracker::writeRegMask(const MachineOperand *MO, unsigned CurBB,
                               unsigned InstID) {
  // Def any register we track that isn't preserved. The regmask terminates
  // the liveness of a register, meaning its value can't be relied upon --
  // we represent this by giving it a new value.
  for (auto Location : locations()) {
    unsigned ID = LocIdxToLocID[Location.Idx];
    // Don't clobber SP, even if the mask says it's clobbered.
    if (ID < NumRegs && !SPAliases.count(ID) && MO->clobbersPhysReg(ID))
      defReg(ID, CurBB, InstID);
  }
  Masks.push_back(std::make_pair(MO, InstID));
}

std::optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) {
  SpillLocationNo SpillID(SpillLocs.idFor(L));

  if (SpillID.id() == 0) {
    // If there is no location, and we have reached the limit of how many stack
    // slots to track, then don't track this one.
    if (SpillLocs.size() >= StackWorkingSetLimit)
      return std::nullopt;

    // Spill location is untracked: create record for this one, and all
    // subregister slots too.
    SpillID = SpillLocationNo(SpillLocs.insert(L));
    for (unsigned StackIdx = 0; StackIdx < NumSlotIdxes; ++StackIdx) {
      unsigned L = getSpillIDWithIdx(SpillID, StackIdx);
      LocIdx Idx = LocIdx(LocIdxToIDNum.size()); // New idx
      LocIdxToIDNum.grow(Idx);
      LocIdxToLocID.grow(Idx);
      LocIDToLocIdx.push_back(Idx);
      LocIdxToLocID[Idx] = L;
      // Initialize to PHI value; corresponds to the location's live-in value
      // during transfer function construction.
      LocIdxToIDNum[Idx] = ValueIDNum(CurBB, 0, Idx);
    }
  }
  return SpillID;
}

std::string MLocTracker::LocIdxToName(LocIdx Idx) const {
  unsigned ID = LocIdxToLocID[Idx];
  if (ID >= NumRegs) {
    StackSlotPos Pos = locIDToSpillIdx(ID);
    ID -= NumRegs;
    unsigned Slot = ID / NumSlotIdxes;
    return Twine("slot ")
        .concat(Twine(Slot).concat(Twine(" sz ").concat(
            Twine(Pos.first).concat(Twine(" offs ").concat(Twine(Pos.second))))))
        .str();
  } else {
    return TRI.getRegAsmName(ID).str();
  }
}

std::string MLocTracker::IDAsString(const ValueIDNum &Num) const {
  std::string DefName = LocIdxToName(Num.getLoc());
  return Num.asString(DefName);
}

#ifndef NDEBUG
LLVM_DUMP_METHOD void MLocTracker::dump() {
  for (auto Location : locations()) {
    std::string MLocName = LocIdxToName(Location.Value.getLoc());
    std::string DefName = Location.Value.asString(MLocName);
    dbgs() << LocIdxToName(Location.Idx) << " --> " << DefName << "\n";
  }
}

LLVM_DUMP_METHOD void MLocTracker::dump_mloc_map() {
  for (auto Location : locations()) {
    std::string foo = LocIdxToName(Location.Idx);
    dbgs() << "Idx " << Location.Idx.asU64() << " " << foo << "\n";
  }
}
#endif
1188 | |
1189 | MachineInstrBuilder |
1190 | MLocTracker::emitLoc(const SmallVectorImpl<ResolvedDbgOp> &DbgOps, |
1191 | const DebugVariable &Var, const DILocation *DILoc, |
1192 | const DbgValueProperties &Properties) { |
1193 | DebugLoc DL = DebugLoc(DILoc); |
1194 | |
1195 | const MCInstrDesc &Desc = Properties.IsVariadic |
1196 | ? TII.get(Opcode: TargetOpcode::DBG_VALUE_LIST) |
1197 | : TII.get(Opcode: TargetOpcode::DBG_VALUE); |
1198 | |
1199 | #ifdef EXPENSIVE_CHECKS |
1200 | assert(all_of(DbgOps, |
1201 | [](const ResolvedDbgOp &Op) { |
1202 | return Op.IsConst || !Op.Loc.isIllegal(); |
1203 | }) && |
1204 | "Did not expect illegal ops in DbgOps." ); |
1205 | assert((DbgOps.size() == 0 || |
1206 | DbgOps.size() == Properties.getLocationOpCount()) && |
1207 | "Expected to have either one DbgOp per MI LocationOp, or none." ); |
1208 | #endif |
1209 | |
1210 | auto GetRegOp = [](unsigned Reg) -> MachineOperand { |
1211 | return MachineOperand::CreateReg( |
1212 | /* Reg */ Reg, /* isDef */ false, /* isImp */ false, |
1213 | /* isKill */ false, /* isDead */ false, |
1214 | /* isUndef */ false, /* isEarlyClobber */ false, |
1215 | /* SubReg */ 0, /* isDebug */ true); |
1216 | }; |
1217 | |
1218 | SmallVector<MachineOperand> MOs; |
1219 | |
1220 | auto EmitUndef = [&]() { |
1221 | MOs.clear(); |
1222 | MOs.assign(NumElts: Properties.getLocationOpCount(), Elt: GetRegOp(0)); |
1223 | return BuildMI(MF, DL, MCID: Desc, IsIndirect: false, MOs, Variable: Var.getVariable(), |
1224 | Expr: Properties.DIExpr); |
1225 | }; |
1226 | |
1227 | // Don't bother passing any real operands to BuildMI if any of them would be |
1228 | // $noreg. |
1229 | if (DbgOps.empty()) |
1230 | return EmitUndef(); |
1231 | |
1232 | bool Indirect = Properties.Indirect; |
1233 | |
1234 | const DIExpression *Expr = Properties.DIExpr; |
1235 | |
1236 | assert(DbgOps.size() == Properties.getLocationOpCount()); |
1237 | |
1238 | // If all locations are valid, accumulate them into our list of |
1239 | // MachineOperands. For any spilled locations, either update the indirectness |
1240 | // register or apply the appropriate transformations in the DIExpression. |
1241 | for (size_t Idx = 0; Idx < Properties.getLocationOpCount(); ++Idx) { |
1242 | const ResolvedDbgOp &Op = DbgOps[Idx]; |
1243 | |
1244 | if (Op.IsConst) { |
1245 | MOs.push_back(Elt: Op.MO); |
1246 | continue; |
1247 | } |
1248 | |
1249 | LocIdx MLoc = Op.Loc; |
1250 | unsigned LocID = LocIdxToLocID[MLoc]; |
1251 | if (LocID >= NumRegs) { |
1252 | SpillLocationNo SpillID = locIDToSpill(ID: LocID); |
1253 | StackSlotPos StackIdx = locIDToSpillIdx(ID: LocID); |
1254 | unsigned short Offset = StackIdx.second; |
1255 | |
1256 | // TODO: support variables that are located in spill slots, with non-zero |
1257 | // offsets from the start of the spill slot. It would require some more |
1258 | // complex DIExpression calculations. This doesn't seem to be produced by |
1259 | // LLVM right now, so don't try and support it. |
1260 | // Accept no-subregister slots and subregisters where the offset is zero. |
1261 | // The consumer should already have type information to work out how large |
1262 | // the variable is. |
1263 | if (Offset == 0) { |
1264 | const SpillLoc &Spill = SpillLocs[SpillID.id()]; |
1265 | unsigned Base = Spill.SpillBase; |
1266 | |
1267 | // There are several ways we can dereference things, and several inputs |
1268 | // to consider: |
1269 | // * NRVO variables will appear with IsIndirect set, but should have |
1270 | // nothing else in their DIExpressions, |
1271 | // * Variables with DW_OP_stack_value in their expr already need an |
1272 | // explicit dereference of the stack location, |
1273 | // * Values that don't match the variable size need DW_OP_deref_size, |
1274 | // * Everything else can just become a simple location expression. |
1275 | |
1276 | // We need to use deref_size whenever there's a mismatch between the |
1277 | // size of value and the size of variable portion being read. |
1278 | // Additionally, we should use it whenever dealing with stack_value |
1279 | // fragments, to avoid the consumer having to determine the deref size |
1280 | // from DW_OP_piece. |
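// For example (illustrative): a 4-byte value spilled on behalf of an 8-byte
// variable would be read back with "DW_OP_deref_size 4" plus
// DW_OP_stack_value, rather than with a plain DW_OP_deref.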
1281 | bool UseDerefSize = false; |
1282 | unsigned ValueSizeInBits = getLocSizeInBits(L: MLoc); |
1283 | unsigned DerefSizeInBytes = ValueSizeInBits / 8; |
1284 | if (auto Fragment = Var.getFragment()) { |
1285 | unsigned VariableSizeInBits = Fragment->SizeInBits; |
1286 | if (VariableSizeInBits != ValueSizeInBits || Expr->isComplex()) |
1287 | UseDerefSize = true; |
1288 | } else if (auto Size = Var.getVariable()->getSizeInBits()) { |
1289 | if (*Size != ValueSizeInBits) { |
1290 | UseDerefSize = true; |
1291 | } |
1292 | } |
1293 | |
1294 | SmallVector<uint64_t, 5> OffsetOps; |
1295 | TRI.getOffsetOpcodes(Offset: Spill.SpillOffset, Ops&: OffsetOps); |
1296 | bool StackValue = false; |
1297 | |
1298 | if (Properties.Indirect) { |
1299 | // This is something like an NRVO variable, where the pointer has been |
1300 | // spilt to the stack. It should end up being a memory location, with |
1301 | // the pointer to the variable loaded off the stack with a deref: |
1302 | assert(!Expr->isImplicit()); |
1303 | OffsetOps.push_back(Elt: dwarf::DW_OP_deref); |
1304 | } else if (UseDerefSize && Expr->isSingleLocationExpression()) { |
1305 | // TODO: Figure out how to handle deref size issues for variadic |
1306 | // values. |
1307 | // We're loading a value off the stack that's not the same size as the |
1308 | // variable. Add / subtract stack offset, explicitly deref with a |
1309 | // size, and add DW_OP_stack_value if not already present. |
1310 | OffsetOps.push_back(Elt: dwarf::DW_OP_deref_size); |
1311 | OffsetOps.push_back(Elt: DerefSizeInBytes); |
1312 | StackValue = true; |
1313 | } else if (Expr->isComplex() || Properties.IsVariadic) { |
// A variable with no size ambiguity, but with extra elements in its
// expression. Manually dereference the stack location.
1316 | OffsetOps.push_back(Elt: dwarf::DW_OP_deref); |
1317 | } else { |
1318 | // A plain value that has been spilt to the stack, with no further |
1319 | // context. Request a location expression, marking the DBG_VALUE as |
1320 | // IsIndirect. |
1321 | Indirect = true; |
1322 | } |
1323 | |
1324 | Expr = DIExpression::appendOpsToArg(Expr, Ops: OffsetOps, ArgNo: Idx, StackValue); |
1325 | MOs.push_back(Elt: GetRegOp(Base)); |
1326 | } else { |
1327 | // This is a stack location with a weird subregister offset: emit an |
1328 | // undef DBG_VALUE instead. |
1329 | return EmitUndef(); |
1330 | } |
1331 | } else { |
1332 | // Non-empty, non-stack slot, must be a plain register. |
1333 | MOs.push_back(Elt: GetRegOp(LocID)); |
1334 | } |
1335 | } |
1336 | |
1337 | return BuildMI(MF, DL, MCID: Desc, IsIndirect: Indirect, MOs, Variable: Var.getVariable(), Expr); |
1338 | } |
1339 | |
1340 | /// Default construct and initialize the pass. |
1341 | InstrRefBasedLDV::InstrRefBasedLDV() = default; |
1342 | |
1343 | bool InstrRefBasedLDV::isCalleeSaved(LocIdx L) const { |
1344 | unsigned Reg = MTracker->LocIdxToLocID[L]; |
1345 | return isCalleeSavedReg(R: Reg); |
1346 | } |
1347 | bool InstrRefBasedLDV::isCalleeSavedReg(Register R) const { |
1348 | for (MCRegAliasIterator RAI(R, TRI, true); RAI.isValid(); ++RAI) |
1349 | if (CalleeSavedRegs.test(Idx: *RAI)) |
1350 | return true; |
1351 | return false; |
1352 | } |
1353 | |
1354 | //===----------------------------------------------------------------------===// |
1355 | // Debug Range Extension Implementation |
1356 | //===----------------------------------------------------------------------===// |
1357 | |
1358 | #ifndef NDEBUG |
1359 | // Something to restore in the future. |
1360 | // void InstrRefBasedLDV::printVarLocInMBB(..) |
1361 | #endif |
1362 | |
1363 | std::optional<SpillLocationNo> |
1364 | InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { |
1365 | assert(MI.hasOneMemOperand() && |
1366 | "Spill instruction does not have exactly one memory operand?" ); |
1367 | auto MMOI = MI.memoperands_begin(); |
1368 | const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); |
1369 | assert(PVal->kind() == PseudoSourceValue::FixedStack && |
1370 | "Inconsistent memory operand in spill instruction" ); |
1371 | int FI = cast<FixedStackPseudoSourceValue>(Val: PVal)->getFrameIndex(); |
1372 | const MachineBasicBlock *MBB = MI.getParent(); |
1373 | Register Reg; |
1374 | StackOffset Offset = TFI->getFrameIndexReference(MF: *MBB->getParent(), FI, FrameReg&: Reg); |
1375 | return MTracker->getOrTrackSpillLoc(L: {.SpillBase: Reg, .SpillOffset: Offset}); |
1376 | } |
1377 | |
1378 | std::optional<LocIdx> |
1379 | InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) { |
1380 | std::optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI); |
1381 | if (!SpillLoc) |
1382 | return std::nullopt; |
1383 | |
1384 | // Where in the stack slot is this value defined -- i.e., what size of value |
1385 | // is this? An important question, because it could be loaded into a register |
1386 | // from the stack at some point. Happily the memory operand will tell us |
1387 | // the size written to the stack. |
1388 | auto *MemOperand = *MI.memoperands_begin(); |
1389 | LocationSize SizeInBits = MemOperand->getSizeInBits(); |
1390 | assert(SizeInBits.hasValue() && "Expected to find a valid size!" ); |
1391 | |
1392 | // Find that position in the stack indexes we're tracking. |
1393 | auto IdxIt = MTracker->StackSlotIdxes.find(Val: {SizeInBits.getValue(), 0}); |
1394 | if (IdxIt == MTracker->StackSlotIdxes.end()) |
// That index is not tracked. This is surprising, and unlikely to ever
1396 | // occur, but the safe action is to indicate the variable is optimised out. |
1397 | return std::nullopt; |
1398 | |
1399 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillLoc, Idx: IdxIt->second); |
1400 | return MTracker->getSpillMLoc(SpillID); |
1401 | } |
1402 | |
1403 | /// End all previous ranges related to @MI and start a new range from @MI |
1404 | /// if it is a DBG_VALUE instr. |
1405 | bool InstrRefBasedLDV::transferDebugValue(const MachineInstr &MI) { |
1406 | if (!MI.isDebugValue()) |
1407 | return false; |
1408 | |
1409 | assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) && |
1410 | "Expected inlined-at fields to agree" ); |
1411 | |
// If there are no instructions in this lexical scope, do no location tracking
// at all: this variable shouldn't get a legitimate location range.
1414 | auto *Scope = LS.findLexicalScope(DL: MI.getDebugLoc().get()); |
1415 | if (Scope == nullptr) |
1416 | return true; // handled it; by doing nothing |
1417 | |
1418 | // MLocTracker needs to know that this register is read, even if it's only |
1419 | // read by a debug inst. |
1420 | for (const MachineOperand &MO : MI.debug_operands()) |
1421 | if (MO.isReg() && MO.getReg() != 0) |
1422 | (void)MTracker->readReg(R: MO.getReg()); |
1423 | |
1424 | // If we're preparing for the second analysis (variables), the machine value |
1425 | // locations are already solved, and we report this DBG_VALUE and the value |
// it refers to, to VLocTracker.
1427 | if (VTracker) { |
1428 | SmallVector<DbgOpID> DebugOps; |
1429 | // Feed defVar the new variable location, or if this is a DBG_VALUE $noreg, |
1430 | // feed defVar None. |
1431 | if (!MI.isUndefDebugValue()) { |
1432 | for (const MachineOperand &MO : MI.debug_operands()) { |
1433 | // There should be no undef registers here, as we've screened for undef |
1434 | // debug values. |
1435 | if (MO.isReg()) { |
1436 | DebugOps.push_back(Elt: DbgOpStore.insert(Op: MTracker->readReg(R: MO.getReg()))); |
1437 | } else if (MO.isImm() || MO.isFPImm() || MO.isCImm()) { |
1438 | DebugOps.push_back(Elt: DbgOpStore.insert(Op: MO)); |
1439 | } else { |
1440 | llvm_unreachable("Unexpected debug operand type." ); |
1441 | } |
1442 | } |
1443 | } |
1444 | VTracker->defVar(MI, Properties: DbgValueProperties(MI), DebugOps); |
1445 | } |
1446 | |
1447 | // If performing final tracking of transfers, report this variable definition |
1448 | // to the TransferTracker too. |
1449 | if (TTracker) |
1450 | TTracker->redefVar(MI); |
1451 | return true; |
1452 | } |
1453 | |
1454 | std::optional<ValueIDNum> InstrRefBasedLDV::getValueForInstrRef( |
1455 | unsigned InstNo, unsigned OpNo, MachineInstr &MI, |
1456 | const FuncValueTable *MLiveOuts, const FuncValueTable *MLiveIns) { |
1457 | // Various optimizations may have happened to the value during codegen, |
1458 | // recorded in the value substitution table. Apply any substitutions to |
1459 | // the instruction / operand number in this DBG_INSTR_REF, and collect |
1460 | // any subregister extractions performed during optimization. |
1461 | const MachineFunction &MF = *MI.getParent()->getParent(); |
1462 | |
1463 | // Create dummy substitution with Src set, for lookup. |
1464 | auto SoughtSub = |
1465 | MachineFunction::DebugSubstitution({InstNo, OpNo}, {0, 0}, 0); |
1466 | |
1467 | SmallVector<unsigned, 4> SeenSubregs; |
1468 | auto LowerBoundIt = llvm::lower_bound(Range: MF.DebugValueSubstitutions, Value&: SoughtSub); |
1469 | while (LowerBoundIt != MF.DebugValueSubstitutions.end() && |
1470 | LowerBoundIt->Src == SoughtSub.Src) { |
1471 | std::tie(args&: InstNo, args&: OpNo) = LowerBoundIt->Dest; |
1472 | SoughtSub.Src = LowerBoundIt->Dest; |
1473 | if (unsigned Subreg = LowerBoundIt->Subreg) |
1474 | SeenSubregs.push_back(Elt: Subreg); |
1475 | LowerBoundIt = llvm::lower_bound(Range: MF.DebugValueSubstitutions, Value&: SoughtSub); |
1476 | } |
1477 | |
1478 | // Default machine value number is <None> -- if no instruction defines |
1479 | // the corresponding value, it must have been optimized out. |
1480 | std::optional<ValueIDNum> NewID; |
1481 | |
1482 | // Try to lookup the instruction number, and find the machine value number |
1483 | // that it defines. It could be an instruction, or a PHI. |
1484 | auto InstrIt = DebugInstrNumToInstr.find(x: InstNo); |
1485 | auto PHIIt = llvm::lower_bound(Range&: DebugPHINumToValue, Value&: InstNo); |
1486 | if (InstrIt != DebugInstrNumToInstr.end()) { |
1487 | const MachineInstr &TargetInstr = *InstrIt->second.first; |
1488 | uint64_t BlockNo = TargetInstr.getParent()->getNumber(); |
1489 | |
1490 | // Pick out the designated operand. It might be a memory reference, if |
1491 | // a register def was folded into a stack store. |
1492 | if (OpNo == MachineFunction::DebugOperandMemNumber && |
1493 | TargetInstr.hasOneMemOperand()) { |
1494 | std::optional<LocIdx> L = findLocationForMemOperand(MI: TargetInstr); |
1495 | if (L) |
1496 | NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L); |
1497 | } else if (OpNo != MachineFunction::DebugOperandMemNumber) { |
// Permit the debug-info to be completely wrong: identifying a nonexistent
1499 | // operand, or one that is not a register definition, means something |
1500 | // unexpected happened during optimisation. Broken debug-info, however, |
1501 | // shouldn't crash the compiler -- instead leave the variable value as |
1502 | // None, which will make it appear "optimised out". |
1503 | if (OpNo < TargetInstr.getNumOperands()) { |
1504 | const MachineOperand &MO = TargetInstr.getOperand(i: OpNo); |
1505 | |
1506 | if (MO.isReg() && MO.isDef() && MO.getReg()) { |
1507 | unsigned LocID = MTracker->getLocID(Reg: MO.getReg()); |
1508 | LocIdx L = MTracker->LocIDToLocIdx[LocID]; |
1509 | NewID = ValueIDNum(BlockNo, InstrIt->second.second, L); |
1510 | } |
1511 | } |
1512 | |
1513 | if (!NewID) { |
1514 | LLVM_DEBUG( |
1515 | { dbgs() << "Seen instruction reference to illegal operand\n" ; }); |
1516 | } |
1517 | } |
1518 | // else: NewID is left as None. |
1519 | } else if (PHIIt != DebugPHINumToValue.end() && PHIIt->InstrNum == InstNo) { |
1520 | // It's actually a PHI value. Which value it is might not be obvious, use |
1521 | // the resolver helper to find out. |
1522 | assert(MLiveOuts && MLiveIns); |
1523 | NewID = resolveDbgPHIs(MF&: *MI.getParent()->getParent(), MLiveOuts: *MLiveOuts, MLiveIns: *MLiveIns, |
1524 | Here&: MI, InstrNum: InstNo); |
1525 | } |
1526 | |
1527 | // Apply any subregister extractions, in reverse. We might have seen code |
1528 | // like this: |
1529 | // CALL64 @foo, implicit-def $rax |
1530 | // %0:gr64 = COPY $rax |
1531 | // %1:gr32 = COPY %0.sub_32bit |
1532 | // %2:gr16 = COPY %1.sub_16bit |
1533 | // %3:gr8 = COPY %2.sub_8bit |
1534 | // In which case each copy would have been recorded as a substitution with |
1535 | // a subregister qualifier. Apply those qualifiers now. |
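// (Illustratively, chasing that chain accumulates sub_32bit, sub_16bit and
// sub_8bit, which narrows to an 8-bit, zero-offset portion of the original
// $rax def.)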
1536 | if (NewID && !SeenSubregs.empty()) { |
1537 | unsigned Offset = 0; |
1538 | unsigned Size = 0; |
1539 | |
1540 | // Look at each subregister that we passed through, and progressively |
1541 | // narrow in, accumulating any offsets that occur. Substitutions should |
1542 | // only ever be the same or narrower width than what they read from; |
1543 | // iterate in reverse order so that we go from wide to small. |
1544 | for (unsigned Subreg : reverse(C&: SeenSubregs)) { |
1545 | unsigned ThisSize = TRI->getSubRegIdxSize(Idx: Subreg); |
1546 | unsigned ThisOffset = TRI->getSubRegIdxOffset(Idx: Subreg); |
1547 | Offset += ThisOffset; |
1548 | Size = (Size == 0) ? ThisSize : std::min(a: Size, b: ThisSize); |
1549 | } |
1550 | |
// If that worked, look for an appropriate subregister within the register
// where the def happens. Don't look at values that were defined during
1553 | // a stack write: we can't currently express register locations within |
1554 | // spills. |
1555 | LocIdx L = NewID->getLoc(); |
1556 | if (NewID && !MTracker->isSpill(Idx: L)) { |
1557 | // Find the register class for the register where this def happened. |
1558 | // FIXME: no index for this? |
1559 | Register Reg = MTracker->LocIdxToLocID[L]; |
1560 | const TargetRegisterClass *TRC = nullptr; |
1561 | for (const auto *TRCI : TRI->regclasses()) |
1562 | if (TRCI->contains(Reg)) |
1563 | TRC = TRCI; |
1564 | assert(TRC && "Couldn't find target register class?" ); |
1565 | |
1566 | // If the register we have isn't the right size or in the right place, |
// try to find a subregister inside it.
1568 | unsigned MainRegSize = TRI->getRegSizeInBits(RC: *TRC); |
1569 | if (Size != MainRegSize || Offset) { |
1570 | // Enumerate all subregisters, searching. |
1571 | Register NewReg = 0; |
1572 | for (MCPhysReg SR : TRI->subregs(Reg)) { |
1573 | unsigned Subreg = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR); |
1574 | unsigned SubregSize = TRI->getSubRegIdxSize(Idx: Subreg); |
1575 | unsigned SubregOffset = TRI->getSubRegIdxOffset(Idx: Subreg); |
1576 | if (SubregSize == Size && SubregOffset == Offset) { |
1577 | NewReg = SR; |
1578 | break; |
1579 | } |
1580 | } |
1581 | |
1582 | // If we didn't find anything: there's no way to express our value. |
1583 | if (!NewReg) { |
1584 | NewID = std::nullopt; |
1585 | } else { |
1586 | // Re-state the value as being defined within the subregister |
1587 | // that we found. |
1588 | LocIdx NewLoc = MTracker->lookupOrTrackRegister(ID: NewReg); |
1589 | NewID = ValueIDNum(NewID->getBlock(), NewID->getInst(), NewLoc); |
1590 | } |
1591 | } |
1592 | } else { |
1593 | // If we can't handle subregisters, unset the new value. |
1594 | NewID = std::nullopt; |
1595 | } |
1596 | } |
1597 | |
1598 | return NewID; |
1599 | } |
1600 | |
1601 | bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, |
1602 | const FuncValueTable *MLiveOuts, |
1603 | const FuncValueTable *MLiveIns) { |
1604 | if (!MI.isDebugRef()) |
1605 | return false; |
1606 | |
1607 | // Only handle this instruction when we are building the variable value |
1608 | // transfer function. |
1609 | if (!VTracker && !TTracker) |
1610 | return false; |
1611 | |
1612 | const DILocalVariable *Var = MI.getDebugVariable(); |
1613 | const DIExpression *Expr = MI.getDebugExpression(); |
1614 | const DILocation *DebugLoc = MI.getDebugLoc(); |
1615 | const DILocation *InlinedAt = DebugLoc->getInlinedAt(); |
1616 | assert(Var->isValidLocationForIntrinsic(DebugLoc) && |
1617 | "Expected inlined-at fields to agree" ); |
1618 | |
1619 | DebugVariable V(Var, Expr, InlinedAt); |
1620 | |
1621 | auto *Scope = LS.findLexicalScope(DL: MI.getDebugLoc().get()); |
1622 | if (Scope == nullptr) |
1623 | return true; // Handled by doing nothing. This variable is never in scope. |
1624 | |
1625 | SmallVector<DbgOpID> DbgOpIDs; |
1626 | for (const MachineOperand &MO : MI.debug_operands()) { |
1627 | if (!MO.isDbgInstrRef()) { |
1628 | assert(!MO.isReg() && "DBG_INSTR_REF should not contain registers" ); |
1629 | DbgOpID ConstOpID = DbgOpStore.insert(Op: DbgOp(MO)); |
1630 | DbgOpIDs.push_back(Elt: ConstOpID); |
1631 | continue; |
1632 | } |
1633 | |
1634 | unsigned InstNo = MO.getInstrRefInstrIndex(); |
1635 | unsigned OpNo = MO.getInstrRefOpIndex(); |
1636 | |
1637 | // Default machine value number is <None> -- if no instruction defines |
1638 | // the corresponding value, it must have been optimized out. |
1639 | std::optional<ValueIDNum> NewID = |
1640 | getValueForInstrRef(InstNo, OpNo, MI, MLiveOuts, MLiveIns); |
1641 | // We have a value number or std::nullopt. If the latter, then kill the |
1642 | // entire debug value. |
1643 | if (NewID) { |
1644 | DbgOpIDs.push_back(Elt: DbgOpStore.insert(Op: *NewID)); |
1645 | } else { |
1646 | DbgOpIDs.clear(); |
1647 | break; |
1648 | } |
1649 | } |
1650 | |
1651 | // We have a DbgOpID for every value or for none. Tell the variable value |
1652 | // tracker about it. The rest of this LiveDebugValues implementation acts |
1653 | // exactly the same for DBG_INSTR_REFs as DBG_VALUEs (just, the former can |
1654 | // refer to values that aren't immediately available). |
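// Note that a DBG_INSTR_REF is treated here as variadic and never indirect,
// which is what the (Expr, false, true) properties constructed below encode.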
1655 | DbgValueProperties Properties(Expr, false, true); |
1656 | if (VTracker) |
1657 | VTracker->defVar(MI, Properties, DebugOps: DbgOpIDs); |
1658 | |
1659 | // If we're on the final pass through the function, decompose this INSTR_REF |
1660 | // into a plain DBG_VALUE. |
1661 | if (!TTracker) |
1662 | return true; |
1663 | |
1664 | // Fetch the concrete DbgOps now, as we will need them later. |
1665 | SmallVector<DbgOp> DbgOps; |
1666 | for (DbgOpID OpID : DbgOpIDs) { |
1667 | DbgOps.push_back(Elt: DbgOpStore.find(ID: OpID)); |
1668 | } |
1669 | |
1670 | // Pick a location for the machine value number, if such a location exists. |
1671 | // (This information could be stored in TransferTracker to make it faster). |
1672 | SmallDenseMap<ValueIDNum, TransferTracker::LocationAndQuality> FoundLocs; |
1673 | SmallVector<ValueIDNum> ValuesToFind; |
// Initialize the preferred-location map with illegal locations, to be
1675 | // filled in later. |
1676 | for (const DbgOp &Op : DbgOps) { |
1677 | if (!Op.IsConst) |
1678 | if (FoundLocs.insert(KV: {Op.ID, TransferTracker::LocationAndQuality()}) |
1679 | .second) |
1680 | ValuesToFind.push_back(Elt: Op.ID); |
1681 | } |
1682 | |
1683 | for (auto Location : MTracker->locations()) { |
1684 | LocIdx CurL = Location.Idx; |
1685 | ValueIDNum ID = MTracker->readMLoc(L: CurL); |
1686 | auto ValueToFindIt = find(Range&: ValuesToFind, Val: ID); |
1687 | if (ValueToFindIt == ValuesToFind.end()) |
1688 | continue; |
1689 | auto &Previous = FoundLocs.find(Val: ID)->second; |
1690 | // If this is the first location with that value, pick it. Otherwise, |
1691 | // consider whether it's a "longer term" location. |
1692 | std::optional<TransferTracker::LocationQuality> ReplacementQuality = |
1693 | TTracker->getLocQualityIfBetter(L: CurL, Min: Previous.getQuality()); |
1694 | if (ReplacementQuality) { |
1695 | Previous = TransferTracker::LocationAndQuality(CurL, *ReplacementQuality); |
1696 | if (Previous.isBest()) { |
1697 | ValuesToFind.erase(CI: ValueToFindIt); |
1698 | if (ValuesToFind.empty()) |
1699 | break; |
1700 | } |
1701 | } |
1702 | } |
1703 | |
1704 | SmallVector<ResolvedDbgOp> NewLocs; |
1705 | for (const DbgOp &DbgOp : DbgOps) { |
1706 | if (DbgOp.IsConst) { |
1707 | NewLocs.push_back(Elt: DbgOp.MO); |
1708 | continue; |
1709 | } |
1710 | LocIdx FoundLoc = FoundLocs.find(Val: DbgOp.ID)->second.getLoc(); |
1711 | if (FoundLoc.isIllegal()) { |
1712 | NewLocs.clear(); |
1713 | break; |
1714 | } |
1715 | NewLocs.push_back(Elt: FoundLoc); |
1716 | } |
1717 | // Tell transfer tracker that the variable value has changed. |
1718 | TTracker->redefVar(MI, Properties, NewLocs); |
1719 | |
1720 | // If there were values with no location, but all such values are defined in |
1721 | // later instructions in this block, this is a block-local use-before-def. |
1722 | if (!DbgOps.empty() && NewLocs.empty()) { |
1723 | bool IsValidUseBeforeDef = true; |
1724 | uint64_t LastUseBeforeDef = 0; |
1725 | for (auto ValueLoc : FoundLocs) { |
1726 | ValueIDNum NewID = ValueLoc.first; |
1727 | LocIdx FoundLoc = ValueLoc.second.getLoc(); |
1728 | if (!FoundLoc.isIllegal()) |
1729 | continue; |
// If we have a value with no location that is not defined later in this
// block, then it has no location in this block, leaving this value undefined.
1732 | if (NewID.getBlock() != CurBB || NewID.getInst() <= CurInst) { |
1733 | IsValidUseBeforeDef = false; |
1734 | break; |
1735 | } |
1736 | LastUseBeforeDef = std::max(a: LastUseBeforeDef, b: NewID.getInst()); |
1737 | } |
1738 | if (IsValidUseBeforeDef) { |
1739 | DebugVariableID VID = DVMap.insertDVID(Var&: V, Loc: MI.getDebugLoc().get()); |
1740 | TTracker->addUseBeforeDef(VarID: VID, Properties: {MI.getDebugExpression(), false, true}, |
1741 | DbgOps, Inst: LastUseBeforeDef); |
1742 | } |
1743 | } |
1744 | |
1745 | // Produce a DBG_VALUE representing what this DBG_INSTR_REF meant. |
1746 | // This DBG_VALUE is potentially a $noreg / undefined location, if |
1747 | // FoundLoc is illegal. |
1748 | // (XXX -- could morph the DBG_INSTR_REF in the future). |
1749 | MachineInstr *DbgMI = |
1750 | MTracker->emitLoc(DbgOps: NewLocs, Var: V, DILoc: MI.getDebugLoc().get(), Properties); |
1751 | DebugVariableID ID = DVMap.getDVID(Var: V); |
1752 | |
1753 | TTracker->PendingDbgValues.push_back(Elt: std::make_pair(x&: ID, y&: DbgMI)); |
1754 | TTracker->flushDbgValues(Pos: MI.getIterator(), MBB: nullptr); |
1755 | return true; |
1756 | } |
1757 | |
1758 | bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) { |
1759 | if (!MI.isDebugPHI()) |
1760 | return false; |
1761 | |
1762 | // Analyse these only when solving the machine value location problem. |
1763 | if (VTracker || TTracker) |
1764 | return true; |
1765 | |
1766 | // First operand is the value location, either a stack slot or register. |
1767 | // Second is the debug instruction number of the original PHI. |
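// Illustrative MIR forms (register names assumed for example only):
//   DBG_PHI $rdi, 1
//   DBG_PHI %stack.0, 2, 64
// where the stack form carries a third operand giving the bit-size read.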
1768 | const MachineOperand &MO = MI.getOperand(i: 0); |
1769 | unsigned InstrNum = MI.getOperand(i: 1).getImm(); |
1770 | |
1771 | auto EmitBadPHI = [this, &MI, InstrNum]() -> bool { |
1772 | // Helper lambda to do any accounting when we fail to find a location for |
1773 | // a DBG_PHI. This can happen if DBG_PHIs are malformed, or refer to a |
1774 | // dead stack slot, for example. |
1775 | // Record a DebugPHIRecord with an empty value + location. |
1776 | DebugPHINumToValue.push_back( |
1777 | Elt: {.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: std::nullopt, .ReadLoc: std::nullopt}); |
1778 | return true; |
1779 | }; |
1780 | |
1781 | if (MO.isReg() && MO.getReg()) { |
1782 | // The value is whatever's currently in the register. Read and record it, |
1783 | // to be analysed later. |
1784 | Register Reg = MO.getReg(); |
1785 | ValueIDNum Num = MTracker->readReg(R: Reg); |
1786 | auto PHIRec = DebugPHIRecord( |
1787 | {.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: Num, .ReadLoc: MTracker->lookupOrTrackRegister(ID: Reg)}); |
1788 | DebugPHINumToValue.push_back(Elt: PHIRec); |
1789 | |
1790 | // Ensure this register is tracked. |
1791 | for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI) |
1792 | MTracker->lookupOrTrackRegister(ID: *RAI); |
1793 | } else if (MO.isFI()) { |
1794 | // The value is whatever's in this stack slot. |
1795 | unsigned FI = MO.getIndex(); |
1796 | |
1797 | // If the stack slot is dead, then this was optimized away. |
1798 | // FIXME: stack slot colouring should account for slots that get merged. |
1799 | if (MFI->isDeadObjectIndex(ObjectIdx: FI)) |
1800 | return EmitBadPHI(); |
1801 | |
1802 | // Identify this spill slot, ensure it's tracked. |
1803 | Register Base; |
1804 | StackOffset Offs = TFI->getFrameIndexReference(MF: *MI.getMF(), FI, FrameReg&: Base); |
1805 | SpillLoc SL = {.SpillBase: Base, .SpillOffset: Offs}; |
1806 | std::optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(L: SL); |
1807 | |
1808 | // We might be able to find a value, but have chosen not to, to avoid |
1809 | // tracking too much stack information. |
1810 | if (!SpillNo) |
1811 | return EmitBadPHI(); |
1812 | |
// Any stack location DBG_PHI should have an associated bit-size.
1814 | assert(MI.getNumOperands() == 3 && "Stack DBG_PHI with no size?" ); |
1815 | unsigned slotBitSize = MI.getOperand(i: 2).getImm(); |
1816 | |
1817 | unsigned SpillID = MTracker->getLocID(Spill: *SpillNo, Idx: {slotBitSize, 0}); |
1818 | LocIdx SpillLoc = MTracker->getSpillMLoc(SpillID); |
1819 | ValueIDNum Result = MTracker->readMLoc(L: SpillLoc); |
1820 | |
1821 | // Record this DBG_PHI for later analysis. |
1822 | auto DbgPHI = DebugPHIRecord({.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: Result, .ReadLoc: SpillLoc}); |
1823 | DebugPHINumToValue.push_back(Elt: DbgPHI); |
1824 | } else { |
// Else: if the operand is neither a legal register nor a stack slot, then
1826 | // we're being fed illegal debug-info. Record an empty PHI, so that any |
1827 | // debug users trying to read this number will be put off trying to |
1828 | // interpret the value. |
1829 | LLVM_DEBUG( |
1830 | { dbgs() << "Seen DBG_PHI with unrecognised operand format\n" ; }); |
1831 | return EmitBadPHI(); |
1832 | } |
1833 | |
1834 | return true; |
1835 | } |
1836 | |
1837 | void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { |
1838 | // Meta Instructions do not affect the debug liveness of any register they |
1839 | // define. |
1840 | if (MI.isImplicitDef()) { |
1841 | // Except when there's an implicit def, and the location it's defining has |
1842 | // no value number. The whole point of an implicit def is to announce that |
// the register is live, without being specific about its value. So define
1844 | // a value if there isn't one already. |
1845 | ValueIDNum Num = MTracker->readReg(R: MI.getOperand(i: 0).getReg()); |
1846 | // Has a legitimate value -> ignore the implicit def. |
1847 | if (Num.getLoc() != 0) |
1848 | return; |
1849 | // Otherwise, def it here. |
1850 | } else if (MI.isMetaInstruction()) |
1851 | return; |
1852 | |
1853 | // We always ignore SP defines on call instructions, they don't actually |
1854 | // change the value of the stack pointer... except for win32's _chkstk. This |
1855 | // is rare: filter quickly for the common case (no stack adjustments, not a |
1856 | // call, etc). If it is a call that modifies SP, recognise the SP register |
1857 | // defs. |
1858 | bool CallChangesSP = false; |
1859 | if (AdjustsStackInCalls && MI.isCall() && MI.getOperand(i: 0).isSymbol() && |
1860 | !strcmp(s1: MI.getOperand(i: 0).getSymbolName(), s2: StackProbeSymbolName.data())) |
1861 | CallChangesSP = true; |
1862 | |
1863 | // Test whether we should ignore a def of this register due to it being part |
1864 | // of the stack pointer. |
1865 | auto IgnoreSPAlias = [this, &MI, CallChangesSP](Register R) -> bool { |
1866 | if (CallChangesSP) |
1867 | return false; |
1868 | return MI.isCall() && MTracker->SPAliases.count(V: R); |
1869 | }; |
1870 | |
1871 | // Find the regs killed by MI, and find regmasks of preserved regs. |
1872 | // Max out the number of statically allocated elements in `DeadRegs`, as this |
1873 | // prevents fallback to std::set::count() operations. |
1874 | SmallSet<uint32_t, 32> DeadRegs; |
1875 | SmallVector<const uint32_t *, 4> RegMasks; |
1876 | SmallVector<const MachineOperand *, 4> RegMaskPtrs; |
1877 | for (const MachineOperand &MO : MI.operands()) { |
1878 | // Determine whether the operand is a register def. |
1879 | if (MO.isReg() && MO.isDef() && MO.getReg() && MO.getReg().isPhysical() && |
1880 | !IgnoreSPAlias(MO.getReg())) { |
1881 | // Remove ranges of all aliased registers. |
1882 | for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI) |
1883 | // FIXME: Can we break out of this loop early if no insertion occurs? |
1884 | DeadRegs.insert(V: *RAI); |
1885 | } else if (MO.isRegMask()) { |
1886 | RegMasks.push_back(Elt: MO.getRegMask()); |
1887 | RegMaskPtrs.push_back(Elt: &MO); |
1888 | } |
1889 | } |
1890 | |
1891 | // Tell MLocTracker about all definitions, of regmasks and otherwise. |
1892 | for (uint32_t DeadReg : DeadRegs) |
1893 | MTracker->defReg(R: DeadReg, BB: CurBB, Inst: CurInst); |
1894 | |
1895 | for (const auto *MO : RegMaskPtrs) |
1896 | MTracker->writeRegMask(MO, CurBB, InstID: CurInst); |
1897 | |
1898 | // If this instruction writes to a spill slot, def that slot. |
1899 | if (hasFoldedStackStore(MI)) { |
1900 | if (std::optional<SpillLocationNo> SpillNo = |
1901 | extractSpillBaseRegAndOffset(MI)) { |
1902 | for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { |
1903 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillNo, Idx: I); |
1904 | LocIdx L = MTracker->getSpillMLoc(SpillID); |
1905 | MTracker->setMLoc(L, Num: ValueIDNum(CurBB, CurInst, L)); |
1906 | } |
1907 | } |
1908 | } |
1909 | |
1910 | if (!TTracker) |
1911 | return; |
1912 | |
1913 | // When committing variable values to locations: tell transfer tracker that |
1914 | // we've clobbered things. It may be able to recover the variable from a |
1915 | // different location. |
1916 | |
1917 | // Inform TTracker about any direct clobbers. |
1918 | for (uint32_t DeadReg : DeadRegs) { |
1919 | LocIdx Loc = MTracker->lookupOrTrackRegister(ID: DeadReg); |
1920 | TTracker->clobberMloc(MLoc: Loc, Pos: MI.getIterator(), MakeUndef: false); |
1921 | } |
1922 | |
1923 | // Look for any clobbers performed by a register mask. Only test locations |
1924 | // that are actually being tracked. |
1925 | if (!RegMaskPtrs.empty()) { |
1926 | for (auto L : MTracker->locations()) { |
1927 | // Stack locations can't be clobbered by regmasks. |
1928 | if (MTracker->isSpill(Idx: L.Idx)) |
1929 | continue; |
1930 | |
1931 | Register Reg = MTracker->LocIdxToLocID[L.Idx]; |
1932 | if (IgnoreSPAlias(Reg)) |
1933 | continue; |
1934 | |
1935 | for (const auto *MO : RegMaskPtrs) |
1936 | if (MO->clobbersPhysReg(PhysReg: Reg)) |
1937 | TTracker->clobberMloc(MLoc: L.Idx, Pos: MI.getIterator(), MakeUndef: false); |
1938 | } |
1939 | } |
1940 | |
1941 | // Tell TTracker about any folded stack store. |
1942 | if (hasFoldedStackStore(MI)) { |
1943 | if (std::optional<SpillLocationNo> SpillNo = |
1944 | extractSpillBaseRegAndOffset(MI)) { |
1945 | for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { |
1946 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillNo, Idx: I); |
1947 | LocIdx L = MTracker->getSpillMLoc(SpillID); |
1948 | TTracker->clobberMloc(MLoc: L, Pos: MI.getIterator(), MakeUndef: true); |
1949 | } |
1950 | } |
1951 | } |
1952 | } |
1953 | |
1954 | void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) { |
1955 | // In all circumstances, re-def all aliases. It's definitely a new value now. |
1956 | for (MCRegAliasIterator RAI(DstRegNum, TRI, true); RAI.isValid(); ++RAI) |
1957 | MTracker->defReg(R: *RAI, BB: CurBB, Inst: CurInst); |
1958 | |
1959 | ValueIDNum SrcValue = MTracker->readReg(R: SrcRegNum); |
1960 | MTracker->setReg(R: DstRegNum, ValueID: SrcValue); |
1961 | |
1962 | // Copy subregisters from one location to another. |
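// For example (illustrative, x86-64): copying $rax to $rbx should also copy
// the values tracked for $eax, $ax and $al into $ebx, $bx and $bl.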
1963 | for (MCSubRegIndexIterator SRI(SrcRegNum, TRI); SRI.isValid(); ++SRI) { |
1964 | unsigned SrcSubReg = SRI.getSubReg(); |
1965 | unsigned SubRegIdx = SRI.getSubRegIndex(); |
1966 | unsigned DstSubReg = TRI->getSubReg(Reg: DstRegNum, Idx: SubRegIdx); |
1967 | if (!DstSubReg) |
1968 | continue; |
1969 | |
// Do the copy. There are two matching subregisters; the source value should
// have been def'd when the super-reg was, but the latter might not be
// tracked yet.
1973 | // This will force SrcSubReg to be tracked, if it isn't yet. Will read |
1974 | // mphi values if it wasn't tracked. |
1975 | LocIdx SrcL = MTracker->lookupOrTrackRegister(ID: SrcSubReg); |
1976 | LocIdx DstL = MTracker->lookupOrTrackRegister(ID: DstSubReg); |
1977 | (void)SrcL; |
1978 | (void)DstL; |
1979 | ValueIDNum CpyValue = MTracker->readReg(R: SrcSubReg); |
1980 | |
1981 | MTracker->setReg(R: DstSubReg, ValueID: CpyValue); |
1982 | } |
1983 | } |
1984 | |
1985 | std::optional<SpillLocationNo> |
1986 | InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, |
1987 | MachineFunction *MF) { |
1988 | // TODO: Handle multiple stores folded into one. |
1989 | if (!MI.hasOneMemOperand()) |
1990 | return std::nullopt; |
1991 | |
1992 | // Reject any memory operand that's aliased -- we can't guarantee its value. |
1993 | auto MMOI = MI.memoperands_begin(); |
1994 | const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); |
1995 | if (PVal->isAliased(MFI)) |
1996 | return std::nullopt; |
1997 | |
1998 | if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII)) |
1999 | return std::nullopt; // This is not a spill instruction, since no valid size |
2000 | // was returned from either function. |
2001 | |
2002 | return extractSpillBaseRegAndOffset(MI); |
2003 | } |
2004 | |
2005 | bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI, |
2006 | MachineFunction *MF, unsigned &Reg) { |
2007 | if (!isSpillInstruction(MI, MF)) |
2008 | return false; |
2009 | |
2010 | int FI; |
2011 | Reg = TII->isStoreToStackSlotPostFE(MI, FrameIndex&: FI); |
2012 | return Reg != 0; |
2013 | } |
2014 | |
2015 | std::optional<SpillLocationNo> |
2016 | InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, |
2017 | MachineFunction *MF, unsigned &Reg) { |
2018 | if (!MI.hasOneMemOperand()) |
2019 | return std::nullopt; |
2020 | |
2021 | // FIXME: Handle folded restore instructions with more than one memory |
2022 | // operand. |
2023 | if (MI.getRestoreSize(TII)) { |
2024 | Reg = MI.getOperand(i: 0).getReg(); |
2025 | return extractSpillBaseRegAndOffset(MI); |
2026 | } |
2027 | return std::nullopt; |
2028 | } |
2029 | |
2030 | bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { |
2031 | // XXX -- it's too difficult to implement VarLocBasedImpl's stack location |
2032 | // limitations under the new model. Therefore, when comparing them, compare |
2033 | // versions that don't attempt spills or restores at all. |
2034 | if (EmulateOldLDV) |
2035 | return false; |
2036 | |
2037 | // Strictly limit ourselves to plain loads and stores, not all instructions |
2038 | // that can access the stack. |
2039 | int DummyFI = -1; |
2040 | if (!TII->isStoreToStackSlotPostFE(MI, FrameIndex&: DummyFI) && |
2041 | !TII->isLoadFromStackSlotPostFE(MI, FrameIndex&: DummyFI)) |
2042 | return false; |
2043 | |
2044 | MachineFunction *MF = MI.getMF(); |
2045 | unsigned Reg; |
2046 | |
2047 | LLVM_DEBUG(dbgs() << "Examining instruction: " ; MI.dump();); |
2048 | |
2056 | // First, if there are any DBG_VALUEs pointing at a spill slot that is |
2057 | // written to, terminate that variable location. The value in memory |
2058 | // will have changed. DbgEntityHistoryCalculator doesn't try to detect this. |
2059 | if (std::optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) { |
2060 | // Un-set this location and clobber, so that earlier locations don't |
2061 | // continue past this store. |
2062 | for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) { |
2063 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *Loc, Idx: SlotIdx); |
2064 | std::optional<LocIdx> MLoc = MTracker->getSpillMLoc(SpillID); |
2065 | if (!MLoc) |
2066 | continue; |
2067 | |
2068 | // We need to over-write the stack slot with something (here, a def at |
2069 | // this instruction) to ensure no values are preserved in this stack slot |
2070 | // after the spill. It also prevents TTracker from trying to recover the |
2071 | // location and re-installing it in the same place. |
2072 | ValueIDNum Def(CurBB, CurInst, *MLoc); |
2073 | MTracker->setMLoc(L: *MLoc, Num: Def); |
2074 | if (TTracker) |
2075 | TTracker->clobberMloc(MLoc: *MLoc, Pos: MI.getIterator()); |
2076 | } |
2077 | } |
2078 | |
2079 | // Try to recognise spill and restore instructions that may transfer a value. |
2080 | if (isLocationSpill(MI, MF, Reg)) { |
2081 | // isLocationSpill returning true should guarantee we can extract a |
2082 | // location. |
2083 | SpillLocationNo Loc = *extractSpillBaseRegAndOffset(MI); |
2084 | |
2085 | auto DoTransfer = [&](Register SrcReg, unsigned SpillID) { |
2086 | auto ReadValue = MTracker->readReg(R: SrcReg); |
2087 | LocIdx DstLoc = MTracker->getSpillMLoc(SpillID); |
2088 | MTracker->setMLoc(L: DstLoc, Num: ReadValue); |
2089 | |
2090 | if (TTracker) { |
2091 | LocIdx SrcLoc = MTracker->getRegMLoc(R: SrcReg); |
2092 | TTracker->transferMlocs(Src: SrcLoc, Dst: DstLoc, Pos: MI.getIterator()); |
2093 | } |
2094 | }; |
2095 | |
2096 | // Then, transfer subreg bits. |
2097 | for (MCPhysReg SR : TRI->subregs(Reg)) { |
2098 | // Ensure this reg is tracked, |
2099 | (void)MTracker->lookupOrTrackRegister(ID: SR); |
2100 | unsigned SubregIdx = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR); |
2101 | unsigned SpillID = MTracker->getLocID(Spill: Loc, SpillSubReg: SubregIdx); |
2102 | DoTransfer(SR, SpillID); |
2103 | } |
2104 | |
// Directly look up the size of the main source reg, and transfer.
2106 | unsigned Size = TRI->getRegSizeInBits(Reg, MRI: *MRI); |
2107 | unsigned SpillID = MTracker->getLocID(Spill: Loc, Idx: {Size, 0}); |
2108 | DoTransfer(Reg, SpillID); |
2109 | } else { |
2110 | std::optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg); |
2111 | if (!Loc) |
2112 | return false; |
2113 | |
2114 | // Assumption: we're reading from the base of the stack slot, not some |
2115 | // offset into it. It seems very unlikely LLVM would ever generate |
2116 | // restores where this wasn't true. This then becomes a question of what |
2117 | // subregisters in the destination register line up with positions in the |
2118 | // stack slot. |
2119 | |
2120 | // Def all registers that alias the destination. |
2121 | for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI) |
2122 | MTracker->defReg(R: *RAI, BB: CurBB, Inst: CurInst); |
2123 | |
2124 | // Now find subregisters within the destination register, and load values |
2125 | // from stack slot positions. |
2126 | auto DoTransfer = [&](Register DestReg, unsigned SpillID) { |
2127 | LocIdx SrcIdx = MTracker->getSpillMLoc(SpillID); |
2128 | auto ReadValue = MTracker->readMLoc(L: SrcIdx); |
2129 | MTracker->setReg(R: DestReg, ValueID: ReadValue); |
2130 | }; |
2131 | |
2132 | for (MCPhysReg SR : TRI->subregs(Reg)) { |
2133 | unsigned Subreg = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR); |
2134 | unsigned SpillID = MTracker->getLocID(Spill: *Loc, SpillSubReg: Subreg); |
2135 | DoTransfer(SR, SpillID); |
2136 | } |
2137 | |
// Directly look up this register's slot idx by size, and transfer.
2139 | unsigned Size = TRI->getRegSizeInBits(Reg, MRI: *MRI); |
2140 | unsigned SpillID = MTracker->getLocID(Spill: *Loc, Idx: {Size, 0}); |
2141 | DoTransfer(Reg, SpillID); |
2142 | } |
2143 | return true; |
2144 | } |
2145 | |
2146 | bool InstrRefBasedLDV::transferRegisterCopy(MachineInstr &MI) { |
2147 | auto DestSrc = TII->isCopyLikeInstr(MI); |
2148 | if (!DestSrc) |
2149 | return false; |
2150 | |
2151 | const MachineOperand *DestRegOp = DestSrc->Destination; |
2152 | const MachineOperand *SrcRegOp = DestSrc->Source; |
2153 | |
2154 | Register SrcReg = SrcRegOp->getReg(); |
2155 | Register DestReg = DestRegOp->getReg(); |
2156 | |
2157 | // Ignore identity copies. Yep, these make it as far as LiveDebugValues. |
2158 | if (SrcReg == DestReg) |
2159 | return true; |
2160 | |
2161 | // For emulating VarLocBasedImpl: |
// We want to recognize instructions where the destination register is a
// callee-saved register. If a register that could be clobbered by the call
// is included, there is a good chance that it is going to be clobbered soon.
// It is more likely that the previous register, which is callee-saved, is
// going to stay unclobbered for longer, even if it is killed.
2167 | // |
2168 | // For InstrRefBasedImpl, we can track multiple locations per value, so |
2169 | // ignore this condition. |
2170 | if (EmulateOldLDV && !isCalleeSavedReg(R: DestReg)) |
2171 | return false; |
2172 | |
// VarLocBasedImpl only followed killing copies.
2174 | if (EmulateOldLDV && !SrcRegOp->isKill()) |
2175 | return false; |
2176 | |
2177 | // Before we update MTracker, remember which values were present in each of |
2178 | // the locations about to be overwritten, so that we can recover any |
2179 | // potentially clobbered variables. |
2180 | DenseMap<LocIdx, ValueIDNum> ClobberedLocs; |
2181 | if (TTracker) { |
2182 | for (MCRegAliasIterator RAI(DestReg, TRI, true); RAI.isValid(); ++RAI) { |
2183 | LocIdx ClobberedLoc = MTracker->getRegMLoc(R: *RAI); |
2184 | auto MLocIt = TTracker->ActiveMLocs.find(Val: ClobberedLoc); |
2185 | // If ActiveMLocs isn't tracking this location or there are no variables |
2186 | // using it, don't bother remembering. |
2187 | if (MLocIt == TTracker->ActiveMLocs.end() || MLocIt->second.empty()) |
2188 | continue; |
2189 | ValueIDNum Value = MTracker->readReg(R: *RAI); |
2190 | ClobberedLocs[ClobberedLoc] = Value; |
2191 | } |
2192 | } |
2193 | |
2194 | // Copy MTracker info, including subregs if available. |
2195 | InstrRefBasedLDV::performCopy(SrcRegNum: SrcReg, DstRegNum: DestReg); |
2196 | |
2197 | // The copy might have clobbered variables based on the destination register. |
2198 | // Tell TTracker about it, passing the old ValueIDNum to search for |
2199 | // alternative locations (or else terminating those variables). |
2200 | if (TTracker) { |
2201 | for (auto LocVal : ClobberedLocs) { |
2202 | TTracker->clobberMloc(MLoc: LocVal.first, OldValue: LocVal.second, Pos: MI.getIterator(), MakeUndef: false); |
2203 | } |
2204 | } |
2205 | |
2206 | // Only produce a transfer of DBG_VALUE within a block where old LDV |
2207 | // would have. We might make use of the additional value tracking in some |
2208 | // other way, later. |
2209 | if (TTracker && isCalleeSavedReg(R: DestReg) && SrcRegOp->isKill()) |
2210 | TTracker->transferMlocs(Src: MTracker->getRegMLoc(R: SrcReg), |
2211 | Dst: MTracker->getRegMLoc(R: DestReg), Pos: MI.getIterator()); |
2212 | |
2213 | // VarLocBasedImpl would quit tracking the old location after copying. |
2214 | if (EmulateOldLDV && SrcReg != DestReg) |
2215 | MTracker->defReg(R: SrcReg, BB: CurBB, Inst: CurInst); |
2216 | |
2217 | return true; |
2218 | } |
2219 | |
2220 | /// Accumulate a mapping between each DILocalVariable fragment and other |
2221 | /// fragments of that DILocalVariable which overlap. This reduces work during |
2222 | /// the data-flow stage from "Find any overlapping fragments" to "Check if the |
2223 | /// known-to-overlap fragments are present". |
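/// For example, two fragments of the same variable covering bits [0, 32) and
/// [16, 48) overlap, and each is recorded as overlapping the other.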
2224 | /// \param MI A previously unprocessed debug instruction to analyze for |
2225 | /// fragment usage. |
2226 | void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) { |
2227 | assert(MI.isDebugValueLike()); |
2228 | DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(), |
2229 | MI.getDebugLoc()->getInlinedAt()); |
2230 | FragmentInfo ThisFragment = MIVar.getFragmentOrDefault(); |
2231 | |
2232 | // If this is the first sighting of this variable, then we are guaranteed |
2233 | // there are currently no overlapping fragments either. Initialize the set |
2234 | // of seen fragments, record no overlaps for the current one, and return. |
2235 | auto SeenIt = SeenFragments.find(Val: MIVar.getVariable()); |
2236 | if (SeenIt == SeenFragments.end()) { |
2237 | SmallSet<FragmentInfo, 4> OneFragment; |
2238 | OneFragment.insert(V: ThisFragment); |
2239 | SeenFragments.insert(KV: {MIVar.getVariable(), OneFragment}); |
2240 | |
2241 | OverlapFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}}); |
2242 | return; |
2243 | } |
2244 | |
2245 | // If this particular Variable/Fragment pair already exists in the overlap |
2246 | // map, it has already been accounted for. |
2247 | auto IsInOLapMap = |
2248 | OverlapFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}}); |
2249 | if (!IsInOLapMap.second) |
2250 | return; |
2251 | |
2252 | auto &ThisFragmentsOverlaps = IsInOLapMap.first->second; |
2253 | auto &AllSeenFragments = SeenIt->second; |
2254 | |
2255 | // Otherwise, examine all other seen fragments for this variable, with "this" |
2256 | // fragment being a previously unseen fragment. Record any pair of |
2257 | // overlapping fragments. |
2258 | for (const auto &ASeenFragment : AllSeenFragments) { |
2259 | // Does this previously seen fragment overlap? |
2260 | if (DIExpression::fragmentsOverlap(A: ThisFragment, B: ASeenFragment)) { |
2261 | // Yes: Mark the current fragment as being overlapped. |
2262 | ThisFragmentsOverlaps.push_back(Elt: ASeenFragment); |
2263 | // Mark the previously seen fragment as being overlapped by the current |
2264 | // one. |
2265 | auto ASeenFragmentsOverlaps = |
2266 | OverlapFragments.find(Val: {MIVar.getVariable(), ASeenFragment}); |
2267 | assert(ASeenFragmentsOverlaps != OverlapFragments.end() && |
2268 | "Previously seen var fragment has no vector of overlaps" ); |
2269 | ASeenFragmentsOverlaps->second.push_back(Elt: ThisFragment); |
2270 | } |
2271 | } |
2272 | |
2273 | AllSeenFragments.insert(V: ThisFragment); |
2274 | } |
2275 | |
2276 | void InstrRefBasedLDV::process(MachineInstr &MI, |
2277 | const FuncValueTable *MLiveOuts, |
2278 | const FuncValueTable *MLiveIns) { |
2279 | // Try to interpret an MI as a debug or transfer instruction. Only if it's |
// none of these should we interpret its register defs as new value
2281 | // definitions. |
2282 | if (transferDebugValue(MI)) |
2283 | return; |
2284 | if (transferDebugInstrRef(MI, MLiveOuts, MLiveIns)) |
2285 | return; |
2286 | if (transferDebugPHI(MI)) |
2287 | return; |
2288 | if (transferRegisterCopy(MI)) |
2289 | return; |
2290 | if (transferSpillOrRestoreInst(MI)) |
2291 | return; |
2292 | transferRegisterDef(MI); |
2293 | } |
2294 | |
2295 | void InstrRefBasedLDV::produceMLocTransferFunction( |
2296 | MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer, |
2297 | unsigned MaxNumBlocks) { |
2298 | // Because we try to optimize around register mask operands by ignoring regs |
2299 | // that aren't currently tracked, we set up something ugly for later: RegMask |
// operands that are seen earlier than the first use of a register still need
// to clobber that register in the transfer function. But this information
// isn't actively recorded. Instead, we track each RegMask used in each block,
// and accumulate the clobbered but untracked registers in each block into
2304 | // the following bitvector. Later, if new values are tracked, we can add |
2305 | // appropriate clobbers. |
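// For example (illustrative): if a call in block 3 has a regmask clobbering
// $r12, but $r12 only starts being tracked because of a use in a later block,
// BlockMasks[3] lets us retrofit a clobber of $r12 into block 3's transfer
// function.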
2306 | SmallVector<BitVector, 32> BlockMasks; |
2307 | BlockMasks.resize(N: MaxNumBlocks); |
2308 | |
2309 | // Reserve one bit per register for the masks described above. |
2310 | unsigned BVWords = MachineOperand::getRegMaskSize(NumRegs: TRI->getNumRegs()); |
2311 | for (auto &BV : BlockMasks) |
2312 | BV.resize(N: TRI->getNumRegs(), t: true); |
2313 | |
2314 | // Step through all instructions and inhale the transfer function. |
2315 | for (auto &MBB : MF) { |
2316 | // Object fields that are read by trackers to know where we are in the |
2317 | // function. |
2318 | CurBB = MBB.getNumber(); |
2319 | CurInst = 1; |
2320 | |
2321 | // Set all machine locations to a PHI value. For transfer function |
2322 | // production only, this signifies the live-in value to the block. |
2323 | MTracker->reset(); |
2324 | MTracker->setMPhis(CurBB); |
2325 | |
2326 | // Step through each instruction in this block. |
2327 | for (auto &MI : MBB) { |
// Pass in null value tables when accumulating the machine transfer
// function; the machine value locations haven't been solved yet.
2330 | process(MI, MLiveOuts: nullptr, MLiveIns: nullptr); |
2331 | |
2332 | // Also accumulate fragment map. |
2333 | if (MI.isDebugValueLike()) |
2334 | accumulateFragmentMap(MI); |
2335 | |
2336 | // Create a map from the instruction number (if present) to the |
2337 | // MachineInstr and its position. |
2338 | if (uint64_t InstrNo = MI.peekDebugInstrNum()) { |
2339 | auto InstrAndPos = std::make_pair(x: &MI, y&: CurInst); |
2340 | auto InsertResult = |
2341 | DebugInstrNumToInstr.insert(x: std::make_pair(x&: InstrNo, y&: InstrAndPos)); |
2342 | |
2343 | // There should never be duplicate instruction numbers. |
2344 | assert(InsertResult.second); |
2345 | (void)InsertResult; |
2346 | } |
2347 | |
2348 | ++CurInst; |
2349 | } |
2350 | |
2351 | // Produce the transfer function, a map of machine location to new value. If |
2352 | // any machine location has the live-in phi value from the start of the |
2353 | // block, it's live-through and doesn't need recording in the transfer |
2354 | // function. |
2355 | for (auto Location : MTracker->locations()) { |
2356 | LocIdx Idx = Location.Idx; |
2357 | ValueIDNum &P = Location.Value; |
2358 | if (P.isPHI() && P.getLoc() == Idx.asU64()) |
2359 | continue; |
2360 | |
2361 | // Insert-or-update. |
2362 | auto &TransferMap = MLocTransfer[CurBB]; |
2363 | auto Result = TransferMap.insert(KV: std::make_pair(x: Idx.asU64(), y&: P)); |
2364 | if (!Result.second) |
2365 | Result.first->second = P; |
2366 | } |
2367 | |
2368 | // Accumulate any bitmask operands into the clobbered reg mask for this |
2369 | // block. |
2370 | for (auto &P : MTracker->Masks) { |
2371 | BlockMasks[CurBB].clearBitsNotInMask(Mask: P.first->getRegMask(), MaskWords: BVWords); |
2372 | } |
2373 | } |
2374 | |
// Compute a bitvector of all the registers that are tracked in this function.
2376 | BitVector UsedRegs(TRI->getNumRegs()); |
2377 | for (auto Location : MTracker->locations()) { |
2378 | unsigned ID = MTracker->LocIdxToLocID[Location.Idx]; |
2379 | // Ignore stack slots, and aliases of the stack pointer. |
2380 | if (ID >= TRI->getNumRegs() || MTracker->SPAliases.count(V: ID)) |
2381 | continue; |
2382 | UsedRegs.set(ID); |
2383 | } |
2384 | |
// Check that any regmask-clobber of a register that gets tracked is not
2386 | // live-through in the transfer function. It needs to be clobbered at the |
2387 | // very least. |
2388 | for (unsigned int I = 0; I < MaxNumBlocks; ++I) { |
2389 | BitVector &BV = BlockMasks[I]; |
2390 | BV.flip(); |
2391 | BV &= UsedRegs; |
2392 | // This produces all the bits that we clobber, but also use. Check that |
2393 | // they're all clobbered or at least set in the designated transfer |
2394 | // elem. |
2395 | for (unsigned Bit : BV.set_bits()) { |
2396 | unsigned ID = MTracker->getLocID(Reg: Bit); |
2397 | LocIdx Idx = MTracker->LocIDToLocIdx[ID]; |
2398 | auto &TransferMap = MLocTransfer[I]; |
2399 | |
2400 | // Install a value representing the fact that this location is effectively |
2401 | // written to in this block. As there's no reserved value, instead use |
2402 | // a value number that is never generated. Pick the value number for the |
2403 | // first instruction in the block, def'ing this location, which we know |
2404 | // this block never used anyway. |
2405 | ValueIDNum NotGeneratedNum = ValueIDNum(I, 1, Idx); |
2406 | auto Result = |
2407 | TransferMap.insert(KV: std::make_pair(x: Idx.asU64(), y&: NotGeneratedNum)); |
2408 | if (!Result.second) { |
2409 | ValueIDNum &ValueID = Result.first->second; |
2410 | if (ValueID.getBlock() == I && ValueID.isPHI()) |
2411 | // It was left as live-through. Set it to clobbered. |
2412 | ValueID = NotGeneratedNum; |
2413 | } |
2414 | } |
2415 | } |
2416 | } |
2417 | |
2418 | bool InstrRefBasedLDV::mlocJoin( |
2419 | MachineBasicBlock &MBB, SmallPtrSet<const MachineBasicBlock *, 16> &Visited, |
2420 | FuncValueTable &OutLocs, ValueTable &InLocs) { |
2421 | LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n" ); |
2422 | bool Changed = false; |
2423 | |
2424 | // Handle value-propagation when control flow merges on entry to a block. For |
2425 | // any location without a PHI already placed, the location has the same value |
2426 | // as its predecessors. If a PHI is placed, test to see whether it's now a |
2427 | // redundant PHI that we can eliminate. |
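// For example, if every predecessor's live-out value for a location is the
// same (or simply the PHI value feeding back into itself), the tentative PHI
// here is redundant and the live-in becomes that common value.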
2428 | |
2429 | SmallVector<const MachineBasicBlock *, 8> BlockOrders; |
2430 | for (auto *Pred : MBB.predecessors()) |
2431 | BlockOrders.push_back(Elt: Pred); |
2432 | |
2433 | // Visit predecessors in RPOT order. |
2434 | auto Cmp = [&](const MachineBasicBlock *A, const MachineBasicBlock *B) { |
2435 | return BBToOrder.find(Val: A)->second < BBToOrder.find(Val: B)->second; |
2436 | }; |
2437 | llvm::sort(C&: BlockOrders, Comp: Cmp); |
2438 | |
2439 | // Skip entry block. |
2440 | if (BlockOrders.size() == 0) { |
2441 | // FIXME: We don't use assert here to prevent instr-ref-unreachable.mir |
2442 | // failing. |
2443 | LLVM_DEBUG(if (!MBB.isEntryBlock()) dbgs() |
2444 | << "Found not reachable block " << MBB.getFullName() |
2445 | << " from entry which may lead out of " |
2446 | "bound access to VarLocs\n" ); |
2447 | return false; |
2448 | } |
2449 | |
2450 | // Step through all machine locations, look at each predecessor and test |
2451 | // whether we can eliminate redundant PHIs. |
2452 | for (auto Location : MTracker->locations()) { |
2453 | LocIdx Idx = Location.Idx; |
2454 | |
// Pick out the first predecessor's live-out value for this location. It's
2456 | // guaranteed to not be a backedge, as we order by RPO. |
2457 | ValueIDNum FirstVal = OutLocs[*BlockOrders[0]][Idx.asU64()]; |
2458 | |
2459 | // If we've already eliminated a PHI here, do no further checking, just |
2460 | // propagate the first live-in value into this block. |
2461 | if (InLocs[Idx.asU64()] != ValueIDNum(MBB.getNumber(), 0, Idx)) { |
2462 | if (InLocs[Idx.asU64()] != FirstVal) { |
2463 | InLocs[Idx.asU64()] = FirstVal; |
2464 | Changed |= true; |
2465 | } |
2466 | continue; |
2467 | } |
2468 | |
2469 | // We're now examining a PHI to see whether it's un-necessary. Loop around |
2470 | // the other live-in values and test whether they're all the same. |
2471 | bool Disagree = false; |
2472 | for (unsigned int I = 1; I < BlockOrders.size(); ++I) { |
2473 | const MachineBasicBlock *PredMBB = BlockOrders[I]; |
2474 | const ValueIDNum &PredLiveOut = OutLocs[*PredMBB][Idx.asU64()]; |
2475 | |
2476 | // Incoming values agree, continue trying to eliminate this PHI. |
2477 | if (FirstVal == PredLiveOut) |
2478 | continue; |
2479 | |
2480 | // We can also accept a PHI value that feeds back into itself. |
2481 | if (PredLiveOut == ValueIDNum(MBB.getNumber(), 0, Idx)) |
2482 | continue; |
2483 | |
2484 | // Live-out of a predecessor disagrees with the first predecessor. |
2485 | Disagree = true; |
2486 | } |
2487 | |
2488 | // No disagreement? No PHI. Otherwise, leave the PHI in live-ins. |
2489 | if (!Disagree) { |
2490 | InLocs[Idx.asU64()] = FirstVal; |
2491 | Changed |= true; |
2492 | } |
2493 | } |
2494 | |
2495 | // TODO: Reimplement NumInserted and NumRemoved. |
2496 | return Changed; |
2497 | } |
2498 | |
2499 | void InstrRefBasedLDV::findStackIndexInterference( |
2500 | SmallVectorImpl<unsigned> &Slots) { |
2501 | // We could spend a bit of time finding the exact, minimal, set of stack |
2502 | // indexes that interfere with each other, much like reg units. Or, we can |
2503 | // rely on the fact that: |
2504 | // * The smallest / lowest index will interfere with everything at zero |
2505 | // offset, which will be the largest set of registers, |
2506 | // * Most indexes with non-zero offset will end up being interference units |
2507 | // anyway. |
2508 | // So just pick those out and return them. |
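  // Purely as an illustration (hypothetical index set, in the same units as
  // the StackSlotIdxes keys below): if the tracked indexes were {8, 0},
  // {64, 0} and {32, 32}, this function would return the {8, 0} and {32, 32}
  // indexes, and skip {64, 0} because its offset is zero.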
2509 | |
2510 | // We can rely on a single-byte stack index existing already, because we |
2511 | // initialize them in MLocTracker. |
2512 | auto It = MTracker->StackSlotIdxes.find(Val: {8, 0}); |
2513 | assert(It != MTracker->StackSlotIdxes.end()); |
2514 | Slots.push_back(Elt: It->second); |
2515 | |
2516 | // Find anything that has a non-zero offset and add that too. |
2517 | for (auto &Pair : MTracker->StackSlotIdxes) { |
2518 | // Is offset zero? If so, ignore. |
2519 | if (!Pair.first.second) |
2520 | continue; |
2521 | Slots.push_back(Elt: Pair.second); |
2522 | } |
2523 | } |
2524 | |
2525 | void InstrRefBasedLDV::placeMLocPHIs( |
2526 | MachineFunction &MF, SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks, |
2527 | FuncValueTable &MInLocs, SmallVectorImpl<MLocTransferMap> &MLocTransfer) { |
2528 | SmallVector<unsigned, 4> StackUnits; |
2529 | findStackIndexInterference(Slots&: StackUnits); |
2530 | |
  // To avoid repeatedly running the PHI placement algorithm, leverage the
  // fact that a def of a register MUST also def its register units. Find the
  // units for registers, place PHIs for them, and then replicate them for
  // aliasing registers. Some inputs that are never def'd (DBG_PHIs of
  // arguments) don't lead to register units being tracked; just place PHIs
  // for those registers directly. Stack slots have their own form of "unit";
  // store them to one side.
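  // Illustrative example (AArch64, for exposition only): $w0 and $x0 share a
  // register unit, so a def of $x0 also defs that unit. We run PHI placement
  // once for the unit and then copy the resulting PHIs onto $w0, $x0 and any
  // other aliases, rather than running the IDF calculator per register.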
2538 | SmallSet<Register, 32> RegUnitsToPHIUp; |
2539 | SmallSet<LocIdx, 32> NormalLocsToPHI; |
2540 | SmallSet<SpillLocationNo, 32> StackSlots; |
2541 | for (auto Location : MTracker->locations()) { |
2542 | LocIdx L = Location.Idx; |
2543 | if (MTracker->isSpill(Idx: L)) { |
2544 | StackSlots.insert(V: MTracker->locIDToSpill(ID: MTracker->LocIdxToLocID[L])); |
2545 | continue; |
2546 | } |
2547 | |
2548 | Register R = MTracker->LocIdxToLocID[L]; |
2549 | SmallSet<Register, 8> FoundRegUnits; |
2550 | bool AnyIllegal = false; |
2551 | for (MCRegUnit Unit : TRI->regunits(Reg: R.asMCReg())) { |
2552 | for (MCRegUnitRootIterator URoot(Unit, TRI); URoot.isValid(); ++URoot) { |
2553 | if (!MTracker->isRegisterTracked(R: *URoot)) { |
          // Not all roots were loaded into the tracking map: this register
          // isn't actually def'd anywhere; we only read from it. Generate
          // PHIs for this reg, but don't iterate units.
2557 | AnyIllegal = true; |
2558 | } else { |
2559 | FoundRegUnits.insert(V: *URoot); |
2560 | } |
2561 | } |
2562 | } |
2563 | |
2564 | if (AnyIllegal) { |
2565 | NormalLocsToPHI.insert(V: L); |
2566 | continue; |
2567 | } |
2568 | |
2569 | RegUnitsToPHIUp.insert(I: FoundRegUnits.begin(), E: FoundRegUnits.end()); |
2570 | } |
2571 | |
2572 | // Lambda to fetch PHIs for a given location, and write into the PHIBlocks |
2573 | // collection. |
2574 | SmallVector<MachineBasicBlock *, 32> PHIBlocks; |
2575 | auto CollectPHIsForLoc = [&](LocIdx L) { |
2576 | // Collect the set of defs. |
2577 | SmallPtrSet<MachineBasicBlock *, 32> DefBlocks; |
2578 | for (unsigned int I = 0; I < OrderToBB.size(); ++I) { |
2579 | MachineBasicBlock *MBB = OrderToBB[I]; |
2580 | const auto &TransferFunc = MLocTransfer[MBB->getNumber()]; |
2581 | if (TransferFunc.contains(Val: L)) |
2582 | DefBlocks.insert(Ptr: MBB); |
2583 | } |
2584 | |
2585 | // The entry block defs the location too: it's the live-in / argument value. |
2586 | // Only insert if there are other defs though; everything is trivially live |
2587 | // through otherwise. |
2588 | if (!DefBlocks.empty()) |
2589 | DefBlocks.insert(Ptr: &*MF.begin()); |
2590 | |
2591 | // Ask the SSA construction algorithm where we should put PHIs. Clear |
2592 | // anything that might have been hanging around from earlier. |
2593 | PHIBlocks.clear(); |
2594 | BlockPHIPlacement(AllBlocks, DefBlocks, PHIBlocks); |
2595 | }; |
2596 | |
2597 | auto InstallPHIsAtLoc = [&PHIBlocks, &MInLocs](LocIdx L) { |
2598 | for (const MachineBasicBlock *MBB : PHIBlocks) |
2599 | MInLocs[*MBB][L.asU64()] = ValueIDNum(MBB->getNumber(), 0, L); |
2600 | }; |
2601 | |
2602 | // For locations with no reg units, just place PHIs. |
2603 | for (LocIdx L : NormalLocsToPHI) { |
2604 | CollectPHIsForLoc(L); |
2605 | // Install those PHI values into the live-in value array. |
2606 | InstallPHIsAtLoc(L); |
2607 | } |
2608 | |
2609 | // For stack slots, calculate PHIs for the equivalent of the units, then |
2610 | // install for each index. |
2611 | for (SpillLocationNo Slot : StackSlots) { |
2612 | for (unsigned Idx : StackUnits) { |
2613 | unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: Slot, Idx); |
2614 | LocIdx L = MTracker->getSpillMLoc(SpillID); |
2615 | CollectPHIsForLoc(L); |
2616 | InstallPHIsAtLoc(L); |
2617 | |
2618 | // Find anything that aliases this stack index, install PHIs for it too. |
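      // Two {size, offset} windows overlap iff each one starts before the
      // other ends. For example (hypothetical values, in the same units as
      // StackIdxesToPos): a {32, 32} index overlaps a {64, 0} index, but an
      // {8, 0} index does not overlap the {32, 32} one.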
2619 | unsigned Size, Offset; |
2620 | std::tie(args&: Size, args&: Offset) = MTracker->StackIdxesToPos[Idx]; |
2621 | for (auto &Pair : MTracker->StackSlotIdxes) { |
2622 | unsigned ThisSize, ThisOffset; |
2623 | std::tie(args&: ThisSize, args&: ThisOffset) = Pair.first; |
2624 | if (ThisSize + ThisOffset <= Offset || Size + Offset <= ThisOffset) |
2625 | continue; |
2626 | |
2627 | unsigned ThisID = MTracker->getSpillIDWithIdx(Spill: Slot, Idx: Pair.second); |
2628 | LocIdx ThisL = MTracker->getSpillMLoc(SpillID: ThisID); |
2629 | InstallPHIsAtLoc(ThisL); |
2630 | } |
2631 | } |
2632 | } |
2633 | |
2634 | // For reg units, place PHIs, and then place them for any aliasing registers. |
2635 | for (Register R : RegUnitsToPHIUp) { |
2636 | LocIdx L = MTracker->lookupOrTrackRegister(ID: R); |
2637 | CollectPHIsForLoc(L); |
2638 | |
2639 | // Install those PHI values into the live-in value array. |
2640 | InstallPHIsAtLoc(L); |
2641 | |
2642 | // Now find aliases and install PHIs for those. |
2643 | for (MCRegAliasIterator RAI(R, TRI, true); RAI.isValid(); ++RAI) { |
2644 | // Super-registers that are "above" the largest register read/written by |
2645 | // the function will alias, but will not be tracked. |
2646 | if (!MTracker->isRegisterTracked(R: *RAI)) |
2647 | continue; |
2648 | |
2649 | LocIdx AliasLoc = MTracker->lookupOrTrackRegister(ID: *RAI); |
2650 | InstallPHIsAtLoc(AliasLoc); |
2651 | } |
2652 | } |
2653 | } |
2654 | |
2655 | void InstrRefBasedLDV::buildMLocValueMap( |
2656 | MachineFunction &MF, FuncValueTable &MInLocs, FuncValueTable &MOutLocs, |
2657 | SmallVectorImpl<MLocTransferMap> &MLocTransfer) { |
2658 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
2659 | std::greater<unsigned int>> |
2660 | Worklist, Pending; |
2661 | |
2662 | // We track what is on the current and pending worklist to avoid inserting |
2663 | // the same thing twice. We could avoid this with a custom priority queue, |
2664 | // but this is probably not worth it. |
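  // For illustration (hypothetical loop, not from any test): given the CFG
  // bb0 -> bb1 -> bb2 -> bb1, the first sweep visits bb0, bb1, bb2 in RPO
  // from Worklist. The backedge bb2 -> bb1 puts bb1 on Pending whenever bb2's
  // live-outs change, so a second sweep re-joins bb1 against bb2's freshly
  // computed live-outs; iteration stops once no block's live-outs change.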
2665 | SmallPtrSet<MachineBasicBlock *, 16> OnPending, OnWorklist; |
2666 | |
2667 | // Initialize worklist with every block to be visited. Also produce list of |
2668 | // all blocks. |
2669 | SmallPtrSet<MachineBasicBlock *, 32> AllBlocks; |
2670 | for (unsigned int I = 0; I < BBToOrder.size(); ++I) { |
2671 | Worklist.push(x: I); |
2672 | OnWorklist.insert(Ptr: OrderToBB[I]); |
2673 | AllBlocks.insert(Ptr: OrderToBB[I]); |
2674 | } |
2675 | |
2676 | // Initialize entry block to PHIs. These represent arguments. |
2677 | for (auto Location : MTracker->locations()) |
2678 | MInLocs.tableForEntryMBB()[Location.Idx.asU64()] = |
2679 | ValueIDNum(0, 0, Location.Idx); |
2680 | |
2681 | MTracker->reset(); |
2682 | |
2683 | // Start by placing PHIs, using the usual SSA constructor algorithm. Consider |
2684 | // any machine-location that isn't live-through a block to be def'd in that |
2685 | // block. |
2686 | placeMLocPHIs(MF, AllBlocks, MInLocs, MLocTransfer); |
2687 | |
2688 | // Propagate values to eliminate redundant PHIs. At the same time, this |
2689 | // produces the table of Block x Location => Value for the entry to each |
2690 | // block. |
2691 | // The kind of PHIs we can eliminate are, for example, where one path in a |
2692 | // conditional spills and restores a register, and the register still has |
  // the same value once control flow joins, unbeknownst to the PHI placement
2694 | // code. Propagating values allows us to identify such un-necessary PHIs and |
2695 | // remove them. |
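  // For illustration (hypothetical diamond CFG): bb1 branches to bb2 and bb3,
  // which both branch to bb4. bb2 spills $rbx and restores it before exiting;
  // bb3 never touches $rbx. PHI placement conservatively puts a PHI for $rbx
  // at bb4, but propagation finds that both predecessors' live-out for $rbx
  // is still the bb1 value, so mlocJoin folds the PHI away.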
2696 | SmallPtrSet<const MachineBasicBlock *, 16> Visited; |
2697 | while (!Worklist.empty() || !Pending.empty()) { |
2698 | // Vector for storing the evaluated block transfer function. |
2699 | SmallVector<std::pair<LocIdx, ValueIDNum>, 32> ToRemap; |
2700 | |
2701 | while (!Worklist.empty()) { |
2702 | MachineBasicBlock *MBB = OrderToBB[Worklist.top()]; |
2703 | CurBB = MBB->getNumber(); |
2704 | Worklist.pop(); |
2705 | |
2706 | // Join the values in all predecessor blocks. |
2707 | bool InLocsChanged; |
2708 | InLocsChanged = mlocJoin(MBB&: *MBB, Visited, OutLocs&: MOutLocs, InLocs&: MInLocs[*MBB]); |
2709 | InLocsChanged |= Visited.insert(Ptr: MBB).second; |
2710 | |
      // Don't examine the transfer function if we've visited this block at
      // least once and the inlocs haven't changed.
2713 | if (!InLocsChanged) |
2714 | continue; |
2715 | |
2716 | // Load the current set of live-ins into MLocTracker. |
2717 | MTracker->loadFromArray(Locs&: MInLocs[*MBB], NewCurBB: CurBB); |
2718 | |
2719 | // Each element of the transfer function can be a new def, or a read of |
2720 | // a live-in value. Evaluate each element, and store to "ToRemap". |
2721 | ToRemap.clear(); |
2722 | for (auto &P : MLocTransfer[CurBB]) { |
2723 | if (P.second.getBlock() == CurBB && P.second.isPHI()) { |
2724 | // This is a movement of whatever was live in. Read it. |
2725 | ValueIDNum NewID = MTracker->readMLoc(L: P.second.getLoc()); |
2726 | ToRemap.push_back(Elt: std::make_pair(x&: P.first, y&: NewID)); |
2727 | } else { |
2728 | // It's a def. Just set it. |
2729 | assert(P.second.getBlock() == CurBB); |
2730 | ToRemap.push_back(Elt: std::make_pair(x&: P.first, y&: P.second)); |
2731 | } |
2732 | } |
2733 | |
2734 | // Commit the transfer function changes into mloc tracker, which |
2735 | // transforms the contents of the MLocTracker into the live-outs. |
2736 | for (auto &P : ToRemap) |
2737 | MTracker->setMLoc(L: P.first, Num: P.second); |
2738 | |
2739 | // Now copy out-locs from mloc tracker into out-loc vector, checking |
2740 | // whether changes have occurred. These changes can have come from both |
2741 | // the transfer function, and mlocJoin. |
2742 | bool OLChanged = false; |
2743 | for (auto Location : MTracker->locations()) { |
2744 | OLChanged |= MOutLocs[*MBB][Location.Idx.asU64()] != Location.Value; |
2745 | MOutLocs[*MBB][Location.Idx.asU64()] = Location.Value; |
2746 | } |
2747 | |
2748 | MTracker->reset(); |
2749 | |
2750 | // No need to examine successors again if out-locs didn't change. |
2751 | if (!OLChanged) |
2752 | continue; |
2753 | |
      // All successors should be visited: put any back-edges on the pending
      // list for the next dataflow iteration, and add any other successors to
      // this iteration's worklist if they're not already on it.
2757 | for (auto *s : MBB->successors()) { |
2758 | // Does branching to this successor represent a back-edge? |
2759 | if (BBToOrder[s] > BBToOrder[MBB]) { |
2760 | // No: visit it during this dataflow iteration. |
2761 | if (OnWorklist.insert(Ptr: s).second) |
2762 | Worklist.push(x: BBToOrder[s]); |
2763 | } else { |
2764 | // Yes: visit it on the next iteration. |
2765 | if (OnPending.insert(Ptr: s).second) |
2766 | Pending.push(x: BBToOrder[s]); |
2767 | } |
2768 | } |
2769 | } |
2770 | |
2771 | Worklist.swap(pq&: Pending); |
2772 | std::swap(LHS&: OnPending, RHS&: OnWorklist); |
2773 | OnPending.clear(); |
    // At this point, Pending must be empty, since it was just swapped with the
    // (drained) worklist.
    assert(Pending.empty() && "Pending should be empty");
2777 | } |
2778 | |
  // Once the live-ins no longer change in mlocJoin(), we've eliminated all
  // redundant PHIs.
2781 | } |
2782 | |
2783 | void InstrRefBasedLDV::BlockPHIPlacement( |
2784 | const SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks, |
2785 | const SmallPtrSetImpl<MachineBasicBlock *> &DefBlocks, |
2786 | SmallVectorImpl<MachineBasicBlock *> &PHIBlocks) { |
2787 | // Apply IDF calculator to the designated set of location defs, storing |
2788 | // required PHIs into PHIBlocks. Uses the dominator tree stored in the |
2789 | // InstrRefBasedLDV object. |
2790 | IDFCalculatorBase<MachineBasicBlock, false> IDF(DomTree->getBase()); |
2791 | |
2792 | IDF.setLiveInBlocks(AllBlocks); |
2793 | IDF.setDefiningBlocks(DefBlocks); |
2794 | IDF.calculate(IDFBlocks&: PHIBlocks); |
2795 | } |
2796 | |
2797 | bool InstrRefBasedLDV::pickVPHILoc( |
2798 | SmallVectorImpl<DbgOpID> &OutValues, const MachineBasicBlock &MBB, |
2799 | const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, |
2800 | const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) { |
2801 | |
2802 | // No predecessors means no PHIs. |
2803 | if (BlockOrders.empty()) |
2804 | return false; |
2805 | |
  // All the location operands that do not already agree need to be joined;
  // track the indices of each such location operand here.
2808 | SmallDenseSet<unsigned> LocOpsToJoin; |
2809 | |
2810 | auto FirstValueIt = LiveOuts.find(Val: BlockOrders[0]); |
2811 | if (FirstValueIt == LiveOuts.end()) |
2812 | return false; |
2813 | const DbgValue &FirstValue = *FirstValueIt->second; |
2814 | |
2815 | for (const auto p : BlockOrders) { |
2816 | auto OutValIt = LiveOuts.find(Val: p); |
2817 | if (OutValIt == LiveOuts.end()) |
2818 | // If we have a predecessor not in scope, we'll never find a PHI position. |
2819 | return false; |
2820 | const DbgValue &OutVal = *OutValIt->second; |
2821 | |
2822 | // No-values cannot have locations we can join on. |
2823 | if (OutVal.Kind == DbgValue::NoVal) |
2824 | return false; |
2825 | |
2826 | // For unjoined VPHIs where we don't know the location, we definitely |
2827 | // can't find a join loc unless the VPHI is a backedge. |
2828 | if (OutVal.isUnjoinedPHI() && OutVal.BlockNo != MBB.getNumber()) |
2829 | return false; |
2830 | |
2831 | if (!FirstValue.Properties.isJoinable(Other: OutVal.Properties)) |
2832 | return false; |
2833 | |
2834 | for (unsigned Idx = 0; Idx < FirstValue.getLocationOpCount(); ++Idx) { |
2835 | // An unjoined PHI has no defined locations, and so a shared location must |
2836 | // be found for every operand. |
2837 | if (OutVal.isUnjoinedPHI()) { |
2838 | LocOpsToJoin.insert(V: Idx); |
2839 | continue; |
2840 | } |
2841 | DbgOpID FirstValOp = FirstValue.getDbgOpID(Index: Idx); |
2842 | DbgOpID OutValOp = OutVal.getDbgOpID(Index: Idx); |
2843 | if (FirstValOp != OutValOp) { |
2844 | // We can never join constant ops - the ops must either both be equal |
2845 | // constant ops or non-const ops. |
2846 | if (FirstValOp.isConst() || OutValOp.isConst()) |
2847 | return false; |
2848 | else |
2849 | LocOpsToJoin.insert(V: Idx); |
2850 | } |
2851 | } |
2852 | } |
2853 | |
2854 | SmallVector<DbgOpID> NewDbgOps; |
2855 | |
2856 | for (unsigned Idx = 0; Idx < FirstValue.getLocationOpCount(); ++Idx) { |
2857 | // If this op doesn't need to be joined because the values agree, use that |
2858 | // already-agreed value. |
2859 | if (!LocOpsToJoin.contains(V: Idx)) { |
2860 | NewDbgOps.push_back(Elt: FirstValue.getDbgOpID(Index: Idx)); |
2861 | continue; |
2862 | } |
2863 | |
2864 | std::optional<ValueIDNum> JoinedOpLoc = |
2865 | pickOperandPHILoc(DbgOpIdx: Idx, MBB, LiveOuts, MOutLocs, BlockOrders); |
2866 | |
2867 | if (!JoinedOpLoc) |
2868 | return false; |
2869 | |
2870 | NewDbgOps.push_back(Elt: DbgOpStore.insert(Op: *JoinedOpLoc)); |
2871 | } |
2872 | |
2873 | OutValues.append(RHS: NewDbgOps); |
2874 | return true; |
2875 | } |
2876 | |
2877 | std::optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc( |
2878 | unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts, |
2879 | FuncValueTable &MOutLocs, |
2880 | const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) { |
2881 | |
  // For each predecessor, collect the set of locations where its live-out
  // value can be found.
2884 | SmallVector<SmallVector<LocIdx, 4>, 8> Locs; |
2885 | unsigned NumLocs = MTracker->getNumLocs(); |
2886 | |
2887 | for (const auto p : BlockOrders) { |
2888 | auto OutValIt = LiveOuts.find(Val: p); |
2889 | assert(OutValIt != LiveOuts.end()); |
2890 | const DbgValue &OutVal = *OutValIt->second; |
2891 | DbgOpID OutValOpID = OutVal.getDbgOpID(Index: DbgOpIdx); |
2892 | DbgOp OutValOp = DbgOpStore.find(ID: OutValOpID); |
2893 | assert(!OutValOp.IsConst); |
2894 | |
2895 | // Create new empty vector of locations. |
2896 | Locs.resize(N: Locs.size() + 1); |
2897 | |
2898 | // If the live-in value is a def, find the locations where that value is |
2899 | // present. Do the same for VPHIs where we know the VPHI value. |
2900 | if (OutVal.Kind == DbgValue::Def || |
2901 | (OutVal.Kind == DbgValue::VPHI && OutVal.BlockNo != MBB.getNumber() && |
2902 | !OutValOp.isUndef())) { |
2903 | ValueIDNum ValToLookFor = OutValOp.ID; |
2904 | // Search the live-outs of the predecessor for the specified value. |
2905 | for (unsigned int I = 0; I < NumLocs; ++I) { |
2906 | if (MOutLocs[*p][I] == ValToLookFor) |
2907 | Locs.back().push_back(Elt: LocIdx(I)); |
2908 | } |
2909 | } else { |
2910 | assert(OutVal.Kind == DbgValue::VPHI); |
2911 | // Otherwise: this is a VPHI on a backedge feeding back into itself, i.e. |
2912 | // a value that's live-through the whole loop. (It has to be a backedge, |
2913 | // because a block can't dominate itself). We can accept as a PHI location |
2914 | // any location where the other predecessors agree, _and_ the machine |
2915 | // locations feed back into themselves. Therefore, add all self-looping |
2916 | // machine-value PHI locations. |
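      // For illustration (hypothetical loop head bb2 with entry predecessor
      // bb1 and latch bb3): every location whose live-out of bb3 is bb2's own
      // PHI for that location is collected here; the later intersection with
      // bb1's candidate set then keeps only locations that both contain the
      // desired value on entry and are left unchanged around the loop.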
2917 | for (unsigned int I = 0; I < NumLocs; ++I) { |
2918 | ValueIDNum MPHI(MBB.getNumber(), 0, LocIdx(I)); |
2919 | if (MOutLocs[*p][I] == MPHI) |
2920 | Locs.back().push_back(Elt: LocIdx(I)); |
2921 | } |
2922 | } |
2923 | } |
2924 | // We should have found locations for all predecessors, or returned. |
2925 | assert(Locs.size() == BlockOrders.size()); |
2926 | |
2927 | // Starting with the first set of locations, take the intersection with |
2928 | // subsequent sets. |
2929 | SmallVector<LocIdx, 4> CandidateLocs = Locs[0]; |
2930 | for (unsigned int I = 1; I < Locs.size(); ++I) { |
2931 | auto &LocVec = Locs[I]; |
2932 | SmallVector<LocIdx, 4> NewCandidates; |
2933 | std::set_intersection(first1: CandidateLocs.begin(), last1: CandidateLocs.end(), |
2934 | first2: LocVec.begin(), last2: LocVec.end(), result: std::inserter(x&: NewCandidates, i: NewCandidates.begin())); |
2935 | CandidateLocs = NewCandidates; |
2936 | } |
2937 | if (CandidateLocs.empty()) |
2938 | return std::nullopt; |
2939 | |
2940 | // We now have a set of LocIdxes that contain the right output value in |
2941 | // each of the predecessors. Pick the lowest; if there's a register loc, |
2942 | // that'll be it. |
2943 | LocIdx L = *CandidateLocs.begin(); |
2944 | |
2945 | // Return a PHI-value-number for the found location. |
2946 | ValueIDNum PHIVal = {(unsigned)MBB.getNumber(), 0, L}; |
2947 | return PHIVal; |
2948 | } |
2949 | |
2950 | bool InstrRefBasedLDV::vlocJoin( |
2951 | MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, |
2952 | SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore, |
2953 | DbgValue &LiveIn) { |
2954 | LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n" ); |
2955 | bool Changed = false; |
2956 | |
  // Sort predecessors into RPOT order, so that we explore them in that order.
2958 | SmallVector<MachineBasicBlock *, 8> BlockOrders(MBB.predecessors()); |
2959 | |
2960 | auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) { |
2961 | return BBToOrder[A] < BBToOrder[B]; |
2962 | }; |
2963 | |
2964 | llvm::sort(C&: BlockOrders, Comp: Cmp); |
2965 | |
2966 | unsigned CurBlockRPONum = BBToOrder[&MBB]; |
2967 | |
2968 | // Collect all the incoming DbgValues for this variable, from predecessor |
2969 | // live-out values. |
2970 | SmallVector<InValueT, 8> Values; |
2971 | bool Bail = false; |
2972 | int BackEdgesStart = 0; |
2973 | for (auto *p : BlockOrders) { |
2974 | // If the predecessor isn't in scope / to be explored, we'll never be |
2975 | // able to join any locations. |
2976 | if (!BlocksToExplore.contains(Ptr: p)) { |
2977 | Bail = true; |
2978 | break; |
2979 | } |
2980 | |
2981 | // All Live-outs will have been initialized. |
2982 | DbgValue &OutLoc = *VLOCOutLocs.find(Val: p)->second; |
2983 | |
2984 | // Keep track of where back-edges begin in the Values vector. Relies on |
2985 | // BlockOrders being sorted by RPO. |
2986 | unsigned ThisBBRPONum = BBToOrder[p]; |
2987 | if (ThisBBRPONum < CurBlockRPONum) |
2988 | ++BackEdgesStart; |
2989 | |
2990 | Values.push_back(Elt: std::make_pair(x&: p, y: &OutLoc)); |
2991 | } |
2992 | |
2993 | // If there were no values, or one of the predecessors couldn't have a |
2994 | // value, then give up immediately. It's not safe to produce a live-in |
2995 | // value. Leave as whatever it was before. |
2996 | if (Bail || Values.size() == 0) |
2997 | return false; |
2998 | |
2999 | // All (non-entry) blocks have at least one non-backedge predecessor. |
3000 | // Pick the variable value from the first of these, to compare against |
3001 | // all others. |
3002 | const DbgValue &FirstVal = *Values[0].second; |
3003 | |
  // If the old live-in value is not a PHI then either a) no PHI is needed
  // here, or b) we eliminated the PHI that was here. Either way, we can just
  // propagate in the first predecessor's incoming value.
3007 | if (LiveIn.Kind != DbgValue::VPHI || LiveIn.BlockNo != MBB.getNumber()) { |
3008 | Changed = LiveIn != FirstVal; |
3009 | if (Changed) |
3010 | LiveIn = FirstVal; |
3011 | return Changed; |
3012 | } |
3013 | |
3014 | // Scan for variable values that can never be resolved: if they have |
3015 | // different DIExpressions, different indirectness, or are mixed constants / |
3016 | // non-constants. |
3017 | for (const auto &V : Values) { |
3018 | if (!V.second->Properties.isJoinable(Other: FirstVal.Properties)) |
3019 | return false; |
3020 | if (V.second->Kind == DbgValue::NoVal) |
3021 | return false; |
3022 | if (!V.second->hasJoinableLocOps(Other: FirstVal)) |
3023 | return false; |
3024 | } |
3025 | |
3026 | // Try to eliminate this PHI. Do the incoming values all agree? |
3027 | bool Disagree = false; |
3028 | for (auto &V : Values) { |
3029 | if (*V.second == FirstVal) |
3030 | continue; // No disagreement. |
3031 | |
    // If the two values are not equal but have equal, non-empty IDs, then they
    // refer to the same value from different sources (e.g. one is a VPHI and
    // the other is a Def), which does not count as disagreement.
3035 | if (V.second->hasIdenticalValidLocOps(Other: FirstVal)) |
3036 | continue; |
3037 | |
3038 | // Eliminate if a backedge feeds a VPHI back into itself. |
3039 | if (V.second->Kind == DbgValue::VPHI && |
3040 | V.second->BlockNo == MBB.getNumber() && |
3041 | // Is this a backedge? |
3042 | std::distance(first: Values.begin(), last: &V) >= BackEdgesStart) |
3043 | continue; |
3044 | |
3045 | Disagree = true; |
3046 | } |
3047 | |
3048 | // No disagreement -> live-through value. |
3049 | if (!Disagree) { |
3050 | Changed = LiveIn != FirstVal; |
3051 | if (Changed) |
3052 | LiveIn = FirstVal; |
3053 | return Changed; |
3054 | } else { |
3055 | // Otherwise use a VPHI. |
3056 | DbgValue VPHI(MBB.getNumber(), FirstVal.Properties, DbgValue::VPHI); |
3057 | Changed = LiveIn != VPHI; |
3058 | if (Changed) |
3059 | LiveIn = VPHI; |
3060 | return Changed; |
3061 | } |
3062 | } |
3063 | |
3064 | void InstrRefBasedLDV::getBlocksForScope( |
3065 | const DILocation *DILoc, |
3066 | SmallPtrSetImpl<const MachineBasicBlock *> &BlocksToExplore, |
3067 | const SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks) { |
3068 | // Get the set of "normal" in-lexical-scope blocks. |
3069 | LS.getMachineBasicBlocks(DL: DILoc, MBBs&: BlocksToExplore); |
3070 | |
  // VarLoc LiveDebugValues tracks variable locations that are defined in
  // blocks not in scope. This is something we could legitimately ignore, but
  // let's allow it for now for the sake of coverage.
3074 | BlocksToExplore.insert(I: AssignBlocks.begin(), E: AssignBlocks.end()); |
3075 | |
3076 | // Storage for artificial blocks we intend to add to BlocksToExplore. |
3077 | DenseSet<const MachineBasicBlock *> ToAdd; |
3078 | |
  // To avoid needlessly dropping large volumes of variable locations,
  // propagate variables through artificial blocks, i.e. those that don't have
  // any instructions in scope at all. To accurately replicate VarLoc
  // LiveDebugValues, this means exploring all artificial successors too.
  // Perform a depth-first search to enumerate those blocks.
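  // For illustration (hypothetical blocks): if in-scope bb5 branches to bb6
  // and bb7, both of which contain only line-zero instructions, and bb7
  // branches on to a similarly artificial bb8, the search below adds bb6, bb7
  // and bb8 so variable values can still flow through them.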
3084 | for (const auto *MBB : BlocksToExplore) { |
3085 | // Depth-first-search state: each node is a block and which successor |
3086 | // we're currently exploring. |
3087 | SmallVector<std::pair<const MachineBasicBlock *, |
3088 | MachineBasicBlock::const_succ_iterator>, |
3089 | 8> |
3090 | DFS; |
3091 | |
3092 | // Find any artificial successors not already tracked. |
3093 | for (auto *succ : MBB->successors()) { |
3094 | if (BlocksToExplore.count(Ptr: succ)) |
3095 | continue; |
3096 | if (!ArtificialBlocks.count(Ptr: succ)) |
3097 | continue; |
3098 | ToAdd.insert(V: succ); |
3099 | DFS.push_back(Elt: {succ, succ->succ_begin()}); |
3100 | } |
3101 | |
3102 | // Search all those blocks, depth first. |
3103 | while (!DFS.empty()) { |
3104 | const MachineBasicBlock *CurBB = DFS.back().first; |
3105 | MachineBasicBlock::const_succ_iterator &CurSucc = DFS.back().second; |
      // Walk back if we've explored this block's successors to the end.
3107 | if (CurSucc == CurBB->succ_end()) { |
3108 | DFS.pop_back(); |
3109 | continue; |
3110 | } |
3111 | |
3112 | // If the current successor is artificial and unexplored, descend into |
3113 | // it. |
3114 | if (!ToAdd.count(V: *CurSucc) && ArtificialBlocks.count(Ptr: *CurSucc)) { |
3115 | ToAdd.insert(V: *CurSucc); |
3116 | DFS.push_back(Elt: {*CurSucc, (*CurSucc)->succ_begin()}); |
3117 | continue; |
3118 | } |
3119 | |
3120 | ++CurSucc; |
3121 | } |
3122 | }; |
3123 | |
3124 | BlocksToExplore.insert(I: ToAdd.begin(), E: ToAdd.end()); |
3125 | } |
3126 | |
3127 | void InstrRefBasedLDV::buildVLocValueMap( |
3128 | const DILocation *DILoc, |
3129 | const SmallSet<DebugVariableID, 4> &VarsWeCareAbout, |
3130 | SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks, LiveInsT &Output, |
3131 | FuncValueTable &MOutLocs, FuncValueTable &MInLocs, |
3132 | SmallVectorImpl<VLocTracker> &AllTheVLocs) { |
  // This method is much like buildMLocValueMap, but it focuses on a single
  // LexicalScope at a time. Pick out a set of blocks and variables that are
  // to have their value assignments solved, then run our dataflow algorithm
  // until a fixed point is reached.
3137 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
3138 | std::greater<unsigned int>> |
3139 | Worklist, Pending; |
3140 | SmallPtrSet<MachineBasicBlock *, 16> OnWorklist, OnPending; |
3141 | |
3142 | // The set of blocks we'll be examining. |
3143 | SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; |
3144 | |
3145 | // The order in which to examine them (RPO). |
3146 | SmallVector<MachineBasicBlock *, 16> BlockOrders; |
3147 | SmallVector<unsigned, 32> BlockOrderNums; |
3148 | |
3149 | getBlocksForScope(DILoc, BlocksToExplore, AssignBlocks); |
3150 | |
3151 | // Single block scope: not interesting! No propagation at all. Note that |
3152 | // this could probably go above ArtificialBlocks without damage, but |
3153 | // that then produces output differences from original-live-debug-values, |
3154 | // which propagates from a single block into many artificial ones. |
3155 | if (BlocksToExplore.size() == 1) |
3156 | return; |
3157 | |
  // Convert a const set to a non-const set. LexicalScopes
  // getMachineBasicBlocks returns const MBB pointers, but IDF wants mutable
  // ones. (Neither of them mutates anything.)
3161 | SmallPtrSet<MachineBasicBlock *, 8> MutBlocksToExplore; |
3162 | for (const auto *MBB : BlocksToExplore) |
3163 | MutBlocksToExplore.insert(Ptr: const_cast<MachineBasicBlock *>(MBB)); |
3164 | |
  // Pick out the relevant blocks in RPO order and sort them. Sort their
  // order-numbers and map back to MBB pointers later, to avoid repeated
  // DenseMap queries during comparisons.
3168 | for (const auto *MBB : BlocksToExplore) |
3169 | BlockOrderNums.push_back(Elt: BBToOrder[MBB]); |
3170 | |
3171 | llvm::sort(C&: BlockOrderNums); |
3172 | for (unsigned int I : BlockOrderNums) |
3173 | BlockOrders.push_back(Elt: OrderToBB[I]); |
3174 | BlockOrderNums.clear(); |
3175 | unsigned NumBlocks = BlockOrders.size(); |
3176 | |
3177 | // Allocate some vectors for storing the live ins and live outs. Large. |
3178 | SmallVector<DbgValue, 32> LiveIns, LiveOuts; |
3179 | LiveIns.reserve(N: NumBlocks); |
3180 | LiveOuts.reserve(N: NumBlocks); |
3181 | |
3182 | // Initialize all values to start as NoVals. This signifies "it's live |
3183 | // through, but we don't know what it is". |
3184 | DbgValueProperties EmptyProperties(EmptyExpr, false, false); |
3185 | for (unsigned int I = 0; I < NumBlocks; ++I) { |
3186 | DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal); |
3187 | LiveIns.push_back(Elt: EmptyDbgValue); |
3188 | LiveOuts.push_back(Elt: EmptyDbgValue); |
3189 | } |
3190 | |
3191 | // Produce by-MBB indexes of live-in/live-outs, to ease lookup within |
3192 | // vlocJoin. |
3193 | LiveIdxT LiveOutIdx, LiveInIdx; |
3194 | LiveOutIdx.reserve(NumEntries: NumBlocks); |
3195 | LiveInIdx.reserve(NumEntries: NumBlocks); |
3196 | for (unsigned I = 0; I < NumBlocks; ++I) { |
3197 | LiveOutIdx[BlockOrders[I]] = &LiveOuts[I]; |
3198 | LiveInIdx[BlockOrders[I]] = &LiveIns[I]; |
3199 | } |
3200 | |
  // Loop over each variable and place PHIs for it, then propagate values
  // between blocks. This keeps the locality of working on one lexical scope
  // at a time, but avoids re-processing variable values because some other
  // variable has been assigned.
3205 | for (DebugVariableID VarID : VarsWeCareAbout) { |
3206 | // Re-initialize live-ins and live-outs, to clear the remains of previous |
3207 | // variables live-ins / live-outs. |
3208 | for (unsigned int I = 0; I < NumBlocks; ++I) { |
3209 | DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal); |
3210 | LiveIns[I] = EmptyDbgValue; |
3211 | LiveOuts[I] = EmptyDbgValue; |
3212 | } |
3213 | |
3214 | // Place PHIs for variable values, using the LLVM IDF calculator. |
3215 | // Collect the set of blocks where variables are def'd. |
3216 | SmallPtrSet<MachineBasicBlock *, 32> DefBlocks; |
3217 | for (const MachineBasicBlock *ExpMBB : BlocksToExplore) { |
3218 | auto &TransferFunc = AllTheVLocs[ExpMBB->getNumber()].Vars; |
3219 | if (TransferFunc.contains(Key: VarID)) |
3220 | DefBlocks.insert(Ptr: const_cast<MachineBasicBlock *>(ExpMBB)); |
3221 | } |
3222 | |
3223 | SmallVector<MachineBasicBlock *, 32> PHIBlocks; |
3224 | |
3225 | // Request the set of PHIs we should insert for this variable. If there's |
3226 | // only one value definition, things are very simple. |
3227 | if (DefBlocks.size() == 1) { |
3228 | placePHIsForSingleVarDefinition(InScopeBlocks: MutBlocksToExplore, MBB: *DefBlocks.begin(), |
3229 | AllTheVLocs, Var: VarID, Output); |
3230 | continue; |
3231 | } |
3232 | |
3233 | // Otherwise: we need to place PHIs through SSA and propagate values. |
3234 | BlockPHIPlacement(AllBlocks: MutBlocksToExplore, DefBlocks, PHIBlocks); |
3235 | |
3236 | // Insert PHIs into the per-block live-in tables for this variable. |
3237 | for (MachineBasicBlock *PHIMBB : PHIBlocks) { |
3238 | unsigned BlockNo = PHIMBB->getNumber(); |
3239 | DbgValue *LiveIn = LiveInIdx[PHIMBB]; |
3240 | *LiveIn = DbgValue(BlockNo, EmptyProperties, DbgValue::VPHI); |
3241 | } |
3242 | |
3243 | for (auto *MBB : BlockOrders) { |
3244 | Worklist.push(x: BBToOrder[MBB]); |
3245 | OnWorklist.insert(Ptr: MBB); |
3246 | } |
3247 | |
    // Iterate over all the blocks we selected, propagating the variable's
    // value. This loop does two things:
    //  * Eliminates un-necessary VPHIs in vlocJoin,
    //  * Evaluates the block's transfer function (i.e. variable assignments)
    //    and stores the result to the block's live-outs.
    // Always evaluate the transfer function on the first iteration, and
    // thereafter whenever the live-ins change.
3255 | bool FirstTrip = true; |
3256 | while (!Worklist.empty() || !Pending.empty()) { |
3257 | while (!Worklist.empty()) { |
3258 | auto *MBB = OrderToBB[Worklist.top()]; |
3259 | CurBB = MBB->getNumber(); |
3260 | Worklist.pop(); |
3261 | |
3262 | auto LiveInsIt = LiveInIdx.find(Val: MBB); |
3263 | assert(LiveInsIt != LiveInIdx.end()); |
3264 | DbgValue *LiveIn = LiveInsIt->second; |
3265 | |
        // Join values from predecessors, writing the result into *LiveIn and
        // reporting whether it changed.
3268 | bool InLocsChanged = |
3269 | vlocJoin(MBB&: *MBB, VLOCOutLocs&: LiveOutIdx, BlocksToExplore, LiveIn&: *LiveIn); |
3270 | |
3271 | SmallVector<const MachineBasicBlock *, 8> Preds; |
3272 | for (const auto *Pred : MBB->predecessors()) |
3273 | Preds.push_back(Elt: Pred); |
3274 | |
3275 | // If this block's live-in value is a VPHI, try to pick a machine-value |
3276 | // for it. This makes the machine-value available and propagated |
3277 | // through all blocks by the time value propagation finishes. We can't |
3278 | // do this any earlier as it needs to read the block live-outs. |
3279 | if (LiveIn->Kind == DbgValue::VPHI && LiveIn->BlockNo == (int)CurBB) { |
          // There's a small possibility that on a preceding path, a VPHI is
          // eliminated and transitions from VPHI-with-location to
          // live-through-value. As a result, the selected location of any
          // VPHI might change, so we need to re-compute it on each iteration.
3284 | SmallVector<DbgOpID> JoinedOps; |
3285 | |
3286 | if (pickVPHILoc(OutValues&: JoinedOps, MBB: *MBB, LiveOuts: LiveOutIdx, MOutLocs, BlockOrders: Preds)) { |
3287 | bool NewLocPicked = !equal(LRange: LiveIn->getDbgOpIDs(), RRange&: JoinedOps); |
3288 | InLocsChanged |= NewLocPicked; |
3289 | if (NewLocPicked) |
3290 | LiveIn->setDbgOpIDs(JoinedOps); |
3291 | } |
3292 | } |
3293 | |
3294 | if (!InLocsChanged && !FirstTrip) |
3295 | continue; |
3296 | |
3297 | DbgValue *LiveOut = LiveOutIdx[MBB]; |
3298 | bool OLChanged = false; |
3299 | |
3300 | // Do transfer function. |
3301 | auto &VTracker = AllTheVLocs[MBB->getNumber()]; |
3302 | auto TransferIt = VTracker.Vars.find(Key: VarID); |
3303 | if (TransferIt != VTracker.Vars.end()) { |
3304 | // Erase on empty transfer (DBG_VALUE $noreg). |
3305 | if (TransferIt->second.Kind == DbgValue::Undef) { |
3306 | DbgValue NewVal(MBB->getNumber(), EmptyProperties, DbgValue::NoVal); |
3307 | if (*LiveOut != NewVal) { |
3308 | *LiveOut = NewVal; |
3309 | OLChanged = true; |
3310 | } |
3311 | } else { |
3312 | // Insert new variable value; or overwrite. |
3313 | if (*LiveOut != TransferIt->second) { |
3314 | *LiveOut = TransferIt->second; |
3315 | OLChanged = true; |
3316 | } |
3317 | } |
3318 | } else { |
3319 | // Just copy live-ins to live-outs, for anything not transferred. |
3320 | if (*LiveOut != *LiveIn) { |
3321 | *LiveOut = *LiveIn; |
3322 | OLChanged = true; |
3323 | } |
3324 | } |
3325 | |
3326 | // If no live-out value changed, there's no need to explore further. |
3327 | if (!OLChanged) |
3328 | continue; |
3329 | |
        // We should visit all successors. Ensure we'll visit any non-backedge
        // successors during this dataflow iteration; schedule backedge
        // successors to be visited next time around.
3333 | for (auto *s : MBB->successors()) { |
3334 | // Ignore out of scope / not-to-be-explored successors. |
3335 | if (!LiveInIdx.contains(Val: s)) |
3336 | continue; |
3337 | |
3338 | if (BBToOrder[s] > BBToOrder[MBB]) { |
3339 | if (OnWorklist.insert(Ptr: s).second) |
3340 | Worklist.push(x: BBToOrder[s]); |
3341 | } else if (OnPending.insert(Ptr: s).second && (FirstTrip || OLChanged)) { |
3342 | Pending.push(x: BBToOrder[s]); |
3343 | } |
3344 | } |
3345 | } |
3346 | Worklist.swap(pq&: Pending); |
3347 | std::swap(LHS&: OnWorklist, RHS&: OnPending); |
3348 | OnPending.clear(); |
3349 | assert(Pending.empty()); |
3350 | FirstTrip = false; |
3351 | } |
3352 | |
3353 | // Save live-ins to output vector. Ignore any that are still marked as being |
3354 | // VPHIs with no location -- those are variables that we know the value of, |
3355 | // but are not actually available in the register file. |
3356 | for (auto *MBB : BlockOrders) { |
3357 | DbgValue *BlockLiveIn = LiveInIdx[MBB]; |
3358 | if (BlockLiveIn->Kind == DbgValue::NoVal) |
3359 | continue; |
3360 | if (BlockLiveIn->isUnjoinedPHI()) |
3361 | continue; |
3362 | if (BlockLiveIn->Kind == DbgValue::VPHI) |
3363 | BlockLiveIn->Kind = DbgValue::Def; |
3364 | [[maybe_unused]] auto &[Var, DILoc] = DVMap.lookupDVID(ID: VarID); |
3365 | assert(BlockLiveIn->Properties.DIExpr->getFragmentInfo() == |
3366 | Var.getFragment() && |
3367 | "Fragment info missing during value prop" ); |
3368 | Output[MBB->getNumber()].push_back(Elt: std::make_pair(x&: VarID, y&: *BlockLiveIn)); |
3369 | } |
3370 | } // Per-variable loop. |
3371 | |
3372 | BlockOrders.clear(); |
3373 | BlocksToExplore.clear(); |
3374 | } |
3375 | |
3376 | void InstrRefBasedLDV::placePHIsForSingleVarDefinition( |
3377 | const SmallPtrSetImpl<MachineBasicBlock *> &InScopeBlocks, |
3378 | MachineBasicBlock *AssignMBB, SmallVectorImpl<VLocTracker> &AllTheVLocs, |
3379 | DebugVariableID VarID, LiveInsT &Output) { |
  // If there is a single definition of the variable, then working out its
  // value everywhere is very simple: it holds that value in every block
  // dominated by the definition. At the dominance frontier, the usual
  // algorithm would:
3383 | // * Place PHIs, |
3384 | // * Propagate values into them, |
3385 | // * Find there's no incoming variable value from the other incoming branches |
3386 | // of the dominance frontier, |
3387 | // * Specify there's no variable value in blocks past the frontier. |
3388 | // This is a common case, hence it's worth special-casing it. |
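  // For illustration (hypothetical CFG): if the only assignment is in bb2,
  // and bb2 dominates bb3 and bb4 but not bb5, then the value is recorded as
  // live-in to bb3 and bb4 below; bb5, being beyond the dominance frontier,
  // gets no entry -- the same conclusion the full dataflow would reach.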
3389 | |
  // Pick out the variable's value from the block transfer function.
3391 | VLocTracker &VLocs = AllTheVLocs[AssignMBB->getNumber()]; |
3392 | auto ValueIt = VLocs.Vars.find(Key: VarID); |
3393 | const DbgValue &Value = ValueIt->second; |
3394 | |
3395 | // If it's an explicit assignment of "undef", that means there is no location |
3396 | // anyway, anywhere. |
3397 | if (Value.Kind == DbgValue::Undef) |
3398 | return; |
3399 | |
  // Assign the variable value at entry to each dominated block that's in
  // scope. Skip the definition block -- it's assigned the variable value in
  // the middle of the block somewhere.
3403 | for (auto *ScopeBlock : InScopeBlocks) { |
3404 | if (!DomTree->properlyDominates(A: AssignMBB, B: ScopeBlock)) |
3405 | continue; |
3406 | |
3407 | Output[ScopeBlock->getNumber()].push_back(Elt: {VarID, Value}); |
3408 | } |
3409 | |
3410 | // All blocks that aren't dominated have no live-in value, thus no variable |
3411 | // value will be given to them. |
3412 | } |
3413 | |
3414 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
3415 | void InstrRefBasedLDV::dump_mloc_transfer( |
3416 | const MLocTransferMap &mloc_transfer) const { |
3417 | for (const auto &P : mloc_transfer) { |
    std::string LocName = MTracker->LocIdxToName(P.first);
    std::string ValueName = MTracker->IDAsString(P.second);
    dbgs() << "Loc " << LocName << " --> " << ValueName << "\n";
3421 | } |
3422 | } |
3423 | #endif |
3424 | |
3425 | void InstrRefBasedLDV::initialSetup(MachineFunction &MF) { |
3426 | // Build some useful data structures. |
3427 | |
3428 | LLVMContext &Context = MF.getFunction().getContext(); |
3429 | EmptyExpr = DIExpression::get(Context, Elements: {}); |
3430 | |
3431 | auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool { |
3432 | if (const DebugLoc &DL = MI.getDebugLoc()) |
3433 | return DL.getLine() != 0; |
3434 | return false; |
3435 | }; |
3436 | |
  // Collect a set of all the artificial blocks. Collect the function's size
  // too; ilist size calls are O(n).
3439 | unsigned int Size = 0; |
3440 | for (auto &MBB : MF) { |
3441 | ++Size; |
3442 | if (none_of(Range: MBB.instrs(), P: hasNonArtificialLocation)) |
3443 | ArtificialBlocks.insert(Ptr: &MBB); |
3444 | } |
3445 | |
3446 | // Compute mappings of block <=> RPO order. |
3447 | ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); |
3448 | unsigned int RPONumber = 0; |
3449 | OrderToBB.reserve(N: Size); |
3450 | BBToOrder.reserve(NumEntries: Size); |
3451 | BBNumToRPO.reserve(NumEntries: Size); |
3452 | auto processMBB = [&](MachineBasicBlock *MBB) { |
3453 | OrderToBB.push_back(Elt: MBB); |
3454 | BBToOrder[MBB] = RPONumber; |
3455 | BBNumToRPO[MBB->getNumber()] = RPONumber; |
3456 | ++RPONumber; |
3457 | }; |
3458 | for (MachineBasicBlock *MBB : RPOT) |
3459 | processMBB(MBB); |
3460 | for (MachineBasicBlock &MBB : MF) |
3461 | if (!BBToOrder.contains(Val: &MBB)) |
3462 | processMBB(&MBB); |
3463 | |
3464 | // Order value substitutions by their "source" operand pair, for quick lookup. |
3465 | llvm::sort(C&: MF.DebugValueSubstitutions); |
3466 | |
3467 | #ifdef EXPENSIVE_CHECKS |
3468 | // As an expensive check, test whether there are any duplicate substitution |
3469 | // sources in the collection. |
3470 | if (MF.DebugValueSubstitutions.size() > 2) { |
3471 | for (auto It = MF.DebugValueSubstitutions.begin(); |
3472 | It != std::prev(MF.DebugValueSubstitutions.end()); ++It) { |
3473 | assert(It->Src != std::next(It)->Src && "Duplicate variable location " |
3474 | "substitution seen" ); |
3475 | } |
3476 | } |
3477 | #endif |
3478 | } |
3479 | |
// Produce an "ejection map" for blocks, i.e., for each block, the
// highest-numbered lexical scope it's used in. When exploring in DFS order and
// we pass that scope, the block can be processed and any tracking information
// freed.
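// For illustration (hypothetical DFS numbers): if bb4 belongs to scopes whose
// DFSOut numbers are 3 and 7, EjectionMap[4] records the "last" of these; when
// depthFirstVLocAndEmit finishes the scope whose DFSOut matches that entry,
// bb4's value tables and live-in lists can be freed immediately.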
3483 | void InstrRefBasedLDV::makeDepthFirstEjectionMap( |
3484 | SmallVectorImpl<unsigned> &EjectionMap, |
3485 | const ScopeToDILocT &ScopeToDILocation, |
3486 | ScopeToAssignBlocksT &ScopeToAssignBlocks) { |
3487 | SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; |
3488 | SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack; |
3489 | auto *TopScope = LS.getCurrentFunctionScope(); |
3490 | |
3491 | // Unlike lexical scope explorers, we explore in reverse order, to find the |
3492 | // "last" lexical scope used for each block early. |
3493 | WorkStack.push_back(Elt: {TopScope, TopScope->getChildren().size() - 1}); |
3494 | |
3495 | while (!WorkStack.empty()) { |
3496 | auto &ScopePosition = WorkStack.back(); |
3497 | LexicalScope *WS = ScopePosition.first; |
3498 | ssize_t ChildNum = ScopePosition.second--; |
3499 | |
3500 | const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren(); |
3501 | if (ChildNum >= 0) { |
      // If ChildNum is non-negative, there are remaining children to explore.
      // Push the child and its children-count onto the stack.
3504 | auto &ChildScope = Children[ChildNum]; |
3505 | WorkStack.push_back( |
3506 | Elt: std::make_pair(x: ChildScope, y: ChildScope->getChildren().size() - 1)); |
3507 | } else { |
3508 | WorkStack.pop_back(); |
3509 | |
3510 | // We've explored all children and any later blocks: examine all blocks |
3511 | // in our scope. If they haven't yet had an ejection number set, then |
3512 | // this scope will be the last to use that block. |
3513 | auto DILocationIt = ScopeToDILocation.find(Val: WS); |
3514 | if (DILocationIt != ScopeToDILocation.end()) { |
3515 | getBlocksForScope(DILoc: DILocationIt->second, BlocksToExplore, |
3516 | AssignBlocks: ScopeToAssignBlocks.find(Val: WS)->second); |
3517 | for (const auto *MBB : BlocksToExplore) { |
3518 | unsigned BBNum = MBB->getNumber(); |
3519 | if (EjectionMap[BBNum] == 0) |
3520 | EjectionMap[BBNum] = WS->getDFSOut(); |
3521 | } |
3522 | |
3523 | BlocksToExplore.clear(); |
3524 | } |
3525 | } |
3526 | } |
3527 | } |
3528 | |
3529 | bool InstrRefBasedLDV::depthFirstVLocAndEmit( |
3530 | unsigned MaxNumBlocks, const ScopeToDILocT &ScopeToDILocation, |
3531 | const ScopeToVarsT &ScopeToVars, ScopeToAssignBlocksT &ScopeToAssignBlocks, |
3532 | LiveInsT &Output, FuncValueTable &MOutLocs, FuncValueTable &MInLocs, |
3533 | SmallVectorImpl<VLocTracker> &AllTheVLocs, MachineFunction &MF, |
3534 | const TargetPassConfig &TPC) { |
3535 | TTracker = |
3536 | new TransferTracker(TII, MTracker, MF, DVMap, *TRI, CalleeSavedRegs, TPC); |
3537 | unsigned NumLocs = MTracker->getNumLocs(); |
3538 | VTracker = nullptr; |
3539 | |
3540 | // No scopes? No variable locations. |
3541 | if (!LS.getCurrentFunctionScope()) |
3542 | return false; |
3543 | |
3544 | // Build map from block number to the last scope that uses the block. |
3545 | SmallVector<unsigned, 16> EjectionMap; |
3546 | EjectionMap.resize(N: MaxNumBlocks, NV: 0); |
3547 | makeDepthFirstEjectionMap(EjectionMap, ScopeToDILocation, |
3548 | ScopeToAssignBlocks); |
3549 | |
  // Helper lambda for ejecting a block -- if nothing more is going to use the
  // block, we can translate its variable location information into DBG_VALUEs
  // and then free the InstrRefBasedLDV data structures for that block.
3553 | auto EjectBlock = [&](MachineBasicBlock &MBB) -> void { |
3554 | unsigned BBNum = MBB.getNumber(); |
3555 | AllTheVLocs[BBNum].clear(); |
3556 | |
3557 | // Prime the transfer-tracker, and then step through all the block |
3558 | // instructions, installing transfers. |
3559 | MTracker->reset(); |
3560 | MTracker->loadFromArray(Locs&: MInLocs[MBB], NewCurBB: BBNum); |
3561 | TTracker->loadInlocs(MBB, MLocs&: MInLocs[MBB], DbgOpStore, VLocs: Output[BBNum], NumLocs); |
3562 | |
3563 | CurBB = BBNum; |
3564 | CurInst = 1; |
3565 | for (auto &MI : MBB) { |
3566 | process(MI, MLiveOuts: &MOutLocs, MLiveIns: &MInLocs); |
3567 | TTracker->checkInstForNewValues(Inst: CurInst, pos: MI.getIterator()); |
3568 | ++CurInst; |
3569 | } |
3570 | |
3571 | // Free machine-location tables for this block. |
3572 | MInLocs.ejectTableForBlock(MBB); |
3573 | MOutLocs.ejectTableForBlock(MBB); |
3574 | // We don't need live-in variable values for this block either. |
3575 | Output[BBNum].clear(); |
3576 | AllTheVLocs[BBNum].clear(); |
3577 | }; |
3578 | |
3579 | SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; |
3580 | SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack; |
3581 | WorkStack.push_back(Elt: {LS.getCurrentFunctionScope(), 0}); |
3582 | unsigned HighestDFSIn = 0; |
3583 | |
3584 | // Proceed to explore in depth first order. |
3585 | while (!WorkStack.empty()) { |
3586 | auto &ScopePosition = WorkStack.back(); |
3587 | LexicalScope *WS = ScopePosition.first; |
3588 | ssize_t ChildNum = ScopePosition.second++; |
3589 | |
    // We observe scopes with children twice here, once descending in, once
    // ascending out of the scope nest. Use HighestDFSIn as a ratchet to ensure
    // we don't process a scope twice. Additionally, ignore scopes that don't
    // have a DILocation -- by proxy, this means we never tracked any variable
    // assignments in that scope.
3595 | auto DILocIt = ScopeToDILocation.find(Val: WS); |
3596 | if (HighestDFSIn <= WS->getDFSIn() && DILocIt != ScopeToDILocation.end()) { |
3597 | const DILocation *DILoc = DILocIt->second; |
3598 | auto &VarsWeCareAbout = ScopeToVars.find(Val: WS)->second; |
3599 | auto &BlocksInScope = ScopeToAssignBlocks.find(Val: WS)->second; |
3600 | |
3601 | buildVLocValueMap(DILoc, VarsWeCareAbout, AssignBlocks&: BlocksInScope, Output, MOutLocs, |
3602 | MInLocs, AllTheVLocs); |
3603 | } |
3604 | |
3605 | HighestDFSIn = std::max(a: HighestDFSIn, b: WS->getDFSIn()); |
3606 | |
3607 | // Descend into any scope nests. |
3608 | const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren(); |
3609 | if (ChildNum < (ssize_t)Children.size()) { |
3610 | // There are children to explore -- push onto stack and continue. |
3611 | auto &ChildScope = Children[ChildNum]; |
3612 | WorkStack.push_back(Elt: std::make_pair(x: ChildScope, y: 0)); |
3613 | } else { |
3614 | WorkStack.pop_back(); |
3615 | |
3616 | // We've explored a leaf, or have explored all the children of a scope. |
3617 | // Try to eject any blocks where this is the last scope it's relevant to. |
3618 | auto DILocationIt = ScopeToDILocation.find(Val: WS); |
3619 | if (DILocationIt == ScopeToDILocation.end()) |
3620 | continue; |
3621 | |
3622 | getBlocksForScope(DILoc: DILocationIt->second, BlocksToExplore, |
3623 | AssignBlocks: ScopeToAssignBlocks.find(Val: WS)->second); |
3624 | for (const auto *MBB : BlocksToExplore) |
3625 | if (WS->getDFSOut() == EjectionMap[MBB->getNumber()]) |
3626 | EjectBlock(const_cast<MachineBasicBlock &>(*MBB)); |
3627 | |
3628 | BlocksToExplore.clear(); |
3629 | } |
3630 | } |
3631 | |
3632 | // Some artificial blocks may not have been ejected, meaning they're not |
3633 | // connected to an actual legitimate scope. This can technically happen |
3634 | // with things like the entry block. In theory, we shouldn't need to do |
3635 | // anything for such out-of-scope blocks, but for the sake of being similar |
3636 | // to VarLocBasedLDV, eject these too. |
3637 | for (auto *MBB : ArtificialBlocks) |
3638 | if (MInLocs.hasTableFor(MBB&: *MBB)) |
3639 | EjectBlock(*MBB); |
3640 | |
3641 | return emitTransfers(); |
3642 | } |
3643 | |
3644 | bool InstrRefBasedLDV::emitTransfers() { |
3645 | // Go through all the transfers recorded in the TransferTracker -- this is |
3646 | // both the live-ins to a block, and any movements of values that happen |
3647 | // in the middle. |
3648 | for (auto &P : TTracker->Transfers) { |
    // We have to insert DBG_VALUEs in a consistent order; otherwise they
    // appear in DWARF in different orders. Use the order in which they appear
    // when walking through each block / each instruction, stored in DVMap.
3653 | llvm::sort(C&: P.Insts, Comp: llvm::less_first()); |
3654 | |
3655 | // Insert either before or after the designated point... |
3656 | if (P.MBB) { |
3657 | MachineBasicBlock &MBB = *P.MBB; |
3658 | for (const auto &Pair : P.Insts) |
3659 | MBB.insert(I: P.Pos, M: Pair.second); |
3660 | } else { |
3661 | // Terminators, like tail calls, can clobber things. Don't try and place |
3662 | // transfers after them. |
3663 | if (P.Pos->isTerminator()) |
3664 | continue; |
3665 | |
3666 | MachineBasicBlock &MBB = *P.Pos->getParent(); |
3667 | for (const auto &Pair : P.Insts) |
3668 | MBB.insertAfterBundle(I: P.Pos, MI: Pair.second); |
3669 | } |
3670 | } |
3671 | |
3672 | return TTracker->Transfers.size() != 0; |
3673 | } |
3674 | |
3675 | /// Calculate the liveness information for the given machine function and |
3676 | /// extend ranges across basic blocks. |
3677 | bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF, |
3678 | MachineDominatorTree *DomTree, |
3679 | TargetPassConfig *TPC, |
3680 | unsigned InputBBLimit, |
3681 | unsigned InputDbgValLimit) { |
3682 | // No subprogram means this function contains no debuginfo. |
3683 | if (!MF.getFunction().getSubprogram()) |
3684 | return false; |
3685 | |
3686 | LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n" ); |
3687 | this->TPC = TPC; |
3688 | |
3689 | this->DomTree = DomTree; |
3690 | TRI = MF.getSubtarget().getRegisterInfo(); |
3691 | MRI = &MF.getRegInfo(); |
3692 | TII = MF.getSubtarget().getInstrInfo(); |
3693 | TFI = MF.getSubtarget().getFrameLowering(); |
3694 | TFI->getCalleeSaves(MF, SavedRegs&: CalleeSavedRegs); |
3695 | MFI = &MF.getFrameInfo(); |
3696 | LS.initialize(MF); |
3697 | |
3698 | const auto &STI = MF.getSubtarget(); |
3699 | AdjustsStackInCalls = MFI->adjustsStack() && |
3700 | STI.getFrameLowering()->stackProbeFunctionModifiesSP(); |
3701 | if (AdjustsStackInCalls) |
3702 | StackProbeSymbolName = STI.getTargetLowering()->getStackProbeSymbolName(MF); |
3703 | |
3704 | MTracker = |
3705 | new MLocTracker(MF, *TII, *TRI, *MF.getSubtarget().getTargetLowering()); |
3706 | VTracker = nullptr; |
3707 | TTracker = nullptr; |
3708 | |
3709 | SmallVector<MLocTransferMap, 32> MLocTransfer; |
3710 | SmallVector<VLocTracker, 8> vlocs; |
3711 | LiveInsT SavedLiveIns; |
3712 | |
3713 | int MaxNumBlocks = -1; |
3714 | for (auto &MBB : MF) |
3715 | MaxNumBlocks = std::max(a: MBB.getNumber(), b: MaxNumBlocks); |
3716 | assert(MaxNumBlocks >= 0); |
3717 | ++MaxNumBlocks; |
3718 | |
3719 | initialSetup(MF); |
3720 | |
3721 | MLocTransfer.resize(N: MaxNumBlocks); |
3722 | vlocs.resize(N: MaxNumBlocks, NV: VLocTracker(DVMap, OverlapFragments, EmptyExpr)); |
3723 | SavedLiveIns.resize(N: MaxNumBlocks); |
3724 | |
3725 | produceMLocTransferFunction(MF, MLocTransfer, MaxNumBlocks); |
3726 | |
3727 | // Allocate and initialize two array-of-arrays for the live-in and live-out |
3728 | // machine values. The outer dimension is the block number; while the inner |
3729 | // dimension is a LocIdx from MLocTracker. |
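  // For illustration (hypothetical sizes): with 40 blocks and 100 tracked
  // locations, MInLocs and MOutLocs are each 40 tables of 100 ValueIDNums;
  // MInLocs[BB][L] is the value that location L contains on entry to BB.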
3730 | unsigned NumLocs = MTracker->getNumLocs(); |
3731 | FuncValueTable MOutLocs(MaxNumBlocks, NumLocs); |
3732 | FuncValueTable MInLocs(MaxNumBlocks, NumLocs); |
3733 | |
3734 | // Solve the machine value dataflow problem using the MLocTransfer function, |
3735 | // storing the computed live-ins / live-outs into the array-of-arrays. We use |
3736 | // both live-ins and live-outs for decision making in the variable value |
3737 | // dataflow problem. |
3738 | buildMLocValueMap(MF, MInLocs, MOutLocs, MLocTransfer); |
3739 | |
3740 | // Patch up debug phi numbers, turning unknown block-live-in values into |
3741 | // either live-through machine values, or PHIs. |
3742 | for (auto &DBG_PHI : DebugPHINumToValue) { |
3743 | // Identify unresolved block-live-ins. |
3744 | if (!DBG_PHI.ValueRead) |
3745 | continue; |
3746 | |
3747 | ValueIDNum &Num = *DBG_PHI.ValueRead; |
3748 | if (!Num.isPHI()) |
3749 | continue; |
3750 | |
3751 | unsigned BlockNo = Num.getBlock(); |
3752 | LocIdx LocNo = Num.getLoc(); |
3753 | ValueIDNum ResolvedValue = MInLocs[BlockNo][LocNo.asU64()]; |
3754 | // If there is no resolved value for this live-in then it is not directly |
3755 | // reachable from the entry block -- model it as a PHI on entry to this |
3756 | // block, which means we leave the ValueIDNum unchanged. |
3757 | if (ResolvedValue != ValueIDNum::EmptyValue) |
3758 | Num = ResolvedValue; |
3759 | } |
3760 | // Later, we'll be looking up ranges of instruction numbers. |
3761 | llvm::sort(C&: DebugPHINumToValue); |
3762 | |
3763 | // Walk back through each block / instruction, collecting DBG_VALUE |
3764 | // instructions and recording what machine value their operands refer to. |
3765 | for (MachineBasicBlock *MBB : OrderToBB) { |
3766 | CurBB = MBB->getNumber(); |
3767 | VTracker = &vlocs[CurBB]; |
3768 | VTracker->MBB = MBB; |
3769 | MTracker->loadFromArray(Locs&: MInLocs[*MBB], NewCurBB: CurBB); |
3770 | CurInst = 1; |
3771 | for (auto &MI : *MBB) { |
3772 | process(MI, MLiveOuts: &MOutLocs, MLiveIns: &MInLocs); |
3773 | ++CurInst; |
3774 | } |
3775 | MTracker->reset(); |
3776 | } |
3777 | |
3778 | // Map from one LexicalScope to all the variables in that scope. |
3779 | ScopeToVarsT ScopeToVars; |
3780 | |
  // Map from one lexical scope to all blocks where assignments happen for
  // that scope.
3783 | ScopeToAssignBlocksT ScopeToAssignBlocks; |
3784 | |
3785 | // Store map of DILocations that describes scopes. |
3786 | ScopeToDILocT ScopeToDILocation; |
3787 | |
  // To mirror old LiveDebugValues, enumerate variables in RPOT order.
  // Otherwise the order is unimportant; it just has to be stable.
3790 | unsigned VarAssignCount = 0; |
3791 | for (unsigned int I = 0; I < OrderToBB.size(); ++I) { |
3792 | auto *MBB = OrderToBB[I]; |
3793 | auto *VTracker = &vlocs[MBB->getNumber()]; |
3794 | // Collect each variable with a DBG_VALUE in this block. |
3795 | for (auto &idx : VTracker->Vars) { |
3796 | DebugVariableID VarID = idx.first; |
3797 | const DILocation *ScopeLoc = VTracker->Scopes[VarID]; |
3798 | assert(ScopeLoc != nullptr); |
auto *Scope = LS.findLexicalScope(ScopeLoc);
3800 | |
3801 | // No insts in scope -> shouldn't have been recorded. |
3802 | assert(Scope != nullptr); |
3803 | |
ScopeToVars[Scope].insert(VarID);
ScopeToAssignBlocks[Scope].insert(VTracker->MBB);
3806 | ScopeToDILocation[Scope] = ScopeLoc; |
3807 | ++VarAssignCount; |
3808 | } |
3809 | } |
3810 | |
3811 | bool Changed = false; |
3812 | |
// If we have an extremely large number of variable assignments and blocks,
// bail out at this point. We've already burnt some time doing analysis, but
// we should cut our losses.
3816 | if ((unsigned)MaxNumBlocks > InputBBLimit && |
3817 | VarAssignCount > InputDbgValLimit) { |
3818 | LLVM_DEBUG(dbgs() << "Disabling InstrRefBasedLDV: " << MF.getName() |
3819 | << " has " << MaxNumBlocks << " basic blocks and " |
3820 | << VarAssignCount |
3821 | << " variable assignments, exceeding limits.\n" ); |
3822 | } else { |
// Solve the variable value problem and emit variable locations into each
// block, using a lexical-scope-depth search.
Changed = depthFirstVLocAndEmit(
MaxNumBlocks, ScopeToDILocation, ScopeToVars, ScopeToAssignBlocks,
SavedLiveIns, MOutLocs, MInLocs, vlocs, MF, *TPC);
3829 | } |
3830 | |
3831 | delete MTracker; |
3832 | delete TTracker; |
3833 | MTracker = nullptr; |
3834 | VTracker = nullptr; |
3835 | TTracker = nullptr; |
3836 | |
3837 | ArtificialBlocks.clear(); |
3838 | OrderToBB.clear(); |
3839 | BBToOrder.clear(); |
3840 | BBNumToRPO.clear(); |
3841 | DebugInstrNumToInstr.clear(); |
3842 | DebugPHINumToValue.clear(); |
3843 | OverlapFragments.clear(); |
3844 | SeenFragments.clear(); |
3845 | SeenDbgPHIs.clear(); |
3846 | DbgOpStore.clear(); |
3847 | DVMap.clear(); |
3848 | |
3849 | return Changed; |
3850 | } |
3851 | |
3852 | LDVImpl *llvm::makeInstrRefBasedLiveDebugValues() { |
3853 | return new InstrRefBasedLDV(); |
3854 | } |
3855 | |
3856 | namespace { |
3857 | class LDVSSABlock; |
3858 | class LDVSSAUpdater; |
3859 | |
3860 | // Pick a type to identify incoming block values as we construct SSA. We |
3861 | // can't use anything more robust than an integer unfortunately, as SSAUpdater |
3862 | // expects to zero-initialize the type. |
3863 | typedef uint64_t BlockValueNum; |
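// In practice a BlockValueNum is a ValueIDNum round-tripped through asU64() /
// fromU64(); see LDVSSAUpdater::getValue and resolveDbgPHIsImpl below.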
3864 | |
3865 | /// Represents an SSA PHI node for the SSA updater class. Contains the block |
3866 | /// this PHI is in, the value number it would have, and the expected incoming |
3867 | /// values from parent blocks. |
3868 | class LDVSSAPhi { |
3869 | public: |
3870 | SmallVector<std::pair<LDVSSABlock *, BlockValueNum>, 4> IncomingValues; |
3871 | LDVSSABlock *ParentBlock; |
3872 | BlockValueNum PHIValNum; |
3873 | LDVSSAPhi(BlockValueNum PHIValNum, LDVSSABlock *ParentBlock) |
3874 | : ParentBlock(ParentBlock), PHIValNum(PHIValNum) {} |
3875 | |
3876 | LDVSSABlock *getParent() { return ParentBlock; } |
3877 | }; |
3878 | |
3879 | /// Thin wrapper around a block predecessor iterator. Only difference from a |
3880 | /// normal block iterator is that it dereferences to an LDVSSABlock. |
3881 | class LDVSSABlockIterator { |
3882 | public: |
3883 | MachineBasicBlock::pred_iterator PredIt; |
3884 | LDVSSAUpdater &Updater; |
3885 | |
3886 | LDVSSABlockIterator(MachineBasicBlock::pred_iterator PredIt, |
3887 | LDVSSAUpdater &Updater) |
3888 | : PredIt(PredIt), Updater(Updater) {} |
3889 | |
3890 | bool operator!=(const LDVSSABlockIterator &OtherIt) const { |
3891 | return OtherIt.PredIt != PredIt; |
3892 | } |
3893 | |
3894 | LDVSSABlockIterator &operator++() { |
3895 | ++PredIt; |
3896 | return *this; |
3897 | } |
3898 | |
3899 | LDVSSABlock *operator*(); |
3900 | }; |
3901 | |
3902 | /// Thin wrapper around a block for SSA Updater interface. Necessary because |
3903 | /// we need to track the PHI value(s) that we may have observed as necessary |
3904 | /// in this block. |
3905 | class LDVSSABlock { |
3906 | public: |
3907 | MachineBasicBlock &BB; |
3908 | LDVSSAUpdater &Updater; |
3909 | using PHIListT = SmallVector<LDVSSAPhi, 1>; |
3910 | /// List of PHIs in this block. There should only ever be one. |
3911 | PHIListT PHIList; |
3912 | |
3913 | LDVSSABlock(MachineBasicBlock &BB, LDVSSAUpdater &Updater) |
3914 | : BB(BB), Updater(Updater) {} |
3915 | |
3916 | LDVSSABlockIterator succ_begin() { |
3917 | return LDVSSABlockIterator(BB.succ_begin(), Updater); |
3918 | } |
3919 | |
3920 | LDVSSABlockIterator succ_end() { |
3921 | return LDVSSABlockIterator(BB.succ_end(), Updater); |
3922 | } |
3923 | |
3924 | /// SSAUpdater has requested a PHI: create that within this block record. |
3925 | LDVSSAPhi *newPHI(BlockValueNum Value) { |
PHIList.emplace_back(Value, this);
3927 | return &PHIList.back(); |
3928 | } |
3929 | |
3930 | /// SSAUpdater wishes to know what PHIs already exist in this block. |
3931 | PHIListT &phis() { return PHIList; } |
3932 | }; |
3933 | |
/// Utility class for the SSAUpdater interface: tracks blocks, PHIs and values
/// while SSAUpdater is exploring the CFG. It's passed as a handle / baton to
/// SSAUpdaterTraits<LDVSSAUpdater>.
3937 | class LDVSSAUpdater { |
3938 | public: |
3939 | /// Map of value numbers to PHI records. |
3940 | DenseMap<BlockValueNum, LDVSSAPhi *> PHIs; |
/// Map of which blocks generate poison values -- blocks that are not
/// dominated by any Def.
3943 | DenseMap<MachineBasicBlock *, BlockValueNum> PoisonMap; |
3944 | /// Map of machine blocks to our own records of them. |
3945 | DenseMap<MachineBasicBlock *, LDVSSABlock *> BlockMap; |
3946 | /// Machine location where any PHI must occur. |
3947 | LocIdx Loc; |
3948 | /// Table of live-in machine value numbers for blocks / locations. |
3949 | const FuncValueTable &MLiveIns; |
3950 | |
3951 | LDVSSAUpdater(LocIdx L, const FuncValueTable &MLiveIns) |
3952 | : Loc(L), MLiveIns(MLiveIns) {} |
3953 | |
3954 | void reset() { |
3955 | for (auto &Block : BlockMap) |
3956 | delete Block.second; |
3957 | |
3958 | PHIs.clear(); |
3959 | PoisonMap.clear(); |
3960 | BlockMap.clear(); |
3961 | } |
3962 | |
3963 | ~LDVSSAUpdater() { reset(); } |
3964 | |
3965 | /// For a given MBB, create a wrapper block for it. Stores it in the |
3966 | /// LDVSSAUpdater block map. |
3967 | LDVSSABlock *getSSALDVBlock(MachineBasicBlock *BB) { |
auto it = BlockMap.find(BB);
3969 | if (it == BlockMap.end()) { |
3970 | BlockMap[BB] = new LDVSSABlock(*BB, *this); |
it = BlockMap.find(BB);
3972 | } |
3973 | return it->second; |
3974 | } |
3975 | |
3976 | /// Find the live-in value number for the given block. Looks up the value at |
3977 | /// the PHI location on entry. |
3978 | BlockValueNum getValue(LDVSSABlock *LDVBB) { |
3979 | return MLiveIns[LDVBB->BB][Loc.asU64()].asU64(); |
3980 | } |
3981 | }; |
3982 | |
3983 | LDVSSABlock *LDVSSABlockIterator::operator*() { |
return Updater.getSSALDVBlock(*PredIt);
3985 | } |
3986 | |
3987 | #ifndef NDEBUG |
3988 | |
3989 | raw_ostream &operator<<(raw_ostream &out, const LDVSSAPhi &PHI) { |
3990 | out << "SSALDVPHI " << PHI.PHIValNum; |
3991 | return out; |
3992 | } |
3993 | |
3994 | #endif |
3995 | |
3996 | } // namespace |
3997 | |
3998 | namespace llvm { |
3999 | |
4000 | /// Template specialization to give SSAUpdater access to CFG and value |
4001 | /// information. SSAUpdater calls methods in these traits, passing in the |
4002 | /// LDVSSAUpdater object, to learn about blocks and the values they define. |
4003 | /// It also provides methods to create PHI nodes and track them. |
4004 | template <> class SSAUpdaterTraits<LDVSSAUpdater> { |
4005 | public: |
4006 | using BlkT = LDVSSABlock; |
4007 | using ValT = BlockValueNum; |
4008 | using PhiT = LDVSSAPhi; |
4009 | using BlkSucc_iterator = LDVSSABlockIterator; |
4010 | |
4011 | // Methods to access block successors -- dereferencing to our wrapper class. |
4012 | static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); } |
4013 | static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); } |
4014 | |
4015 | /// Iterator for PHI operands. |
4016 | class PHI_iterator { |
4017 | private: |
4018 | LDVSSAPhi *PHI; |
4019 | unsigned Idx; |
4020 | |
4021 | public: |
4022 | explicit PHI_iterator(LDVSSAPhi *P) // begin iterator |
4023 | : PHI(P), Idx(0) {} |
4024 | PHI_iterator(LDVSSAPhi *P, bool) // end iterator |
4025 | : PHI(P), Idx(PHI->IncomingValues.size()) {} |
4026 | |
4027 | PHI_iterator &operator++() { |
4028 | Idx++; |
4029 | return *this; |
4030 | } |
4031 | bool operator==(const PHI_iterator &X) const { return Idx == X.Idx; } |
4032 | bool operator!=(const PHI_iterator &X) const { return !operator==(X); } |
4033 | |
4034 | BlockValueNum getIncomingValue() { return PHI->IncomingValues[Idx].second; } |
4035 | |
4036 | LDVSSABlock *getIncomingBlock() { return PHI->IncomingValues[Idx].first; } |
4037 | }; |
4038 | |
4039 | static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); } |
4040 | |
4041 | static inline PHI_iterator PHI_end(PhiT *PHI) { |
4042 | return PHI_iterator(PHI, true); |
4043 | } |
4044 | |
4045 | /// FindPredecessorBlocks - Put the predecessors of BB into the Preds |
4046 | /// vector. |
4047 | static void FindPredecessorBlocks(LDVSSABlock *BB, |
4048 | SmallVectorImpl<LDVSSABlock *> *Preds) { |
4049 | for (MachineBasicBlock *Pred : BB->BB.predecessors()) |
Preds->push_back(BB->Updater.getSSALDVBlock(Pred));
4051 | } |
4052 | |
4053 | /// GetPoisonVal - Normally creates an IMPLICIT_DEF instruction with a new |
4054 | /// register. For LiveDebugValues, represents a block identified as not having |
4055 | /// any DBG_PHI predecessors. |
4056 | static BlockValueNum GetPoisonVal(LDVSSABlock *BB, LDVSSAUpdater *Updater) { |
4057 | // Create a value number for this block -- it needs to be unique and in the |
4058 | // "poison" collection, so that we know it's not real. Use a number |
4059 | // representing a PHI into this block. |
4060 | BlockValueNum Num = ValueIDNum(BB->BB.getNumber(), 0, Updater->Loc).asU64(); |
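// Note: it is the PoisonMap entry, not the encoding itself, that later
// distinguishes this poison value from a genuine live-in PHI when created
// PHIs are validated in resolveDbgPHIsImpl.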
4061 | Updater->PoisonMap[&BB->BB] = Num; |
4062 | return Num; |
4063 | } |
4064 | |
4065 | /// CreateEmptyPHI - Create a (representation of a) PHI in the given block. |
4066 | /// SSAUpdater will populate it with information about incoming values. The |
4067 | /// value number of this PHI is whatever the machine value number problem |
4068 | /// solution determined it to be. This includes non-phi values if SSAUpdater |
4069 | /// tries to create a PHI where the incoming values are identical. |
4070 | static BlockValueNum CreateEmptyPHI(LDVSSABlock *BB, unsigned NumPreds, |
4071 | LDVSSAUpdater *Updater) { |
BlockValueNum PHIValNum = Updater->getValue(BB);
LDVSSAPhi *PHI = BB->newPHI(PHIValNum);
4074 | Updater->PHIs[PHIValNum] = PHI; |
4075 | return PHIValNum; |
4076 | } |
4077 | |
4078 | /// AddPHIOperand - Add the specified value as an operand of the PHI for |
4079 | /// the specified predecessor block. |
4080 | static void AddPHIOperand(LDVSSAPhi *PHI, BlockValueNum Val, LDVSSABlock *Pred) { |
PHI->IncomingValues.push_back(std::make_pair(Pred, Val));
4082 | } |
4083 | |
4084 | /// ValueIsPHI - Check if the instruction that defines the specified value |
4085 | /// is a PHI instruction. |
4086 | static LDVSSAPhi *ValueIsPHI(BlockValueNum Val, LDVSSAUpdater *Updater) { |
4087 | return Updater->PHIs.lookup(Val); |
4088 | } |
4089 | |
4090 | /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source |
4091 | /// operands, i.e., it was just added. |
4092 | static LDVSSAPhi *ValueIsNewPHI(BlockValueNum Val, LDVSSAUpdater *Updater) { |
4093 | LDVSSAPhi *PHI = ValueIsPHI(Val, Updater); |
4094 | if (PHI && PHI->IncomingValues.size() == 0) |
4095 | return PHI; |
4096 | return nullptr; |
4097 | } |
4098 | |
4099 | /// GetPHIValue - For the specified PHI instruction, return the value |
4100 | /// that it defines. |
4101 | static BlockValueNum GetPHIValue(LDVSSAPhi *PHI) { return PHI->PHIValNum; } |
4102 | }; |
4103 | |
4104 | } // end namespace llvm |
4105 | |
4106 | std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs( |
4107 | MachineFunction &MF, const FuncValueTable &MLiveOuts, |
4108 | const FuncValueTable &MLiveIns, MachineInstr &Here, uint64_t InstrNum) { |
4109 | // This function will be called twice per DBG_INSTR_REF, and might end up |
4110 | // computing lots of SSA information: memoize it. |
auto SeenDbgPHIIt = SeenDbgPHIs.find(std::make_pair(&Here, InstrNum));
4112 | if (SeenDbgPHIIt != SeenDbgPHIs.end()) |
4113 | return SeenDbgPHIIt->second; |
4114 | |
4115 | std::optional<ValueIDNum> Result = |
4116 | resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum); |
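// Memoize the result even when it is std::nullopt, so the second query for
// this DBG_INSTR_REF does not repeat the SSA search.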
SeenDbgPHIs.insert({std::make_pair(&Here, InstrNum), Result});
4118 | return Result; |
4119 | } |
4120 | |
4121 | std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( |
4122 | MachineFunction &MF, const FuncValueTable &MLiveOuts, |
4123 | const FuncValueTable &MLiveIns, MachineInstr &Here, uint64_t InstrNum) { |
4124 | // Pick out records of DBG_PHI instructions that have been observed. If there |
4125 | // are none, then we cannot compute a value number. |
auto RangePair = std::equal_range(DebugPHINumToValue.begin(),
DebugPHINumToValue.end(), InstrNum);
4128 | auto LowerIt = RangePair.first; |
4129 | auto UpperIt = RangePair.second; |
4130 | |
4131 | // No DBG_PHI means there can be no location. |
4132 | if (LowerIt == UpperIt) |
4133 | return std::nullopt; |
4134 | |
4135 | // If any DBG_PHIs referred to a location we didn't understand, don't try to |
4136 | // compute a value. There might be scenarios where we could recover a value |
4137 | // for some range of DBG_INSTR_REFs, but at this point we can have high |
4138 | // confidence that we've seen a bug. |
auto DBGPHIRange = make_range(LowerIt, UpperIt);
4140 | for (const DebugPHIRecord &DBG_PHI : DBGPHIRange) |
4141 | if (!DBG_PHI.ValueRead) |
4142 | return std::nullopt; |
4143 | |
4144 | // If there's only one DBG_PHI, then that is our value number. |
if (std::distance(LowerIt, UpperIt) == 1)
4146 | return *LowerIt->ValueRead; |
4147 | |
4148 | // Pick out the location (physreg, slot) where any PHIs must occur. It's |
4149 | // technically possible for us to merge values in different registers in each |
4150 | // block, but highly unlikely that LLVM will generate such code after register |
4151 | // allocation. |
4152 | LocIdx Loc = *LowerIt->ReadLoc; |
4153 | |
// We have several DBG_PHIs, and a use position (the Here inst). Each DBG_PHI
// simply identifies a value at a program position. We can treat each DBG_PHI
// like it's a Def of a value, and the use position is a Use of a value, just
// like SSA. We use the standard LLVM SSA updater class to determine which Def
// is used at the Use, and any PHIs that happen along the way.
4160 | // Adapted LLVM SSA Updater: |
4161 | LDVSSAUpdater Updater(Loc, MLiveIns); |
4162 | // Map of which Def or PHI is the current value in each block. |
4163 | DenseMap<LDVSSABlock *, BlockValueNum> AvailableValues; |
4164 | // Set of PHIs that we have created along the way. |
4165 | SmallVector<LDVSSAPhi *, 8> CreatedPHIs; |
4166 | |
4167 | // Each existing DBG_PHI is a Def'd value under this model. Record these Defs |
4168 | // for the SSAUpdater. |
4169 | for (const auto &DBG_PHI : DBGPHIRange) { |
LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB);
4171 | const ValueIDNum &Num = *DBG_PHI.ValueRead; |
AvailableValues.insert(std::make_pair(Block, Num.asU64()));
4173 | } |
4174 | |
LDVSSABlock *HereBlock = Updater.getSSALDVBlock(Here.getParent());
auto AvailIt = AvailableValues.find(HereBlock);
4177 | if (AvailIt != AvailableValues.end()) { |
4178 | // Actually, we already know what the value is -- the Use is in the same |
4179 | // block as the Def. |
return ValueIDNum::fromU64(AvailIt->second);
4181 | } |
4182 | |
4183 | // Otherwise, we must use the SSA Updater. It will identify the value number |
4184 | // that we are to use, and the PHIs that must happen along the way. |
4185 | SSAUpdaterImpl<LDVSSAUpdater> Impl(&Updater, &AvailableValues, &CreatedPHIs); |
BlockValueNum ResultInt = Impl.GetValue(Updater.getSSALDVBlock(Here.getParent()));
ValueIDNum Result = ValueIDNum::fromU64(ResultInt);
4188 | |
4189 | // We have the number for a PHI, or possibly live-through value, to be used |
4190 | // at this Use. There are a number of things we have to check about it though: |
4191 | // * Does any PHI use an 'Undef' (like an IMPLICIT_DEF) value? If so, this |
4192 | // Use was not completely dominated by DBG_PHIs and we should abort. |
4193 | // * Are the Defs or PHIs clobbered in a block? SSAUpdater isn't aware that |
4194 | // we've left SSA form. Validate that the inputs to each PHI are the |
4195 | // expected values. |
4196 | // * Is a PHI we've created actually a merging of values, or are all the |
4197 | // predecessor values the same, leading to a non-PHI machine value number? |
// (SSAUpdater doesn't know that either). Remap validated PHIs into the
// ValidatedValues collection below to sort this out.
4200 | DenseMap<LDVSSABlock *, ValueIDNum> ValidatedValues; |
4201 | |
4202 | // Define all the input DBG_PHI values in ValidatedValues. |
4203 | for (const auto &DBG_PHI : DBGPHIRange) { |
LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB);
4205 | const ValueIDNum &Num = *DBG_PHI.ValueRead; |
ValidatedValues.insert(std::make_pair(Block, Num));
4207 | } |
4208 | |
4209 | // Sort PHIs to validate into RPO-order. |
4210 | SmallVector<LDVSSAPhi *, 8> SortedPHIs; |
4211 | for (auto &PHI : CreatedPHIs) |
SortedPHIs.push_back(PHI);
4213 | |
llvm::sort(SortedPHIs, [&](LDVSSAPhi *A, LDVSSAPhi *B) {
4215 | return BBToOrder[&A->getParent()->BB] < BBToOrder[&B->getParent()->BB]; |
4216 | }); |
4217 | |
4218 | for (auto &PHI : SortedPHIs) { |
4219 | ValueIDNum ThisBlockValueNum = MLiveIns[PHI->ParentBlock->BB][Loc.asU64()]; |
4220 | |
4221 | // Are all these things actually defined? |
4222 | for (auto &PHIIt : PHI->IncomingValues) { |
4223 | // Any undef input means DBG_PHIs didn't dominate the use point. |
if (Updater.PoisonMap.contains(&PHIIt.first->BB))
4225 | return std::nullopt; |
4226 | |
4227 | ValueIDNum ValueToCheck; |
4228 | const ValueTable &BlockLiveOuts = MLiveOuts[PHIIt.first->BB]; |
4229 | |
auto VVal = ValidatedValues.find(PHIIt.first);
4231 | if (VVal == ValidatedValues.end()) { |
// We cross a loop, and this is a backedge. LLVM's tail duplication
4233 | // happens so late that DBG_PHI instructions should not be able to |
4234 | // migrate into loops -- meaning we can only be live-through this |
4235 | // loop. |
4236 | ValueToCheck = ThisBlockValueNum; |
4237 | } else { |
4238 | // Does the block have as a live-out, in the location we're examining, |
4239 | // the value that we expect? If not, it's been moved or clobbered. |
4240 | ValueToCheck = VVal->second; |
4241 | } |
4242 | |
4243 | if (BlockLiveOuts[Loc.asU64()] != ValueToCheck) |
4244 | return std::nullopt; |
4245 | } |
4246 | |
4247 | // Record this value as validated. |
ValidatedValues.insert({PHI->ParentBlock, ThisBlockValueNum});
4249 | } |
4250 | |
4251 | // All the PHIs are valid: we can return what the SSAUpdater said our value |
4252 | // number was. |
4253 | return Result; |
4254 | } |
4255 | |