1 | //===-- AssignmentTrackingAnalysis.cpp ------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "llvm/CodeGen/AssignmentTrackingAnalysis.h" |
10 | #include "LiveDebugValues/LiveDebugValues.h" |
11 | #include "llvm/ADT/BitVector.h" |
12 | #include "llvm/ADT/DenseMapInfo.h" |
13 | #include "llvm/ADT/IntervalMap.h" |
14 | #include "llvm/ADT/PostOrderIterator.h" |
15 | #include "llvm/ADT/STLExtras.h" |
16 | #include "llvm/ADT/Statistic.h" |
17 | #include "llvm/ADT/UniqueVector.h" |
18 | #include "llvm/BinaryFormat/Dwarf.h" |
19 | #include "llvm/IR/BasicBlock.h" |
20 | #include "llvm/IR/DataLayout.h" |
21 | #include "llvm/IR/DebugInfo.h" |
22 | #include "llvm/IR/DebugProgramInstruction.h" |
23 | #include "llvm/IR/Function.h" |
24 | #include "llvm/IR/Instruction.h" |
25 | #include "llvm/IR/IntrinsicInst.h" |
26 | #include "llvm/IR/Module.h" |
27 | #include "llvm/IR/PassManager.h" |
28 | #include "llvm/IR/PrintPasses.h" |
29 | #include "llvm/InitializePasses.h" |
30 | #include "llvm/Support/CommandLine.h" |
31 | #include "llvm/Support/ErrorHandling.h" |
32 | #include "llvm/Support/raw_ostream.h" |
33 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
34 | #include <assert.h> |
35 | #include <cstdint> |
36 | #include <optional> |
37 | #include <queue> |
38 | #include <sstream> |
39 | #include <unordered_map> |
40 | |
41 | using namespace llvm; |
42 | #define DEBUG_TYPE "debug-ata" |
43 | |
44 | STATISTIC(NumDefsScanned, "Number of dbg locs that get scanned for removal" ); |
45 | STATISTIC(NumDefsRemoved, "Number of dbg locs removed" ); |
46 | STATISTIC(NumWedgesScanned, "Number of dbg wedges scanned" ); |
47 | STATISTIC(NumWedgesChanged, "Number of dbg wedges changed" ); |
48 | |
49 | static cl::opt<unsigned> |
50 | MaxNumBlocks("debug-ata-max-blocks" , cl::init(Val: 10000), |
51 | cl::desc("Maximum num basic blocks before debug info dropped" ), |
52 | cl::Hidden); |
53 | /// Option for debugging the pass, determines if the memory location fragment |
54 | /// filling happens after generating the variable locations. |
55 | static cl::opt<bool> EnableMemLocFragFill("mem-loc-frag-fill" , cl::init(Val: true), |
56 | cl::Hidden); |
57 | /// Print the results of the analysis. Respects -filter-print-funcs. |
58 | static cl::opt<bool> PrintResults("print-debug-ata" , cl::init(Val: false), |
59 | cl::Hidden); |
60 | |
61 | /// Coalesce adjacent dbg locs describing memory locations that have contiguous |
62 | /// fragments. This reduces the cost of LiveDebugValues which does SSA |
63 | /// construction for each explicitly stated variable fragment. |
64 | static cl::opt<cl::boolOrDefault> |
65 | CoalesceAdjacentFragmentsOpt("debug-ata-coalesce-frags" , cl::Hidden); |
66 | |
67 | // Implicit conversions are disabled for enum class types, so unfortunately we |
68 | // need to create a DenseMapInfo wrapper around the specified underlying type. |
69 | template <> struct llvm::DenseMapInfo<VariableID> { |
70 | using Wrapped = DenseMapInfo<unsigned>; |
71 | static inline VariableID getEmptyKey() { |
72 | return static_cast<VariableID>(Wrapped::getEmptyKey()); |
73 | } |
74 | static inline VariableID getTombstoneKey() { |
75 | return static_cast<VariableID>(Wrapped::getTombstoneKey()); |
76 | } |
77 | static unsigned getHashValue(const VariableID &Val) { |
78 | return Wrapped::getHashValue(Val: static_cast<unsigned>(Val)); |
79 | } |
80 | static bool isEqual(const VariableID &LHS, const VariableID &RHS) { |
81 | return LHS == RHS; |
82 | } |
83 | }; |
84 | |
85 | using VarLocInsertPt = PointerUnion<const Instruction *, const DbgRecord *>; |
86 | |
87 | namespace std { |
88 | template <> struct hash<VarLocInsertPt> { |
89 | using argument_type = VarLocInsertPt; |
90 | using result_type = std::size_t; |
91 | |
92 | result_type operator()(const argument_type &Arg) const { |
93 | return std::hash<void *>()(Arg.getOpaqueValue()); |
94 | } |
95 | }; |
96 | } // namespace std |
97 | |
98 | /// Helper class to build FunctionVarLocs, since that class isn't easy to |
99 | /// modify. TODO: There's not a great deal of value in the split, it could be |
100 | /// worth merging the two classes. |
101 | class FunctionVarLocsBuilder { |
102 | friend FunctionVarLocs; |
103 | UniqueVector<DebugVariable> Variables; |
104 | // Use an unordered_map so we don't invalidate iterators after |
105 | // insert/modifications. |
106 | std::unordered_map<VarLocInsertPt, SmallVector<VarLocInfo>> VarLocsBeforeInst; |
107 | |
108 | SmallVector<VarLocInfo> SingleLocVars; |
109 | |
110 | public: |
111 | unsigned getNumVariables() const { return Variables.size(); } |
112 | |
113 | /// Find or insert \p V and return the ID. |
114 | VariableID insertVariable(DebugVariable V) { |
115 | return static_cast<VariableID>(Variables.insert(Entry: V)); |
116 | } |
117 | |
118 | /// Get a variable from its \p ID. |
119 | const DebugVariable &getVariable(VariableID ID) const { |
120 | return Variables[static_cast<unsigned>(ID)]; |
121 | } |
122 | |
123 | /// Return ptr to wedge of defs or nullptr if no defs come just before /p |
124 | /// Before. |
125 | const SmallVectorImpl<VarLocInfo> *getWedge(VarLocInsertPt Before) const { |
126 | auto R = VarLocsBeforeInst.find(x: Before); |
127 | if (R == VarLocsBeforeInst.end()) |
128 | return nullptr; |
129 | return &R->second; |
130 | } |
131 | |
132 | /// Replace the defs that come just before /p Before with /p Wedge. |
133 | void setWedge(VarLocInsertPt Before, SmallVector<VarLocInfo> &&Wedge) { |
134 | VarLocsBeforeInst[Before] = std::move(Wedge); |
135 | } |
136 | |
137 | /// Add a def for a variable that is valid for its lifetime. |
138 | void addSingleLocVar(DebugVariable Var, DIExpression *Expr, DebugLoc DL, |
139 | RawLocationWrapper R) { |
140 | VarLocInfo VarLoc; |
141 | VarLoc.VariableID = insertVariable(V: Var); |
142 | VarLoc.Expr = Expr; |
143 | VarLoc.DL = DL; |
144 | VarLoc.Values = R; |
145 | SingleLocVars.emplace_back(Args&: VarLoc); |
146 | } |
147 | |
148 | /// Add a def to the wedge of defs just before /p Before. |
149 | void addVarLoc(VarLocInsertPt Before, DebugVariable Var, DIExpression *Expr, |
150 | DebugLoc DL, RawLocationWrapper R) { |
151 | VarLocInfo VarLoc; |
152 | VarLoc.VariableID = insertVariable(V: Var); |
153 | VarLoc.Expr = Expr; |
154 | VarLoc.DL = DL; |
155 | VarLoc.Values = R; |
156 | VarLocsBeforeInst[Before].emplace_back(Args&: VarLoc); |
157 | } |
158 | }; |
159 | |
160 | void FunctionVarLocs::print(raw_ostream &OS, const Function &Fn) const { |
161 | // Print the variable table first. TODO: Sorting by variable could make the |
162 | // output more stable? |
163 | unsigned Counter = -1; |
164 | OS << "=== Variables ===\n" ; |
165 | for (const DebugVariable &V : Variables) { |
166 | ++Counter; |
167 | // Skip first entry because it is a dummy entry. |
168 | if (Counter == 0) { |
169 | continue; |
170 | } |
171 | OS << "[" << Counter << "] " << V.getVariable()->getName(); |
172 | if (auto F = V.getFragment()) |
173 | OS << " bits [" << F->OffsetInBits << ", " |
174 | << F->OffsetInBits + F->SizeInBits << ")" ; |
175 | if (const auto *IA = V.getInlinedAt()) |
176 | OS << " inlined-at " << *IA; |
177 | OS << "\n" ; |
178 | } |
179 | |
180 | auto PrintLoc = [&OS](const VarLocInfo &Loc) { |
181 | OS << "DEF Var=[" << (unsigned)Loc.VariableID << "]" |
182 | << " Expr=" << *Loc.Expr << " Values=(" ; |
183 | for (auto *Op : Loc.Values.location_ops()) { |
184 | errs() << Op->getName() << " " ; |
185 | } |
186 | errs() << ")\n" ; |
187 | }; |
188 | |
189 | // Print the single location variables. |
190 | OS << "=== Single location vars ===\n" ; |
191 | for (auto It = single_locs_begin(), End = single_locs_end(); It != End; |
192 | ++It) { |
193 | PrintLoc(*It); |
194 | } |
195 | |
196 | // Print the non-single-location defs in line with IR. |
197 | OS << "=== In-line variable defs ===" ; |
198 | for (const BasicBlock &BB : Fn) { |
199 | OS << "\n" << BB.getName() << ":\n" ; |
200 | for (const Instruction &I : BB) { |
201 | for (auto It = locs_begin(Before: &I), End = locs_end(Before: &I); It != End; ++It) { |
202 | PrintLoc(*It); |
203 | } |
204 | OS << I << "\n" ; |
205 | } |
206 | } |
207 | } |
208 | |
209 | void FunctionVarLocs::init(FunctionVarLocsBuilder &Builder) { |
210 | // Add the single-location variables first. |
211 | for (const auto &VarLoc : Builder.SingleLocVars) |
212 | VarLocRecords.emplace_back(Args: VarLoc); |
213 | // Mark the end of the section. |
214 | SingleVarLocEnd = VarLocRecords.size(); |
215 | |
216 | // Insert a contiguous block of VarLocInfos for each instruction, mapping it |
217 | // to the start and end position in the vector with VarLocsBeforeInst. This |
218 | // block includes VarLocs for any DbgVariableRecords attached to that |
219 | // instruction. |
220 | for (auto &P : Builder.VarLocsBeforeInst) { |
221 | // Process VarLocs attached to a DbgRecord alongside their marker |
222 | // Instruction. |
223 | if (isa<const DbgRecord *>(Val: P.first)) |
224 | continue; |
225 | const Instruction *I = cast<const Instruction *>(Val: P.first); |
226 | unsigned BlockStart = VarLocRecords.size(); |
227 | // Any VarLocInfos attached to a DbgRecord should now be remapped to their |
228 | // marker Instruction, in order of DbgRecord appearance and prior to any |
229 | // VarLocInfos attached directly to that instruction. |
230 | for (const DbgVariableRecord &DVR : filterDbgVars(R: I->getDbgRecordRange())) { |
231 | // Even though DVR defines a variable location, VarLocsBeforeInst can |
232 | // still be empty if that VarLoc was redundant. |
233 | if (!Builder.VarLocsBeforeInst.count(x: &DVR)) |
234 | continue; |
235 | for (const VarLocInfo &VarLoc : Builder.VarLocsBeforeInst[&DVR]) |
236 | VarLocRecords.emplace_back(Args: VarLoc); |
237 | } |
238 | for (const VarLocInfo &VarLoc : P.second) |
239 | VarLocRecords.emplace_back(Args: VarLoc); |
240 | unsigned BlockEnd = VarLocRecords.size(); |
241 | // Record the start and end indices. |
242 | if (BlockEnd != BlockStart) |
243 | VarLocsBeforeInst[I] = {BlockStart, BlockEnd}; |
244 | } |
245 | |
246 | // Copy the Variables vector from the builder's UniqueVector. |
247 | assert(Variables.empty() && "Expect clear before init" ); |
248 | // UniqueVectors IDs are one-based (which means the VarLocInfo VarID values |
249 | // are one-based) so reserve an extra and insert a dummy. |
250 | Variables.reserve(N: Builder.Variables.size() + 1); |
251 | Variables.push_back(Elt: DebugVariable(nullptr, std::nullopt, nullptr)); |
252 | Variables.append(in_start: Builder.Variables.begin(), in_end: Builder.Variables.end()); |
253 | } |
254 | |
255 | void FunctionVarLocs::clear() { |
256 | Variables.clear(); |
257 | VarLocRecords.clear(); |
258 | VarLocsBeforeInst.clear(); |
259 | SingleVarLocEnd = 0; |
260 | } |
261 | |
262 | /// Walk backwards along constant GEPs and bitcasts to the base storage from \p |
263 | /// Start as far as possible. Prepend \Expression with the offset and append it |
264 | /// with a DW_OP_deref that haes been implicit until now. Returns the walked-to |
265 | /// value and modified expression. |
266 | static std::pair<Value *, DIExpression *> |
267 | walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start, |
268 | DIExpression *Expression) { |
269 | APInt OffsetInBytes(DL.getTypeSizeInBits(Ty: Start->getType()), false); |
270 | Value *End = |
271 | Start->stripAndAccumulateInBoundsConstantOffsets(DL, Offset&: OffsetInBytes); |
272 | SmallVector<uint64_t, 3> Ops; |
273 | if (OffsetInBytes.getBoolValue()) { |
274 | Ops = {dwarf::DW_OP_plus_uconst, OffsetInBytes.getZExtValue()}; |
275 | Expression = DIExpression::prependOpcodes( |
276 | Expr: Expression, Ops, /*StackValue=*/false, /*EntryValue=*/false); |
277 | } |
278 | Expression = DIExpression::append(Expr: Expression, Ops: {dwarf::DW_OP_deref}); |
279 | return {End, Expression}; |
280 | } |
281 | |
282 | /// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression |
283 | /// doesn't explicitly describe a memory location with DW_OP_deref or if the |
284 | /// expression is too complex to interpret. |
285 | static std::optional<int64_t> |
286 | getDerefOffsetInBytes(const DIExpression *DIExpr) { |
287 | int64_t Offset = 0; |
288 | const unsigned NumElements = DIExpr->getNumElements(); |
289 | const auto Elements = DIExpr->getElements(); |
290 | unsigned ExpectedDerefIdx = 0; |
291 | // Extract the offset. |
292 | if (NumElements > 2 && Elements[0] == dwarf::DW_OP_plus_uconst) { |
293 | Offset = Elements[1]; |
294 | ExpectedDerefIdx = 2; |
295 | } else if (NumElements > 3 && Elements[0] == dwarf::DW_OP_constu) { |
296 | ExpectedDerefIdx = 3; |
297 | if (Elements[2] == dwarf::DW_OP_plus) |
298 | Offset = Elements[1]; |
299 | else if (Elements[2] == dwarf::DW_OP_minus) |
300 | Offset = -Elements[1]; |
301 | else |
302 | return std::nullopt; |
303 | } |
304 | |
305 | // If that's all there is it means there's no deref. |
306 | if (ExpectedDerefIdx >= NumElements) |
307 | return std::nullopt; |
308 | |
309 | // Check the next element is DW_OP_deref - otherwise this is too complex or |
310 | // isn't a deref expression. |
311 | if (Elements[ExpectedDerefIdx] != dwarf::DW_OP_deref) |
312 | return std::nullopt; |
313 | |
314 | // Check the final operation is either the DW_OP_deref or is a fragment. |
315 | if (NumElements == ExpectedDerefIdx + 1) |
316 | return Offset; // Ends with deref. |
317 | unsigned ExpectedFragFirstIdx = ExpectedDerefIdx + 1; |
318 | unsigned ExpectedFragFinalIdx = ExpectedFragFirstIdx + 2; |
319 | if (NumElements == ExpectedFragFinalIdx + 1 && |
320 | Elements[ExpectedFragFirstIdx] == dwarf::DW_OP_LLVM_fragment) |
321 | return Offset; // Ends with deref + fragment. |
322 | |
323 | // Don't bother trying to interpret anything more complex. |
324 | return std::nullopt; |
325 | } |
326 | |
327 | /// A whole (unfragmented) source variable. |
328 | using DebugAggregate = std::pair<const DILocalVariable *, const DILocation *>; |
329 | static DebugAggregate getAggregate(const DbgVariableIntrinsic *DII) { |
330 | return DebugAggregate(DII->getVariable(), DII->getDebugLoc().getInlinedAt()); |
331 | } |
332 | static DebugAggregate getAggregate(const DebugVariable &Var) { |
333 | return DebugAggregate(Var.getVariable(), Var.getInlinedAt()); |
334 | } |
335 | |
336 | static bool shouldCoalesceFragments(Function &F) { |
337 | // Enabling fragment coalescing reduces compiler run time when instruction |
338 | // referencing is enabled. However, it may cause LiveDebugVariables to create |
339 | // incorrect locations. Since instruction-referencing mode effectively |
340 | // bypasses LiveDebugVariables we only enable coalescing if the cl::opt flag |
341 | // has not been explicitly set and instruction-referencing is turned on. |
342 | switch (CoalesceAdjacentFragmentsOpt) { |
343 | case cl::boolOrDefault::BOU_UNSET: |
344 | return debuginfoShouldUseDebugInstrRef( |
345 | T: Triple(F.getParent()->getTargetTriple())); |
346 | case cl::boolOrDefault::BOU_TRUE: |
347 | return true; |
348 | case cl::boolOrDefault::BOU_FALSE: |
349 | return false; |
350 | } |
351 | llvm_unreachable("Unknown boolOrDefault value" ); |
352 | } |
353 | |
354 | namespace { |
355 | /// In dwarf emission, the following sequence |
356 | /// 1. dbg.value ... Fragment(0, 64) |
357 | /// 2. dbg.value ... Fragment(0, 32) |
358 | /// effectively sets Fragment(32, 32) to undef (each def sets all bits not in |
359 | /// the intersection of the fragments to having "no location"). This makes |
360 | /// sense for implicit location values because splitting the computed values |
361 | /// could be troublesome, and is probably quite uncommon. When we convert |
362 | /// dbg.assigns to dbg.value+deref this kind of thing is common, and describing |
363 | /// a location (memory) rather than a value means we don't need to worry about |
364 | /// splitting any values, so we try to recover the rest of the fragment |
365 | /// location here. |
366 | /// This class performs a(nother) dataflow analysis over the function, adding |
367 | /// variable locations so that any bits of a variable with a memory location |
368 | /// have that location explicitly reinstated at each subsequent variable |
369 | /// location definition that that doesn't overwrite those bits. i.e. after a |
370 | /// variable location def, insert new defs for the memory location with |
371 | /// fragments for the difference of "all bits currently in memory" and "the |
372 | /// fragment of the second def". |
373 | class MemLocFragmentFill { |
374 | Function &Fn; |
375 | FunctionVarLocsBuilder *FnVarLocs; |
376 | const DenseSet<DebugAggregate> *VarsWithStackSlot; |
377 | bool CoalesceAdjacentFragments; |
378 | |
379 | // 0 = no memory location. |
380 | using BaseAddress = unsigned; |
381 | using OffsetInBitsTy = unsigned; |
382 | using FragTraits = IntervalMapHalfOpenInfo<OffsetInBitsTy>; |
383 | using FragsInMemMap = IntervalMap< |
384 | OffsetInBitsTy, BaseAddress, |
385 | IntervalMapImpl::NodeSizer<OffsetInBitsTy, BaseAddress>::LeafSize, |
386 | FragTraits>; |
387 | FragsInMemMap::Allocator IntervalMapAlloc; |
388 | using VarFragMap = DenseMap<unsigned, FragsInMemMap>; |
389 | |
390 | /// IDs for memory location base addresses in maps. Use 0 to indicate that |
391 | /// there's no memory location. |
392 | UniqueVector<RawLocationWrapper> Bases; |
393 | UniqueVector<DebugAggregate> Aggregates; |
394 | DenseMap<const BasicBlock *, VarFragMap> LiveIn; |
395 | DenseMap<const BasicBlock *, VarFragMap> LiveOut; |
396 | |
397 | struct FragMemLoc { |
398 | unsigned Var; |
399 | unsigned Base; |
400 | unsigned OffsetInBits; |
401 | unsigned SizeInBits; |
402 | DebugLoc DL; |
403 | }; |
404 | using InsertMap = MapVector<VarLocInsertPt, SmallVector<FragMemLoc>>; |
405 | |
406 | /// BBInsertBeforeMap holds a description for the set of location defs to be |
407 | /// inserted after the analysis is complete. It is updated during the dataflow |
408 | /// and the entry for a block is CLEARED each time it is (re-)visited. After |
409 | /// the dataflow is complete, each block entry will contain the set of defs |
410 | /// calculated during the final (fixed-point) iteration. |
411 | DenseMap<const BasicBlock *, InsertMap> BBInsertBeforeMap; |
412 | |
413 | static bool intervalMapsAreEqual(const FragsInMemMap &A, |
414 | const FragsInMemMap &B) { |
415 | auto AIt = A.begin(), AEnd = A.end(); |
416 | auto BIt = B.begin(), BEnd = B.end(); |
417 | for (; AIt != AEnd; ++AIt, ++BIt) { |
418 | if (BIt == BEnd) |
419 | return false; // B has fewer elements than A. |
420 | if (AIt.start() != BIt.start() || AIt.stop() != BIt.stop()) |
421 | return false; // Interval is different. |
422 | if (*AIt != *BIt) |
423 | return false; // Value at interval is different. |
424 | } |
425 | // AIt == AEnd. Check BIt is also now at end. |
426 | return BIt == BEnd; |
427 | } |
428 | |
429 | static bool varFragMapsAreEqual(const VarFragMap &A, const VarFragMap &B) { |
430 | if (A.size() != B.size()) |
431 | return false; |
432 | for (const auto &APair : A) { |
433 | auto BIt = B.find(Val: APair.first); |
434 | if (BIt == B.end()) |
435 | return false; |
436 | if (!intervalMapsAreEqual(A: APair.second, B: BIt->second)) |
437 | return false; |
438 | } |
439 | return true; |
440 | } |
441 | |
442 | /// Return a string for the value that \p BaseID represents. |
443 | std::string toString(unsigned BaseID) { |
444 | if (BaseID) |
445 | return Bases[BaseID].getVariableLocationOp(OpIdx: 0)->getName().str(); |
446 | else |
447 | return "None" ; |
448 | } |
449 | |
450 | /// Format string describing an FragsInMemMap (IntervalMap) interval. |
451 | std::string toString(FragsInMemMap::const_iterator It, bool Newline = true) { |
452 | std::string String; |
453 | std::stringstream S(String); |
454 | if (It.valid()) { |
455 | S << "[" << It.start() << ", " << It.stop() |
456 | << "): " << toString(BaseID: It.value()); |
457 | } else { |
458 | S << "invalid iterator (end)" ; |
459 | } |
460 | if (Newline) |
461 | S << "\n" ; |
462 | return S.str(); |
463 | }; |
464 | |
465 | FragsInMemMap meetFragments(const FragsInMemMap &A, const FragsInMemMap &B) { |
466 | FragsInMemMap Result(IntervalMapAlloc); |
467 | for (auto AIt = A.begin(), AEnd = A.end(); AIt != AEnd; ++AIt) { |
468 | LLVM_DEBUG(dbgs() << "a " << toString(AIt)); |
469 | // This is basically copied from process() and inverted (process is |
470 | // performing something like a union whereas this is more of an |
471 | // intersect). |
472 | |
473 | // There's no work to do if interval `a` overlaps no fragments in map `B`. |
474 | if (!B.overlaps(a: AIt.start(), b: AIt.stop())) |
475 | continue; |
476 | |
477 | // Does StartBit intersect an existing fragment? |
478 | auto FirstOverlap = B.find(x: AIt.start()); |
479 | assert(FirstOverlap != B.end()); |
480 | bool IntersectStart = FirstOverlap.start() < AIt.start(); |
481 | LLVM_DEBUG(dbgs() << "- FirstOverlap " << toString(FirstOverlap, false) |
482 | << ", IntersectStart: " << IntersectStart << "\n" ); |
483 | |
484 | // Does EndBit intersect an existing fragment? |
485 | auto LastOverlap = B.find(x: AIt.stop()); |
486 | bool IntersectEnd = |
487 | LastOverlap != B.end() && LastOverlap.start() < AIt.stop(); |
488 | LLVM_DEBUG(dbgs() << "- LastOverlap " << toString(LastOverlap, false) |
489 | << ", IntersectEnd: " << IntersectEnd << "\n" ); |
490 | |
491 | // Check if both ends of `a` intersect the same interval `b`. |
492 | if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) { |
493 | // Insert `a` (`a` is contained in `b`) if the values match. |
494 | // [ a ] |
495 | // [ - b - ] |
496 | // - |
497 | // [ r ] |
498 | LLVM_DEBUG(dbgs() << "- a is contained within " |
499 | << toString(FirstOverlap)); |
500 | if (*AIt && *AIt == *FirstOverlap) |
501 | Result.insert(a: AIt.start(), b: AIt.stop(), y: *AIt); |
502 | } else { |
503 | // There's an overlap but `a` is not fully contained within |
504 | // `b`. Shorten any end-point intersections. |
505 | // [ - a - ] |
506 | // [ - b - ] |
507 | // - |
508 | // [ r ] |
509 | auto Next = FirstOverlap; |
510 | if (IntersectStart) { |
511 | LLVM_DEBUG(dbgs() << "- insert intersection of a and " |
512 | << toString(FirstOverlap)); |
513 | if (*AIt && *AIt == *FirstOverlap) |
514 | Result.insert(a: AIt.start(), b: FirstOverlap.stop(), y: *AIt); |
515 | ++Next; |
516 | } |
517 | // [ - a - ] |
518 | // [ - b - ] |
519 | // - |
520 | // [ r ] |
521 | if (IntersectEnd) { |
522 | LLVM_DEBUG(dbgs() << "- insert intersection of a and " |
523 | << toString(LastOverlap)); |
524 | if (*AIt && *AIt == *LastOverlap) |
525 | Result.insert(a: LastOverlap.start(), b: AIt.stop(), y: *AIt); |
526 | } |
527 | |
528 | // Insert all intervals in map `B` that are contained within interval |
529 | // `a` where the values match. |
530 | // [ - - a - - ] |
531 | // [ b1 ] [ b2 ] |
532 | // - |
533 | // [ r1 ] [ r2 ] |
534 | while (Next != B.end() && Next.start() < AIt.stop() && |
535 | Next.stop() <= AIt.stop()) { |
536 | LLVM_DEBUG(dbgs() |
537 | << "- insert intersection of a and " << toString(Next)); |
538 | if (*AIt && *AIt == *Next) |
539 | Result.insert(a: Next.start(), b: Next.stop(), y: *Next); |
540 | ++Next; |
541 | } |
542 | } |
543 | } |
544 | return Result; |
545 | } |
546 | |
547 | /// Meet \p A and \p B, storing the result in \p A. |
548 | void meetVars(VarFragMap &A, const VarFragMap &B) { |
549 | // Meet A and B. |
550 | // |
551 | // Result = meet(a, b) for a in A, b in B where Var(a) == Var(b) |
552 | for (auto It = A.begin(), End = A.end(); It != End; ++It) { |
553 | unsigned AVar = It->first; |
554 | FragsInMemMap &AFrags = It->second; |
555 | auto BIt = B.find(Val: AVar); |
556 | if (BIt == B.end()) { |
557 | A.erase(I: It); |
558 | continue; // Var has no bits defined in B. |
559 | } |
560 | LLVM_DEBUG(dbgs() << "meet fragment maps for " |
561 | << Aggregates[AVar].first->getName() << "\n" ); |
562 | AFrags = meetFragments(A: AFrags, B: BIt->second); |
563 | } |
564 | } |
565 | |
566 | bool meet(const BasicBlock &BB, |
567 | const SmallPtrSet<BasicBlock *, 16> &Visited) { |
568 | LLVM_DEBUG(dbgs() << "meet block info from preds of " << BB.getName() |
569 | << "\n" ); |
570 | |
571 | VarFragMap BBLiveIn; |
572 | bool FirstMeet = true; |
573 | // LiveIn locs for BB is the meet of the already-processed preds' LiveOut |
574 | // locs. |
575 | for (const BasicBlock *Pred : predecessors(BB: &BB)) { |
576 | // Ignore preds that haven't been processed yet. This is essentially the |
577 | // same as initialising all variables to implicit top value (⊤) which is |
578 | // the identity value for the meet operation. |
579 | if (!Visited.count(Ptr: Pred)) |
580 | continue; |
581 | |
582 | auto PredLiveOut = LiveOut.find(Val: Pred); |
583 | assert(PredLiveOut != LiveOut.end()); |
584 | |
585 | if (FirstMeet) { |
586 | LLVM_DEBUG(dbgs() << "BBLiveIn = " << Pred->getName() << "\n" ); |
587 | BBLiveIn = PredLiveOut->second; |
588 | FirstMeet = false; |
589 | } else { |
590 | LLVM_DEBUG(dbgs() << "BBLiveIn = meet BBLiveIn, " << Pred->getName() |
591 | << "\n" ); |
592 | meetVars(A&: BBLiveIn, B: PredLiveOut->second); |
593 | } |
594 | |
595 | // An empty set is ⊥ for the intersect-like meet operation. If we've |
596 | // already got ⊥ there's no need to run the code - we know the result is |
597 | // ⊥ since `meet(a, ⊥) = ⊥`. |
598 | if (BBLiveIn.size() == 0) |
599 | break; |
600 | } |
601 | |
602 | auto CurrentLiveInEntry = LiveIn.find(Val: &BB); |
603 | // If there's no LiveIn entry for the block yet, add it. |
604 | if (CurrentLiveInEntry == LiveIn.end()) { |
605 | LLVM_DEBUG(dbgs() << "change=true (first) on meet on " << BB.getName() |
606 | << "\n" ); |
607 | LiveIn[&BB] = std::move(BBLiveIn); |
608 | return /*Changed=*/true; |
609 | } |
610 | |
611 | // If the LiveIn set has changed (expensive check) update it and return |
612 | // true. |
613 | if (!varFragMapsAreEqual(A: BBLiveIn, B: CurrentLiveInEntry->second)) { |
614 | LLVM_DEBUG(dbgs() << "change=true on meet on " << BB.getName() << "\n" ); |
615 | CurrentLiveInEntry->second = std::move(BBLiveIn); |
616 | return /*Changed=*/true; |
617 | } |
618 | |
619 | LLVM_DEBUG(dbgs() << "change=false on meet on " << BB.getName() << "\n" ); |
620 | return /*Changed=*/false; |
621 | } |
622 | |
623 | void insertMemLoc(BasicBlock &BB, VarLocInsertPt Before, unsigned Var, |
624 | unsigned StartBit, unsigned EndBit, unsigned Base, |
625 | DebugLoc DL) { |
626 | assert(StartBit < EndBit && "Cannot create fragment of size <= 0" ); |
627 | if (!Base) |
628 | return; |
629 | FragMemLoc Loc; |
630 | Loc.Var = Var; |
631 | Loc.OffsetInBits = StartBit; |
632 | Loc.SizeInBits = EndBit - StartBit; |
633 | assert(Base && "Expected a non-zero ID for Base address" ); |
634 | Loc.Base = Base; |
635 | Loc.DL = DL; |
636 | BBInsertBeforeMap[&BB][Before].push_back(Elt: Loc); |
637 | LLVM_DEBUG(dbgs() << "Add mem def for " << Aggregates[Var].first->getName() |
638 | << " bits [" << StartBit << ", " << EndBit << ")\n" ); |
639 | } |
640 | |
641 | /// Inserts a new dbg def if the interval found when looking up \p StartBit |
642 | /// in \p FragMap starts before \p StartBit or ends after \p EndBit (which |
643 | /// indicates - assuming StartBit->EndBit has just been inserted - that the |
644 | /// slice has been coalesced in the map). |
645 | void coalesceFragments(BasicBlock &BB, VarLocInsertPt Before, unsigned Var, |
646 | unsigned StartBit, unsigned EndBit, unsigned Base, |
647 | DebugLoc DL, const FragsInMemMap &FragMap) { |
648 | if (!CoalesceAdjacentFragments) |
649 | return; |
650 | // We've inserted the location into the map. The map will have coalesced |
651 | // adjacent intervals (variable fragments) that describe the same memory |
652 | // location. Use this knowledge to insert a debug location that describes |
653 | // that coalesced fragment. This may eclipse other locs we've just |
654 | // inserted. This is okay as redundant locs will be cleaned up later. |
655 | auto CoalescedFrag = FragMap.find(x: StartBit); |
656 | // Bail if no coalescing has taken place. |
657 | if (CoalescedFrag.start() == StartBit && CoalescedFrag.stop() == EndBit) |
658 | return; |
659 | |
660 | LLVM_DEBUG(dbgs() << "- Insert loc for bits " << CoalescedFrag.start() |
661 | << " to " << CoalescedFrag.stop() << "\n" ); |
662 | insertMemLoc(BB, Before, Var, StartBit: CoalescedFrag.start(), EndBit: CoalescedFrag.stop(), |
663 | Base, DL); |
664 | } |
665 | |
666 | void addDef(const VarLocInfo &VarLoc, VarLocInsertPt Before, BasicBlock &BB, |
667 | VarFragMap &LiveSet) { |
668 | DebugVariable DbgVar = FnVarLocs->getVariable(ID: VarLoc.VariableID); |
669 | if (skipVariable(V: DbgVar.getVariable())) |
670 | return; |
671 | // Don't bother doing anything for this variables if we know it's fully |
672 | // promoted. We're only interested in variables that (sometimes) live on |
673 | // the stack here. |
674 | if (!VarsWithStackSlot->count(V: getAggregate(Var: DbgVar))) |
675 | return; |
676 | unsigned Var = Aggregates.insert( |
677 | Entry: DebugAggregate(DbgVar.getVariable(), VarLoc.DL.getInlinedAt())); |
678 | |
679 | // [StartBit: EndBit) are the bits affected by this def. |
680 | const DIExpression *DIExpr = VarLoc.Expr; |
681 | unsigned StartBit; |
682 | unsigned EndBit; |
683 | if (auto Frag = DIExpr->getFragmentInfo()) { |
684 | StartBit = Frag->OffsetInBits; |
685 | EndBit = StartBit + Frag->SizeInBits; |
686 | } else { |
687 | assert(static_cast<bool>(DbgVar.getVariable()->getSizeInBits())); |
688 | StartBit = 0; |
689 | EndBit = *DbgVar.getVariable()->getSizeInBits(); |
690 | } |
691 | |
692 | // We will only fill fragments for simple memory-describing dbg.value |
693 | // intrinsics. If the fragment offset is the same as the offset from the |
694 | // base pointer, do The Thing, otherwise fall back to normal dbg.value |
695 | // behaviour. AssignmentTrackingLowering has generated DIExpressions |
696 | // written in terms of the base pointer. |
697 | // TODO: Remove this condition since the fragment offset doesn't always |
698 | // equal the offset from base pointer (e.g. for a SROA-split variable). |
699 | const auto DerefOffsetInBytes = getDerefOffsetInBytes(DIExpr); |
700 | const unsigned Base = |
701 | DerefOffsetInBytes && *DerefOffsetInBytes * 8 == StartBit |
702 | ? Bases.insert(Entry: VarLoc.Values) |
703 | : 0; |
704 | LLVM_DEBUG(dbgs() << "DEF " << DbgVar.getVariable()->getName() << " [" |
705 | << StartBit << ", " << EndBit << "): " << toString(Base) |
706 | << "\n" ); |
707 | |
708 | // First of all, any locs that use mem that are disrupted need reinstating. |
709 | // Unfortunately, IntervalMap doesn't let us insert intervals that overlap |
710 | // with existing intervals so this code involves a lot of fiddling around |
711 | // with intervals to do that manually. |
712 | auto FragIt = LiveSet.find(Val: Var); |
713 | |
714 | // Check if the variable does not exist in the map. |
715 | if (FragIt == LiveSet.end()) { |
716 | // Add this variable to the BB map. |
717 | auto P = LiveSet.try_emplace(Key: Var, Args: FragsInMemMap(IntervalMapAlloc)); |
718 | assert(P.second && "Var already in map?" ); |
719 | // Add the interval to the fragment map. |
720 | P.first->second.insert(a: StartBit, b: EndBit, y: Base); |
721 | return; |
722 | } |
723 | // The variable has an entry in the map. |
724 | |
725 | FragsInMemMap &FragMap = FragIt->second; |
726 | // First check the easy case: the new fragment `f` doesn't overlap with any |
727 | // intervals. |
728 | if (!FragMap.overlaps(a: StartBit, b: EndBit)) { |
729 | LLVM_DEBUG(dbgs() << "- No overlaps\n" ); |
730 | FragMap.insert(a: StartBit, b: EndBit, y: Base); |
731 | coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, DL: VarLoc.DL, |
732 | FragMap); |
733 | return; |
734 | } |
735 | // There is at least one overlap. |
736 | |
737 | // Does StartBit intersect an existing fragment? |
738 | auto FirstOverlap = FragMap.find(x: StartBit); |
739 | assert(FirstOverlap != FragMap.end()); |
740 | bool IntersectStart = FirstOverlap.start() < StartBit; |
741 | |
742 | // Does EndBit intersect an existing fragment? |
743 | auto LastOverlap = FragMap.find(x: EndBit); |
744 | bool IntersectEnd = LastOverlap.valid() && LastOverlap.start() < EndBit; |
745 | |
746 | // Check if both ends of `f` intersect the same interval `i`. |
747 | if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) { |
748 | LLVM_DEBUG(dbgs() << "- Intersect single interval @ both ends\n" ); |
749 | // Shorten `i` so that there's space to insert `f`. |
750 | // [ f ] |
751 | // [ - i - ] |
752 | // + |
753 | // [ i ][ f ][ i ] |
754 | |
755 | // Save values for use after inserting a new interval. |
756 | auto EndBitOfOverlap = FirstOverlap.stop(); |
757 | unsigned OverlapValue = FirstOverlap.value(); |
758 | |
759 | // Shorten the overlapping interval. |
760 | FirstOverlap.setStop(StartBit); |
761 | insertMemLoc(BB, Before, Var, StartBit: FirstOverlap.start(), EndBit: StartBit, |
762 | Base: OverlapValue, DL: VarLoc.DL); |
763 | |
764 | // Insert a new interval to represent the end part. |
765 | FragMap.insert(a: EndBit, b: EndBitOfOverlap, y: OverlapValue); |
766 | insertMemLoc(BB, Before, Var, StartBit: EndBit, EndBit: EndBitOfOverlap, Base: OverlapValue, |
767 | DL: VarLoc.DL); |
768 | |
769 | // Insert the new (middle) fragment now there is space. |
770 | FragMap.insert(a: StartBit, b: EndBit, y: Base); |
771 | } else { |
772 | // There's an overlap but `f` may not be fully contained within |
773 | // `i`. Shorten any end-point intersections so that we can then |
774 | // insert `f`. |
775 | // [ - f - ] |
776 | // [ - i - ] |
777 | // | | |
778 | // [ i ] |
779 | // Shorten any end-point intersections. |
780 | if (IntersectStart) { |
781 | LLVM_DEBUG(dbgs() << "- Intersect interval at start\n" ); |
782 | // Split off at the intersection. |
783 | FirstOverlap.setStop(StartBit); |
784 | insertMemLoc(BB, Before, Var, StartBit: FirstOverlap.start(), EndBit: StartBit, |
785 | Base: *FirstOverlap, DL: VarLoc.DL); |
786 | } |
787 | // [ - f - ] |
788 | // [ - i - ] |
789 | // | | |
790 | // [ i ] |
791 | if (IntersectEnd) { |
792 | LLVM_DEBUG(dbgs() << "- Intersect interval at end\n" ); |
793 | // Split off at the intersection. |
794 | LastOverlap.setStart(EndBit); |
795 | insertMemLoc(BB, Before, Var, StartBit: EndBit, EndBit: LastOverlap.stop(), Base: *LastOverlap, |
796 | DL: VarLoc.DL); |
797 | } |
798 | |
799 | LLVM_DEBUG(dbgs() << "- Erase intervals contained within\n" ); |
800 | // FirstOverlap and LastOverlap have been shortened such that they're |
801 | // no longer overlapping with [StartBit, EndBit). Delete any overlaps |
802 | // that remain (these will be fully contained within `f`). |
803 | // [ - f - ] } |
804 | // [ - i - ] } Intersection shortening that has happened above. |
805 | // | | } |
806 | // [ i ] } |
807 | // ----------------- |
808 | // [i2 ] } Intervals fully contained within `f` get erased. |
809 | // ----------------- |
810 | // [ - f - ][ i ] } Completed insertion. |
811 | auto It = FirstOverlap; |
812 | if (IntersectStart) |
813 | ++It; // IntersectStart: first overlap has been shortened. |
814 | while (It.valid() && It.start() >= StartBit && It.stop() <= EndBit) { |
815 | LLVM_DEBUG(dbgs() << "- Erase " << toString(It)); |
816 | It.erase(); // This increments It after removing the interval. |
817 | } |
818 | // We've dealt with all the overlaps now! |
819 | assert(!FragMap.overlaps(StartBit, EndBit)); |
820 | LLVM_DEBUG(dbgs() << "- Insert DEF into now-empty space\n" ); |
821 | FragMap.insert(a: StartBit, b: EndBit, y: Base); |
822 | } |
823 | |
824 | coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, DL: VarLoc.DL, |
825 | FragMap); |
826 | } |
827 | |
828 | bool skipVariable(const DILocalVariable *V) { return !V->getSizeInBits(); } |
829 | |
830 | void process(BasicBlock &BB, VarFragMap &LiveSet) { |
831 | BBInsertBeforeMap[&BB].clear(); |
832 | for (auto &I : BB) { |
833 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) { |
834 | if (const auto *Locs = FnVarLocs->getWedge(Before: &DVR)) { |
835 | for (const VarLocInfo &Loc : *Locs) { |
836 | addDef(VarLoc: Loc, Before: &DVR, BB&: *I.getParent(), LiveSet); |
837 | } |
838 | } |
839 | } |
840 | if (const auto *Locs = FnVarLocs->getWedge(Before: &I)) { |
841 | for (const VarLocInfo &Loc : *Locs) { |
842 | addDef(VarLoc: Loc, Before: &I, BB&: *I.getParent(), LiveSet); |
843 | } |
844 | } |
845 | } |
846 | } |
847 | |
848 | public: |
849 | MemLocFragmentFill(Function &Fn, |
850 | const DenseSet<DebugAggregate> *VarsWithStackSlot, |
851 | bool CoalesceAdjacentFragments) |
852 | : Fn(Fn), VarsWithStackSlot(VarsWithStackSlot), |
853 | CoalesceAdjacentFragments(CoalesceAdjacentFragments) {} |
854 | |
855 | /// Add variable locations to \p FnVarLocs so that any bits of a variable |
856 | /// with a memory location have that location explicitly reinstated at each |
857 | /// subsequent variable location definition that that doesn't overwrite those |
858 | /// bits. i.e. after a variable location def, insert new defs for the memory |
859 | /// location with fragments for the difference of "all bits currently in |
860 | /// memory" and "the fragment of the second def". e.g. |
861 | /// |
862 | /// Before: |
863 | /// |
864 | /// var x bits 0 to 63: value in memory |
865 | /// more instructions |
866 | /// var x bits 0 to 31: value is %0 |
867 | /// |
868 | /// After: |
869 | /// |
870 | /// var x bits 0 to 63: value in memory |
871 | /// more instructions |
872 | /// var x bits 0 to 31: value is %0 |
873 | /// var x bits 32 to 61: value in memory ; <-- new loc def |
874 | /// |
875 | void run(FunctionVarLocsBuilder *FnVarLocs) { |
876 | if (!EnableMemLocFragFill) |
877 | return; |
878 | |
879 | this->FnVarLocs = FnVarLocs; |
880 | |
881 | // Prepare for traversal. |
882 | // |
883 | ReversePostOrderTraversal<Function *> RPOT(&Fn); |
884 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
885 | std::greater<unsigned int>> |
886 | Worklist; |
887 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
888 | std::greater<unsigned int>> |
889 | Pending; |
890 | DenseMap<unsigned int, BasicBlock *> OrderToBB; |
891 | DenseMap<BasicBlock *, unsigned int> BBToOrder; |
892 | { // Init OrderToBB and BBToOrder. |
893 | unsigned int RPONumber = 0; |
894 | for (BasicBlock *BB : RPOT) { |
895 | OrderToBB[RPONumber] = BB; |
896 | BBToOrder[BB] = RPONumber; |
897 | Worklist.push(x: RPONumber); |
898 | ++RPONumber; |
899 | } |
900 | LiveIn.init(InitNumEntries: RPONumber); |
901 | LiveOut.init(InitNumEntries: RPONumber); |
902 | } |
903 | |
904 | // Perform the traversal. |
905 | // |
906 | // This is a standard "intersect of predecessor outs" dataflow problem. To |
907 | // solve it, we perform meet() and process() using the two worklist method |
908 | // until the LiveIn data for each block becomes unchanging. |
909 | // |
910 | // This dataflow is essentially working on maps of sets and at each meet we |
911 | // intersect the maps and the mapped sets. So, initialized live-in maps |
912 | // monotonically decrease in value throughout the dataflow. |
913 | SmallPtrSet<BasicBlock *, 16> Visited; |
914 | while (!Worklist.empty() || !Pending.empty()) { |
915 | // We track what is on the pending worklist to avoid inserting the same |
916 | // thing twice. We could avoid this with a custom priority queue, but |
917 | // this is probably not worth it. |
918 | SmallPtrSet<BasicBlock *, 16> OnPending; |
919 | LLVM_DEBUG(dbgs() << "Processing Worklist\n" ); |
920 | while (!Worklist.empty()) { |
921 | BasicBlock *BB = OrderToBB[Worklist.top()]; |
922 | LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n" ); |
923 | Worklist.pop(); |
924 | bool InChanged = meet(BB: *BB, Visited); |
925 | // Always consider LiveIn changed on the first visit. |
926 | InChanged |= Visited.insert(Ptr: BB).second; |
927 | if (InChanged) { |
928 | LLVM_DEBUG(dbgs() |
929 | << BB->getName() << " has new InLocs, process it\n" ); |
930 | // Mutate a copy of LiveIn while processing BB. Once we've processed |
931 | // the terminator LiveSet is the LiveOut set for BB. |
932 | // This is an expensive copy! |
933 | VarFragMap LiveSet = LiveIn[BB]; |
934 | |
935 | // Process the instructions in the block. |
936 | process(BB&: *BB, LiveSet); |
937 | |
938 | // Relatively expensive check: has anything changed in LiveOut for BB? |
939 | if (!varFragMapsAreEqual(A: LiveOut[BB], B: LiveSet)) { |
940 | LLVM_DEBUG(dbgs() << BB->getName() |
941 | << " has new OutLocs, add succs to worklist: [ " ); |
942 | LiveOut[BB] = std::move(LiveSet); |
943 | for (BasicBlock *Succ : successors(BB)) { |
944 | if (OnPending.insert(Ptr: Succ).second) { |
945 | LLVM_DEBUG(dbgs() << Succ->getName() << " " ); |
946 | Pending.push(x: BBToOrder[Succ]); |
947 | } |
948 | } |
949 | LLVM_DEBUG(dbgs() << "]\n" ); |
950 | } |
951 | } |
952 | } |
953 | Worklist.swap(pq&: Pending); |
954 | // At this point, pending must be empty, since it was just the empty |
955 | // worklist |
956 | assert(Pending.empty() && "Pending should be empty" ); |
957 | } |
958 | |
959 | // Insert new location defs. |
960 | for (auto &Pair : BBInsertBeforeMap) { |
961 | InsertMap &Map = Pair.second; |
962 | for (auto &Pair : Map) { |
963 | auto InsertBefore = Pair.first; |
964 | assert(InsertBefore && "should never be null" ); |
965 | auto FragMemLocs = Pair.second; |
966 | auto &Ctx = Fn.getContext(); |
967 | |
968 | for (auto &FragMemLoc : FragMemLocs) { |
969 | DIExpression *Expr = DIExpression::get(Context&: Ctx, Elements: std::nullopt); |
970 | if (FragMemLoc.SizeInBits != |
971 | *Aggregates[FragMemLoc.Var].first->getSizeInBits()) |
972 | Expr = *DIExpression::createFragmentExpression( |
973 | Expr, OffsetInBits: FragMemLoc.OffsetInBits, SizeInBits: FragMemLoc.SizeInBits); |
974 | Expr = DIExpression::prepend(Expr, Flags: DIExpression::DerefAfter, |
975 | Offset: FragMemLoc.OffsetInBits / 8); |
976 | DebugVariable Var(Aggregates[FragMemLoc.Var].first, Expr, |
977 | FragMemLoc.DL.getInlinedAt()); |
978 | FnVarLocs->addVarLoc(Before: InsertBefore, Var, Expr, DL: FragMemLoc.DL, |
979 | R: Bases[FragMemLoc.Base]); |
980 | } |
981 | } |
982 | } |
983 | } |
984 | }; |
985 | |
986 | /// AssignmentTrackingLowering encapsulates a dataflow analysis over a function |
987 | /// that interprets assignment tracking debug info metadata and stores in IR to |
988 | /// create a map of variable locations. |
989 | class AssignmentTrackingLowering { |
990 | public: |
991 | /// The kind of location in use for a variable, where Mem is the stack home, |
992 | /// Val is an SSA value or const, and None means that there is not one single |
993 | /// kind (either because there are multiple or because there is none; it may |
994 | /// prove useful to split this into two values in the future). |
995 | /// |
996 | /// LocKind is a join-semilattice with the partial order: |
997 | /// None > Mem, Val |
998 | /// |
999 | /// i.e. |
1000 | /// join(Mem, Mem) = Mem |
1001 | /// join(Val, Val) = Val |
1002 | /// join(Mem, Val) = None |
1003 | /// join(None, Mem) = None |
1004 | /// join(None, Val) = None |
1005 | /// join(None, None) = None |
1006 | /// |
1007 | /// Note: the order is not `None > Val > Mem` because we're using DIAssignID |
1008 | /// to name assignments and are not tracking the actual stored values. |
1009 | /// Therefore currently there's no way to ensure that Mem values and Val |
1010 | /// values are the same. This could be a future extension, though it's not |
1011 | /// clear that many additional locations would be recovered that way in |
1012 | /// practice as the likelihood of this sitation arising naturally seems |
1013 | /// incredibly low. |
1014 | enum class LocKind { Mem, Val, None }; |
1015 | |
1016 | /// An abstraction of the assignment of a value to a variable or memory |
1017 | /// location. |
1018 | /// |
1019 | /// An Assignment is Known or NoneOrPhi. A Known Assignment means we have a |
1020 | /// DIAssignID ptr that represents it. NoneOrPhi means that we don't (or |
1021 | /// can't) know the ID of the last assignment that took place. |
1022 | /// |
1023 | /// The Status of the Assignment (Known or NoneOrPhi) is another |
1024 | /// join-semilattice. The partial order is: |
1025 | /// NoneOrPhi > Known {id_0, id_1, ...id_N} |
1026 | /// |
1027 | /// i.e. for all values x and y where x != y: |
1028 | /// join(x, x) = x |
1029 | /// join(x, y) = NoneOrPhi |
1030 | using AssignRecord = PointerUnion<DbgAssignIntrinsic *, DbgVariableRecord *>; |
1031 | struct Assignment { |
1032 | enum S { Known, NoneOrPhi } Status; |
1033 | /// ID of the assignment. nullptr if Status is not Known. |
1034 | DIAssignID *ID; |
1035 | /// The dbg.assign that marks this dbg-def. Mem-defs don't use this field. |
1036 | /// May be nullptr. |
1037 | AssignRecord Source; |
1038 | |
1039 | bool isSameSourceAssignment(const Assignment &Other) const { |
1040 | // Don't include Source in the equality check. Assignments are |
1041 | // defined by their ID, not debug intrinsic(s). |
1042 | return std::tie(args: Status, args: ID) == std::tie(args: Other.Status, args: Other.ID); |
1043 | } |
1044 | void dump(raw_ostream &OS) { |
1045 | static const char *LUT[] = {"Known" , "NoneOrPhi" }; |
1046 | OS << LUT[Status] << "(id=" ; |
1047 | if (ID) |
1048 | OS << ID; |
1049 | else |
1050 | OS << "null" ; |
1051 | OS << ", s=" ; |
1052 | if (Source.isNull()) |
1053 | OS << "null" ; |
1054 | else if (isa<DbgAssignIntrinsic *>(Val: Source)) |
1055 | OS << Source.get<DbgAssignIntrinsic *>(); |
1056 | else |
1057 | OS << Source.get<DbgVariableRecord *>(); |
1058 | OS << ")" ; |
1059 | } |
1060 | |
1061 | static Assignment make(DIAssignID *ID, DbgAssignIntrinsic *Source) { |
1062 | return Assignment(Known, ID, Source); |
1063 | } |
1064 | static Assignment make(DIAssignID *ID, DbgVariableRecord *Source) { |
1065 | assert(Source->isDbgAssign() && |
1066 | "Cannot make an assignment from a non-assign DbgVariableRecord" ); |
1067 | return Assignment(Known, ID, Source); |
1068 | } |
1069 | static Assignment make(DIAssignID *ID, AssignRecord Source) { |
1070 | return Assignment(Known, ID, Source); |
1071 | } |
1072 | static Assignment makeFromMemDef(DIAssignID *ID) { |
1073 | return Assignment(Known, ID); |
1074 | } |
1075 | static Assignment makeNoneOrPhi() { return Assignment(NoneOrPhi, nullptr); } |
1076 | // Again, need a Top value? |
1077 | Assignment() : Status(NoneOrPhi), ID(nullptr) {} // Can we delete this? |
1078 | Assignment(S Status, DIAssignID *ID) : Status(Status), ID(ID) { |
1079 | // If the Status is Known then we expect there to be an assignment ID. |
1080 | assert(Status == NoneOrPhi || ID); |
1081 | } |
1082 | Assignment(S Status, DIAssignID *ID, DbgAssignIntrinsic *Source) |
1083 | : Status(Status), ID(ID), Source(Source) { |
1084 | // If the Status is Known then we expect there to be an assignment ID. |
1085 | assert(Status == NoneOrPhi || ID); |
1086 | } |
1087 | Assignment(S Status, DIAssignID *ID, DbgVariableRecord *Source) |
1088 | : Status(Status), ID(ID), Source(Source) { |
1089 | // If the Status is Known then we expect there to be an assignment ID. |
1090 | assert(Status == NoneOrPhi || ID); |
1091 | } |
1092 | Assignment(S Status, DIAssignID *ID, AssignRecord Source) |
1093 | : Status(Status), ID(ID), Source(Source) { |
1094 | // If the Status is Known then we expect there to be an assignment ID. |
1095 | assert(Status == NoneOrPhi || ID); |
1096 | } |
1097 | }; |
1098 | |
1099 | using AssignmentMap = SmallVector<Assignment>; |
1100 | using LocMap = SmallVector<LocKind>; |
1101 | using OverlapMap = DenseMap<VariableID, SmallVector<VariableID>>; |
1102 | using UntaggedStoreAssignmentMap = |
1103 | DenseMap<const Instruction *, |
1104 | SmallVector<std::pair<VariableID, at::AssignmentInfo>>>; |
1105 | |
1106 | private: |
1107 | /// The highest numbered VariableID for partially promoted variables plus 1, |
1108 | /// the values for which start at 1. |
1109 | unsigned TrackedVariablesVectorSize = 0; |
1110 | /// Map a variable to the set of variables that it fully contains. |
1111 | OverlapMap VarContains; |
1112 | /// Map untagged stores to the variable fragments they assign to. Used by |
1113 | /// processUntaggedInstruction. |
1114 | UntaggedStoreAssignmentMap UntaggedStoreVars; |
1115 | |
1116 | // Machinery to defer inserting dbg.values. |
1117 | using InstInsertMap = MapVector<VarLocInsertPt, SmallVector<VarLocInfo>>; |
1118 | InstInsertMap InsertBeforeMap; |
1119 | /// Clear the location definitions currently cached for insertion after /p |
1120 | /// After. |
1121 | void resetInsertionPoint(Instruction &After); |
1122 | void resetInsertionPoint(DbgVariableRecord &After); |
1123 | |
1124 | // emitDbgValue can be called with: |
1125 | // Source=[AssignRecord|DbgValueInst*|DbgAssignIntrinsic*|DbgVariableRecord*] |
1126 | // Since AssignRecord can be cast to one of the latter two types, and all |
1127 | // other types have a shared interface, we use a template to handle the latter |
1128 | // three types, and an explicit overload for AssignRecord that forwards to |
1129 | // the template version with the right type. |
1130 | void emitDbgValue(LocKind Kind, AssignRecord Source, VarLocInsertPt After); |
1131 | template <typename T> |
1132 | void emitDbgValue(LocKind Kind, const T Source, VarLocInsertPt After); |
1133 | |
1134 | static bool mapsAreEqual(const BitVector &Mask, const AssignmentMap &A, |
1135 | const AssignmentMap &B) { |
1136 | return llvm::all_of(Range: Mask.set_bits(), P: [&](unsigned VarID) { |
1137 | return A[VarID].isSameSourceAssignment(Other: B[VarID]); |
1138 | }); |
1139 | } |
1140 | |
1141 | /// Represents the stack and debug assignments in a block. Used to describe |
1142 | /// the live-in and live-out values for blocks, as well as the "current" |
1143 | /// value as we process each instruction in a block. |
1144 | struct BlockInfo { |
1145 | /// The set of variables (VariableID) being tracked in this block. |
1146 | BitVector VariableIDsInBlock; |
1147 | /// Dominating assignment to memory for each variable, indexed by |
1148 | /// VariableID. |
1149 | AssignmentMap StackHomeValue; |
1150 | /// Dominating assignemnt to each variable, indexed by VariableID. |
1151 | AssignmentMap DebugValue; |
1152 | /// Location kind for each variable. LiveLoc indicates whether the |
1153 | /// dominating assignment in StackHomeValue (LocKind::Mem), DebugValue |
1154 | /// (LocKind::Val), or neither (LocKind::None) is valid, in that order of |
1155 | /// preference. This cannot be derived by inspecting DebugValue and |
1156 | /// StackHomeValue due to the fact that there's no distinction in |
1157 | /// Assignment (the class) between whether an assignment is unknown or a |
1158 | /// merge of multiple assignments (both are Status::NoneOrPhi). In other |
1159 | /// words, the memory location may well be valid while both DebugValue and |
1160 | /// StackHomeValue contain Assignments that have a Status of NoneOrPhi. |
1161 | /// Indexed by VariableID. |
1162 | LocMap LiveLoc; |
1163 | |
1164 | public: |
1165 | enum AssignmentKind { Stack, Debug }; |
1166 | const AssignmentMap &getAssignmentMap(AssignmentKind Kind) const { |
1167 | switch (Kind) { |
1168 | case Stack: |
1169 | return StackHomeValue; |
1170 | case Debug: |
1171 | return DebugValue; |
1172 | } |
1173 | llvm_unreachable("Unknown AssignmentKind" ); |
1174 | } |
1175 | AssignmentMap &getAssignmentMap(AssignmentKind Kind) { |
1176 | return const_cast<AssignmentMap &>( |
1177 | const_cast<const BlockInfo *>(this)->getAssignmentMap(Kind)); |
1178 | } |
1179 | |
1180 | bool isVariableTracked(VariableID Var) const { |
1181 | return VariableIDsInBlock[static_cast<unsigned>(Var)]; |
1182 | } |
1183 | |
1184 | const Assignment &getAssignment(AssignmentKind Kind, VariableID Var) const { |
1185 | assert(isVariableTracked(Var) && "Var not tracked in block" ); |
1186 | return getAssignmentMap(Kind)[static_cast<unsigned>(Var)]; |
1187 | } |
1188 | |
1189 | LocKind getLocKind(VariableID Var) const { |
1190 | assert(isVariableTracked(Var) && "Var not tracked in block" ); |
1191 | return LiveLoc[static_cast<unsigned>(Var)]; |
1192 | } |
1193 | |
1194 | /// Set LocKind for \p Var only: does not set LocKind for VariableIDs of |
1195 | /// fragments contained win \p Var. |
1196 | void setLocKind(VariableID Var, LocKind K) { |
1197 | VariableIDsInBlock.set(static_cast<unsigned>(Var)); |
1198 | LiveLoc[static_cast<unsigned>(Var)] = K; |
1199 | } |
1200 | |
1201 | /// Set the assignment in the \p Kind assignment map for \p Var only: does |
1202 | /// not set the assignment for VariableIDs of fragments contained win \p |
1203 | /// Var. |
1204 | void setAssignment(AssignmentKind Kind, VariableID Var, |
1205 | const Assignment &AV) { |
1206 | VariableIDsInBlock.set(static_cast<unsigned>(Var)); |
1207 | getAssignmentMap(Kind)[static_cast<unsigned>(Var)] = AV; |
1208 | } |
1209 | |
1210 | /// Return true if there is an assignment matching \p AV in the \p Kind |
1211 | /// assignment map. Does consider assignments for VariableIDs of fragments |
1212 | /// contained win \p Var. |
1213 | bool hasAssignment(AssignmentKind Kind, VariableID Var, |
1214 | const Assignment &AV) const { |
1215 | if (!isVariableTracked(Var)) |
1216 | return false; |
1217 | return AV.isSameSourceAssignment(Other: getAssignment(Kind, Var)); |
1218 | } |
1219 | |
1220 | /// Compare every element in each map to determine structural equality |
1221 | /// (slow). |
1222 | bool operator==(const BlockInfo &Other) const { |
1223 | return VariableIDsInBlock == Other.VariableIDsInBlock && |
1224 | LiveLoc == Other.LiveLoc && |
1225 | mapsAreEqual(Mask: VariableIDsInBlock, A: StackHomeValue, |
1226 | B: Other.StackHomeValue) && |
1227 | mapsAreEqual(Mask: VariableIDsInBlock, A: DebugValue, B: Other.DebugValue); |
1228 | } |
1229 | bool operator!=(const BlockInfo &Other) const { return !(*this == Other); } |
1230 | bool isValid() { |
1231 | return LiveLoc.size() == DebugValue.size() && |
1232 | LiveLoc.size() == StackHomeValue.size(); |
1233 | } |
1234 | |
1235 | /// Clear everything and initialise with ⊤-values for all variables. |
1236 | void init(int NumVars) { |
1237 | StackHomeValue.clear(); |
1238 | DebugValue.clear(); |
1239 | LiveLoc.clear(); |
1240 | VariableIDsInBlock = BitVector(NumVars); |
1241 | StackHomeValue.insert(I: StackHomeValue.begin(), NumToInsert: NumVars, |
1242 | Elt: Assignment::makeNoneOrPhi()); |
1243 | DebugValue.insert(I: DebugValue.begin(), NumToInsert: NumVars, |
1244 | Elt: Assignment::makeNoneOrPhi()); |
1245 | LiveLoc.insert(I: LiveLoc.begin(), NumToInsert: NumVars, Elt: LocKind::None); |
1246 | } |
1247 | |
1248 | /// Helper for join. |
1249 | template <typename ElmtType, typename FnInputType> |
1250 | static void joinElmt(int Index, SmallVector<ElmtType> &Target, |
1251 | const SmallVector<ElmtType> &A, |
1252 | const SmallVector<ElmtType> &B, |
1253 | ElmtType (*Fn)(FnInputType, FnInputType)) { |
1254 | Target[Index] = Fn(A[Index], B[Index]); |
1255 | } |
1256 | |
1257 | /// See comment for AssignmentTrackingLowering::joinBlockInfo. |
1258 | static BlockInfo join(const BlockInfo &A, const BlockInfo &B, int NumVars) { |
1259 | // Join A and B. |
1260 | // |
1261 | // Intersect = join(a, b) for a in A, b in B where Var(a) == Var(b) |
1262 | // Difference = join(x, ⊤) for x where Var(x) is in A xor B |
1263 | // Join = Intersect ∪ Difference |
1264 | // |
1265 | // This is achieved by performing a join on elements from A and B with |
1266 | // variables common to both A and B (join elements indexed by var |
1267 | // intersect), then adding ⊤-value elements for vars in A xor B. The |
1268 | // latter part is equivalent to performing join on elements with variables |
1269 | // in A xor B with the ⊤-value for the map element since join(x, ⊤) = ⊤. |
1270 | // BlockInfo::init initializes all variable entries to the ⊤ value so we |
1271 | // don't need to explicitly perform that step as Join.VariableIDsInBlock |
1272 | // is set to the union of the variables in A and B at the end of this |
1273 | // function. |
1274 | BlockInfo Join; |
1275 | Join.init(NumVars); |
1276 | |
1277 | BitVector Intersect = A.VariableIDsInBlock; |
1278 | Intersect &= B.VariableIDsInBlock; |
1279 | |
1280 | for (auto VarID : Intersect.set_bits()) { |
1281 | joinElmt(Index: VarID, Target&: Join.LiveLoc, A: A.LiveLoc, B: B.LiveLoc, Fn: joinKind); |
1282 | joinElmt(Index: VarID, Target&: Join.DebugValue, A: A.DebugValue, B: B.DebugValue, |
1283 | Fn: joinAssignment); |
1284 | joinElmt(Index: VarID, Target&: Join.StackHomeValue, A: A.StackHomeValue, B: B.StackHomeValue, |
1285 | Fn: joinAssignment); |
1286 | } |
1287 | |
1288 | Join.VariableIDsInBlock = A.VariableIDsInBlock; |
1289 | Join.VariableIDsInBlock |= B.VariableIDsInBlock; |
1290 | assert(Join.isValid()); |
1291 | return Join; |
1292 | } |
1293 | }; |
1294 | |
1295 | Function &Fn; |
1296 | const DataLayout &Layout; |
1297 | const DenseSet<DebugAggregate> *VarsWithStackSlot; |
1298 | FunctionVarLocsBuilder *FnVarLocs; |
1299 | DenseMap<const BasicBlock *, BlockInfo> LiveIn; |
1300 | DenseMap<const BasicBlock *, BlockInfo> LiveOut; |
1301 | |
1302 | /// Helper for process methods to track variables touched each frame. |
1303 | DenseSet<VariableID> VarsTouchedThisFrame; |
1304 | |
1305 | /// The set of variables that sometimes are not located in their stack home. |
1306 | DenseSet<DebugAggregate> NotAlwaysStackHomed; |
1307 | |
1308 | VariableID getVariableID(const DebugVariable &Var) { |
1309 | return static_cast<VariableID>(FnVarLocs->insertVariable(V: Var)); |
1310 | } |
1311 | |
1312 | /// Join the LiveOut values of preds that are contained in \p Visited into |
1313 | /// LiveIn[BB]. Return True if LiveIn[BB] has changed as a result. LiveIn[BB] |
1314 | /// values monotonically increase. See the @link joinMethods join methods |
1315 | /// @endlink documentation for more info. |
1316 | bool join(const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited); |
1317 | ///@name joinMethods |
1318 | /// Functions that implement `join` (the least upper bound) for the |
1319 | /// join-semilattice types used in the dataflow. There is an explicit bottom |
1320 | /// value (⊥) for some types and and explicit top value (⊤) for all types. |
1321 | /// By definition: |
1322 | /// |
1323 | /// Join(A, B) >= A && Join(A, B) >= B |
1324 | /// Join(A, ⊥) = A |
1325 | /// Join(A, ⊤) = ⊤ |
1326 | /// |
1327 | /// These invariants are important for monotonicity. |
1328 | /// |
1329 | /// For the map-type functions, all unmapped keys in an empty map are |
1330 | /// associated with a bottom value (⊥). This represents their values being |
1331 | /// unknown. Unmapped keys in non-empty maps (joining two maps with a key |
1332 | /// only present in one) represents either a variable going out of scope or |
1333 | /// dropped debug info. It is assumed the key is associated with a top value |
1334 | /// (⊤) in this case (unknown location / assignment). |
1335 | ///@{ |
1336 | static LocKind joinKind(LocKind A, LocKind B); |
1337 | static Assignment joinAssignment(const Assignment &A, const Assignment &B); |
1338 | BlockInfo joinBlockInfo(const BlockInfo &A, const BlockInfo &B); |
1339 | ///@} |
1340 | |
1341 | /// Process the instructions in \p BB updating \p LiveSet along the way. \p |
1342 | /// LiveSet must be initialized with the current live-in locations before |
1343 | /// calling this. |
1344 | void process(BasicBlock &BB, BlockInfo *LiveSet); |
1345 | ///@name processMethods |
1346 | /// Methods to process instructions in order to update the LiveSet (current |
1347 | /// location information). |
1348 | ///@{ |
1349 | void processNonDbgInstruction(Instruction &I, BlockInfo *LiveSet); |
1350 | void processDbgInstruction(DbgInfoIntrinsic &I, BlockInfo *LiveSet); |
1351 | /// Update \p LiveSet after encountering an instruction with a DIAssignID |
1352 | /// attachment, \p I. |
1353 | void processTaggedInstruction(Instruction &I, BlockInfo *LiveSet); |
1354 | /// Update \p LiveSet after encountering an instruciton without a DIAssignID |
1355 | /// attachment, \p I. |
1356 | void processUntaggedInstruction(Instruction &I, BlockInfo *LiveSet); |
1357 | void processDbgAssign(AssignRecord Assign, BlockInfo *LiveSet); |
1358 | void processDbgVariableRecord(DbgVariableRecord &DVR, BlockInfo *LiveSet); |
1359 | void processDbgValue( |
1360 | PointerUnion<DbgValueInst *, DbgVariableRecord *> DbgValueRecord, |
1361 | BlockInfo *LiveSet); |
1362 | /// Add an assignment to memory for the variable /p Var. |
1363 | void addMemDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV); |
1364 | /// Add an assignment to the variable /p Var. |
1365 | void addDbgDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV); |
1366 | ///@} |
1367 | |
1368 | /// Set the LocKind for \p Var. |
1369 | void setLocKind(BlockInfo *LiveSet, VariableID Var, LocKind K); |
1370 | /// Get the live LocKind for a \p Var. Requires addMemDef or addDbgDef to |
1371 | /// have been called for \p Var first. |
1372 | LocKind getLocKind(BlockInfo *LiveSet, VariableID Var); |
1373 | /// Return true if \p Var has an assignment in \p M matching \p AV. |
1374 | bool hasVarWithAssignment(BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind, |
1375 | VariableID Var, const Assignment &AV); |
1376 | /// Return the set of VariableIDs corresponding the fragments contained fully |
1377 | /// within the variable/fragment \p Var. |
1378 | ArrayRef<VariableID> getContainedFragments(VariableID Var) const; |
1379 | |
1380 | /// Mark \p Var as having been touched this frame. Note, this applies only |
1381 | /// to the exact fragment \p Var and not to any fragments contained within. |
1382 | void touchFragment(VariableID Var); |
1383 | |
1384 | /// Emit info for variables that are fully promoted. |
1385 | bool emitPromotedVarLocs(FunctionVarLocsBuilder *FnVarLocs); |
1386 | |
1387 | public: |
1388 | AssignmentTrackingLowering(Function &Fn, const DataLayout &Layout, |
1389 | const DenseSet<DebugAggregate> *VarsWithStackSlot) |
1390 | : Fn(Fn), Layout(Layout), VarsWithStackSlot(VarsWithStackSlot) {} |
1391 | /// Run the analysis, adding variable location info to \p FnVarLocs. Returns |
1392 | /// true if any variable locations have been added to FnVarLocs. |
1393 | bool run(FunctionVarLocsBuilder *FnVarLocs); |
1394 | }; |
1395 | } // namespace |
1396 | |
1397 | ArrayRef<VariableID> |
1398 | AssignmentTrackingLowering::getContainedFragments(VariableID Var) const { |
1399 | auto R = VarContains.find(Val: Var); |
1400 | if (R == VarContains.end()) |
1401 | return std::nullopt; |
1402 | return R->second; |
1403 | } |
1404 | |
1405 | void AssignmentTrackingLowering::touchFragment(VariableID Var) { |
1406 | VarsTouchedThisFrame.insert(V: Var); |
1407 | } |
1408 | |
1409 | void AssignmentTrackingLowering::setLocKind(BlockInfo *LiveSet, VariableID Var, |
1410 | LocKind K) { |
1411 | auto SetKind = [this](BlockInfo *LiveSet, VariableID Var, LocKind K) { |
1412 | LiveSet->setLocKind(Var, K); |
1413 | touchFragment(Var); |
1414 | }; |
1415 | SetKind(LiveSet, Var, K); |
1416 | |
1417 | // Update the LocKind for all fragments contained within Var. |
1418 | for (VariableID Frag : getContainedFragments(Var)) |
1419 | SetKind(LiveSet, Frag, K); |
1420 | } |
1421 | |
1422 | AssignmentTrackingLowering::LocKind |
1423 | AssignmentTrackingLowering::getLocKind(BlockInfo *LiveSet, VariableID Var) { |
1424 | return LiveSet->getLocKind(Var); |
1425 | } |
1426 | |
1427 | void AssignmentTrackingLowering::addMemDef(BlockInfo *LiveSet, VariableID Var, |
1428 | const Assignment &AV) { |
1429 | LiveSet->setAssignment(Kind: BlockInfo::Stack, Var, AV); |
1430 | |
1431 | // Use this assigment for all fragments contained within Var, but do not |
1432 | // provide a Source because we cannot convert Var's value to a value for the |
1433 | // fragment. |
1434 | Assignment FragAV = AV; |
1435 | FragAV.Source = nullptr; |
1436 | for (VariableID Frag : getContainedFragments(Var)) |
1437 | LiveSet->setAssignment(Kind: BlockInfo::Stack, Var: Frag, AV: FragAV); |
1438 | } |
1439 | |
1440 | void AssignmentTrackingLowering::addDbgDef(BlockInfo *LiveSet, VariableID Var, |
1441 | const Assignment &AV) { |
1442 | LiveSet->setAssignment(Kind: BlockInfo::Debug, Var, AV); |
1443 | |
1444 | // Use this assigment for all fragments contained within Var, but do not |
1445 | // provide a Source because we cannot convert Var's value to a value for the |
1446 | // fragment. |
1447 | Assignment FragAV = AV; |
1448 | FragAV.Source = nullptr; |
1449 | for (VariableID Frag : getContainedFragments(Var)) |
1450 | LiveSet->setAssignment(Kind: BlockInfo::Debug, Var: Frag, AV: FragAV); |
1451 | } |
1452 | |
1453 | static DIAssignID *getIDFromInst(const Instruction &I) { |
1454 | return cast<DIAssignID>(Val: I.getMetadata(KindID: LLVMContext::MD_DIAssignID)); |
1455 | } |
1456 | |
1457 | static DIAssignID *getIDFromMarker(const DbgAssignIntrinsic &DAI) { |
1458 | return cast<DIAssignID>(Val: DAI.getAssignID()); |
1459 | } |
1460 | |
1461 | static DIAssignID *getIDFromMarker(const DbgVariableRecord &DVR) { |
1462 | assert(DVR.isDbgAssign() && |
1463 | "Cannot get a DIAssignID from a non-assign DbgVariableRecord!" ); |
1464 | return DVR.getAssignID(); |
1465 | } |
1466 | |
1467 | /// Return true if \p Var has an assignment in \p M matching \p AV. |
1468 | bool AssignmentTrackingLowering::hasVarWithAssignment( |
1469 | BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind, VariableID Var, |
1470 | const Assignment &AV) { |
1471 | if (!LiveSet->hasAssignment(Kind, Var, AV)) |
1472 | return false; |
1473 | |
1474 | // Check all the frags contained within Var as these will have all been |
1475 | // mapped to AV at the last store to Var. |
1476 | for (VariableID Frag : getContainedFragments(Var)) |
1477 | if (!LiveSet->hasAssignment(Kind, Var: Frag, AV)) |
1478 | return false; |
1479 | return true; |
1480 | } |
1481 | |
1482 | #ifndef NDEBUG |
1483 | const char *locStr(AssignmentTrackingLowering::LocKind Loc) { |
1484 | using LocKind = AssignmentTrackingLowering::LocKind; |
1485 | switch (Loc) { |
1486 | case LocKind::Val: |
1487 | return "Val" ; |
1488 | case LocKind::Mem: |
1489 | return "Mem" ; |
1490 | case LocKind::None: |
1491 | return "None" ; |
1492 | }; |
1493 | llvm_unreachable("unknown LocKind" ); |
1494 | } |
1495 | #endif |
1496 | |
1497 | VarLocInsertPt getNextNode(const DbgRecord *DVR) { |
1498 | auto NextIt = ++(DVR->getIterator()); |
1499 | if (NextIt == DVR->getMarker()->getDbgRecordRange().end()) |
1500 | return DVR->getMarker()->MarkedInstr; |
1501 | return &*NextIt; |
1502 | } |
1503 | VarLocInsertPt getNextNode(const Instruction *Inst) { |
1504 | const Instruction *Next = Inst->getNextNode(); |
1505 | if (!Next->hasDbgRecords()) |
1506 | return Next; |
1507 | return &*Next->getDbgRecordRange().begin(); |
1508 | } |
1509 | VarLocInsertPt getNextNode(VarLocInsertPt InsertPt) { |
1510 | if (isa<const Instruction *>(Val: InsertPt)) |
1511 | return getNextNode(Inst: cast<const Instruction *>(Val&: InsertPt)); |
1512 | return getNextNode(DVR: cast<const DbgRecord *>(Val&: InsertPt)); |
1513 | } |
1514 | |
1515 | DbgAssignIntrinsic *CastToDbgAssign(DbgVariableIntrinsic *DVI) { |
1516 | return cast<DbgAssignIntrinsic>(Val: DVI); |
1517 | } |
1518 | |
1519 | DbgVariableRecord *CastToDbgAssign(DbgVariableRecord *DVR) { |
1520 | assert(DVR->isDbgAssign() && |
1521 | "Attempted to cast non-assign DbgVariableRecord to DVRAssign." ); |
1522 | return DVR; |
1523 | } |
1524 | |
1525 | void AssignmentTrackingLowering::emitDbgValue( |
1526 | AssignmentTrackingLowering::LocKind Kind, |
1527 | AssignmentTrackingLowering::AssignRecord Source, VarLocInsertPt After) { |
1528 | if (isa<DbgAssignIntrinsic *>(Val: Source)) |
1529 | emitDbgValue(Kind, Source: cast<DbgAssignIntrinsic *>(Val&: Source), After); |
1530 | else |
1531 | emitDbgValue(Kind, Source: cast<DbgVariableRecord *>(Val&: Source), After); |
1532 | } |
1533 | template <typename T> |
1534 | void AssignmentTrackingLowering::emitDbgValue( |
1535 | AssignmentTrackingLowering::LocKind Kind, const T Source, |
1536 | VarLocInsertPt After) { |
1537 | |
1538 | DILocation *DL = Source->getDebugLoc(); |
1539 | auto Emit = [this, Source, After, DL](Metadata *Val, DIExpression *Expr) { |
1540 | assert(Expr); |
1541 | if (!Val) |
1542 | Val = ValueAsMetadata::get( |
1543 | V: PoisonValue::get(T: Type::getInt1Ty(C&: Source->getContext()))); |
1544 | |
1545 | // Find a suitable insert point. |
1546 | auto InsertBefore = getNextNode(InsertPt: After); |
1547 | assert(InsertBefore && "Shouldn't be inserting after a terminator" ); |
1548 | |
1549 | VariableID Var = getVariableID(Var: DebugVariable(Source)); |
1550 | VarLocInfo VarLoc; |
1551 | VarLoc.VariableID = static_cast<VariableID>(Var); |
1552 | VarLoc.Expr = Expr; |
1553 | VarLoc.Values = RawLocationWrapper(Val); |
1554 | VarLoc.DL = DL; |
1555 | // Insert it into the map for later. |
1556 | InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc); |
1557 | }; |
1558 | |
1559 | // NOTE: This block can mutate Kind. |
1560 | if (Kind == LocKind::Mem) { |
1561 | const auto *Assign = CastToDbgAssign(Source); |
1562 | // Check the address hasn't been dropped (e.g. the debug uses may not have |
1563 | // been replaced before deleting a Value). |
1564 | if (Assign->isKillAddress()) { |
1565 | // The address isn't valid so treat this as a non-memory def. |
1566 | Kind = LocKind::Val; |
1567 | } else { |
1568 | Value *Val = Assign->getAddress(); |
1569 | DIExpression *Expr = Assign->getAddressExpression(); |
1570 | assert(!Expr->getFragmentInfo() && |
1571 | "fragment info should be stored in value-expression only" ); |
1572 | // Copy the fragment info over from the value-expression to the new |
1573 | // DIExpression. |
1574 | if (auto OptFragInfo = Source->getExpression()->getFragmentInfo()) { |
1575 | auto FragInfo = *OptFragInfo; |
1576 | Expr = *DIExpression::createFragmentExpression( |
1577 | Expr, OffsetInBits: FragInfo.OffsetInBits, SizeInBits: FragInfo.SizeInBits); |
1578 | } |
1579 | // The address-expression has an implicit deref, add it now. |
1580 | std::tie(args&: Val, args&: Expr) = |
1581 | walkToAllocaAndPrependOffsetDeref(DL: Layout, Start: Val, Expression: Expr); |
1582 | Emit(ValueAsMetadata::get(V: Val), Expr); |
1583 | return; |
1584 | } |
1585 | } |
1586 | |
1587 | if (Kind == LocKind::Val) { |
1588 | Emit(Source->getRawLocation(), Source->getExpression()); |
1589 | return; |
1590 | } |
1591 | |
1592 | if (Kind == LocKind::None) { |
1593 | Emit(nullptr, Source->getExpression()); |
1594 | return; |
1595 | } |
1596 | } |
1597 | |
1598 | void AssignmentTrackingLowering::processNonDbgInstruction( |
1599 | Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1600 | if (I.hasMetadata(KindID: LLVMContext::MD_DIAssignID)) |
1601 | processTaggedInstruction(I, LiveSet); |
1602 | else |
1603 | processUntaggedInstruction(I, LiveSet); |
1604 | } |
1605 | |
1606 | void AssignmentTrackingLowering::processUntaggedInstruction( |
1607 | Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1608 | // Interpret stack stores that are not tagged as an assignment in memory for |
1609 | // the variables associated with that address. These stores may not be tagged |
1610 | // because a) the store cannot be represented using dbg.assigns (non-const |
1611 | // length or offset) or b) the tag was accidentally dropped during |
1612 | // optimisations. For these stores we fall back to assuming that the stack |
1613 | // home is a valid location for the variables. The benefit is that this |
1614 | // prevents us missing an assignment and therefore incorrectly maintaining |
1615 | // earlier location definitions, and in many cases it should be a reasonable |
1616 | // assumption. However, this will occasionally lead to slight |
1617 | // inaccuracies. The value of a hoisted untagged store will be visible |
1618 | // "early", for example. |
1619 | assert(!I.hasMetadata(LLVMContext::MD_DIAssignID)); |
1620 | auto It = UntaggedStoreVars.find(Val: &I); |
1621 | if (It == UntaggedStoreVars.end()) |
1622 | return; // No variables associated with the store destination. |
1623 | |
1624 | LLVM_DEBUG(dbgs() << "processUntaggedInstruction on UNTAGGED INST " << I |
1625 | << "\n" ); |
1626 | // Iterate over the variables that this store affects, add a NoneOrPhi dbg |
1627 | // and mem def, set lockind to Mem, and emit a location def for each. |
1628 | for (auto [Var, Info] : It->second) { |
1629 | // This instruction is treated as both a debug and memory assignment, |
1630 | // meaning the memory location should be used. We don't have an assignment |
1631 | // ID though so use Assignment::makeNoneOrPhi() to create an imaginary one. |
1632 | addMemDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi()); |
1633 | addDbgDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi()); |
1634 | setLocKind(LiveSet, Var, K: LocKind::Mem); |
1635 | LLVM_DEBUG(dbgs() << " setting Stack LocKind to: " << locStr(LocKind::Mem) |
1636 | << "\n" ); |
1637 | // Build the dbg location def to insert. |
1638 | // |
1639 | // DIExpression: Add fragment and offset. |
1640 | DebugVariable V = FnVarLocs->getVariable(ID: Var); |
1641 | DIExpression *DIE = DIExpression::get(Context&: I.getContext(), Elements: std::nullopt); |
1642 | if (auto Frag = V.getFragment()) { |
1643 | auto R = DIExpression::createFragmentExpression(Expr: DIE, OffsetInBits: Frag->OffsetInBits, |
1644 | SizeInBits: Frag->SizeInBits); |
1645 | assert(R && "unexpected createFragmentExpression failure" ); |
1646 | DIE = *R; |
1647 | } |
1648 | SmallVector<uint64_t, 3> Ops; |
1649 | if (Info.OffsetInBits) |
1650 | Ops = {dwarf::DW_OP_plus_uconst, Info.OffsetInBits / 8}; |
1651 | Ops.push_back(Elt: dwarf::DW_OP_deref); |
1652 | DIE = DIExpression::prependOpcodes(Expr: DIE, Ops, /*StackValue=*/false, |
1653 | /*EntryValue=*/false); |
1654 | // Find a suitable insert point, before the next instruction or DbgRecord |
1655 | // after I. |
1656 | auto InsertBefore = getNextNode(Inst: &I); |
1657 | assert(InsertBefore && "Shouldn't be inserting after a terminator" ); |
1658 | |
1659 | // Get DILocation for this unrecorded assignment. |
1660 | DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt()); |
1661 | const DILocation *DILoc = DILocation::get( |
1662 | Context&: Fn.getContext(), Line: 0, Column: 0, Scope: V.getVariable()->getScope(), InlinedAt); |
1663 | |
1664 | VarLocInfo VarLoc; |
1665 | VarLoc.VariableID = static_cast<VariableID>(Var); |
1666 | VarLoc.Expr = DIE; |
1667 | VarLoc.Values = RawLocationWrapper( |
1668 | ValueAsMetadata::get(V: const_cast<AllocaInst *>(Info.Base))); |
1669 | VarLoc.DL = DILoc; |
1670 | // 3. Insert it into the map for later. |
1671 | InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc); |
1672 | } |
1673 | } |
1674 | |
1675 | void AssignmentTrackingLowering::processTaggedInstruction( |
1676 | Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1677 | auto Linked = at::getAssignmentMarkers(Inst: &I); |
1678 | auto LinkedDPAssigns = at::getDVRAssignmentMarkers(Inst: &I); |
1679 | // No dbg.assign intrinsics linked. |
1680 | // FIXME: All vars that have a stack slot this store modifies that don't have |
1681 | // a dbg.assign linked to it should probably treat this like an untagged |
1682 | // store. |
1683 | if (Linked.empty() && LinkedDPAssigns.empty()) |
1684 | return; |
1685 | |
1686 | LLVM_DEBUG(dbgs() << "processTaggedInstruction on " << I << "\n" ); |
1687 | auto ProcessLinkedAssign = [&](auto *Assign) { |
1688 | VariableID Var = getVariableID(Var: DebugVariable(Assign)); |
1689 | // Something has gone wrong if VarsWithStackSlot doesn't contain a variable |
1690 | // that is linked to a store. |
1691 | assert(VarsWithStackSlot->count(getAggregate(Assign)) && |
1692 | "expected Assign's variable to have stack slot" ); |
1693 | |
1694 | Assignment AV = Assignment::makeFromMemDef(ID: getIDFromInst(I)); |
1695 | addMemDef(LiveSet, Var, AV); |
1696 | |
1697 | LLVM_DEBUG(dbgs() << " linked to " << *Assign << "\n" ); |
1698 | LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var)) |
1699 | << " -> " ); |
1700 | |
1701 | // The last assignment to the stack is now AV. Check if the last debug |
1702 | // assignment has a matching Assignment. |
1703 | if (hasVarWithAssignment(LiveSet, Kind: BlockInfo::Debug, Var, AV)) { |
1704 | // The StackHomeValue and DebugValue for this variable match so we can |
1705 | // emit a stack home location here. |
1706 | LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n" ;); |
1707 | LLVM_DEBUG(dbgs() << " Stack val: " ; AV.dump(dbgs()); dbgs() << "\n" ); |
1708 | LLVM_DEBUG(dbgs() << " Debug val: " ; |
1709 | LiveSet->DebugValue[static_cast<unsigned>(Var)].dump(dbgs()); |
1710 | dbgs() << "\n" ); |
1711 | setLocKind(LiveSet, Var, K: LocKind::Mem); |
1712 | emitDbgValue(LocKind::Mem, Assign, &I); |
1713 | return; |
1714 | } |
1715 | |
1716 | // The StackHomeValue and DebugValue for this variable do not match. I.e. |
1717 | // The value currently stored in the stack is not what we'd expect to |
1718 | // see, so we cannot use emit a stack home location here. Now we will |
1719 | // look at the live LocKind for the variable and determine an appropriate |
1720 | // dbg.value to emit. |
1721 | LocKind PrevLoc = getLocKind(LiveSet, Var); |
1722 | switch (PrevLoc) { |
1723 | case LocKind::Val: { |
1724 | // The value in memory in memory has changed but we're not currently |
1725 | // using the memory location. Do nothing. |
1726 | LLVM_DEBUG(dbgs() << "Val, (unchanged)\n" ;); |
1727 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1728 | } break; |
1729 | case LocKind::Mem: { |
1730 | // There's been an assignment to memory that we were using as a |
1731 | // location for this variable, and the Assignment doesn't match what |
1732 | // we'd expect to see in memory. |
1733 | Assignment DbgAV = LiveSet->getAssignment(Kind: BlockInfo::Debug, Var); |
1734 | if (DbgAV.Status == Assignment::NoneOrPhi) { |
1735 | // We need to terminate any previously open location now. |
1736 | LLVM_DEBUG(dbgs() << "None, No Debug value available\n" ;); |
1737 | setLocKind(LiveSet, Var, K: LocKind::None); |
1738 | emitDbgValue(LocKind::None, Assign, &I); |
1739 | } else { |
1740 | // The previous DebugValue Value can be used here. |
1741 | LLVM_DEBUG(dbgs() << "Val, Debug value is Known\n" ;); |
1742 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1743 | if (DbgAV.Source) { |
1744 | emitDbgValue(Kind: LocKind::Val, Source: DbgAV.Source, After: &I); |
1745 | } else { |
1746 | // PrevAV.Source is nullptr so we must emit undef here. |
1747 | emitDbgValue(LocKind::None, Assign, &I); |
1748 | } |
1749 | } |
1750 | } break; |
1751 | case LocKind::None: { |
1752 | // There's been an assignment to memory and we currently are |
1753 | // not tracking a location for the variable. Do not emit anything. |
1754 | LLVM_DEBUG(dbgs() << "None, (unchanged)\n" ;); |
1755 | setLocKind(LiveSet, Var, K: LocKind::None); |
1756 | } break; |
1757 | } |
1758 | }; |
1759 | for (DbgAssignIntrinsic *DAI : Linked) |
1760 | ProcessLinkedAssign(DAI); |
1761 | for (DbgVariableRecord *DVR : LinkedDPAssigns) |
1762 | ProcessLinkedAssign(DVR); |
1763 | } |
1764 | |
1765 | void AssignmentTrackingLowering::processDbgAssign(AssignRecord Assign, |
1766 | BlockInfo *LiveSet) { |
1767 | auto ProcessDbgAssignImpl = [&](auto *DbgAssign) { |
1768 | // Only bother tracking variables that are at some point stack homed. Other |
1769 | // variables can be dealt with trivially later. |
1770 | if (!VarsWithStackSlot->count(V: getAggregate(DbgAssign))) |
1771 | return; |
1772 | |
1773 | VariableID Var = getVariableID(Var: DebugVariable(DbgAssign)); |
1774 | Assignment AV = Assignment::make(getIDFromMarker(*DbgAssign), DbgAssign); |
1775 | addDbgDef(LiveSet, Var, AV); |
1776 | |
1777 | LLVM_DEBUG(dbgs() << "processDbgAssign on " << *DbgAssign << "\n" ;); |
1778 | LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var)) |
1779 | << " -> " ); |
1780 | |
1781 | // Check if the DebugValue and StackHomeValue both hold the same |
1782 | // Assignment. |
1783 | if (hasVarWithAssignment(LiveSet, Kind: BlockInfo::Stack, Var, AV)) { |
1784 | // They match. We can use the stack home because the debug intrinsics |
1785 | // state that an assignment happened here, and we know that specific |
1786 | // assignment was the last one to take place in memory for this variable. |
1787 | LocKind Kind; |
1788 | if (DbgAssign->isKillAddress()) { |
1789 | LLVM_DEBUG( |
1790 | dbgs() |
1791 | << "Val, Stack matches Debug program but address is killed\n" ;); |
1792 | Kind = LocKind::Val; |
1793 | } else { |
1794 | LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n" ;); |
1795 | Kind = LocKind::Mem; |
1796 | }; |
1797 | setLocKind(LiveSet, Var, K: Kind); |
1798 | emitDbgValue(Kind, DbgAssign, DbgAssign); |
1799 | } else { |
1800 | // The last assignment to the memory location isn't the one that we want |
1801 | // to show to the user so emit a dbg.value(Value). Value may be undef. |
1802 | LLVM_DEBUG(dbgs() << "Val, Stack contents is unknown\n" ;); |
1803 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1804 | emitDbgValue(LocKind::Val, DbgAssign, DbgAssign); |
1805 | } |
1806 | }; |
1807 | if (isa<DbgVariableRecord *>(Val: Assign)) |
1808 | return ProcessDbgAssignImpl(cast<DbgVariableRecord *>(Val&: Assign)); |
1809 | return ProcessDbgAssignImpl(cast<DbgAssignIntrinsic *>(Val&: Assign)); |
1810 | } |
1811 | |
1812 | void AssignmentTrackingLowering::processDbgValue( |
1813 | PointerUnion<DbgValueInst *, DbgVariableRecord *> DbgValueRecord, |
1814 | BlockInfo *LiveSet) { |
1815 | auto ProcessDbgValueImpl = [&](auto *DbgValue) { |
1816 | // Only other tracking variables that are at some point stack homed. |
1817 | // Other variables can be dealt with trivally later. |
1818 | if (!VarsWithStackSlot->count(V: getAggregate(DbgValue))) |
1819 | return; |
1820 | |
1821 | VariableID Var = getVariableID(Var: DebugVariable(DbgValue)); |
1822 | // We have no ID to create an Assignment with so we mark this assignment as |
1823 | // NoneOrPhi. Note that the dbg.value still exists, we just cannot determine |
1824 | // the assignment responsible for setting this value. |
1825 | // This is fine; dbg.values are essentially interchangable with unlinked |
1826 | // dbg.assigns, and some passes such as mem2reg and instcombine add them to |
1827 | // PHIs for promoted variables. |
1828 | Assignment AV = Assignment::makeNoneOrPhi(); |
1829 | addDbgDef(LiveSet, Var, AV); |
1830 | |
1831 | LLVM_DEBUG(dbgs() << "processDbgValue on " << *DbgValue << "\n" ;); |
1832 | LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var)) |
1833 | << " -> Val, dbg.value override" ); |
1834 | |
1835 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1836 | emitDbgValue(LocKind::Val, DbgValue, DbgValue); |
1837 | }; |
1838 | if (isa<DbgVariableRecord *>(Val: DbgValueRecord)) |
1839 | return ProcessDbgValueImpl(cast<DbgVariableRecord *>(Val&: DbgValueRecord)); |
1840 | return ProcessDbgValueImpl(cast<DbgValueInst *>(Val&: DbgValueRecord)); |
1841 | } |
1842 | |
1843 | template <typename T> static bool hasZeroSizedFragment(T &DbgValue) { |
1844 | if (auto F = DbgValue.getExpression()->getFragmentInfo()) |
1845 | return F->SizeInBits == 0; |
1846 | return false; |
1847 | } |
1848 | |
1849 | void AssignmentTrackingLowering::processDbgInstruction( |
1850 | DbgInfoIntrinsic &I, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1851 | auto *DVI = dyn_cast<DbgVariableIntrinsic>(Val: &I); |
1852 | if (!DVI) |
1853 | return; |
1854 | |
1855 | // Ignore assignments to zero bits of the variable. |
1856 | if (hasZeroSizedFragment(DbgValue&: *DVI)) |
1857 | return; |
1858 | |
1859 | if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(Val: &I)) |
1860 | processDbgAssign(Assign: DAI, LiveSet); |
1861 | else if (auto *DVI = dyn_cast<DbgValueInst>(Val: &I)) |
1862 | processDbgValue(DbgValueRecord: DVI, LiveSet); |
1863 | } |
1864 | void AssignmentTrackingLowering::processDbgVariableRecord( |
1865 | DbgVariableRecord &DVR, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1866 | // Ignore assignments to zero bits of the variable. |
1867 | if (hasZeroSizedFragment(DbgValue&: DVR)) |
1868 | return; |
1869 | |
1870 | if (DVR.isDbgAssign()) |
1871 | processDbgAssign(Assign: &DVR, LiveSet); |
1872 | else if (DVR.isDbgValue()) |
1873 | processDbgValue(DbgValueRecord: &DVR, LiveSet); |
1874 | } |
1875 | |
1876 | void AssignmentTrackingLowering::resetInsertionPoint(Instruction &After) { |
1877 | assert(!After.isTerminator() && "Can't insert after a terminator" ); |
1878 | auto *R = InsertBeforeMap.find(Key: getNextNode(Inst: &After)); |
1879 | if (R == InsertBeforeMap.end()) |
1880 | return; |
1881 | R->second.clear(); |
1882 | } |
1883 | void AssignmentTrackingLowering::resetInsertionPoint(DbgVariableRecord &After) { |
1884 | auto *R = InsertBeforeMap.find(Key: getNextNode(DVR: &After)); |
1885 | if (R == InsertBeforeMap.end()) |
1886 | return; |
1887 | R->second.clear(); |
1888 | } |
1889 | |
1890 | void AssignmentTrackingLowering::process(BasicBlock &BB, BlockInfo *LiveSet) { |
1891 | // If the block starts with DbgRecords, we need to process those DbgRecords as |
1892 | // their own frame without processing any instructions first. |
1893 | bool ProcessedLeadingDbgRecords = !BB.begin()->hasDbgRecords(); |
1894 | for (auto II = BB.begin(), EI = BB.end(); II != EI;) { |
1895 | assert(VarsTouchedThisFrame.empty()); |
1896 | // Process the instructions in "frames". A "frame" includes a single |
1897 | // non-debug instruction followed any debug instructions before the |
1898 | // next non-debug instruction. |
1899 | |
1900 | // Skip the current instruction if it has unprocessed DbgRecords attached |
1901 | // (see comment above `ProcessedLeadingDbgRecords`). |
1902 | if (ProcessedLeadingDbgRecords) { |
1903 | // II is now either a debug intrinsic, a non-debug instruction with no |
1904 | // attached DbgRecords, or a non-debug instruction with attached processed |
1905 | // DbgRecords. |
1906 | // II has not been processed. |
1907 | if (!isa<DbgInfoIntrinsic>(Val: &*II)) { |
1908 | if (II->isTerminator()) |
1909 | break; |
1910 | resetInsertionPoint(After&: *II); |
1911 | processNonDbgInstruction(I&: *II, LiveSet); |
1912 | assert(LiveSet->isValid()); |
1913 | ++II; |
1914 | } |
1915 | } |
1916 | // II is now either a debug intrinsic, a non-debug instruction with no |
1917 | // attached DbgRecords, or a non-debug instruction with attached unprocessed |
1918 | // DbgRecords. |
1919 | if (II != EI && II->hasDbgRecords()) { |
1920 | // Skip over non-variable debug records (i.e., labels). They're going to |
1921 | // be read from IR (possibly re-ordering them within the debug record |
1922 | // range) rather than from the analysis results. |
1923 | for (DbgVariableRecord &DVR : filterDbgVars(R: II->getDbgRecordRange())) { |
1924 | resetInsertionPoint(After&: DVR); |
1925 | processDbgVariableRecord(DVR, LiveSet); |
1926 | assert(LiveSet->isValid()); |
1927 | } |
1928 | } |
1929 | ProcessedLeadingDbgRecords = true; |
1930 | while (II != EI) { |
1931 | auto *Dbg = dyn_cast<DbgInfoIntrinsic>(Val: &*II); |
1932 | if (!Dbg) |
1933 | break; |
1934 | resetInsertionPoint(After&: *II); |
1935 | processDbgInstruction(I&: *Dbg, LiveSet); |
1936 | assert(LiveSet->isValid()); |
1937 | ++II; |
1938 | } |
1939 | // II is now a non-debug instruction either with no attached DbgRecords, or |
1940 | // with attached processed DbgRecords. II has not been processed, and all |
1941 | // debug instructions or DbgRecords in the frame preceding II have been |
1942 | // processed. |
1943 | |
1944 | // We've processed everything in the "frame". Now determine which variables |
1945 | // cannot be represented by a dbg.declare. |
1946 | for (auto Var : VarsTouchedThisFrame) { |
1947 | LocKind Loc = getLocKind(LiveSet, Var); |
1948 | // If a variable's LocKind is anything other than LocKind::Mem then we |
1949 | // must note that it cannot be represented with a dbg.declare. |
1950 | // Note that this check is enough without having to check the result of |
1951 | // joins() because for join to produce anything other than Mem after |
1952 | // we've already seen a Mem we'd be joining None or Val with Mem. In that |
1953 | // case, we've already hit this codepath when we set the LocKind to Val |
1954 | // or None in that block. |
1955 | if (Loc != LocKind::Mem) { |
1956 | DebugVariable DbgVar = FnVarLocs->getVariable(ID: Var); |
1957 | DebugAggregate Aggr{DbgVar.getVariable(), DbgVar.getInlinedAt()}; |
1958 | NotAlwaysStackHomed.insert(V: Aggr); |
1959 | } |
1960 | } |
1961 | VarsTouchedThisFrame.clear(); |
1962 | } |
1963 | } |
1964 | |
1965 | AssignmentTrackingLowering::LocKind |
1966 | AssignmentTrackingLowering::joinKind(LocKind A, LocKind B) { |
1967 | // Partial order: |
1968 | // None > Mem, Val |
1969 | return A == B ? A : LocKind::None; |
1970 | } |
1971 | |
1972 | AssignmentTrackingLowering::Assignment |
1973 | AssignmentTrackingLowering::joinAssignment(const Assignment &A, |
1974 | const Assignment &B) { |
1975 | // Partial order: |
1976 | // NoneOrPhi(null, null) > Known(v, ?s) |
1977 | |
1978 | // If either are NoneOrPhi the join is NoneOrPhi. |
1979 | // If either value is different then the result is |
1980 | // NoneOrPhi (joining two values is a Phi). |
1981 | if (!A.isSameSourceAssignment(Other: B)) |
1982 | return Assignment::makeNoneOrPhi(); |
1983 | if (A.Status == Assignment::NoneOrPhi) |
1984 | return Assignment::makeNoneOrPhi(); |
1985 | |
1986 | // Source is used to lookup the value + expression in the debug program if |
1987 | // the stack slot gets assigned a value earlier than expected. Because |
1988 | // we're only tracking the one dbg.assign, we can't capture debug PHIs. |
1989 | // It's unlikely that we're losing out on much coverage by avoiding that |
1990 | // extra work. |
1991 | // The Source may differ in this situation: |
1992 | // Pred.1: |
1993 | // dbg.assign i32 0, ..., !1, ... |
1994 | // Pred.2: |
1995 | // dbg.assign i32 1, ..., !1, ... |
1996 | // Here the same assignment (!1) was performed in both preds in the source, |
1997 | // but we can't use either one unless they are identical (e.g. .we don't |
1998 | // want to arbitrarily pick between constant values). |
1999 | auto JoinSource = [&]() -> AssignRecord { |
2000 | if (A.Source == B.Source) |
2001 | return A.Source; |
2002 | if (!A.Source || !B.Source) |
2003 | return AssignRecord(); |
2004 | assert(isa<DbgVariableRecord *>(A.Source) == |
2005 | isa<DbgVariableRecord *>(B.Source)); |
2006 | if (isa<DbgVariableRecord *>(Val: A.Source) && |
2007 | cast<DbgVariableRecord *>(Val: A.Source)->isEquivalentTo( |
2008 | Other: *cast<DbgVariableRecord *>(Val: B.Source))) |
2009 | return A.Source; |
2010 | if (isa<DbgAssignIntrinsic *>(Val: A.Source) && |
2011 | cast<DbgAssignIntrinsic *>(Val: A.Source)->isIdenticalTo( |
2012 | I: cast<DbgAssignIntrinsic *>(Val: B.Source))) |
2013 | return A.Source; |
2014 | return AssignRecord(); |
2015 | }; |
2016 | AssignRecord Source = JoinSource(); |
2017 | assert(A.Status == B.Status && A.Status == Assignment::Known); |
2018 | assert(A.ID == B.ID); |
2019 | return Assignment::make(ID: A.ID, Source); |
2020 | } |
2021 | |
2022 | AssignmentTrackingLowering::BlockInfo |
2023 | AssignmentTrackingLowering::joinBlockInfo(const BlockInfo &A, |
2024 | const BlockInfo &B) { |
2025 | return BlockInfo::join(A, B, NumVars: TrackedVariablesVectorSize); |
2026 | } |
2027 | |
2028 | bool AssignmentTrackingLowering::join( |
2029 | const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited) { |
2030 | |
2031 | SmallVector<const BasicBlock *> VisitedPreds; |
2032 | // Ignore backedges if we have not visited the predecessor yet. As the |
2033 | // predecessor hasn't yet had locations propagated into it, most locations |
2034 | // will not yet be valid, so treat them as all being uninitialized and |
2035 | // potentially valid. If a location guessed to be correct here is |
2036 | // invalidated later, we will remove it when we revisit this block. This |
2037 | // is essentially the same as initialising all LocKinds and Assignments to |
2038 | // an implicit ⊥ value which is the identity value for the join operation. |
2039 | for (const BasicBlock *Pred : predecessors(BB: &BB)) { |
2040 | if (Visited.count(Ptr: Pred)) |
2041 | VisitedPreds.push_back(Elt: Pred); |
2042 | } |
2043 | |
2044 | // No preds visited yet. |
2045 | if (VisitedPreds.empty()) { |
2046 | auto It = LiveIn.try_emplace(Key: &BB, Args: BlockInfo()); |
2047 | bool DidInsert = It.second; |
2048 | if (DidInsert) |
2049 | It.first->second.init(NumVars: TrackedVariablesVectorSize); |
2050 | return /*Changed*/ DidInsert; |
2051 | } |
2052 | |
2053 | // Exactly one visited pred. Copy the LiveOut from that pred into BB LiveIn. |
2054 | if (VisitedPreds.size() == 1) { |
2055 | const BlockInfo &PredLiveOut = LiveOut.find(Val: VisitedPreds[0])->second; |
2056 | auto CurrentLiveInEntry = LiveIn.find(Val: &BB); |
2057 | |
2058 | // Check if there isn't an entry, or there is but the LiveIn set has |
2059 | // changed (expensive check). |
2060 | if (CurrentLiveInEntry == LiveIn.end()) |
2061 | LiveIn.insert(KV: std::make_pair(x: &BB, y: PredLiveOut)); |
2062 | else if (PredLiveOut != CurrentLiveInEntry->second) |
2063 | CurrentLiveInEntry->second = PredLiveOut; |
2064 | else |
2065 | return /*Changed*/ false; |
2066 | return /*Changed*/ true; |
2067 | } |
2068 | |
2069 | // More than one pred. Join LiveOuts of blocks 1 and 2. |
2070 | assert(VisitedPreds.size() > 1); |
2071 | const BlockInfo &PredLiveOut0 = LiveOut.find(Val: VisitedPreds[0])->second; |
2072 | const BlockInfo &PredLiveOut1 = LiveOut.find(Val: VisitedPreds[1])->second; |
2073 | BlockInfo BBLiveIn = joinBlockInfo(A: PredLiveOut0, B: PredLiveOut1); |
2074 | |
2075 | // Join the LiveOuts of subsequent blocks. |
2076 | ArrayRef Tail = ArrayRef(VisitedPreds).drop_front(N: 2); |
2077 | for (const BasicBlock *Pred : Tail) { |
2078 | const auto &PredLiveOut = LiveOut.find(Val: Pred); |
2079 | assert(PredLiveOut != LiveOut.end() && |
2080 | "block should have been processed already" ); |
2081 | BBLiveIn = joinBlockInfo(A: std::move(BBLiveIn), B: PredLiveOut->second); |
2082 | } |
2083 | |
2084 | // Save the joined result for BB. |
2085 | auto CurrentLiveInEntry = LiveIn.find(Val: &BB); |
2086 | // Check if there isn't an entry, or there is but the LiveIn set has changed |
2087 | // (expensive check). |
2088 | if (CurrentLiveInEntry == LiveIn.end()) |
2089 | LiveIn.try_emplace(Key: &BB, Args: std::move(BBLiveIn)); |
2090 | else if (BBLiveIn != CurrentLiveInEntry->second) |
2091 | CurrentLiveInEntry->second = std::move(BBLiveIn); |
2092 | else |
2093 | return /*Changed*/ false; |
2094 | return /*Changed*/ true; |
2095 | } |
2096 | |
2097 | /// Return true if A fully contains B. |
2098 | static bool fullyContains(DIExpression::FragmentInfo A, |
2099 | DIExpression::FragmentInfo B) { |
2100 | auto ALeft = A.OffsetInBits; |
2101 | auto BLeft = B.OffsetInBits; |
2102 | if (BLeft < ALeft) |
2103 | return false; |
2104 | |
2105 | auto ARight = ALeft + A.SizeInBits; |
2106 | auto BRight = BLeft + B.SizeInBits; |
2107 | if (BRight > ARight) |
2108 | return false; |
2109 | return true; |
2110 | } |
2111 | |
2112 | static std::optional<at::AssignmentInfo> |
2113 | getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout) { |
2114 | // Don't bother checking if this is an AllocaInst. We know this |
2115 | // instruction has no tag which means there are no variables associated |
2116 | // with it. |
2117 | if (const auto *SI = dyn_cast<StoreInst>(Val: &I)) |
2118 | return at::getAssignmentInfo(DL: Layout, SI); |
2119 | if (const auto *MI = dyn_cast<MemIntrinsic>(Val: &I)) |
2120 | return at::getAssignmentInfo(DL: Layout, I: MI); |
2121 | // Alloca or non-store-like inst. |
2122 | return std::nullopt; |
2123 | } |
2124 | |
2125 | DbgDeclareInst *DynCastToDbgDeclare(DbgVariableIntrinsic *DVI) { |
2126 | return dyn_cast<DbgDeclareInst>(Val: DVI); |
2127 | } |
2128 | |
2129 | DbgVariableRecord *DynCastToDbgDeclare(DbgVariableRecord *DVR) { |
2130 | return DVR->isDbgDeclare() ? DVR : nullptr; |
2131 | } |
2132 | |
2133 | /// Build a map of {Variable x: Variables y} where all variable fragments |
2134 | /// contained within the variable fragment x are in set y. This means that |
2135 | /// y does not contain all overlaps because partial overlaps are excluded. |
2136 | /// |
2137 | /// While we're iterating over the function, add single location defs for |
2138 | /// dbg.declares to \p FnVarLocs. |
2139 | /// |
2140 | /// Variables that are interesting to this pass in are added to |
2141 | /// FnVarLocs->Variables first. TrackedVariablesVectorSize is set to the ID of |
2142 | /// the last interesting variable plus 1, meaning variables with ID 1 |
2143 | /// (inclusive) to TrackedVariablesVectorSize (exclusive) are interesting. The |
2144 | /// subsequent variables are either stack homed or fully promoted. |
2145 | /// |
2146 | /// Finally, populate UntaggedStoreVars with a mapping of untagged stores to |
2147 | /// the stored-to variable fragments. |
2148 | /// |
2149 | /// These tasks are bundled together to reduce the number of times we need |
2150 | /// to iterate over the function as they can be achieved together in one pass. |
2151 | static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares( |
2152 | Function &Fn, FunctionVarLocsBuilder *FnVarLocs, |
2153 | const DenseSet<DebugAggregate> &VarsWithStackSlot, |
2154 | AssignmentTrackingLowering::UntaggedStoreAssignmentMap &UntaggedStoreVars, |
2155 | unsigned &TrackedVariablesVectorSize) { |
2156 | DenseSet<DebugVariable> Seen; |
2157 | // Map of Variable: [Fragments]. |
2158 | DenseMap<DebugAggregate, SmallVector<DebugVariable, 8>> FragmentMap; |
2159 | // Iterate over all instructions: |
2160 | // - dbg.declare -> add single location variable record |
2161 | // - dbg.* -> Add fragments to FragmentMap |
2162 | // - untagged store -> Add fragments to FragmentMap and update |
2163 | // UntaggedStoreVars. |
2164 | // We need to add fragments for untagged stores too so that we can correctly |
2165 | // clobber overlapped fragment locations later. |
2166 | SmallVector<DbgDeclareInst *> InstDeclares; |
2167 | SmallVector<DbgVariableRecord *> DPDeclares; |
2168 | auto ProcessDbgRecord = [&](auto *Record, auto &DeclareList) { |
2169 | if (auto *Declare = DynCastToDbgDeclare(Record)) { |
2170 | DeclareList.push_back(Declare); |
2171 | return; |
2172 | } |
2173 | DebugVariable DV = DebugVariable(Record); |
2174 | DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()}; |
2175 | if (!VarsWithStackSlot.contains(V: DA)) |
2176 | return; |
2177 | if (Seen.insert(V: DV).second) |
2178 | FragmentMap[DA].push_back(Elt: DV); |
2179 | }; |
2180 | for (auto &BB : Fn) { |
2181 | for (auto &I : BB) { |
2182 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) |
2183 | ProcessDbgRecord(&DVR, DPDeclares); |
2184 | if (auto *DII = dyn_cast<DbgVariableIntrinsic>(Val: &I)) { |
2185 | ProcessDbgRecord(DII, InstDeclares); |
2186 | } else if (auto Info = getUntaggedStoreAssignmentInfo( |
2187 | I, Layout: Fn.getDataLayout())) { |
2188 | // Find markers linked to this alloca. |
2189 | auto HandleDbgAssignForStore = [&](auto *Assign) { |
2190 | std::optional<DIExpression::FragmentInfo> FragInfo; |
2191 | |
2192 | // Skip this assignment if the affected bits are outside of the |
2193 | // variable fragment. |
2194 | if (!at::calculateFragmentIntersect( |
2195 | I.getDataLayout(), Info->Base, |
2196 | Info->OffsetInBits, Info->SizeInBits, Assign, FragInfo) || |
2197 | (FragInfo && FragInfo->SizeInBits == 0)) |
2198 | return; |
2199 | |
2200 | // FragInfo from calculateFragmentIntersect is nullopt if the |
2201 | // resultant fragment matches DAI's fragment or entire variable - in |
2202 | // which case copy the fragment info from DAI. If FragInfo is still |
2203 | // nullopt after the copy it means "no fragment info" instead, which |
2204 | // is how it is usually interpreted. |
2205 | if (!FragInfo) |
2206 | FragInfo = Assign->getExpression()->getFragmentInfo(); |
2207 | |
2208 | DebugVariable DV = |
2209 | DebugVariable(Assign->getVariable(), FragInfo, |
2210 | Assign->getDebugLoc().getInlinedAt()); |
2211 | DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()}; |
2212 | if (!VarsWithStackSlot.contains(V: DA)) |
2213 | return; |
2214 | |
2215 | // Cache this info for later. |
2216 | UntaggedStoreVars[&I].push_back( |
2217 | Elt: {FnVarLocs->insertVariable(V: DV), *Info}); |
2218 | |
2219 | if (Seen.insert(V: DV).second) |
2220 | FragmentMap[DA].push_back(Elt: DV); |
2221 | }; |
2222 | for (DbgAssignIntrinsic *DAI : at::getAssignmentMarkers(Inst: Info->Base)) |
2223 | HandleDbgAssignForStore(DAI); |
2224 | for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Inst: Info->Base)) |
2225 | HandleDbgAssignForStore(DVR); |
2226 | } |
2227 | } |
2228 | } |
2229 | |
2230 | // Sort the fragment map for each DebugAggregate in ascending |
2231 | // order of fragment size - there should be no duplicates. |
2232 | for (auto &Pair : FragmentMap) { |
2233 | SmallVector<DebugVariable, 8> &Frags = Pair.second; |
2234 | std::sort(first: Frags.begin(), last: Frags.end(), |
2235 | comp: [](const DebugVariable &Next, const DebugVariable &Elmt) { |
2236 | return Elmt.getFragmentOrDefault().SizeInBits > |
2237 | Next.getFragmentOrDefault().SizeInBits; |
2238 | }); |
2239 | // Check for duplicates. |
2240 | assert(std::adjacent_find(Frags.begin(), Frags.end()) == Frags.end()); |
2241 | } |
2242 | |
2243 | // Build the map. |
2244 | AssignmentTrackingLowering::OverlapMap Map; |
2245 | for (auto &Pair : FragmentMap) { |
2246 | auto &Frags = Pair.second; |
2247 | for (auto It = Frags.begin(), IEnd = Frags.end(); It != IEnd; ++It) { |
2248 | DIExpression::FragmentInfo Frag = It->getFragmentOrDefault(); |
2249 | // Find the frags that this is contained within. |
2250 | // |
2251 | // Because Frags is sorted by size and none have the same offset and |
2252 | // size, we know that this frag can only be contained by subsequent |
2253 | // elements. |
2254 | SmallVector<DebugVariable, 8>::iterator OtherIt = It; |
2255 | ++OtherIt; |
2256 | VariableID ThisVar = FnVarLocs->insertVariable(V: *It); |
2257 | for (; OtherIt != IEnd; ++OtherIt) { |
2258 | DIExpression::FragmentInfo OtherFrag = OtherIt->getFragmentOrDefault(); |
2259 | VariableID OtherVar = FnVarLocs->insertVariable(V: *OtherIt); |
2260 | if (fullyContains(A: OtherFrag, B: Frag)) |
2261 | Map[OtherVar].push_back(Elt: ThisVar); |
2262 | } |
2263 | } |
2264 | } |
2265 | |
2266 | // VariableIDs are 1-based so the variable-tracking bitvector needs |
2267 | // NumVariables plus 1 bits. |
2268 | TrackedVariablesVectorSize = FnVarLocs->getNumVariables() + 1; |
2269 | |
2270 | // Finally, insert the declares afterwards, so the first IDs are all |
2271 | // partially stack homed vars. |
2272 | for (auto *DDI : InstDeclares) |
2273 | FnVarLocs->addSingleLocVar(Var: DebugVariable(DDI), Expr: DDI->getExpression(), |
2274 | DL: DDI->getDebugLoc(), R: DDI->getWrappedLocation()); |
2275 | for (auto *DVR : DPDeclares) |
2276 | FnVarLocs->addSingleLocVar(Var: DebugVariable(DVR), Expr: DVR->getExpression(), |
2277 | DL: DVR->getDebugLoc(), |
2278 | R: RawLocationWrapper(DVR->getRawLocation())); |
2279 | return Map; |
2280 | } |
2281 | |
2282 | bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) { |
2283 | if (Fn.size() > MaxNumBlocks) { |
2284 | LLVM_DEBUG(dbgs() << "[AT] Dropping var locs in: " << Fn.getName() |
2285 | << ": too many blocks (" << Fn.size() << ")\n" ); |
2286 | at::deleteAll(F: &Fn); |
2287 | return false; |
2288 | } |
2289 | |
2290 | FnVarLocs = FnVarLocsBuilder; |
2291 | |
2292 | // The general structure here is inspired by VarLocBasedImpl.cpp |
2293 | // (LiveDebugValues). |
2294 | |
2295 | // Build the variable fragment overlap map. |
2296 | // Note that this pass doesn't handle partial overlaps correctly (FWIW |
2297 | // neither does LiveDebugVariables) because that is difficult to do and |
2298 | // appears to be rare occurance. |
2299 | VarContains = buildOverlapMapAndRecordDeclares( |
2300 | Fn, FnVarLocs, VarsWithStackSlot: *VarsWithStackSlot, UntaggedStoreVars, |
2301 | TrackedVariablesVectorSize); |
2302 | |
2303 | // Prepare for traversal. |
2304 | ReversePostOrderTraversal<Function *> RPOT(&Fn); |
2305 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
2306 | std::greater<unsigned int>> |
2307 | Worklist; |
2308 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
2309 | std::greater<unsigned int>> |
2310 | Pending; |
2311 | DenseMap<unsigned int, BasicBlock *> OrderToBB; |
2312 | DenseMap<BasicBlock *, unsigned int> BBToOrder; |
2313 | { // Init OrderToBB and BBToOrder. |
2314 | unsigned int RPONumber = 0; |
2315 | for (BasicBlock *BB : RPOT) { |
2316 | OrderToBB[RPONumber] = BB; |
2317 | BBToOrder[BB] = RPONumber; |
2318 | Worklist.push(x: RPONumber); |
2319 | ++RPONumber; |
2320 | } |
2321 | LiveIn.init(InitNumEntries: RPONumber); |
2322 | LiveOut.init(InitNumEntries: RPONumber); |
2323 | } |
2324 | |
2325 | // Perform the traversal. |
2326 | // |
2327 | // This is a standard "union of predecessor outs" dataflow problem. To solve |
2328 | // it, we perform join() and process() using the two worklist method until |
2329 | // the LiveIn data for each block becomes unchanging. The "proof" that this |
2330 | // terminates can be put together by looking at the comments around LocKind, |
2331 | // Assignment, and the various join methods, which show that all the elements |
2332 | // involved are made up of join-semilattices; LiveIn(n) can only |
2333 | // monotonically increase in value throughout the dataflow. |
2334 | // |
2335 | SmallPtrSet<BasicBlock *, 16> Visited; |
2336 | while (!Worklist.empty()) { |
2337 | // We track what is on the pending worklist to avoid inserting the same |
2338 | // thing twice. |
2339 | SmallPtrSet<BasicBlock *, 16> OnPending; |
2340 | LLVM_DEBUG(dbgs() << "Processing Worklist\n" ); |
2341 | while (!Worklist.empty()) { |
2342 | BasicBlock *BB = OrderToBB[Worklist.top()]; |
2343 | LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n" ); |
2344 | Worklist.pop(); |
2345 | bool InChanged = join(BB: *BB, Visited); |
2346 | // Always consider LiveIn changed on the first visit. |
2347 | InChanged |= Visited.insert(Ptr: BB).second; |
2348 | if (InChanged) { |
2349 | LLVM_DEBUG(dbgs() << BB->getName() << " has new InLocs, process it\n" ); |
2350 | // Mutate a copy of LiveIn while processing BB. After calling process |
2351 | // LiveSet is the LiveOut set for BB. |
2352 | BlockInfo LiveSet = LiveIn[BB]; |
2353 | |
2354 | // Process the instructions in the block. |
2355 | process(BB&: *BB, LiveSet: &LiveSet); |
2356 | |
2357 | // Relatively expensive check: has anything changed in LiveOut for BB? |
2358 | if (LiveOut[BB] != LiveSet) { |
2359 | LLVM_DEBUG(dbgs() << BB->getName() |
2360 | << " has new OutLocs, add succs to worklist: [ " ); |
2361 | LiveOut[BB] = std::move(LiveSet); |
2362 | for (BasicBlock *Succ : successors(BB)) { |
2363 | if (OnPending.insert(Ptr: Succ).second) { |
2364 | LLVM_DEBUG(dbgs() << Succ->getName() << " " ); |
2365 | Pending.push(x: BBToOrder[Succ]); |
2366 | } |
2367 | } |
2368 | LLVM_DEBUG(dbgs() << "]\n" ); |
2369 | } |
2370 | } |
2371 | } |
2372 | Worklist.swap(pq&: Pending); |
2373 | // At this point, pending must be empty, since it was just the empty |
2374 | // worklist |
2375 | assert(Pending.empty() && "Pending should be empty" ); |
2376 | } |
2377 | |
2378 | // That's the hard part over. Now we just have some admin to do. |
2379 | |
2380 | // Record whether we inserted any intrinsics. |
2381 | bool InsertedAnyIntrinsics = false; |
2382 | |
2383 | // Identify and add defs for single location variables. |
2384 | // |
2385 | // Go through all of the defs that we plan to add. If the aggregate variable |
2386 | // it's a part of is not in the NotAlwaysStackHomed set we can emit a single |
2387 | // location def and omit the rest. Add an entry to AlwaysStackHomed so that |
2388 | // we can identify those uneeded defs later. |
2389 | DenseSet<DebugAggregate> AlwaysStackHomed; |
2390 | for (const auto &Pair : InsertBeforeMap) { |
2391 | auto &Vec = Pair.second; |
2392 | for (VarLocInfo VarLoc : Vec) { |
2393 | DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID); |
2394 | DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()}; |
2395 | |
2396 | // Skip this Var if it's not always stack homed. |
2397 | if (NotAlwaysStackHomed.contains(V: Aggr)) |
2398 | continue; |
2399 | |
2400 | // Skip complex cases such as when different fragments of a variable have |
2401 | // been split into different allocas. Skipping in this case means falling |
2402 | // back to using a list of defs (which could reduce coverage, but is no |
2403 | // less correct). |
2404 | bool Simple = |
2405 | VarLoc.Expr->getNumElements() == 1 && VarLoc.Expr->startsWithDeref(); |
2406 | if (!Simple) { |
2407 | NotAlwaysStackHomed.insert(V: Aggr); |
2408 | continue; |
2409 | } |
2410 | |
2411 | // All source assignments to this variable remain and all stores to any |
2412 | // part of the variable store to the same address (with varying |
2413 | // offsets). We can just emit a single location for the whole variable. |
2414 | // |
2415 | // Unless we've already done so, create the single location def now. |
2416 | if (AlwaysStackHomed.insert(V: Aggr).second) { |
2417 | assert(!VarLoc.Values.hasArgList()); |
2418 | // TODO: When more complex cases are handled VarLoc.Expr should be |
2419 | // built appropriately rather than always using an empty DIExpression. |
2420 | // The assert below is a reminder. |
2421 | assert(Simple); |
2422 | VarLoc.Expr = DIExpression::get(Context&: Fn.getContext(), Elements: std::nullopt); |
2423 | DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID); |
2424 | FnVarLocs->addSingleLocVar(Var, Expr: VarLoc.Expr, DL: VarLoc.DL, R: VarLoc.Values); |
2425 | InsertedAnyIntrinsics = true; |
2426 | } |
2427 | } |
2428 | } |
2429 | |
2430 | // Insert the other DEFs. |
2431 | for (const auto &[InsertBefore, Vec] : InsertBeforeMap) { |
2432 | SmallVector<VarLocInfo> NewDefs; |
2433 | for (const VarLocInfo &VarLoc : Vec) { |
2434 | DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID); |
2435 | DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()}; |
2436 | // If this variable is always stack homed then we have already inserted a |
2437 | // dbg.declare and deleted this dbg.value. |
2438 | if (AlwaysStackHomed.contains(V: Aggr)) |
2439 | continue; |
2440 | NewDefs.push_back(Elt: VarLoc); |
2441 | InsertedAnyIntrinsics = true; |
2442 | } |
2443 | |
2444 | FnVarLocs->setWedge(Before: InsertBefore, Wedge: std::move(NewDefs)); |
2445 | } |
2446 | |
2447 | InsertedAnyIntrinsics |= emitPromotedVarLocs(FnVarLocs); |
2448 | |
2449 | return InsertedAnyIntrinsics; |
2450 | } |
2451 | |
2452 | bool AssignmentTrackingLowering::emitPromotedVarLocs( |
2453 | FunctionVarLocsBuilder *FnVarLocs) { |
2454 | bool InsertedAnyIntrinsics = false; |
2455 | // Go through every block, translating debug intrinsics for fully promoted |
2456 | // variables into FnVarLocs location defs. No analysis required for these. |
2457 | auto TranslateDbgRecord = [&](auto *Record) { |
2458 | // Skip variables that haven't been promoted - we've dealt with those |
2459 | // already. |
2460 | if (VarsWithStackSlot->contains(V: getAggregate(Record))) |
2461 | return; |
2462 | auto InsertBefore = getNextNode(Record); |
2463 | assert(InsertBefore && "Unexpected: debug intrinsics after a terminator" ); |
2464 | FnVarLocs->addVarLoc(Before: InsertBefore, Var: DebugVariable(Record), |
2465 | Expr: Record->getExpression(), DL: Record->getDebugLoc(), |
2466 | R: RawLocationWrapper(Record->getRawLocation())); |
2467 | InsertedAnyIntrinsics = true; |
2468 | }; |
2469 | for (auto &BB : Fn) { |
2470 | for (auto &I : BB) { |
2471 | // Skip instructions other than dbg.values and dbg.assigns. |
2472 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) |
2473 | if (DVR.isDbgValue() || DVR.isDbgAssign()) |
2474 | TranslateDbgRecord(&DVR); |
2475 | auto *DVI = dyn_cast<DbgValueInst>(Val: &I); |
2476 | if (DVI) |
2477 | TranslateDbgRecord(DVI); |
2478 | } |
2479 | } |
2480 | return InsertedAnyIntrinsics; |
2481 | } |
2482 | |
2483 | /// Remove redundant definitions within sequences of consecutive location defs. |
2484 | /// This is done using a backward scan to keep the last def describing a |
2485 | /// specific variable/fragment. |
2486 | /// |
2487 | /// This implements removeRedundantDbgInstrsUsingBackwardScan from |
2488 | /// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with |
2489 | /// FunctionVarLocsBuilder instead of with intrinsics. |
2490 | static bool |
2491 | removeRedundantDbgLocsUsingBackwardScan(const BasicBlock *BB, |
2492 | FunctionVarLocsBuilder &FnVarLocs) { |
2493 | bool Changed = false; |
2494 | SmallDenseMap<DebugAggregate, BitVector> VariableDefinedBytes; |
2495 | // Scan over the entire block, not just over the instructions mapped by |
2496 | // FnVarLocs, because wedges in FnVarLocs may only be separated by debug |
2497 | // instructions. |
2498 | for (const Instruction &I : reverse(C: *BB)) { |
2499 | if (!isa<DbgVariableIntrinsic>(Val: I)) { |
2500 | // Sequence of consecutive defs ended. Clear map for the next one. |
2501 | VariableDefinedBytes.clear(); |
2502 | } |
2503 | |
2504 | auto HandleLocsForWedge = [&](auto *WedgePosition) { |
2505 | // Get the location defs that start just before this instruction. |
2506 | const auto *Locs = FnVarLocs.getWedge(Before: WedgePosition); |
2507 | if (!Locs) |
2508 | return; |
2509 | |
2510 | NumWedgesScanned++; |
2511 | bool ChangedThisWedge = false; |
2512 | // The new pruned set of defs, reversed because we're scanning backwards. |
2513 | SmallVector<VarLocInfo> NewDefsReversed; |
2514 | |
2515 | // Iterate over the existing defs in reverse. |
2516 | for (auto RIt = Locs->rbegin(), REnd = Locs->rend(); RIt != REnd; ++RIt) { |
2517 | NumDefsScanned++; |
2518 | DebugAggregate Aggr = |
2519 | getAggregate(FnVarLocs.getVariable(ID: RIt->VariableID)); |
2520 | uint64_t SizeInBits = Aggr.first->getSizeInBits().value_or(u: 0); |
2521 | uint64_t SizeInBytes = divideCeil(Numerator: SizeInBits, Denominator: 8); |
2522 | |
2523 | // Cutoff for large variables to prevent expensive bitvector operations. |
2524 | const uint64_t MaxSizeBytes = 2048; |
2525 | |
2526 | if (SizeInBytes == 0 || SizeInBytes > MaxSizeBytes) { |
2527 | // If the size is unknown (0) then keep this location def to be safe. |
2528 | // Do the same for defs of large variables, which would be expensive |
2529 | // to represent with a BitVector. |
2530 | NewDefsReversed.push_back(*RIt); |
2531 | continue; |
2532 | } |
2533 | |
2534 | // Only keep this location definition if it is not fully eclipsed by |
2535 | // other definitions in this wedge that come after it |
2536 | |
2537 | // Inert the bytes the location definition defines. |
2538 | auto InsertResult = |
2539 | VariableDefinedBytes.try_emplace(Key: Aggr, Args: BitVector(SizeInBytes)); |
2540 | bool FirstDefinition = InsertResult.second; |
2541 | BitVector &DefinedBytes = InsertResult.first->second; |
2542 | |
2543 | DIExpression::FragmentInfo Fragment = |
2544 | RIt->Expr->getFragmentInfo().value_or( |
2545 | DIExpression::FragmentInfo(SizeInBits, 0)); |
2546 | bool InvalidFragment = Fragment.endInBits() > SizeInBits; |
2547 | uint64_t StartInBytes = Fragment.startInBits() / 8; |
2548 | uint64_t EndInBytes = divideCeil(Numerator: Fragment.endInBits(), Denominator: 8); |
2549 | |
2550 | // If this defines any previously undefined bytes, keep it. |
2551 | if (FirstDefinition || InvalidFragment || |
2552 | DefinedBytes.find_first_unset_in(Begin: StartInBytes, End: EndInBytes) != -1) { |
2553 | if (!InvalidFragment) |
2554 | DefinedBytes.set(I: StartInBytes, E: EndInBytes); |
2555 | NewDefsReversed.push_back(*RIt); |
2556 | continue; |
2557 | } |
2558 | |
2559 | // Redundant def found: throw it away. Since the wedge of defs is being |
2560 | // rebuilt, doing nothing is the same as deleting an entry. |
2561 | ChangedThisWedge = true; |
2562 | NumDefsRemoved++; |
2563 | } |
2564 | |
2565 | // Un-reverse the defs and replace the wedge with the pruned version. |
2566 | if (ChangedThisWedge) { |
2567 | std::reverse(first: NewDefsReversed.begin(), last: NewDefsReversed.end()); |
2568 | FnVarLocs.setWedge(Before: WedgePosition, Wedge: std::move(NewDefsReversed)); |
2569 | NumWedgesChanged++; |
2570 | Changed = true; |
2571 | } |
2572 | }; |
2573 | HandleLocsForWedge(&I); |
2574 | for (DbgVariableRecord &DVR : reverse(C: filterDbgVars(R: I.getDbgRecordRange()))) |
2575 | HandleLocsForWedge(&DVR); |
2576 | } |
2577 | |
2578 | return Changed; |
2579 | } |
2580 | |
2581 | /// Remove redundant location defs using a forward scan. This can remove a |
2582 | /// location definition that is redundant due to indicating that a variable has |
2583 | /// the same value as is already being indicated by an earlier def. |
2584 | /// |
2585 | /// This implements removeRedundantDbgInstrsUsingForwardScan from |
2586 | /// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with |
2587 | /// FunctionVarLocsBuilder instead of with intrinsics |
2588 | static bool |
2589 | removeRedundantDbgLocsUsingForwardScan(const BasicBlock *BB, |
2590 | FunctionVarLocsBuilder &FnVarLocs) { |
2591 | bool Changed = false; |
2592 | DenseMap<DebugVariable, std::pair<RawLocationWrapper, DIExpression *>> |
2593 | VariableMap; |
2594 | |
2595 | // Scan over the entire block, not just over the instructions mapped by |
2596 | // FnVarLocs, because wedges in FnVarLocs may only be separated by debug |
2597 | // instructions. |
2598 | for (const Instruction &I : *BB) { |
2599 | // Get the defs that come just before this instruction. |
2600 | auto HandleLocsForWedge = [&](auto *WedgePosition) { |
2601 | const auto *Locs = FnVarLocs.getWedge(Before: WedgePosition); |
2602 | if (!Locs) |
2603 | return; |
2604 | |
2605 | NumWedgesScanned++; |
2606 | bool ChangedThisWedge = false; |
2607 | // The new pruned set of defs. |
2608 | SmallVector<VarLocInfo> NewDefs; |
2609 | |
2610 | // Iterate over the existing defs. |
2611 | for (const VarLocInfo &Loc : *Locs) { |
2612 | NumDefsScanned++; |
2613 | DebugVariable Key(FnVarLocs.getVariable(ID: Loc.VariableID).getVariable(), |
2614 | std::nullopt, Loc.DL.getInlinedAt()); |
2615 | auto VMI = VariableMap.find(Val: Key); |
2616 | |
2617 | // Update the map if we found a new value/expression describing the |
2618 | // variable, or if the variable wasn't mapped already. |
2619 | if (VMI == VariableMap.end() || VMI->second.first != Loc.Values || |
2620 | VMI->second.second != Loc.Expr) { |
2621 | VariableMap[Key] = {Loc.Values, Loc.Expr}; |
2622 | NewDefs.push_back(Elt: Loc); |
2623 | continue; |
2624 | } |
2625 | |
2626 | // Did not insert this Loc, which is the same as removing it. |
2627 | ChangedThisWedge = true; |
2628 | NumDefsRemoved++; |
2629 | } |
2630 | |
2631 | // Replace the existing wedge with the pruned version. |
2632 | if (ChangedThisWedge) { |
2633 | FnVarLocs.setWedge(Before: WedgePosition, Wedge: std::move(NewDefs)); |
2634 | NumWedgesChanged++; |
2635 | Changed = true; |
2636 | } |
2637 | }; |
2638 | |
2639 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) |
2640 | HandleLocsForWedge(&DVR); |
2641 | HandleLocsForWedge(&I); |
2642 | } |
2643 | |
2644 | return Changed; |
2645 | } |
2646 | |
2647 | static bool |
2648 | removeUndefDbgLocsFromEntryBlock(const BasicBlock *BB, |
2649 | FunctionVarLocsBuilder &FnVarLocs) { |
2650 | assert(BB->isEntryBlock()); |
2651 | // Do extra work to ensure that we remove semantically unimportant undefs. |
2652 | // |
2653 | // This is to work around the fact that SelectionDAG will hoist dbg.values |
2654 | // using argument values to the top of the entry block. That can move arg |
2655 | // dbg.values before undef and constant dbg.values which they previously |
2656 | // followed. The easiest thing to do is to just try to feed SelectionDAG |
2657 | // input it's happy with. |
2658 | // |
2659 | // Map of {Variable x: Fragments y} where the fragments y of variable x have |
2660 | // have at least one non-undef location defined already. Don't use directly, |
2661 | // instead call DefineBits and HasDefinedBits. |
2662 | SmallDenseMap<DebugAggregate, SmallDenseSet<DIExpression::FragmentInfo>> |
2663 | VarsWithDef; |
2664 | // Specify that V (a fragment of A) has a non-undef location. |
2665 | auto DefineBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) { |
2666 | VarsWithDef[A].insert(V: V.getFragmentOrDefault()); |
2667 | }; |
2668 | // Return true if a non-undef location has been defined for V (a fragment of |
2669 | // A). Doesn't imply that the location is currently non-undef, just that a |
2670 | // non-undef location has been seen previously. |
2671 | auto HasDefinedBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) { |
2672 | auto FragsIt = VarsWithDef.find(Val: A); |
2673 | if (FragsIt == VarsWithDef.end()) |
2674 | return false; |
2675 | return llvm::any_of(Range&: FragsIt->second, P: [V](auto Frag) { |
2676 | return DIExpression::fragmentsOverlap(Frag, V.getFragmentOrDefault()); |
2677 | }); |
2678 | }; |
2679 | |
2680 | bool Changed = false; |
2681 | DenseMap<DebugVariable, std::pair<Value *, DIExpression *>> VariableMap; |
2682 | |
2683 | // Scan over the entire block, not just over the instructions mapped by |
2684 | // FnVarLocs, because wedges in FnVarLocs may only be separated by debug |
2685 | // instructions. |
2686 | for (const Instruction &I : *BB) { |
2687 | // Get the defs that come just before this instruction. |
2688 | auto HandleLocsForWedge = [&](auto *WedgePosition) { |
2689 | const auto *Locs = FnVarLocs.getWedge(Before: WedgePosition); |
2690 | if (!Locs) |
2691 | return; |
2692 | |
2693 | NumWedgesScanned++; |
2694 | bool ChangedThisWedge = false; |
2695 | // The new pruned set of defs. |
2696 | SmallVector<VarLocInfo> NewDefs; |
2697 | |
2698 | // Iterate over the existing defs. |
2699 | for (const VarLocInfo &Loc : *Locs) { |
2700 | NumDefsScanned++; |
2701 | DebugAggregate Aggr{FnVarLocs.getVariable(ID: Loc.VariableID).getVariable(), |
2702 | Loc.DL.getInlinedAt()}; |
2703 | DebugVariable Var = FnVarLocs.getVariable(ID: Loc.VariableID); |
2704 | |
2705 | // Remove undef entries that are encountered before any non-undef |
2706 | // intrinsics from the entry block. |
2707 | if (Loc.Values.isKillLocation(Expression: Loc.Expr) && !HasDefinedBits(Aggr, Var)) { |
2708 | // Did not insert this Loc, which is the same as removing it. |
2709 | NumDefsRemoved++; |
2710 | ChangedThisWedge = true; |
2711 | continue; |
2712 | } |
2713 | |
2714 | DefineBits(Aggr, Var); |
2715 | NewDefs.push_back(Elt: Loc); |
2716 | } |
2717 | |
2718 | // Replace the existing wedge with the pruned version. |
2719 | if (ChangedThisWedge) { |
2720 | FnVarLocs.setWedge(Before: WedgePosition, Wedge: std::move(NewDefs)); |
2721 | NumWedgesChanged++; |
2722 | Changed = true; |
2723 | } |
2724 | }; |
2725 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) |
2726 | HandleLocsForWedge(&DVR); |
2727 | HandleLocsForWedge(&I); |
2728 | } |
2729 | |
2730 | return Changed; |
2731 | } |
2732 | |
2733 | static bool removeRedundantDbgLocs(const BasicBlock *BB, |
2734 | FunctionVarLocsBuilder &FnVarLocs) { |
2735 | bool MadeChanges = false; |
2736 | MadeChanges |= removeRedundantDbgLocsUsingBackwardScan(BB, FnVarLocs); |
2737 | if (BB->isEntryBlock()) |
2738 | MadeChanges |= removeUndefDbgLocsFromEntryBlock(BB, FnVarLocs); |
2739 | MadeChanges |= removeRedundantDbgLocsUsingForwardScan(BB, FnVarLocs); |
2740 | |
2741 | if (MadeChanges) |
2742 | LLVM_DEBUG(dbgs() << "Removed redundant dbg locs from: " << BB->getName() |
2743 | << "\n" ); |
2744 | return MadeChanges; |
2745 | } |
2746 | |
2747 | static DenseSet<DebugAggregate> findVarsWithStackSlot(Function &Fn) { |
2748 | DenseSet<DebugAggregate> Result; |
2749 | for (auto &BB : Fn) { |
2750 | for (auto &I : BB) { |
2751 | // Any variable linked to an instruction is considered |
2752 | // interesting. Ideally we only need to check Allocas, however, a |
2753 | // DIAssignID might get dropped from an alloca but not stores. In that |
2754 | // case, we need to consider the variable interesting for NFC behaviour |
2755 | // with this change. TODO: Consider only looking at allocas. |
2756 | for (DbgAssignIntrinsic *DAI : at::getAssignmentMarkers(Inst: &I)) { |
2757 | Result.insert(V: {DAI->getVariable(), DAI->getDebugLoc().getInlinedAt()}); |
2758 | } |
2759 | for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Inst: &I)) { |
2760 | Result.insert(V: {DVR->getVariable(), DVR->getDebugLoc().getInlinedAt()}); |
2761 | } |
2762 | } |
2763 | } |
2764 | return Result; |
2765 | } |
2766 | |
2767 | static void analyzeFunction(Function &Fn, const DataLayout &Layout, |
2768 | FunctionVarLocsBuilder *FnVarLocs) { |
2769 | // The analysis will generate location definitions for all variables, but we |
2770 | // only need to perform a dataflow on the set of variables which have a stack |
2771 | // slot. Find those now. |
2772 | DenseSet<DebugAggregate> VarsWithStackSlot = findVarsWithStackSlot(Fn); |
2773 | |
2774 | bool Changed = false; |
2775 | |
2776 | // Use a scope block to clean up AssignmentTrackingLowering before running |
2777 | // MemLocFragmentFill to reduce peak memory consumption. |
2778 | { |
2779 | AssignmentTrackingLowering Pass(Fn, Layout, &VarsWithStackSlot); |
2780 | Changed = Pass.run(FnVarLocsBuilder: FnVarLocs); |
2781 | } |
2782 | |
2783 | if (Changed) { |
2784 | MemLocFragmentFill Pass(Fn, &VarsWithStackSlot, |
2785 | shouldCoalesceFragments(F&: Fn)); |
2786 | Pass.run(FnVarLocs); |
2787 | |
2788 | // Remove redundant entries. As well as reducing memory consumption and |
2789 | // avoiding waiting cycles later by burning some now, this has another |
2790 | // important job. That is to work around some SelectionDAG quirks. See |
2791 | // removeRedundantDbgLocsUsingForwardScan comments for more info on that. |
2792 | for (auto &BB : Fn) |
2793 | removeRedundantDbgLocs(BB: &BB, FnVarLocs&: *FnVarLocs); |
2794 | } |
2795 | } |
2796 | |
2797 | FunctionVarLocs |
2798 | DebugAssignmentTrackingAnalysis::run(Function &F, |
2799 | FunctionAnalysisManager &FAM) { |
2800 | if (!isAssignmentTrackingEnabled(M: *F.getParent())) |
2801 | return FunctionVarLocs(); |
2802 | |
2803 | auto &DL = F.getDataLayout(); |
2804 | |
2805 | FunctionVarLocsBuilder Builder; |
2806 | analyzeFunction(Fn&: F, Layout: DL, FnVarLocs: &Builder); |
2807 | |
2808 | // Save these results. |
2809 | FunctionVarLocs Results; |
2810 | Results.init(Builder); |
2811 | return Results; |
2812 | } |
2813 | |
2814 | AnalysisKey DebugAssignmentTrackingAnalysis::Key; |
2815 | |
2816 | PreservedAnalyses |
2817 | DebugAssignmentTrackingPrinterPass::run(Function &F, |
2818 | FunctionAnalysisManager &FAM) { |
2819 | FAM.getResult<DebugAssignmentTrackingAnalysis>(IR&: F).print(OS, Fn: F); |
2820 | return PreservedAnalyses::all(); |
2821 | } |
2822 | |
2823 | bool AssignmentTrackingAnalysis::runOnFunction(Function &F) { |
2824 | if (!isAssignmentTrackingEnabled(M: *F.getParent())) |
2825 | return false; |
2826 | |
2827 | LLVM_DEBUG(dbgs() << "AssignmentTrackingAnalysis run on " << F.getName() |
2828 | << "\n" ); |
2829 | auto DL = std::make_unique<DataLayout>(args: F.getParent()); |
2830 | |
2831 | // Clear previous results. |
2832 | Results->clear(); |
2833 | |
2834 | FunctionVarLocsBuilder Builder; |
2835 | analyzeFunction(Fn&: F, Layout: *DL.get(), FnVarLocs: &Builder); |
2836 | |
2837 | // Save these results. |
2838 | Results->init(Builder); |
2839 | |
2840 | if (PrintResults && isFunctionInPrintList(FunctionName: F.getName())) |
2841 | Results->print(OS&: errs(), Fn: F); |
2842 | |
2843 | // Return false because this pass does not modify the function. |
2844 | return false; |
2845 | } |
2846 | |
2847 | AssignmentTrackingAnalysis::AssignmentTrackingAnalysis() |
2848 | : FunctionPass(ID), Results(std::make_unique<FunctionVarLocs>()) {} |
2849 | |
2850 | char AssignmentTrackingAnalysis::ID = 0; |
2851 | |
2852 | INITIALIZE_PASS(AssignmentTrackingAnalysis, DEBUG_TYPE, |
2853 | "Assignment Tracking Analysis" , false, true) |
2854 | |