//===-- AssignmentTrackingAnalysis.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "LiveDebugValues/LiveDebugValues.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugProgramInstruction.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PrintPasses.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <assert.h>
#include <cstdint>
#include <optional>
#include <queue>
#include <sstream>
#include <unordered_map>

using namespace llvm;
#define DEBUG_TYPE "debug-ata"

STATISTIC(NumDefsScanned, "Number of dbg locs that get scanned for removal");
STATISTIC(NumDefsRemoved, "Number of dbg locs removed");
STATISTIC(NumWedgesScanned, "Number of dbg wedges scanned");
STATISTIC(NumWedgesChanged, "Number of dbg wedges changed");

static cl::opt<unsigned>
    MaxNumBlocks("debug-ata-max-blocks", cl::init(10000),
                 cl::desc("Maximum num basic blocks before debug info dropped"),
                 cl::Hidden);
/// Option for debugging the pass, determines if the memory location fragment
/// filling happens after generating the variable locations.
static cl::opt<bool> EnableMemLocFragFill("mem-loc-frag-fill", cl::init(true),
                                          cl::Hidden);
/// Print the results of the analysis. Respects -filter-print-funcs.
static cl::opt<bool> PrintResults("print-debug-ata", cl::init(false),
                                  cl::Hidden);

/// Coalesce adjacent dbg locs describing memory locations that have contiguous
/// fragments. This reduces the cost of LiveDebugValues which does SSA
/// construction for each explicitly stated variable fragment.
static cl::opt<cl::boolOrDefault>
    CoalesceAdjacentFragmentsOpt("debug-ata-coalesce-frags", cl::Hidden);
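// For illustration (not from the upstream comments): two adjacent fragments
// of one variable that describe the same memory base, e.g.
//   Fragment(0, 32)  of the variable -> deref of %slot
//   Fragment(32, 32) of the variable -> deref of %slot
// can be coalesced into a single Fragment(0, 64) memory location, so
// LiveDebugValues performs SSA construction once rather than twice.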

// Implicit conversions are disabled for enum class types, so unfortunately we
// need to create a DenseMapInfo wrapper around the specified underlying type.
template <> struct llvm::DenseMapInfo<VariableID> {
  using Wrapped = DenseMapInfo<unsigned>;
  static inline VariableID getEmptyKey() {
    return static_cast<VariableID>(Wrapped::getEmptyKey());
  }
  static inline VariableID getTombstoneKey() {
    return static_cast<VariableID>(Wrapped::getTombstoneKey());
  }
  static unsigned getHashValue(const VariableID &Val) {
    return Wrapped::getHashValue(static_cast<unsigned>(Val));
  }
  static bool isEqual(const VariableID &LHS, const VariableID &RHS) {
    return LHS == RHS;
  }
};
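// With the specialization above, VariableID can key DenseMap-based containers
// directly; a minimal illustrative sketch:
//   DenseMap<VariableID, SmallVector<VariableID>> Overlaps;
//   Overlaps[static_cast<VariableID>(1)].push_back(static_cast<VariableID>(2));
// (The OverlapMap typedef further down in this file relies on exactly this.)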

using VarLocInsertPt = PointerUnion<const Instruction *, const DbgRecord *>;

namespace std {
template <> struct hash<VarLocInsertPt> {
  using argument_type = VarLocInsertPt;
  using result_type = std::size_t;

  result_type operator()(const argument_type &Arg) const {
    return std::hash<void *>()(Arg.getOpaqueValue());
  }
};
} // namespace std
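// The std::hash specialization above is what allows standard-library hash
// containers to be keyed on insertion points; for example (illustrative),
// the builder below stores its wedges as:
//   std::unordered_map<VarLocInsertPt, SmallVector<VarLocInfo>> Wedges;
//   Wedges[InsertPt].push_back(Loc);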

/// Helper class to build FunctionVarLocs, since that class isn't easy to
/// modify. TODO: There's not a great deal of value in the split, it could be
/// worth merging the two classes.
class FunctionVarLocsBuilder {
  friend FunctionVarLocs;
  UniqueVector<DebugVariable> Variables;
  // Use an unordered_map so we don't invalidate iterators after
  // insert/modifications.
  std::unordered_map<VarLocInsertPt, SmallVector<VarLocInfo>> VarLocsBeforeInst;

  SmallVector<VarLocInfo> SingleLocVars;

public:
  unsigned getNumVariables() const { return Variables.size(); }

  /// Find or insert \p V and return the ID.
  VariableID insertVariable(DebugVariable V) {
    return static_cast<VariableID>(Variables.insert(V));
  }

  /// Get a variable from its \p ID.
  const DebugVariable &getVariable(VariableID ID) const {
    return Variables[static_cast<unsigned>(ID)];
  }

  /// Return ptr to wedge of defs or nullptr if no defs come just before \p
  /// Before.
  const SmallVectorImpl<VarLocInfo> *getWedge(VarLocInsertPt Before) const {
    auto R = VarLocsBeforeInst.find(Before);
    if (R == VarLocsBeforeInst.end())
      return nullptr;
    return &R->second;
  }

  /// Replace the defs that come just before \p Before with \p Wedge.
  void setWedge(VarLocInsertPt Before, SmallVector<VarLocInfo> &&Wedge) {
    VarLocsBeforeInst[Before] = std::move(Wedge);
  }

  /// Add a def for a variable that is valid for its lifetime.
  void addSingleLocVar(DebugVariable Var, DIExpression *Expr, DebugLoc DL,
                       RawLocationWrapper R) {
    VarLocInfo VarLoc;
    VarLoc.VariableID = insertVariable(Var);
    VarLoc.Expr = Expr;
    VarLoc.DL = DL;
    VarLoc.Values = R;
    SingleLocVars.emplace_back(VarLoc);
  }

  /// Add a def to the wedge of defs just before \p Before.
  void addVarLoc(VarLocInsertPt Before, DebugVariable Var, DIExpression *Expr,
                 DebugLoc DL, RawLocationWrapper R) {
    VarLocInfo VarLoc;
    VarLoc.VariableID = insertVariable(Var);
    VarLoc.Expr = Expr;
    VarLoc.DL = DL;
    VarLoc.Values = R;
    VarLocsBeforeInst[Before].emplace_back(VarLoc);
  }
};

void FunctionVarLocs::print(raw_ostream &OS, const Function &Fn) const {
  // Print the variable table first. TODO: Sorting by variable could make the
  // output more stable?
  unsigned Counter = -1;
  OS << "=== Variables ===\n";
  for (const DebugVariable &V : Variables) {
    ++Counter;
    // Skip first entry because it is a dummy entry.
    if (Counter == 0) {
      continue;
    }
    OS << "[" << Counter << "] " << V.getVariable()->getName();
    if (auto F = V.getFragment())
      OS << " bits [" << F->OffsetInBits << ", "
         << F->OffsetInBits + F->SizeInBits << ")";
    if (const auto *IA = V.getInlinedAt())
      OS << " inlined-at " << *IA;
    OS << "\n";
  }

  auto PrintLoc = [&OS](const VarLocInfo &Loc) {
    OS << "DEF Var=[" << (unsigned)Loc.VariableID << "]"
       << " Expr=" << *Loc.Expr << " Values=(";
    for (auto *Op : Loc.Values.location_ops()) {
      OS << Op->getName() << " ";
    }
    OS << ")\n";
  };

  // Print the single location variables.
  OS << "=== Single location vars ===\n";
  for (auto It = single_locs_begin(), End = single_locs_end(); It != End;
       ++It) {
    PrintLoc(*It);
  }

  // Print the non-single-location defs in line with IR.
  OS << "=== In-line variable defs ===";
  for (const BasicBlock &BB : Fn) {
    OS << "\n" << BB.getName() << ":\n";
    for (const Instruction &I : BB) {
      for (auto It = locs_begin(&I), End = locs_end(&I); It != End; ++It) {
        PrintLoc(*It);
      }
      OS << I << "\n";
    }
  }
}

void FunctionVarLocs::init(FunctionVarLocsBuilder &Builder) {
  // Add the single-location variables first.
  for (const auto &VarLoc : Builder.SingleLocVars)
    VarLocRecords.emplace_back(VarLoc);
  // Mark the end of the section.
  SingleVarLocEnd = VarLocRecords.size();

  // Insert a contiguous block of VarLocInfos for each instruction, mapping it
  // to the start and end position in the vector with VarLocsBeforeInst. This
  // block includes VarLocs for any DbgVariableRecords attached to that
  // instruction.
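  // For example (illustrative): if instruction I carries two attached
  // DbgVariableRecords whose wedges hold defs {a} and {b}, and a wedge {c} is
  // keyed on I itself, then [a, b, c] is appended to VarLocRecords and
  // VarLocsBeforeInst[I] records that [BlockStart, BlockStart + 3) range.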
  for (auto &P : Builder.VarLocsBeforeInst) {
    // Process VarLocs attached to a DbgRecord alongside their marker
    // Instruction.
    if (isa<const DbgRecord *>(P.first))
      continue;
    const Instruction *I = cast<const Instruction *>(P.first);
    unsigned BlockStart = VarLocRecords.size();
    // Any VarLocInfos attached to a DbgRecord should now be remapped to their
    // marker Instruction, in order of DbgRecord appearance and prior to any
    // VarLocInfos attached directly to that instruction.
    for (const DbgVariableRecord &DVR : filterDbgVars(I->getDbgRecordRange())) {
      // Even though DVR defines a variable location, VarLocsBeforeInst can
      // still be empty if that VarLoc was redundant.
      auto It = Builder.VarLocsBeforeInst.find(&DVR);
      if (It == Builder.VarLocsBeforeInst.end())
        continue;
      for (const VarLocInfo &VarLoc : It->second)
        VarLocRecords.emplace_back(VarLoc);
    }
    for (const VarLocInfo &VarLoc : P.second)
      VarLocRecords.emplace_back(VarLoc);
    unsigned BlockEnd = VarLocRecords.size();
    // Record the start and end indices.
    if (BlockEnd != BlockStart)
      VarLocsBeforeInst[I] = {BlockStart, BlockEnd};
  }

  // Copy the Variables vector from the builder's UniqueVector.
  assert(Variables.empty() && "Expect clear before init");
  // UniqueVectors IDs are one-based (which means the VarLocInfo VarID values
  // are one-based) so reserve an extra and insert a dummy.
  Variables.reserve(Builder.Variables.size() + 1);
  Variables.push_back(DebugVariable(nullptr, std::nullopt, nullptr));
  Variables.append(Builder.Variables.begin(), Builder.Variables.end());
}

void FunctionVarLocs::clear() {
  Variables.clear();
  VarLocRecords.clear();
  VarLocsBeforeInst.clear();
  SingleVarLocEnd = 0;
}

/// Walk backwards along constant GEPs and bitcasts to the base storage from \p
/// Start as far as possible. Prepend \p Expression with the offset and append
/// it with a DW_OP_deref that has been implicit until now. Returns the
/// walked-to value and modified expression.
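/// For example (illustrative): starting from
///   %p = getelementptr inbounds i8, ptr %alloca, i64 4
/// with an empty expression, this walks to %alloca and produces the
/// expression (DW_OP_plus_uconst 4, DW_OP_deref).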
static std::pair<Value *, DIExpression *>
walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start,
                                  DIExpression *Expression) {
  APInt OffsetInBytes(DL.getTypeSizeInBits(Start->getType()), false);
  Value *End =
      Start->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetInBytes);
  SmallVector<uint64_t, 3> Ops;
  if (OffsetInBytes.getBoolValue()) {
    Ops = {dwarf::DW_OP_plus_uconst, OffsetInBytes.getZExtValue()};
    Expression = DIExpression::prependOpcodes(
        Expression, Ops, /*StackValue=*/false, /*EntryValue=*/false);
  }
  Expression = DIExpression::append(Expression, {dwarf::DW_OP_deref});
  return {End, Expression};
}

/// Extract the offset used in \p DIExpr. Returns std::nullopt if the
/// expression doesn't explicitly describe a memory location with DW_OP_deref
/// or if the expression is too complex to interpret.
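/// Shapes this accepts, with the returned offset (illustrative examples):
///   (DW_OP_plus_uconst 8, DW_OP_deref)          ->  8
///   (DW_OP_constu 4, DW_OP_minus, DW_OP_deref)  -> -4
///   (DW_OP_deref, DW_OP_LLVM_fragment 0 32)     ->  0
/// Trailing operations other than a single fragment after the deref yield
/// std::nullopt.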
static std::optional<int64_t>
getDerefOffsetInBytes(const DIExpression *DIExpr) {
  int64_t Offset = 0;
  const unsigned NumElements = DIExpr->getNumElements();
  const auto Elements = DIExpr->getElements();
  unsigned ExpectedDerefIdx = 0;
  // Extract the offset.
  if (NumElements > 2 && Elements[0] == dwarf::DW_OP_plus_uconst) {
    Offset = Elements[1];
    ExpectedDerefIdx = 2;
  } else if (NumElements > 3 && Elements[0] == dwarf::DW_OP_constu) {
    ExpectedDerefIdx = 3;
    if (Elements[2] == dwarf::DW_OP_plus)
      Offset = Elements[1];
    else if (Elements[2] == dwarf::DW_OP_minus)
      Offset = -Elements[1];
    else
      return std::nullopt;
  }

  // If that's all there is it means there's no deref.
  if (ExpectedDerefIdx >= NumElements)
    return std::nullopt;

  // Check the next element is DW_OP_deref - otherwise this is too complex or
  // isn't a deref expression.
  if (Elements[ExpectedDerefIdx] != dwarf::DW_OP_deref)
    return std::nullopt;

  // Check the final operation is either the DW_OP_deref or is a fragment.
  if (NumElements == ExpectedDerefIdx + 1)
    return Offset; // Ends with deref.
  unsigned ExpectedFragFirstIdx = ExpectedDerefIdx + 1;
  unsigned ExpectedFragFinalIdx = ExpectedFragFirstIdx + 2;
  if (NumElements == ExpectedFragFinalIdx + 1 &&
      Elements[ExpectedFragFirstIdx] == dwarf::DW_OP_LLVM_fragment)
    return Offset; // Ends with deref + fragment.

  // Don't bother trying to interpret anything more complex.
  return std::nullopt;
}

/// A whole (unfragmented) source variable.
using DebugAggregate = std::pair<const DILocalVariable *, const DILocation *>;
static DebugAggregate getAggregate(const DebugVariable &Var) {
  return DebugAggregate(Var.getVariable(), Var.getInlinedAt());
}

static bool shouldCoalesceFragments(Function &F) {
  // Enabling fragment coalescing reduces compiler run time when instruction
  // referencing is enabled. However, it may cause LiveDebugVariables to create
  // incorrect locations. Since instruction-referencing mode effectively
  // bypasses LiveDebugVariables we only enable coalescing if the cl::opt flag
  // has not been explicitly set and instruction-referencing is turned on.
  switch (CoalesceAdjacentFragmentsOpt) {
  case cl::boolOrDefault::BOU_UNSET:
    return debuginfoShouldUseDebugInstrRef(F.getParent()->getTargetTriple());
  case cl::boolOrDefault::BOU_TRUE:
    return true;
  case cl::boolOrDefault::BOU_FALSE:
    return false;
  }
  llvm_unreachable("Unknown boolOrDefault value");
}

namespace {
/// In dwarf emission, the following sequence
///   1. dbg.value ... Fragment(0, 64)
///   2. dbg.value ... Fragment(0, 32)
/// effectively sets Fragment(32, 32) to undef (each def sets all bits not in
/// the intersection of the fragments to having "no location"). This makes
/// sense for implicit location values because splitting the computed values
/// could be troublesome, and is probably quite uncommon. When we convert
/// dbg.assigns to dbg.value+deref this kind of thing is common, and describing
/// a location (memory) rather than a value means we don't need to worry about
/// splitting any values, so we try to recover the rest of the fragment
/// location here.
/// This class performs a(nother) dataflow analysis over the function, adding
/// variable locations so that any bits of a variable with a memory location
/// have that location explicitly reinstated at each subsequent variable
/// location definition that doesn't overwrite those bits. i.e. after a
/// variable location def, insert new defs for the memory location with
/// fragments for the difference of "all bits currently in memory" and "the
/// fragment of the second def".
class MemLocFragmentFill {
  Function &Fn;
  FunctionVarLocsBuilder *FnVarLocs;
  const DenseSet<DebugAggregate> *VarsWithStackSlot;
  bool CoalesceAdjacentFragments;

  // 0 = no memory location.
  using BaseAddress = unsigned;
  using OffsetInBitsTy = unsigned;
  using FragTraits = IntervalMapHalfOpenInfo<OffsetInBitsTy>;
  using FragsInMemMap = IntervalMap<
      OffsetInBitsTy, BaseAddress,
      IntervalMapImpl::NodeSizer<OffsetInBitsTy, BaseAddress>::LeafSize,
      FragTraits>;
  FragsInMemMap::Allocator IntervalMapAlloc;
  using VarFragMap = DenseMap<unsigned, FragsInMemMap>;

  /// IDs for memory location base addresses in maps. Use 0 to indicate that
  /// there's no memory location.
  UniqueVector<RawLocationWrapper> Bases;
  UniqueVector<DebugAggregate> Aggregates;
  DenseMap<const BasicBlock *, VarFragMap> LiveIn;
  DenseMap<const BasicBlock *, VarFragMap> LiveOut;

  struct FragMemLoc {
    unsigned Var;
    unsigned Base;
    unsigned OffsetInBits;
    unsigned SizeInBits;
    DebugLoc DL;
  };
  using InsertMap = MapVector<VarLocInsertPt, SmallVector<FragMemLoc>>;

  /// BBInsertBeforeMap holds a description for the set of location defs to be
  /// inserted after the analysis is complete. It is updated during the
  /// dataflow and the entry for a block is CLEARED each time it is
  /// (re-)visited. After the dataflow is complete, each block entry will
  /// contain the set of defs calculated during the final (fixed-point)
  /// iteration.
  DenseMap<const BasicBlock *, InsertMap> BBInsertBeforeMap;

  static bool intervalMapsAreEqual(const FragsInMemMap &A,
                                   const FragsInMemMap &B) {
    auto AIt = A.begin(), AEnd = A.end();
    auto BIt = B.begin(), BEnd = B.end();
    for (; AIt != AEnd; ++AIt, ++BIt) {
      if (BIt == BEnd)
        return false; // B has fewer elements than A.
      if (AIt.start() != BIt.start() || AIt.stop() != BIt.stop())
        return false; // Interval is different.
      if (*AIt != *BIt)
        return false; // Value at interval is different.
    }
    // AIt == AEnd. Check BIt is also now at end.
    return BIt == BEnd;
  }

  static bool varFragMapsAreEqual(const VarFragMap &A, const VarFragMap &B) {
    if (A.size() != B.size())
      return false;
    for (const auto &APair : A) {
      auto BIt = B.find(APair.first);
      if (BIt == B.end())
        return false;
      if (!intervalMapsAreEqual(APair.second, BIt->second))
        return false;
    }
    return true;
  }

  /// Return a string for the value that \p BaseID represents.
  std::string toString(unsigned BaseID) {
    if (BaseID)
      return Bases[BaseID].getVariableLocationOp(0)->getName().str();
    else
      return "None";
  }

  /// Format string describing an FragsInMemMap (IntervalMap) interval.
  std::string toString(FragsInMemMap::const_iterator It, bool Newline = true) {
    std::string String;
    std::stringstream S(String);
    if (It.valid()) {
      S << "[" << It.start() << ", " << It.stop()
        << "): " << toString(It.value());
    } else {
      S << "invalid iterator (end)";
    }
    if (Newline)
      S << "\n";
    return S.str();
  };

  FragsInMemMap meetFragments(const FragsInMemMap &A, const FragsInMemMap &B) {
    FragsInMemMap Result(IntervalMapAlloc);
    for (auto AIt = A.begin(), AEnd = A.end(); AIt != AEnd; ++AIt) {
      LLVM_DEBUG(dbgs() << "a " << toString(AIt));
      // This is basically copied from process() and inverted (process is
      // performing something like a union whereas this is more of an
      // intersect).

      // There's no work to do if interval `a` overlaps no fragments in map `B`.
      if (!B.overlaps(AIt.start(), AIt.stop()))
        continue;

      // Does StartBit intersect an existing fragment?
      auto FirstOverlap = B.find(AIt.start());
      assert(FirstOverlap != B.end());
      bool IntersectStart = FirstOverlap.start() < AIt.start();
      LLVM_DEBUG(dbgs() << "- FirstOverlap " << toString(FirstOverlap, false)
                        << ", IntersectStart: " << IntersectStart << "\n");

      // Does EndBit intersect an existing fragment?
      auto LastOverlap = B.find(AIt.stop());
      bool IntersectEnd =
          LastOverlap != B.end() && LastOverlap.start() < AIt.stop();
      LLVM_DEBUG(dbgs() << "- LastOverlap " << toString(LastOverlap, false)
                        << ", IntersectEnd: " << IntersectEnd << "\n");

      // Check if both ends of `a` intersect the same interval `b`.
      if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
        // Insert `a` (`a` is contained in `b`) if the values match.
        //   [ a ]
        // [ - b - ]
        // -
        //   [ r ]
        LLVM_DEBUG(dbgs() << "- a is contained within "
                          << toString(FirstOverlap));
        if (*AIt && *AIt == *FirstOverlap)
          Result.insert(AIt.start(), AIt.stop(), *AIt);
      } else {
        // There's an overlap but `a` is not fully contained within
        // `b`. Shorten any end-point intersections.
        //     [ - a - ]
        // [ - b - ]
        // -
        //     [ r ]
        auto Next = FirstOverlap;
        if (IntersectStart) {
          LLVM_DEBUG(dbgs() << "- insert intersection of a and "
                            << toString(FirstOverlap));
          if (*AIt && *AIt == *FirstOverlap)
            Result.insert(AIt.start(), FirstOverlap.stop(), *AIt);
          ++Next;
        }
        // [ - a - ]
        //     [ - b - ]
        // -
        //     [ r ]
        if (IntersectEnd) {
          LLVM_DEBUG(dbgs() << "- insert intersection of a and "
                            << toString(LastOverlap));
          if (*AIt && *AIt == *LastOverlap)
            Result.insert(LastOverlap.start(), AIt.stop(), *AIt);
        }

        // Insert all intervals in map `B` that are contained within interval
        // `a` where the values match.
        // [ -  - a -  - ]
        // [ b1 ]  [ b2 ]
        // -
        // [ r1 ]  [ r2 ]
        while (Next != B.end() && Next.start() < AIt.stop() &&
               Next.stop() <= AIt.stop()) {
          LLVM_DEBUG(dbgs()
                     << "- insert intersection of a and " << toString(Next));
          if (*AIt && *AIt == *Next)
            Result.insert(Next.start(), Next.stop(), *Next);
          ++Next;
        }
      }
    }
    return Result;
  }

  /// Meet \p A and \p B, storing the result in \p A.
  void meetVars(VarFragMap &A, const VarFragMap &B) {
    // Meet A and B.
    //
    // Result = meet(a, b) for a in A, b in B where Var(a) == Var(b)
    for (auto It = A.begin(), End = A.end(); It != End; ++It) {
      unsigned AVar = It->first;
      FragsInMemMap &AFrags = It->second;
      auto BIt = B.find(AVar);
      if (BIt == B.end()) {
        A.erase(It);
        continue; // Var has no bits defined in B.
      }
      LLVM_DEBUG(dbgs() << "meet fragment maps for "
                        << Aggregates[AVar].first->getName() << "\n");
      AFrags = meetFragments(AFrags, BIt->second);
    }
  }

  bool meet(const BasicBlock &BB,
            const SmallPtrSet<BasicBlock *, 16> &Visited) {
    LLVM_DEBUG(dbgs() << "meet block info from preds of " << BB.getName()
                      << "\n");

    VarFragMap BBLiveIn;
    bool FirstMeet = true;
    // LiveIn locs for BB is the meet of the already-processed preds' LiveOut
    // locs.
    for (const BasicBlock *Pred : predecessors(&BB)) {
      // Ignore preds that haven't been processed yet. This is essentially the
      // same as initialising all variables to implicit top value (⊤) which is
      // the identity value for the meet operation.
      if (!Visited.count(Pred))
        continue;

      auto PredLiveOut = LiveOut.find(Pred);
      assert(PredLiveOut != LiveOut.end());

      if (FirstMeet) {
        LLVM_DEBUG(dbgs() << "BBLiveIn = " << Pred->getName() << "\n");
        BBLiveIn = PredLiveOut->second;
        FirstMeet = false;
      } else {
        LLVM_DEBUG(dbgs() << "BBLiveIn = meet BBLiveIn, " << Pred->getName()
                          << "\n");
        meetVars(BBLiveIn, PredLiveOut->second);
      }

      // An empty set is ⊥ for the intersect-like meet operation. If we've
      // already got ⊥ there's no need to run the code - we know the result is
      // ⊥ since `meet(a, ⊥) = ⊥`.
      if (BBLiveIn.size() == 0)
        break;
    }

    // If there's no LiveIn entry for the block yet, add it.
    auto [CurrentLiveInEntry, Inserted] = LiveIn.try_emplace(&BB);
    if (Inserted) {
      LLVM_DEBUG(dbgs() << "change=true (first) on meet on " << BB.getName()
                        << "\n");
      CurrentLiveInEntry->second = std::move(BBLiveIn);
      return /*Changed=*/true;
    }

    // If the LiveIn set has changed (expensive check) update it and return
    // true.
    if (!varFragMapsAreEqual(BBLiveIn, CurrentLiveInEntry->second)) {
      LLVM_DEBUG(dbgs() << "change=true on meet on " << BB.getName() << "\n");
      CurrentLiveInEntry->second = std::move(BBLiveIn);
      return /*Changed=*/true;
    }

    LLVM_DEBUG(dbgs() << "change=false on meet on " << BB.getName() << "\n");
    return /*Changed=*/false;
  }

  void insertMemLoc(BasicBlock &BB, VarLocInsertPt Before, unsigned Var,
                    unsigned StartBit, unsigned EndBit, unsigned Base,
                    DebugLoc DL) {
    assert(StartBit < EndBit && "Cannot create fragment of size <= 0");
    if (!Base)
      return;
    FragMemLoc Loc;
    Loc.Var = Var;
    Loc.OffsetInBits = StartBit;
    Loc.SizeInBits = EndBit - StartBit;
    assert(Base && "Expected a non-zero ID for Base address");
    Loc.Base = Base;
    Loc.DL = DL;
    BBInsertBeforeMap[&BB][Before].push_back(Loc);
    LLVM_DEBUG(dbgs() << "Add mem def for " << Aggregates[Var].first->getName()
                      << " bits [" << StartBit << ", " << EndBit << ")\n");
  }

  /// Inserts a new dbg def if the interval found when looking up \p StartBit
  /// in \p FragMap starts before \p StartBit or ends after \p EndBit (which
  /// indicates - assuming StartBit->EndBit has just been inserted - that the
  /// slice has been coalesced in the map).
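  /// For example (illustrative): if the map already held [32, 64) -> Base and
  /// [0, 32) -> Base has just been inserted, IntervalMap coalesces them into
  /// [0, 64), so a single loc def covering bits [0, 64) is emitted here.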
  void coalesceFragments(BasicBlock &BB, VarLocInsertPt Before, unsigned Var,
                         unsigned StartBit, unsigned EndBit, unsigned Base,
                         DebugLoc DL, const FragsInMemMap &FragMap) {
    if (!CoalesceAdjacentFragments)
      return;
    // We've inserted the location into the map. The map will have coalesced
    // adjacent intervals (variable fragments) that describe the same memory
    // location. Use this knowledge to insert a debug location that describes
    // that coalesced fragment. This may eclipse other locs we've just
    // inserted. This is okay as redundant locs will be cleaned up later.
    auto CoalescedFrag = FragMap.find(StartBit);
    // Bail if no coalescing has taken place.
    if (CoalescedFrag.start() == StartBit && CoalescedFrag.stop() == EndBit)
      return;

    LLVM_DEBUG(dbgs() << "- Insert loc for bits " << CoalescedFrag.start()
                      << " to " << CoalescedFrag.stop() << "\n");
    insertMemLoc(BB, Before, Var, CoalescedFrag.start(), CoalescedFrag.stop(),
                 Base, DL);
  }

  void addDef(const VarLocInfo &VarLoc, VarLocInsertPt Before, BasicBlock &BB,
              VarFragMap &LiveSet) {
    DebugVariable DbgVar = FnVarLocs->getVariable(VarLoc.VariableID);
    if (skipVariable(DbgVar.getVariable()))
      return;
    // Don't bother doing anything for this variable if we know it's fully
    // promoted. We're only interested in variables that (sometimes) live on
    // the stack here.
    if (!VarsWithStackSlot->count(getAggregate(DbgVar)))
      return;
    unsigned Var = Aggregates.insert(
        DebugAggregate(DbgVar.getVariable(), VarLoc.DL.getInlinedAt()));

    // [StartBit: EndBit) are the bits affected by this def.
    const DIExpression *DIExpr = VarLoc.Expr;
    unsigned StartBit;
    unsigned EndBit;
    if (auto Frag = DIExpr->getFragmentInfo()) {
      StartBit = Frag->OffsetInBits;
      EndBit = StartBit + Frag->SizeInBits;
    } else {
      assert(static_cast<bool>(DbgVar.getVariable()->getSizeInBits()));
      StartBit = 0;
      EndBit = *DbgVar.getVariable()->getSizeInBits();
    }

    // We will only fill fragments for simple memory-describing dbg.value
    // intrinsics. If the fragment offset is the same as the offset from the
    // base pointer, do The Thing, otherwise fall back to normal dbg.value
    // behaviour. AssignmentTrackingLowering has generated DIExpressions
    // written in terms of the base pointer.
    // TODO: Remove this condition since the fragment offset doesn't always
    // equal the offset from base pointer (e.g. for a SROA-split variable).
    const auto DerefOffsetInBytes = getDerefOffsetInBytes(DIExpr);
    const unsigned Base =
        DerefOffsetInBytes && *DerefOffsetInBytes * 8 == StartBit
            ? Bases.insert(VarLoc.Values)
            : 0;
    LLVM_DEBUG(dbgs() << "DEF " << DbgVar.getVariable()->getName() << " ["
                      << StartBit << ", " << EndBit << "): " << toString(Base)
                      << "\n");

    // First of all, any locs that use mem that are disrupted need reinstating.
    // Unfortunately, IntervalMap doesn't let us insert intervals that overlap
    // with existing intervals so this code involves a lot of fiddling around
    // with intervals to do that manually.
    auto FragIt = LiveSet.find(Var);

    // Check if the variable does not exist in the map.
    if (FragIt == LiveSet.end()) {
      // Add this variable to the BB map.
      auto P = LiveSet.try_emplace(Var, FragsInMemMap(IntervalMapAlloc));
      assert(P.second && "Var already in map?");
      // Add the interval to the fragment map.
      P.first->second.insert(StartBit, EndBit, Base);
      return;
    }
    // The variable has an entry in the map.

    FragsInMemMap &FragMap = FragIt->second;
    // First check the easy case: the new fragment `f` doesn't overlap with any
    // intervals.
    if (!FragMap.overlaps(StartBit, EndBit)) {
      LLVM_DEBUG(dbgs() << "- No overlaps\n");
      FragMap.insert(StartBit, EndBit, Base);
      coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
                        FragMap);
      return;
    }
    // There is at least one overlap.

    // Does StartBit intersect an existing fragment?
    auto FirstOverlap = FragMap.find(StartBit);
    assert(FirstOverlap != FragMap.end());
    bool IntersectStart = FirstOverlap.start() < StartBit;

    // Does EndBit intersect an existing fragment?
    auto LastOverlap = FragMap.find(EndBit);
    bool IntersectEnd = LastOverlap.valid() && LastOverlap.start() < EndBit;

    // Check if both ends of `f` intersect the same interval `i`.
    if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
      LLVM_DEBUG(dbgs() << "- Intersect single interval @ both ends\n");
      // Shorten `i` so that there's space to insert `f`.
      //      [ f ]
      // [ - i - ]
      // +
      // [ i ][ f ][ i ]

      // Save values for use after inserting a new interval.
      auto EndBitOfOverlap = FirstOverlap.stop();
      unsigned OverlapValue = FirstOverlap.value();

      // Shorten the overlapping interval.
      FirstOverlap.setStop(StartBit);
      insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
                   OverlapValue, VarLoc.DL);

      // Insert a new interval to represent the end part.
      FragMap.insert(EndBit, EndBitOfOverlap, OverlapValue);
      insertMemLoc(BB, Before, Var, EndBit, EndBitOfOverlap, OverlapValue,
                   VarLoc.DL);

      // Insert the new (middle) fragment now there is space.
      FragMap.insert(StartBit, EndBit, Base);
    } else {
      // There's an overlap but `f` may not be fully contained within
      // `i`. Shorten any end-point intersections so that we can then
      // insert `f`.
      //      [ - f - ]
      // [ - i - ]
      // |        |
      // [ i ]
      // Shorten any end-point intersections.
      if (IntersectStart) {
        LLVM_DEBUG(dbgs() << "- Intersect interval at start\n");
        // Split off at the intersection.
        FirstOverlap.setStop(StartBit);
        insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
                     *FirstOverlap, VarLoc.DL);
      }
      // [ - f - ]
      //      [ - i - ]
      // |        |
      //          [ i ]
      if (IntersectEnd) {
        LLVM_DEBUG(dbgs() << "- Intersect interval at end\n");
        // Split off at the intersection.
        LastOverlap.setStart(EndBit);
        insertMemLoc(BB, Before, Var, EndBit, LastOverlap.stop(), *LastOverlap,
                     VarLoc.DL);
      }

      LLVM_DEBUG(dbgs() << "- Erase intervals contained within\n");
      // FirstOverlap and LastOverlap have been shortened such that they're
      // no longer overlapping with [StartBit, EndBit). Delete any overlaps
      // that remain (these will be fully contained within `f`).
      //      [ - f - ]      }
      // [ - i - ]           } Intersection shortening that has happened above.
      // |        |          }
      // [ i ]               }
      // -----------------
      //      [i2 ]          } Intervals fully contained within `f` get erased.
      // -----------------
      //      [ - f - ][ i ] } Completed insertion.
      auto It = FirstOverlap;
      if (IntersectStart)
        ++It; // IntersectStart: first overlap has been shortened.
      while (It.valid() && It.start() >= StartBit && It.stop() <= EndBit) {
        LLVM_DEBUG(dbgs() << "- Erase " << toString(It));
        It.erase(); // This increments It after removing the interval.
      }
      // We've dealt with all the overlaps now!
      assert(!FragMap.overlaps(StartBit, EndBit));
      LLVM_DEBUG(dbgs() << "- Insert DEF into now-empty space\n");
      FragMap.insert(StartBit, EndBit, Base);
    }

    coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
                      FragMap);
  }

  bool skipVariable(const DILocalVariable *V) { return !V->getSizeInBits(); }

  void process(BasicBlock &BB, VarFragMap &LiveSet) {
    BBInsertBeforeMap[&BB].clear();
    for (auto &I : BB) {
      for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
        if (const auto *Locs = FnVarLocs->getWedge(&DVR)) {
          for (const VarLocInfo &Loc : *Locs) {
            addDef(Loc, &DVR, *I.getParent(), LiveSet);
          }
        }
      }
      if (const auto *Locs = FnVarLocs->getWedge(&I)) {
        for (const VarLocInfo &Loc : *Locs) {
          addDef(Loc, &I, *I.getParent(), LiveSet);
        }
      }
    }
  }

public:
  MemLocFragmentFill(Function &Fn,
                     const DenseSet<DebugAggregate> *VarsWithStackSlot,
                     bool CoalesceAdjacentFragments)
      : Fn(Fn), VarsWithStackSlot(VarsWithStackSlot),
        CoalesceAdjacentFragments(CoalesceAdjacentFragments) {}

  /// Add variable locations to \p FnVarLocs so that any bits of a variable
  /// with a memory location have that location explicitly reinstated at each
  /// subsequent variable location definition that doesn't overwrite those
  /// bits. i.e. after a variable location def, insert new defs for the memory
  /// location with fragments for the difference of "all bits currently in
  /// memory" and "the fragment of the second def". e.g.
  ///
  /// Before:
  ///
  /// var x bits 0 to 63:    value in memory
  /// more instructions
  /// var x bits 0 to 31:    value is %0
  ///
  /// After:
  ///
  /// var x bits 0 to 63:    value in memory
  /// more instructions
  /// var x bits 0 to 31:    value is %0
  /// var x bits 32 to 63:   value in memory ; <-- new loc def
  ///
  void run(FunctionVarLocsBuilder *FnVarLocs) {
    if (!EnableMemLocFragFill)
      return;

    this->FnVarLocs = FnVarLocs;

    // Prepare for traversal.
    //
    ReversePostOrderTraversal<Function *> RPOT(&Fn);
    std::priority_queue<unsigned int, std::vector<unsigned int>,
                        std::greater<unsigned int>>
        Worklist;
    std::priority_queue<unsigned int, std::vector<unsigned int>,
                        std::greater<unsigned int>>
        Pending;
    DenseMap<unsigned int, BasicBlock *> OrderToBB;
    DenseMap<BasicBlock *, unsigned int> BBToOrder;
    { // Init OrderToBB and BBToOrder.
      unsigned int RPONumber = 0;
      for (BasicBlock *BB : RPOT) {
        OrderToBB[RPONumber] = BB;
        BBToOrder[BB] = RPONumber;
        Worklist.push(RPONumber);
        ++RPONumber;
      }
      LiveIn.reserve(RPONumber);
      LiveOut.reserve(RPONumber);
    }

    // Perform the traversal.
    //
    // This is a standard "intersect of predecessor outs" dataflow problem. To
    // solve it, we perform meet() and process() using the two worklist method
    // until the LiveIn data for each block becomes unchanging.
    //
    // This dataflow is essentially working on maps of sets and at each meet we
    // intersect the maps and the mapped sets. So, initialized live-in maps
    // monotonically decrease in value throughout the dataflow.
    SmallPtrSet<BasicBlock *, 16> Visited;
    while (!Worklist.empty() || !Pending.empty()) {
      // We track what is on the pending worklist to avoid inserting the same
      // thing twice. We could avoid this with a custom priority queue, but
      // this is probably not worth it.
      SmallPtrSet<BasicBlock *, 16> OnPending;
      LLVM_DEBUG(dbgs() << "Processing Worklist\n");
      while (!Worklist.empty()) {
        BasicBlock *BB = OrderToBB[Worklist.top()];
        LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
        Worklist.pop();
        bool InChanged = meet(*BB, Visited);
        // Always consider LiveIn changed on the first visit.
        InChanged |= Visited.insert(BB).second;
        if (InChanged) {
          LLVM_DEBUG(dbgs()
                     << BB->getName() << " has new InLocs, process it\n");
          // Mutate a copy of LiveIn while processing BB. Once we've processed
          // the terminator LiveSet is the LiveOut set for BB.
          // This is an expensive copy!
          VarFragMap LiveSet = LiveIn[BB];

          // Process the instructions in the block.
          process(*BB, LiveSet);

          // Relatively expensive check: has anything changed in LiveOut for
          // BB?
          if (!varFragMapsAreEqual(LiveOut[BB], LiveSet)) {
            LLVM_DEBUG(dbgs() << BB->getName()
                              << " has new OutLocs, add succs to worklist: [ ");
            LiveOut[BB] = std::move(LiveSet);
            for (BasicBlock *Succ : successors(BB)) {
              if (OnPending.insert(Succ).second) {
                LLVM_DEBUG(dbgs() << Succ->getName() << " ");
                Pending.push(BBToOrder[Succ]);
              }
            }
            LLVM_DEBUG(dbgs() << "]\n");
          }
        }
      }
      Worklist.swap(Pending);
      // At this point, pending must be empty, since it was just the empty
      // worklist.
      assert(Pending.empty() && "Pending should be empty");
    }

    // Insert new location defs.
    for (auto &Pair : BBInsertBeforeMap) {
      InsertMap &Map = Pair.second;
      for (auto &Pair : Map) {
        auto InsertBefore = Pair.first;
        assert(InsertBefore && "should never be null");
        auto FragMemLocs = Pair.second;
        auto &Ctx = Fn.getContext();

        for (auto &FragMemLoc : FragMemLocs) {
          DIExpression *Expr = DIExpression::get(Ctx, {});
          if (FragMemLoc.SizeInBits !=
              *Aggregates[FragMemLoc.Var].first->getSizeInBits())
            Expr = *DIExpression::createFragmentExpression(
                Expr, FragMemLoc.OffsetInBits, FragMemLoc.SizeInBits);
          Expr = DIExpression::prepend(Expr, DIExpression::DerefAfter,
                                       FragMemLoc.OffsetInBits / 8);
          DebugVariable Var(Aggregates[FragMemLoc.Var].first, Expr,
                            FragMemLoc.DL.getInlinedAt());
          FnVarLocs->addVarLoc(InsertBefore, Var, Expr, FragMemLoc.DL,
                               Bases[FragMemLoc.Base]);
        }
      }
    }
  }
};

/// AssignmentTrackingLowering encapsulates a dataflow analysis over a function
/// that interprets assignment tracking debug info metadata and stores in IR to
/// create a map of variable locations.
class AssignmentTrackingLowering {
public:
  /// The kind of location in use for a variable, where Mem is the stack home,
  /// Val is an SSA value or const, and None means that there is not one single
  /// kind (either because there are multiple or because there is none; it may
  /// prove useful to split this into two values in the future).
  ///
  /// LocKind is a join-semilattice with the partial order:
  /// None > Mem, Val
  ///
  /// i.e.
  /// join(Mem, Mem) = Mem
  /// join(Val, Val) = Val
  /// join(Mem, Val) = None
  /// join(None, Mem) = None
  /// join(None, Val) = None
  /// join(None, None) = None
  ///
  /// Note: the order is not `None > Val > Mem` because we're using DIAssignID
  /// to name assignments and are not tracking the actual stored values.
  /// Therefore currently there's no way to ensure that Mem values and Val
  /// values are the same. This could be a future extension, though it's not
  /// clear that many additional locations would be recovered that way in
  /// practice as the likelihood of this situation arising naturally seems
  /// incredibly low.
  enum class LocKind { Mem, Val, None };

  /// An abstraction of the assignment of a value to a variable or memory
  /// location.
  ///
  /// An Assignment is Known or NoneOrPhi. A Known Assignment means we have a
  /// DIAssignID ptr that represents it. NoneOrPhi means that we don't (or
  /// can't) know the ID of the last assignment that took place.
  ///
  /// The Status of the Assignment (Known or NoneOrPhi) is another
  /// join-semilattice. The partial order is:
  /// NoneOrPhi > Known {id_0, id_1, ...id_N}
  ///
  /// i.e. for all values x and y where x != y:
  /// join(x, x) = x
  /// join(x, y) = NoneOrPhi
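  /// For instance (illustrative):
  ///   join(Known{!1}, Known{!1}) = Known{!1}
  ///   join(Known{!1}, Known{!2}) = NoneOrPhi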
1028 | struct Assignment { |
1029 | enum S { Known, NoneOrPhi } Status; |
1030 | /// ID of the assignment. nullptr if Status is not Known. |
1031 | DIAssignID *ID; |
1032 | /// The dbg.assign that marks this dbg-def. Mem-defs don't use this field. |
1033 | /// May be nullptr. |
1034 | DbgVariableRecord *Source = nullptr; |
1035 | |
1036 | bool isSameSourceAssignment(const Assignment &Other) const { |
1037 | // Don't include Source in the equality check. Assignments are |
1038 | // defined by their ID, not debug intrinsic(s). |
1039 | return std::tie(args: Status, args: ID) == std::tie(args: Other.Status, args: Other.ID); |
1040 | } |
1041 | void dump(raw_ostream &OS) { |
1042 | static const char *LUT[] = {"Known" , "NoneOrPhi" }; |
1043 | OS << LUT[Status] << "(id=" ; |
1044 | if (ID) |
1045 | OS << ID; |
1046 | else |
1047 | OS << "null" ; |
1048 | OS << ", s=" ; |
1049 | if (!Source) |
1050 | OS << "null" ; |
1051 | else |
1052 | OS << Source; |
1053 | OS << ")" ; |
1054 | } |
1055 | |
1056 | static Assignment make(DIAssignID *ID, DbgVariableRecord *Source) { |
1057 | assert((!Source || Source->isDbgAssign()) && |
1058 | "Cannot make an assignment from a non-assign DbgVariableRecord" ); |
1059 | return Assignment(Known, ID, Source); |
1060 | } |
1061 | static Assignment makeFromMemDef(DIAssignID *ID) { |
1062 | return Assignment(Known, ID); |
1063 | } |
1064 | static Assignment makeNoneOrPhi() { return Assignment(NoneOrPhi, nullptr); } |
1065 | // Again, need a Top value? |
1066 | Assignment() : Status(NoneOrPhi), ID(nullptr) {} // Can we delete this? |
1067 | Assignment(S Status, DIAssignID *ID) : Status(Status), ID(ID) { |
1068 | // If the Status is Known then we expect there to be an assignment ID. |
1069 | assert(Status == NoneOrPhi || ID); |
1070 | } |
1071 | Assignment(S Status, DIAssignID *ID, DbgVariableRecord *Source) |
1072 | : Status(Status), ID(ID), Source(Source) { |
1073 | // If the Status is Known then we expect there to be an assignment ID. |
1074 | assert(Status == NoneOrPhi || ID); |
1075 | } |
1076 | }; |
1077 | |
1078 | using AssignmentMap = SmallVector<Assignment>; |
1079 | using LocMap = SmallVector<LocKind>; |
1080 | using OverlapMap = DenseMap<VariableID, SmallVector<VariableID>>; |
1081 | using UntaggedStoreAssignmentMap = |
1082 | DenseMap<const Instruction *, |
1083 | SmallVector<std::pair<VariableID, at::AssignmentInfo>>>; |
1084 | using UnknownStoreAssignmentMap = |
1085 | DenseMap<const Instruction *, SmallVector<VariableID>>; |
1086 | |
1087 | private: |
1088 | /// The highest numbered VariableID for partially promoted variables plus 1, |
1089 | /// the values for which start at 1. |
1090 | unsigned TrackedVariablesVectorSize = 0; |
1091 | /// Map a variable to the set of variables that it fully contains. |
1092 | OverlapMap VarContains; |
1093 | /// Map untagged stores to the variable fragments they assign to. Used by |
1094 | /// processUntaggedInstruction. |
1095 | UntaggedStoreAssignmentMap UntaggedStoreVars; |
1096 | /// Map untagged unknown stores (e.g. strided/masked store intrinsics) |
1097 | /// to the variables they may assign to. Used by processUntaggedInstruction. |
1098 | UnknownStoreAssignmentMap UnknownStoreVars; |
1099 | |
1100 | // Machinery to defer inserting dbg.values. |
1101 | using InstInsertMap = MapVector<VarLocInsertPt, SmallVector<VarLocInfo>>; |
1102 | InstInsertMap InsertBeforeMap; |
1103 | /// Clear the location definitions currently cached for insertion after /p |
1104 | /// After. |
1105 | void resetInsertionPoint(Instruction &After); |
1106 | void resetInsertionPoint(DbgVariableRecord &After); |
1107 | |
1108 | void emitDbgValue(LocKind Kind, DbgVariableRecord *, VarLocInsertPt After); |
1109 | |
1110 | static bool mapsAreEqual(const BitVector &Mask, const AssignmentMap &A, |
1111 | const AssignmentMap &B) { |
1112 | return llvm::all_of(Range: Mask.set_bits(), P: [&](unsigned VarID) { |
1113 | return A[VarID].isSameSourceAssignment(Other: B[VarID]); |
1114 | }); |
1115 | } |
1116 | |
1117 | /// Represents the stack and debug assignments in a block. Used to describe |
1118 | /// the live-in and live-out values for blocks, as well as the "current" |
1119 | /// value as we process each instruction in a block. |
1120 | struct BlockInfo { |
1121 | /// The set of variables (VariableID) being tracked in this block. |
1122 | BitVector VariableIDsInBlock; |
1123 | /// Dominating assignment to memory for each variable, indexed by |
1124 | /// VariableID. |
1125 | AssignmentMap StackHomeValue; |
1126 | /// Dominating assignemnt to each variable, indexed by VariableID. |
1127 | AssignmentMap DebugValue; |
1128 | /// Location kind for each variable. LiveLoc indicates whether the |
1129 | /// dominating assignment in StackHomeValue (LocKind::Mem), DebugValue |
1130 | /// (LocKind::Val), or neither (LocKind::None) is valid, in that order of |
1131 | /// preference. This cannot be derived by inspecting DebugValue and |
1132 | /// StackHomeValue due to the fact that there's no distinction in |
1133 | /// Assignment (the class) between whether an assignment is unknown or a |
1134 | /// merge of multiple assignments (both are Status::NoneOrPhi). In other |
1135 | /// words, the memory location may well be valid while both DebugValue and |
1136 | /// StackHomeValue contain Assignments that have a Status of NoneOrPhi. |
1137 | /// Indexed by VariableID. |
1138 | LocMap LiveLoc; |
1139 | |
1140 | public: |
1141 | enum AssignmentKind { Stack, Debug }; |
1142 | const AssignmentMap &getAssignmentMap(AssignmentKind Kind) const { |
1143 | switch (Kind) { |
1144 | case Stack: |
1145 | return StackHomeValue; |
1146 | case Debug: |
1147 | return DebugValue; |
1148 | } |
1149 | llvm_unreachable("Unknown AssignmentKind" ); |
1150 | } |
1151 | AssignmentMap &getAssignmentMap(AssignmentKind Kind) { |
1152 | return const_cast<AssignmentMap &>( |
1153 | const_cast<const BlockInfo *>(this)->getAssignmentMap(Kind)); |
1154 | } |
1155 | |
1156 | bool isVariableTracked(VariableID Var) const { |
1157 | return VariableIDsInBlock[static_cast<unsigned>(Var)]; |
1158 | } |
1159 | |
1160 | const Assignment &getAssignment(AssignmentKind Kind, VariableID Var) const { |
1161 | assert(isVariableTracked(Var) && "Var not tracked in block" ); |
1162 | return getAssignmentMap(Kind)[static_cast<unsigned>(Var)]; |
1163 | } |
1164 | |
1165 | LocKind getLocKind(VariableID Var) const { |
1166 | assert(isVariableTracked(Var) && "Var not tracked in block" ); |
1167 | return LiveLoc[static_cast<unsigned>(Var)]; |
1168 | } |
1169 | |
1170 | /// Set LocKind for \p Var only: does not set LocKind for VariableIDs of |
1171 | /// fragments contained win \p Var. |
1172 | void setLocKind(VariableID Var, LocKind K) { |
1173 | VariableIDsInBlock.set(static_cast<unsigned>(Var)); |
1174 | LiveLoc[static_cast<unsigned>(Var)] = K; |
1175 | } |
1176 | |
1177 | /// Set the assignment in the \p Kind assignment map for \p Var only: does |
1178 | /// not set the assignment for VariableIDs of fragments contained win \p |
1179 | /// Var. |
1180 | void setAssignment(AssignmentKind Kind, VariableID Var, |
1181 | const Assignment &AV) { |
1182 | VariableIDsInBlock.set(static_cast<unsigned>(Var)); |
1183 | getAssignmentMap(Kind)[static_cast<unsigned>(Var)] = AV; |
1184 | } |
1185 | |
1186 | /// Return true if there is an assignment matching \p AV in the \p Kind |
1187 | /// assignment map. Does consider assignments for VariableIDs of fragments |
1188 | /// contained win \p Var. |
1189 | bool hasAssignment(AssignmentKind Kind, VariableID Var, |
1190 | const Assignment &AV) const { |
1191 | if (!isVariableTracked(Var)) |
1192 | return false; |
1193 | return AV.isSameSourceAssignment(Other: getAssignment(Kind, Var)); |
1194 | } |
1195 | |
1196 | /// Compare every element in each map to determine structural equality |
1197 | /// (slow). |
1198 | bool operator==(const BlockInfo &Other) const { |
1199 | return VariableIDsInBlock == Other.VariableIDsInBlock && |
1200 | LiveLoc == Other.LiveLoc && |
1201 | mapsAreEqual(Mask: VariableIDsInBlock, A: StackHomeValue, |
1202 | B: Other.StackHomeValue) && |
1203 | mapsAreEqual(Mask: VariableIDsInBlock, A: DebugValue, B: Other.DebugValue); |
1204 | } |
1205 | bool operator!=(const BlockInfo &Other) const { return !(*this == Other); } |
1206 | bool isValid() { |
1207 | return LiveLoc.size() == DebugValue.size() && |
1208 | LiveLoc.size() == StackHomeValue.size(); |
1209 | } |
1210 | |
1211 | /// Clear everything and initialise with ⊤-values for all variables. |
1212 | void init(int NumVars) { |
1213 | StackHomeValue.clear(); |
1214 | DebugValue.clear(); |
1215 | LiveLoc.clear(); |
1216 | VariableIDsInBlock = BitVector(NumVars); |
1217 | StackHomeValue.insert(I: StackHomeValue.begin(), NumToInsert: NumVars, |
1218 | Elt: Assignment::makeNoneOrPhi()); |
1219 | DebugValue.insert(I: DebugValue.begin(), NumToInsert: NumVars, |
1220 | Elt: Assignment::makeNoneOrPhi()); |
1221 | LiveLoc.insert(I: LiveLoc.begin(), NumToInsert: NumVars, Elt: LocKind::None); |
1222 | } |
1223 | |
1224 | /// Helper for join. |
1225 | template <typename ElmtType, typename FnInputType> |
1226 | static void joinElmt(int Index, SmallVector<ElmtType> &Target, |
1227 | const SmallVector<ElmtType> &A, |
1228 | const SmallVector<ElmtType> &B, |
1229 | ElmtType (*Fn)(FnInputType, FnInputType)) { |
1230 | Target[Index] = Fn(A[Index], B[Index]); |
1231 | } |
1232 | |
1233 | /// See comment for AssignmentTrackingLowering::joinBlockInfo. |
1234 | static BlockInfo join(const BlockInfo &A, const BlockInfo &B, int NumVars) { |
1235 | // Join A and B. |
1236 | // |
1237 | // Intersect = join(a, b) for a in A, b in B where Var(a) == Var(b) |
1238 | // Difference = join(x, ⊤) for x where Var(x) is in A xor B |
1239 | // Join = Intersect ∪ Difference |
1240 | // |
1241 | // This is achieved by performing a join on elements from A and B with |
1242 | // variables common to both A and B (join elements indexed by var |
1243 | // intersect), then adding ⊤-value elements for vars in A xor B. The |
1244 | // latter part is equivalent to performing join on elements with variables |
1245 | // in A xor B with the ⊤-value for the map element since join(x, ⊤) = ⊤. |
1246 | // BlockInfo::init initializes all variable entries to the ⊤ value so we |
1247 | // don't need to explicitly perform that step as Join.VariableIDsInBlock |
1248 | // is set to the union of the variables in A and B at the end of this |
1249 | // function. |
1250 | BlockInfo Join; |
1251 | Join.init(NumVars); |
1252 | |
1253 | BitVector Intersect = A.VariableIDsInBlock; |
1254 | Intersect &= B.VariableIDsInBlock; |
1255 | |
1256 | for (auto VarID : Intersect.set_bits()) { |
1257 | joinElmt(Index: VarID, Target&: Join.LiveLoc, A: A.LiveLoc, B: B.LiveLoc, Fn: joinKind); |
1258 | joinElmt(Index: VarID, Target&: Join.DebugValue, A: A.DebugValue, B: B.DebugValue, |
1259 | Fn: joinAssignment); |
1260 | joinElmt(Index: VarID, Target&: Join.StackHomeValue, A: A.StackHomeValue, B: B.StackHomeValue, |
1261 | Fn: joinAssignment); |
1262 | } |
1263 | |
1264 | Join.VariableIDsInBlock = A.VariableIDsInBlock; |
1265 | Join.VariableIDsInBlock |= B.VariableIDsInBlock; |
1266 | assert(Join.isValid()); |
1267 | return Join; |
1268 | } |
1269 | }; |
1270 | |
1271 | Function &Fn; |
1272 | const DataLayout &Layout; |
1273 | const DenseSet<DebugAggregate> *VarsWithStackSlot; |
1274 | FunctionVarLocsBuilder *FnVarLocs; |
1275 | DenseMap<const BasicBlock *, BlockInfo> LiveIn; |
1276 | DenseMap<const BasicBlock *, BlockInfo> LiveOut; |
1277 | |
1278 | /// Helper for process methods to track variables touched each frame. |
1279 | DenseSet<VariableID> VarsTouchedThisFrame; |
1280 | |
1281 | /// The set of variables that sometimes are not located in their stack home. |
1282 | DenseSet<DebugAggregate> NotAlwaysStackHomed; |
1283 | |
1284 | VariableID getVariableID(const DebugVariable &Var) { |
1285 | return FnVarLocs->insertVariable(V: Var); |
1286 | } |
1287 | |
1288 | /// Join the LiveOut values of preds that are contained in \p Visited into |
1289 | /// LiveIn[BB]. Return True if LiveIn[BB] has changed as a result. LiveIn[BB] |
1290 | /// values monotonically increase. See the @link joinMethods join methods |
1291 | /// @endlink documentation for more info. |
1292 | bool join(const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited); |
1293 | ///@name joinMethods |
1294 | /// Functions that implement `join` (the least upper bound) for the |
1295 | /// join-semilattice types used in the dataflow. There is an explicit bottom |
/// value (⊥) for some types and an explicit top value (⊤) for all types.
1297 | /// By definition: |
1298 | /// |
1299 | /// Join(A, B) >= A && Join(A, B) >= B |
1300 | /// Join(A, ⊥) = A |
1301 | /// Join(A, ⊤) = ⊤ |
1302 | /// |
1303 | /// These invariants are important for monotonicity. |
1304 | /// |
1305 | /// For the map-type functions, all unmapped keys in an empty map are |
1306 | /// associated with a bottom value (⊥). This represents their values being |
1307 | /// unknown. Unmapped keys in non-empty maps (joining two maps with a key |
1308 | /// only present in one) represents either a variable going out of scope or |
1309 | /// dropped debug info. It is assumed the key is associated with a top value |
1310 | /// (⊤) in this case (unknown location / assignment). |
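/// For example (hypothetical values): joining {x: Known(!1)} with an empty
/// map (an unvisited pred) leaves x at Known(!1), since the missing entry
/// is ⊥; joining it with a non-empty map that lacks x instead yields
/// join(Known(!1), ⊤) = ⊤ for x.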
1311 | ///@{ |
1312 | static LocKind joinKind(LocKind A, LocKind B); |
1313 | static Assignment joinAssignment(const Assignment &A, const Assignment &B); |
1314 | BlockInfo joinBlockInfo(const BlockInfo &A, const BlockInfo &B); |
1315 | ///@} |
1316 | |
1317 | /// Process the instructions in \p BB updating \p LiveSet along the way. \p |
1318 | /// LiveSet must be initialized with the current live-in locations before |
1319 | /// calling this. |
1320 | void process(BasicBlock &BB, BlockInfo *LiveSet); |
1321 | ///@name processMethods |
1322 | /// Methods to process instructions in order to update the LiveSet (current |
1323 | /// location information). |
1324 | ///@{ |
1325 | void processNonDbgInstruction(Instruction &I, BlockInfo *LiveSet); |
1326 | /// Update \p LiveSet after encountering an instruction with a DIAssignID |
1327 | /// attachment, \p I. |
1328 | void processTaggedInstruction(Instruction &I, BlockInfo *LiveSet); |
/// Update \p LiveSet after encountering an instruction without a DIAssignID
1330 | /// attachment, \p I. |
1331 | void processUntaggedInstruction(Instruction &I, BlockInfo *LiveSet); |
1332 | void processUnknownStoreToVariable(Instruction &I, VariableID &Var, |
1333 | BlockInfo *LiveSet); |
1334 | void processDbgAssign(DbgVariableRecord *Assign, BlockInfo *LiveSet); |
1335 | void processDbgVariableRecord(DbgVariableRecord &DVR, BlockInfo *LiveSet); |
1336 | void processDbgValue(DbgVariableRecord *DbgValue, BlockInfo *LiveSet); |
/// Add an assignment to memory for the variable \p Var.
1338 | void addMemDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV); |
/// Add an assignment to the variable \p Var.
1340 | void addDbgDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV); |
1341 | ///@} |
1342 | |
1343 | /// Set the LocKind for \p Var. |
1344 | void setLocKind(BlockInfo *LiveSet, VariableID Var, LocKind K); |
1345 | /// Get the live LocKind for a \p Var. Requires addMemDef or addDbgDef to |
1346 | /// have been called for \p Var first. |
1347 | LocKind getLocKind(BlockInfo *LiveSet, VariableID Var); |
/// Return true if \p Var has an assignment matching \p AV in the \p Kind
/// assignment map of \p LiveSet.
1349 | bool hasVarWithAssignment(BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind, |
1350 | VariableID Var, const Assignment &AV); |
/// Return the set of VariableIDs corresponding to the fragments contained
/// fully within the variable/fragment \p Var.
1353 | ArrayRef<VariableID> getContainedFragments(VariableID Var) const; |
1354 | |
1355 | /// Mark \p Var as having been touched this frame. Note, this applies only |
1356 | /// to the exact fragment \p Var and not to any fragments contained within. |
1357 | void touchFragment(VariableID Var); |
1358 | |
1359 | /// Emit info for variables that are fully promoted. |
1360 | bool emitPromotedVarLocs(FunctionVarLocsBuilder *FnVarLocs); |
1361 | |
1362 | public: |
1363 | AssignmentTrackingLowering(Function &Fn, const DataLayout &Layout, |
1364 | const DenseSet<DebugAggregate> *VarsWithStackSlot) |
1365 | : Fn(Fn), Layout(Layout), VarsWithStackSlot(VarsWithStackSlot) {} |
1366 | /// Run the analysis, adding variable location info to \p FnVarLocs. Returns |
1367 | /// true if any variable locations have been added to FnVarLocs. |
1368 | bool run(FunctionVarLocsBuilder *FnVarLocs); |
1369 | }; |
1370 | } // namespace |
1371 | |
1372 | ArrayRef<VariableID> |
1373 | AssignmentTrackingLowering::getContainedFragments(VariableID Var) const { |
1374 | auto R = VarContains.find(Val: Var); |
1375 | if (R == VarContains.end()) |
1376 | return {}; |
1377 | return R->second; |
1378 | } |
1379 | |
1380 | void AssignmentTrackingLowering::touchFragment(VariableID Var) { |
1381 | VarsTouchedThisFrame.insert(V: Var); |
1382 | } |
1383 | |
1384 | void AssignmentTrackingLowering::setLocKind(BlockInfo *LiveSet, VariableID Var, |
1385 | LocKind K) { |
1386 | auto SetKind = [this](BlockInfo *LiveSet, VariableID Var, LocKind K) { |
1387 | LiveSet->setLocKind(Var, K); |
1388 | touchFragment(Var); |
1389 | }; |
1390 | SetKind(LiveSet, Var, K); |
1391 | |
1392 | // Update the LocKind for all fragments contained within Var. |
1393 | for (VariableID Frag : getContainedFragments(Var)) |
1394 | SetKind(LiveSet, Frag, K); |
1395 | } |
1396 | |
1397 | AssignmentTrackingLowering::LocKind |
1398 | AssignmentTrackingLowering::getLocKind(BlockInfo *LiveSet, VariableID Var) { |
1399 | return LiveSet->getLocKind(Var); |
1400 | } |
1401 | |
1402 | void AssignmentTrackingLowering::addMemDef(BlockInfo *LiveSet, VariableID Var, |
1403 | const Assignment &AV) { |
1404 | LiveSet->setAssignment(Kind: BlockInfo::Stack, Var, AV); |
1405 | |
// Use this assignment for all fragments contained within Var, but do not
1407 | // provide a Source because we cannot convert Var's value to a value for the |
1408 | // fragment. |
1409 | Assignment FragAV = AV; |
1410 | FragAV.Source = nullptr; |
1411 | for (VariableID Frag : getContainedFragments(Var)) |
1412 | LiveSet->setAssignment(Kind: BlockInfo::Stack, Var: Frag, AV: FragAV); |
1413 | } |
1414 | |
1415 | void AssignmentTrackingLowering::addDbgDef(BlockInfo *LiveSet, VariableID Var, |
1416 | const Assignment &AV) { |
1417 | LiveSet->setAssignment(Kind: BlockInfo::Debug, Var, AV); |
1418 | |
// Use this assignment for all fragments contained within Var, but do not
1420 | // provide a Source because we cannot convert Var's value to a value for the |
1421 | // fragment. |
1422 | Assignment FragAV = AV; |
1423 | FragAV.Source = nullptr; |
1424 | for (VariableID Frag : getContainedFragments(Var)) |
1425 | LiveSet->setAssignment(Kind: BlockInfo::Debug, Var: Frag, AV: FragAV); |
1426 | } |
1427 | |
1428 | static DIAssignID *getIDFromInst(const Instruction &I) { |
1429 | return cast<DIAssignID>(Val: I.getMetadata(KindID: LLVMContext::MD_DIAssignID)); |
1430 | } |
1431 | |
1432 | static DIAssignID *getIDFromMarker(const DbgVariableRecord &DVR) { |
1433 | assert(DVR.isDbgAssign() && |
1434 | "Cannot get a DIAssignID from a non-assign DbgVariableRecord!" ); |
1435 | return DVR.getAssignID(); |
1436 | } |
1437 | |
/// Return true if \p Var has an assignment matching \p AV in the \p Kind
/// assignment map of \p LiveSet.
1439 | bool AssignmentTrackingLowering::hasVarWithAssignment( |
1440 | BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind, VariableID Var, |
1441 | const Assignment &AV) { |
1442 | if (!LiveSet->hasAssignment(Kind, Var, AV)) |
1443 | return false; |
1444 | |
1445 | // Check all the frags contained within Var as these will have all been |
1446 | // mapped to AV at the last store to Var. |
1447 | for (VariableID Frag : getContainedFragments(Var)) |
1448 | if (!LiveSet->hasAssignment(Kind, Var: Frag, AV)) |
1449 | return false; |
1450 | return true; |
1451 | } |
1452 | |
1453 | #ifndef NDEBUG |
1454 | const char *locStr(AssignmentTrackingLowering::LocKind Loc) { |
1455 | using LocKind = AssignmentTrackingLowering::LocKind; |
1456 | switch (Loc) { |
1457 | case LocKind::Val: |
1458 | return "Val" ; |
1459 | case LocKind::Mem: |
1460 | return "Mem" ; |
1461 | case LocKind::None: |
1462 | return "None" ; |
1463 | }; |
1464 | llvm_unreachable("unknown LocKind" ); |
1465 | } |
1466 | #endif |
1467 | |
1468 | VarLocInsertPt getNextNode(const DbgRecord *DVR) { |
1469 | auto NextIt = ++(DVR->getIterator()); |
1470 | if (NextIt == DVR->getMarker()->getDbgRecordRange().end()) |
1471 | return DVR->getMarker()->MarkedInstr; |
1472 | return &*NextIt; |
1473 | } |
1474 | VarLocInsertPt getNextNode(const Instruction *Inst) { |
1475 | const Instruction *Next = Inst->getNextNode(); |
1476 | if (!Next->hasDbgRecords()) |
1477 | return Next; |
1478 | return &*Next->getDbgRecordRange().begin(); |
1479 | } |
1480 | VarLocInsertPt getNextNode(VarLocInsertPt InsertPt) { |
1481 | if (isa<const Instruction *>(Val: InsertPt)) |
1482 | return getNextNode(Inst: cast<const Instruction *>(Val&: InsertPt)); |
1483 | return getNextNode(DVR: cast<const DbgRecord *>(Val&: InsertPt)); |
1484 | } |
1485 | |
1486 | void AssignmentTrackingLowering::emitDbgValue( |
1487 | AssignmentTrackingLowering::LocKind Kind, DbgVariableRecord *Source, |
1488 | VarLocInsertPt After) { |
1489 | |
1490 | DILocation *DL = Source->getDebugLoc(); |
1491 | auto Emit = [this, Source, After, DL](Metadata *Val, DIExpression *Expr) { |
1492 | assert(Expr); |
1493 | if (!Val) |
1494 | Val = ValueAsMetadata::get( |
1495 | V: PoisonValue::get(T: Type::getInt1Ty(C&: Source->getContext()))); |
1496 | |
1497 | // Find a suitable insert point. |
1498 | auto InsertBefore = getNextNode(InsertPt: After); |
1499 | assert(InsertBefore && "Shouldn't be inserting after a terminator" ); |
1500 | |
1501 | VariableID Var = getVariableID(Var: DebugVariable(Source)); |
1502 | VarLocInfo VarLoc; |
1503 | VarLoc.VariableID = Var; |
1504 | VarLoc.Expr = Expr; |
1505 | VarLoc.Values = RawLocationWrapper(Val); |
1506 | VarLoc.DL = DL; |
1507 | // Insert it into the map for later. |
1508 | InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc); |
1509 | }; |
1510 | |
1511 | // NOTE: This block can mutate Kind. |
1512 | if (Kind == LocKind::Mem) { |
1513 | assert(Source->isDbgAssign()); |
1514 | const DbgVariableRecord *Assign = Source; |
1515 | // Check the address hasn't been dropped (e.g. the debug uses may not have |
1516 | // been replaced before deleting a Value). |
1517 | if (Assign->isKillAddress()) { |
1518 | // The address isn't valid so treat this as a non-memory def. |
1519 | Kind = LocKind::Val; |
1520 | } else { |
1521 | Value *Val = Assign->getAddress(); |
1522 | DIExpression *Expr = Assign->getAddressExpression(); |
1523 | assert(!Expr->getFragmentInfo() && |
1524 | "fragment info should be stored in value-expression only" ); |
1525 | // Copy the fragment info over from the value-expression to the new |
1526 | // DIExpression. |
1527 | if (auto OptFragInfo = Source->getExpression()->getFragmentInfo()) { |
1528 | auto FragInfo = *OptFragInfo; |
1529 | Expr = *DIExpression::createFragmentExpression( |
1530 | Expr, OffsetInBits: FragInfo.OffsetInBits, SizeInBits: FragInfo.SizeInBits); |
1531 | } |
1532 | // The address-expression has an implicit deref, add it now. |
1533 | std::tie(args&: Val, args&: Expr) = |
1534 | walkToAllocaAndPrependOffsetDeref(DL: Layout, Start: Val, Expression: Expr); |
1535 | Emit(ValueAsMetadata::get(V: Val), Expr); |
1536 | return; |
1537 | } |
1538 | } |
1539 | |
1540 | if (Kind == LocKind::Val) { |
1541 | Emit(Source->getRawLocation(), Source->getExpression()); |
1542 | return; |
1543 | } |
1544 | |
1545 | if (Kind == LocKind::None) { |
1546 | Emit(nullptr, Source->getExpression()); |
1547 | return; |
1548 | } |
1549 | } |
1550 | |
1551 | void AssignmentTrackingLowering::processNonDbgInstruction( |
1552 | Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1553 | if (I.hasMetadata(KindID: LLVMContext::MD_DIAssignID)) |
1554 | processTaggedInstruction(I, LiveSet); |
1555 | else |
1556 | processUntaggedInstruction(I, LiveSet); |
1557 | } |
1558 | |
1559 | void AssignmentTrackingLowering::processUnknownStoreToVariable( |
1560 | Instruction &I, VariableID &Var, BlockInfo *LiveSet) { |
1561 | // We may have assigned to some unknown fragment of the variable, so |
1562 | // treat the memory assignment as unknown for now. |
1563 | addMemDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi()); |
1564 | // If we weren't already using a memory location, we don't need to do |
1565 | // anything more. |
1566 | if (getLocKind(LiveSet, Var) != LocKind::Mem) |
1567 | return; |
1568 | // If there is a live debug value for this variable, fall back to using |
1569 | // that. |
1570 | Assignment DbgAV = LiveSet->getAssignment(Kind: BlockInfo::Debug, Var); |
1571 | if (DbgAV.Status != Assignment::NoneOrPhi && DbgAV.Source) { |
1572 | LLVM_DEBUG(dbgs() << "Switching to fallback debug value: " ; |
1573 | DbgAV.dump(dbgs()); dbgs() << "\n" ); |
1574 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1575 | emitDbgValue(Kind: LocKind::Val, Source: DbgAV.Source, After: &I); |
1576 | return; |
1577 | } |
1578 | // Otherwise, find a suitable insert point, before the next instruction or |
1579 | // DbgRecord after I. |
1580 | auto InsertBefore = getNextNode(Inst: &I); |
1581 | assert(InsertBefore && "Shouldn't be inserting after a terminator" ); |
1582 | |
1583 | // Get DILocation for this assignment. |
1584 | DebugVariable V = FnVarLocs->getVariable(ID: Var); |
1585 | DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt()); |
1586 | const DILocation *DILoc = DILocation::get( |
1587 | Context&: Fn.getContext(), Line: 0, Column: 0, Scope: V.getVariable()->getScope(), InlinedAt); |
1588 | |
1589 | VarLocInfo VarLoc; |
1590 | VarLoc.VariableID = Var; |
1591 | VarLoc.Expr = DIExpression::get(Context&: I.getContext(), Elements: {}); |
1592 | VarLoc.Values = RawLocationWrapper( |
1593 | ValueAsMetadata::get(V: PoisonValue::get(T: Type::getInt1Ty(C&: I.getContext())))); |
1594 | VarLoc.DL = DILoc; |
1595 | InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc); |
1596 | } |
1597 | |
1598 | void AssignmentTrackingLowering::processUntaggedInstruction( |
1599 | Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1600 | // Interpret stack stores that are not tagged as an assignment in memory for |
1601 | // the variables associated with that address. These stores may not be tagged |
1602 | // because a) the store cannot be represented using dbg.assigns (non-const |
1603 | // length or offset) or b) the tag was accidentally dropped during |
1604 | // optimisations. For these stores we fall back to assuming that the stack |
1605 | // home is a valid location for the variables. The benefit is that this |
1606 | // prevents us missing an assignment and therefore incorrectly maintaining |
1607 | // earlier location definitions, and in many cases it should be a reasonable |
1608 | // assumption. However, this will occasionally lead to slight |
1609 | // inaccuracies. The value of a hoisted untagged store will be visible |
1610 | // "early", for example. |
1611 | assert(!I.hasMetadata(LLVMContext::MD_DIAssignID)); |
1612 | auto It = UntaggedStoreVars.find(Val: &I); |
1613 | if (It == UntaggedStoreVars.end()) { |
1614 | // It is possible that we have an untagged unknown store, i.e. one that |
1615 | // cannot be represented as a simple (base, offset, size) - in this case we |
1616 | // should undef the memory location of the variable, as if we had a tagged |
1617 | // store that did not match the current assignment. |
1618 | // FIXME: It should be possible to support these stores, but it would |
1619 | // require more extensive changes to our representation of assignments. |
1620 | if (auto UnhandledStoreIt = UnknownStoreVars.find(Val: &I); |
1621 | UnhandledStoreIt != UnknownStoreVars.end()) { |
1622 | LLVM_DEBUG(dbgs() << "Processing untagged unknown store " << I << "\n" ); |
1623 | for (auto &Var : UnhandledStoreIt->second) |
1624 | processUnknownStoreToVariable(I, Var, LiveSet); |
1625 | } |
1626 | return; // No variables associated with the store destination. |
1627 | } |
1628 | |
1629 | LLVM_DEBUG(dbgs() << "processUntaggedInstruction on UNTAGGED INST " << I |
1630 | << "\n" ); |
// Iterate over the variables that this store affects, add a NoneOrPhi dbg
// and mem def, set the LocKind to Mem, and emit a location def for each.
1633 | for (auto [Var, Info] : It->second) { |
1634 | // This instruction is treated as both a debug and memory assignment, |
1635 | // meaning the memory location should be used. We don't have an assignment |
1636 | // ID though so use Assignment::makeNoneOrPhi() to create an imaginary one. |
1637 | addMemDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi()); |
1638 | addDbgDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi()); |
1639 | setLocKind(LiveSet, Var, K: LocKind::Mem); |
1640 | LLVM_DEBUG(dbgs() << " setting Stack LocKind to: " << locStr(LocKind::Mem) |
1641 | << "\n" ); |
1642 | // Build the dbg location def to insert. |
1643 | // |
1644 | // DIExpression: Add fragment and offset. |
1645 | DebugVariable V = FnVarLocs->getVariable(ID: Var); |
1646 | DIExpression *DIE = DIExpression::get(Context&: I.getContext(), Elements: {}); |
1647 | if (auto Frag = V.getFragment()) { |
1648 | auto R = DIExpression::createFragmentExpression(Expr: DIE, OffsetInBits: Frag->OffsetInBits, |
1649 | SizeInBits: Frag->SizeInBits); |
1650 | assert(R && "unexpected createFragmentExpression failure" ); |
1651 | DIE = *R; |
1652 | } |
1653 | SmallVector<uint64_t, 3> Ops; |
1654 | if (Info.OffsetInBits) |
1655 | Ops = {dwarf::DW_OP_plus_uconst, Info.OffsetInBits / 8}; |
1656 | Ops.push_back(Elt: dwarf::DW_OP_deref); |
1657 | DIE = DIExpression::prependOpcodes(Expr: DIE, Ops, /*StackValue=*/false, |
1658 | /*EntryValue=*/false); |
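// e.g. with Info.OffsetInBits = 64 and a 32-bit fragment at bit offset 32,
// DIE is now (DW_OP_plus_uconst, 8, DW_OP_deref, DW_OP_LLVM_fragment, 32,
// 32); prepending keeps the fragment operation last, as DIExpression
// requires.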
1659 | // Find a suitable insert point, before the next instruction or DbgRecord |
1660 | // after I. |
1661 | auto InsertBefore = getNextNode(Inst: &I); |
1662 | assert(InsertBefore && "Shouldn't be inserting after a terminator" ); |
1663 | |
1664 | // Get DILocation for this unrecorded assignment. |
1665 | DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt()); |
1666 | const DILocation *DILoc = DILocation::get( |
1667 | Context&: Fn.getContext(), Line: 0, Column: 0, Scope: V.getVariable()->getScope(), InlinedAt); |
1668 | |
1669 | VarLocInfo VarLoc; |
1670 | VarLoc.VariableID = static_cast<VariableID>(Var); |
1671 | VarLoc.Expr = DIE; |
1672 | VarLoc.Values = RawLocationWrapper( |
1673 | ValueAsMetadata::get(V: const_cast<AllocaInst *>(Info.Base))); |
1674 | VarLoc.DL = DILoc; |
// Insert it into the map for later.
1676 | InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc); |
1677 | } |
1678 | } |
1679 | |
1680 | void AssignmentTrackingLowering::processTaggedInstruction( |
1681 | Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1682 | auto LinkedDPAssigns = at::getDVRAssignmentMarkers(Inst: &I); |
1683 | // No dbg.assign intrinsics linked. |
// FIXME: Any variable with a stack slot modified by this store but without
// a dbg.assign linked to the store should probably be treated like an
// untagged store.
1687 | if (LinkedDPAssigns.empty()) |
1688 | return; |
1689 | |
1690 | LLVM_DEBUG(dbgs() << "processTaggedInstruction on " << I << "\n" ); |
1691 | for (DbgVariableRecord *Assign : LinkedDPAssigns) { |
1692 | VariableID Var = getVariableID(Var: DebugVariable(Assign)); |
1693 | // Something has gone wrong if VarsWithStackSlot doesn't contain a variable |
1694 | // that is linked to a store. |
1695 | assert(VarsWithStackSlot->count(getAggregate(Assign)) && |
1696 | "expected Assign's variable to have stack slot" ); |
1697 | |
1698 | Assignment AV = Assignment::makeFromMemDef(ID: getIDFromInst(I)); |
1699 | addMemDef(LiveSet, Var, AV); |
1700 | |
1701 | LLVM_DEBUG(dbgs() << " linked to " << *Assign << "\n" ); |
1702 | LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var)) |
1703 | << " -> " ); |
1704 | |
1705 | // The last assignment to the stack is now AV. Check if the last debug |
1706 | // assignment has a matching Assignment. |
1707 | if (hasVarWithAssignment(LiveSet, Kind: BlockInfo::Debug, Var, AV)) { |
1708 | // The StackHomeValue and DebugValue for this variable match so we can |
1709 | // emit a stack home location here. |
1710 | LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n" ;); |
1711 | LLVM_DEBUG(dbgs() << " Stack val: " ; AV.dump(dbgs()); dbgs() << "\n" ); |
1712 | LLVM_DEBUG(dbgs() << " Debug val: " ; |
1713 | LiveSet->DebugValue[static_cast<unsigned>(Var)].dump(dbgs()); |
1714 | dbgs() << "\n" ); |
1715 | setLocKind(LiveSet, Var, K: LocKind::Mem); |
1716 | emitDbgValue(Kind: LocKind::Mem, Source: Assign, After: &I); |
continue;
1718 | } |
1719 | |
1720 | // The StackHomeValue and DebugValue for this variable do not match. I.e. |
1721 | // The value currently stored in the stack is not what we'd expect to |
// see, so we cannot emit a stack home location here. Now we will
1723 | // look at the live LocKind for the variable and determine an appropriate |
1724 | // dbg.value to emit. |
1725 | LocKind PrevLoc = getLocKind(LiveSet, Var); |
1726 | switch (PrevLoc) { |
1727 | case LocKind::Val: { |
// The value in memory has changed but we're not currently
1729 | // using the memory location. Do nothing. |
1730 | LLVM_DEBUG(dbgs() << "Val, (unchanged)\n" ;); |
1731 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1732 | } break; |
1733 | case LocKind::Mem: { |
1734 | // There's been an assignment to memory that we were using as a |
1735 | // location for this variable, and the Assignment doesn't match what |
1736 | // we'd expect to see in memory. |
1737 | Assignment DbgAV = LiveSet->getAssignment(Kind: BlockInfo::Debug, Var); |
1738 | if (DbgAV.Status == Assignment::NoneOrPhi) { |
1739 | // We need to terminate any previously open location now. |
1740 | LLVM_DEBUG(dbgs() << "None, No Debug value available\n" ;); |
1741 | setLocKind(LiveSet, Var, K: LocKind::None); |
1742 | emitDbgValue(Kind: LocKind::None, Source: Assign, After: &I); |
1743 | } else { |
1744 | // The previous DebugValue Value can be used here. |
1745 | LLVM_DEBUG(dbgs() << "Val, Debug value is Known\n" ;); |
1746 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1747 | if (DbgAV.Source) { |
1748 | emitDbgValue(Kind: LocKind::Val, Source: DbgAV.Source, After: &I); |
1749 | } else { |
// DbgAV.Source is nullptr so we must emit undef here.
1751 | emitDbgValue(Kind: LocKind::None, Source: Assign, After: &I); |
1752 | } |
1753 | } |
1754 | } break; |
1755 | case LocKind::None: { |
1756 | // There's been an assignment to memory and we currently are |
1757 | // not tracking a location for the variable. Do not emit anything. |
1758 | LLVM_DEBUG(dbgs() << "None, (unchanged)\n" ;); |
1759 | setLocKind(LiveSet, Var, K: LocKind::None); |
1760 | } break; |
1761 | } |
1762 | } |
1763 | } |
1764 | |
1765 | void AssignmentTrackingLowering::processDbgAssign(DbgVariableRecord *DbgAssign, |
1766 | BlockInfo *LiveSet) { |
1767 | // Only bother tracking variables that are at some point stack homed. Other |
1768 | // variables can be dealt with trivially later. |
1769 | if (!VarsWithStackSlot->count(V: getAggregate(Var: DbgAssign))) |
1770 | return; |
1771 | |
1772 | VariableID Var = getVariableID(Var: DebugVariable(DbgAssign)); |
1773 | Assignment AV = Assignment::make(ID: getIDFromMarker(DVR: *DbgAssign), Source: DbgAssign); |
1774 | addDbgDef(LiveSet, Var, AV); |
1775 | |
1776 | LLVM_DEBUG(dbgs() << "processDbgAssign on " << *DbgAssign << "\n" ;); |
1777 | LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var)) |
1778 | << " -> " ); |
1779 | |
1780 | // Check if the DebugValue and StackHomeValue both hold the same |
1781 | // Assignment. |
1782 | if (hasVarWithAssignment(LiveSet, Kind: BlockInfo::Stack, Var, AV)) { |
1783 | // They match. We can use the stack home because the debug intrinsics |
1784 | // state that an assignment happened here, and we know that specific |
1785 | // assignment was the last one to take place in memory for this variable. |
1786 | LocKind Kind; |
1787 | if (DbgAssign->isKillAddress()) { |
1788 | LLVM_DEBUG( |
1789 | dbgs() |
1790 | << "Val, Stack matches Debug program but address is killed\n" ;); |
1791 | Kind = LocKind::Val; |
1792 | } else { |
1793 | LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n" ;); |
1794 | Kind = LocKind::Mem; |
1795 | }; |
1796 | setLocKind(LiveSet, Var, K: Kind); |
1797 | emitDbgValue(Kind, Source: DbgAssign, After: DbgAssign); |
1798 | } else { |
1799 | // The last assignment to the memory location isn't the one that we want |
1800 | // to show to the user so emit a dbg.value(Value). Value may be undef. |
1801 | LLVM_DEBUG(dbgs() << "Val, Stack contents is unknown\n" ;); |
1802 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1803 | emitDbgValue(Kind: LocKind::Val, Source: DbgAssign, After: DbgAssign); |
1804 | } |
1805 | } |
1806 | |
1807 | void AssignmentTrackingLowering::processDbgValue(DbgVariableRecord *DbgValue, |
1808 | BlockInfo *LiveSet) { |
// Only bother tracking variables that are at some point stack homed.
// Other variables can be dealt with trivially later.
1811 | if (!VarsWithStackSlot->count(V: getAggregate(Var: DbgValue))) |
1812 | return; |
1813 | |
1814 | VariableID Var = getVariableID(Var: DebugVariable(DbgValue)); |
1815 | // We have no ID to create an Assignment with so we mark this assignment as |
1816 | // NoneOrPhi. Note that the dbg.value still exists, we just cannot determine |
1817 | // the assignment responsible for setting this value. |
// This is fine; dbg.values are essentially interchangeable with unlinked
1819 | // dbg.assigns, and some passes such as mem2reg and instcombine add them to |
1820 | // PHIs for promoted variables. |
1821 | Assignment AV = Assignment::makeNoneOrPhi(); |
1822 | addDbgDef(LiveSet, Var, AV); |
1823 | |
1824 | LLVM_DEBUG(dbgs() << "processDbgValue on " << *DbgValue << "\n" ;); |
1825 | LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var)) |
1826 | << " -> Val, dbg.value override" ); |
1827 | |
1828 | setLocKind(LiveSet, Var, K: LocKind::Val); |
1829 | emitDbgValue(Kind: LocKind::Val, Source: DbgValue, After: DbgValue); |
1830 | } |
1831 | |
1832 | static bool hasZeroSizedFragment(DbgVariableRecord &DbgValue) { |
1833 | if (auto F = DbgValue.getExpression()->getFragmentInfo()) |
1834 | return F->SizeInBits == 0; |
1835 | return false; |
1836 | } |
1837 | |
1838 | void AssignmentTrackingLowering::processDbgVariableRecord( |
1839 | DbgVariableRecord &DVR, AssignmentTrackingLowering::BlockInfo *LiveSet) { |
1840 | // Ignore assignments to zero bits of the variable. |
1841 | if (hasZeroSizedFragment(DbgValue&: DVR)) |
1842 | return; |
1843 | |
1844 | if (DVR.isDbgAssign()) |
1845 | processDbgAssign(DbgAssign: &DVR, LiveSet); |
1846 | else if (DVR.isDbgValue()) |
1847 | processDbgValue(DbgValue: &DVR, LiveSet); |
1848 | } |
1849 | |
1850 | void AssignmentTrackingLowering::resetInsertionPoint(Instruction &After) { |
1851 | assert(!After.isTerminator() && "Can't insert after a terminator" ); |
1852 | auto *R = InsertBeforeMap.find(Key: getNextNode(Inst: &After)); |
1853 | if (R == InsertBeforeMap.end()) |
1854 | return; |
1855 | R->second.clear(); |
1856 | } |
1857 | void AssignmentTrackingLowering::resetInsertionPoint(DbgVariableRecord &After) { |
1858 | auto *R = InsertBeforeMap.find(Key: getNextNode(DVR: &After)); |
1859 | if (R == InsertBeforeMap.end()) |
1860 | return; |
1861 | R->second.clear(); |
1862 | } |
1863 | |
1864 | void AssignmentTrackingLowering::process(BasicBlock &BB, BlockInfo *LiveSet) { |
1865 | // If the block starts with DbgRecords, we need to process those DbgRecords as |
1866 | // their own frame without processing any instructions first. |
1867 | bool ProcessedLeadingDbgRecords = !BB.begin()->hasDbgRecords(); |
1868 | for (auto II = BB.begin(), EI = BB.end(); II != EI;) { |
1869 | assert(VarsTouchedThisFrame.empty()); |
1870 | // Process the instructions in "frames". A "frame" includes a single |
// non-debug instruction followed by any debug instructions before the
1872 | // next non-debug instruction. |
1873 | |
1874 | // Skip the current instruction if it has unprocessed DbgRecords attached |
1875 | // (see comment above `ProcessedLeadingDbgRecords`). |
1876 | if (ProcessedLeadingDbgRecords) { |
1877 | // II is now either a debug intrinsic, a non-debug instruction with no |
1878 | // attached DbgRecords, or a non-debug instruction with attached processed |
1879 | // DbgRecords. |
1880 | // II has not been processed. |
1881 | if (II->isTerminator()) |
1882 | break; |
1883 | resetInsertionPoint(After&: *II); |
1884 | processNonDbgInstruction(I&: *II, LiveSet); |
1885 | assert(LiveSet->isValid()); |
1886 | ++II; |
1887 | } |
1888 | // II is now either a debug intrinsic, a non-debug instruction with no |
1889 | // attached DbgRecords, or a non-debug instruction with attached unprocessed |
1890 | // DbgRecords. |
1891 | if (II != EI && II->hasDbgRecords()) { |
1892 | // Skip over non-variable debug records (i.e., labels). They're going to |
1893 | // be read from IR (possibly re-ordering them within the debug record |
1894 | // range) rather than from the analysis results. |
1895 | for (DbgVariableRecord &DVR : filterDbgVars(R: II->getDbgRecordRange())) { |
1896 | resetInsertionPoint(After&: DVR); |
1897 | processDbgVariableRecord(DVR, LiveSet); |
1898 | assert(LiveSet->isValid()); |
1899 | } |
1900 | } |
1901 | ProcessedLeadingDbgRecords = true; |
1902 | // II is now a non-debug instruction either with no attached DbgRecords, or |
1903 | // with attached processed DbgRecords. II has not been processed, and all |
1904 | // debug instructions or DbgRecords in the frame preceding II have been |
1905 | // processed. |
1906 | |
1907 | // We've processed everything in the "frame". Now determine which variables |
1908 | // cannot be represented by a dbg.declare. |
1909 | for (auto Var : VarsTouchedThisFrame) { |
1910 | LocKind Loc = getLocKind(LiveSet, Var); |
1911 | // If a variable's LocKind is anything other than LocKind::Mem then we |
1912 | // must note that it cannot be represented with a dbg.declare. |
1913 | // Note that this check is enough without having to check the result of |
// join() because for join to produce anything other than Mem after
1915 | // we've already seen a Mem we'd be joining None or Val with Mem. In that |
1916 | // case, we've already hit this codepath when we set the LocKind to Val |
1917 | // or None in that block. |
1918 | if (Loc != LocKind::Mem) { |
1919 | DebugVariable DbgVar = FnVarLocs->getVariable(ID: Var); |
1920 | DebugAggregate Aggr{DbgVar.getVariable(), DbgVar.getInlinedAt()}; |
1921 | NotAlwaysStackHomed.insert(V: Aggr); |
1922 | } |
1923 | } |
1924 | VarsTouchedThisFrame.clear(); |
1925 | } |
1926 | } |
1927 | |
1928 | AssignmentTrackingLowering::LocKind |
1929 | AssignmentTrackingLowering::joinKind(LocKind A, LocKind B) { |
1930 | // Partial order: |
1931 | // None > Mem, Val |
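// e.g. joinKind(Mem, Mem) = Mem and joinKind(Val, Val) = Val, while any
// mismatch (Mem with Val, or either with None) joins to None.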
1932 | return A == B ? A : LocKind::None; |
1933 | } |
1934 | |
1935 | AssignmentTrackingLowering::Assignment |
1936 | AssignmentTrackingLowering::joinAssignment(const Assignment &A, |
1937 | const Assignment &B) { |
1938 | // Partial order: |
1939 | // NoneOrPhi(null, null) > Known(v, ?s) |
1940 | |
1941 | // If either are NoneOrPhi the join is NoneOrPhi. |
1942 | // If either value is different then the result is |
1943 | // NoneOrPhi (joining two values is a Phi). |
1944 | if (!A.isSameSourceAssignment(Other: B)) |
1945 | return Assignment::makeNoneOrPhi(); |
1946 | if (A.Status == Assignment::NoneOrPhi) |
1947 | return Assignment::makeNoneOrPhi(); |
1948 | |
1949 | // Source is used to lookup the value + expression in the debug program if |
1950 | // the stack slot gets assigned a value earlier than expected. Because |
1951 | // we're only tracking the one dbg.assign, we can't capture debug PHIs. |
1952 | // It's unlikely that we're losing out on much coverage by avoiding that |
1953 | // extra work. |
1954 | // The Source may differ in this situation: |
1955 | // Pred.1: |
1956 | // dbg.assign i32 0, ..., !1, ... |
1957 | // Pred.2: |
1958 | // dbg.assign i32 1, ..., !1, ... |
1959 | // Here the same assignment (!1) was performed in both preds in the source, |
// but we can't use either one unless they are identical (e.g. we don't
1961 | // want to arbitrarily pick between constant values). |
1962 | auto JoinSource = [&]() -> DbgVariableRecord * { |
1963 | if (A.Source == B.Source) |
1964 | return A.Source; |
1965 | if (!A.Source || !B.Source) |
1966 | return nullptr; |
1967 | if (A.Source->isEquivalentTo(Other: *B.Source)) |
1968 | return A.Source; |
1969 | return nullptr; |
1970 | }; |
1971 | DbgVariableRecord *Source = JoinSource(); |
1972 | assert(A.Status == B.Status && A.Status == Assignment::Known); |
1973 | assert(A.ID == B.ID); |
1974 | return Assignment::make(ID: A.ID, Source); |
1975 | } |
1976 | |
1977 | AssignmentTrackingLowering::BlockInfo |
1978 | AssignmentTrackingLowering::joinBlockInfo(const BlockInfo &A, |
1979 | const BlockInfo &B) { |
1980 | return BlockInfo::join(A, B, NumVars: TrackedVariablesVectorSize); |
1981 | } |
1982 | |
1983 | bool AssignmentTrackingLowering::join( |
1984 | const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited) { |
1985 | |
1986 | SmallVector<const BasicBlock *> VisitedPreds; |
1987 | // Ignore backedges if we have not visited the predecessor yet. As the |
1988 | // predecessor hasn't yet had locations propagated into it, most locations |
1989 | // will not yet be valid, so treat them as all being uninitialized and |
1990 | // potentially valid. If a location guessed to be correct here is |
1991 | // invalidated later, we will remove it when we revisit this block. This |
1992 | // is essentially the same as initialising all LocKinds and Assignments to |
1993 | // an implicit ⊥ value which is the identity value for the join operation. |
1994 | for (const BasicBlock *Pred : predecessors(BB: &BB)) { |
1995 | if (Visited.count(Ptr: Pred)) |
1996 | VisitedPreds.push_back(Elt: Pred); |
1997 | } |
1998 | |
1999 | // No preds visited yet. |
2000 | if (VisitedPreds.empty()) { |
2001 | auto It = LiveIn.try_emplace(Key: &BB, Args: BlockInfo()); |
2002 | bool DidInsert = It.second; |
2003 | if (DidInsert) |
2004 | It.first->second.init(NumVars: TrackedVariablesVectorSize); |
2005 | return /*Changed*/ DidInsert; |
2006 | } |
2007 | |
2008 | // Exactly one visited pred. Copy the LiveOut from that pred into BB LiveIn. |
2009 | if (VisitedPreds.size() == 1) { |
2010 | const BlockInfo &PredLiveOut = LiveOut.find(Val: VisitedPreds[0])->second; |
2011 | |
2012 | // Check if there isn't an entry, or there is but the LiveIn set has |
2013 | // changed (expensive check). |
2014 | auto [CurrentLiveInEntry, Inserted] = LiveIn.try_emplace(Key: &BB, Args: PredLiveOut); |
2015 | if (Inserted) |
2016 | return /*Changed*/ true; |
2017 | if (PredLiveOut != CurrentLiveInEntry->second) { |
2018 | CurrentLiveInEntry->second = PredLiveOut; |
2019 | return /*Changed*/ true; |
2020 | } |
2021 | return /*Changed*/ false; |
2022 | } |
2023 | |
2024 | // More than one pred. Join LiveOuts of blocks 1 and 2. |
2025 | assert(VisitedPreds.size() > 1); |
2026 | const BlockInfo &PredLiveOut0 = LiveOut.find(Val: VisitedPreds[0])->second; |
2027 | const BlockInfo &PredLiveOut1 = LiveOut.find(Val: VisitedPreds[1])->second; |
2028 | BlockInfo BBLiveIn = joinBlockInfo(A: PredLiveOut0, B: PredLiveOut1); |
2029 | |
2030 | // Join the LiveOuts of subsequent blocks. |
2031 | ArrayRef Tail = ArrayRef(VisitedPreds).drop_front(N: 2); |
2032 | for (const BasicBlock *Pred : Tail) { |
2033 | const auto &PredLiveOut = LiveOut.find(Val: Pred); |
2034 | assert(PredLiveOut != LiveOut.end() && |
2035 | "block should have been processed already" ); |
2036 | BBLiveIn = joinBlockInfo(A: std::move(BBLiveIn), B: PredLiveOut->second); |
2037 | } |
2038 | |
2039 | // Save the joined result for BB. |
2040 | auto CurrentLiveInEntry = LiveIn.find(Val: &BB); |
2041 | // Check if there isn't an entry, or there is but the LiveIn set has changed |
2042 | // (expensive check). |
2043 | if (CurrentLiveInEntry == LiveIn.end()) |
2044 | LiveIn.try_emplace(Key: &BB, Args: std::move(BBLiveIn)); |
2045 | else if (BBLiveIn != CurrentLiveInEntry->second) |
2046 | CurrentLiveInEntry->second = std::move(BBLiveIn); |
2047 | else |
2048 | return /*Changed*/ false; |
2049 | return /*Changed*/ true; |
2050 | } |
2051 | |
2052 | /// Return true if A fully contains B. |
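/// e.g. a 32-bit fragment at bit offset 0 fully contains a 16-bit fragment
/// at bit offset 8, but only partially overlaps one at bit offset 24.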
2053 | static bool fullyContains(DIExpression::FragmentInfo A, |
2054 | DIExpression::FragmentInfo B) { |
2055 | auto ALeft = A.OffsetInBits; |
2056 | auto BLeft = B.OffsetInBits; |
2057 | if (BLeft < ALeft) |
2058 | return false; |
2059 | |
2060 | auto ARight = ALeft + A.SizeInBits; |
2061 | auto BRight = BLeft + B.SizeInBits; |
2062 | if (BRight > ARight) |
2063 | return false; |
2064 | return true; |
2065 | } |
2066 | |
2067 | static std::optional<at::AssignmentInfo> |
2068 | getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout) { |
2069 | // Don't bother checking if this is an AllocaInst. We know this |
2070 | // instruction has no tag which means there are no variables associated |
2071 | // with it. |
2072 | if (const auto *SI = dyn_cast<StoreInst>(Val: &I)) |
2073 | return at::getAssignmentInfo(DL: Layout, SI); |
2074 | if (const auto *MI = dyn_cast<MemIntrinsic>(Val: &I)) |
2075 | return at::getAssignmentInfo(DL: Layout, I: MI); |
2076 | // Alloca or non-store-like inst. |
2077 | return std::nullopt; |
2078 | } |
2079 | |
2080 | AllocaInst *getUnknownStore(const Instruction &I, const DataLayout &Layout) { |
2081 | auto *II = dyn_cast<IntrinsicInst>(Val: &I); |
2082 | if (!II) |
2083 | return nullptr; |
2084 | Intrinsic::ID ID = II->getIntrinsicID(); |
2085 | if (ID != Intrinsic::experimental_vp_strided_store && |
2086 | ID != Intrinsic::masked_store && ID != Intrinsic::vp_scatter && |
2087 | ID != Intrinsic::masked_scatter && ID != Intrinsic::vp_store && |
2088 | ID != Intrinsic::masked_compressstore) |
2089 | return nullptr; |
2090 | Value *MemOp = II->getArgOperand(i: 1); |
2091 | // We don't actually use the constant offset for now, but we may in future, |
2092 | // and the non-accumulating versions do not support a vector of pointers. |
2093 | APInt Offset(Layout.getIndexTypeSizeInBits(Ty: MemOp->getType()), 0); |
2094 | Value *Base = MemOp->stripAndAccumulateConstantOffsets(DL: Layout, Offset, AllowNonInbounds: true); |
2095 | // For Base pointers that are not an alloca instruction we don't need to do |
2096 | // anything, and simply return nullptr. |
2097 | return dyn_cast<AllocaInst>(Val: Base); |
2098 | } |
2099 | |
2100 | /// Build a map of {Variable x: Variables y} where all variable fragments |
2101 | /// contained within the variable fragment x are in set y. This means that |
2102 | /// y does not contain all overlaps because partial overlaps are excluded. |
2103 | /// |
2104 | /// While we're iterating over the function, add single location defs for |
2105 | /// dbg.declares to \p FnVarLocs. |
2106 | /// |
/// Variables that are interesting to this pass are added to
2108 | /// FnVarLocs->Variables first. TrackedVariablesVectorSize is set to the ID of |
2109 | /// the last interesting variable plus 1, meaning variables with ID 1 |
2110 | /// (inclusive) to TrackedVariablesVectorSize (exclusive) are interesting. The |
2111 | /// subsequent variables are either stack homed or fully promoted. |
2112 | /// |
2113 | /// Finally, populate UntaggedStoreVars with a mapping of untagged stores to |
2114 | /// the stored-to variable fragments, and UnknownStoreVars with a mapping |
2115 | /// of untagged unknown stores to the stored-to variable aggregates. |
2116 | /// |
2117 | /// These tasks are bundled together to reduce the number of times we need |
2118 | /// to iterate over the function as they can be achieved together in one pass. |
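/// As a hypothetical example, given fragments x = (offset 0, size 64),
/// y = (0, 32) and z = (8, 16) of one aggregate, the overlap map built
/// here is {x: [y, z], y: [z]}; fragments that only partially overlap are
/// not recorded against each other.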
2119 | static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares( |
2120 | Function &Fn, FunctionVarLocsBuilder *FnVarLocs, |
2121 | const DenseSet<DebugAggregate> &VarsWithStackSlot, |
2122 | AssignmentTrackingLowering::UntaggedStoreAssignmentMap &UntaggedStoreVars, |
2123 | AssignmentTrackingLowering::UnknownStoreAssignmentMap &UnknownStoreVars, |
2124 | unsigned &TrackedVariablesVectorSize) { |
2125 | DenseSet<DebugVariable> Seen; |
2126 | // Map of Variable: [Fragments]. |
2127 | DenseMap<DebugAggregate, SmallVector<DebugVariable, 8>> FragmentMap; |
2128 | // Iterate over all instructions: |
2129 | // - dbg.declare -> add single location variable record |
2130 | // - dbg.* -> Add fragments to FragmentMap |
2131 | // - untagged store -> Add fragments to FragmentMap and update |
2132 | // UntaggedStoreVars, or add to UnknownStoreVars if |
2133 | // we can't determine the fragment overlap. |
2134 | // We need to add fragments for untagged stores too so that we can correctly |
2135 | // clobber overlapped fragment locations later. |
2136 | SmallVector<DbgVariableRecord *> DPDeclares; |
2137 | auto ProcessDbgRecord = [&](DbgVariableRecord *Record) { |
2138 | if (Record->isDbgDeclare()) { |
2139 | DPDeclares.push_back(Elt: Record); |
2140 | return; |
2141 | } |
2142 | DebugVariable DV = DebugVariable(Record); |
2143 | DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()}; |
2144 | if (!VarsWithStackSlot.contains(V: DA)) |
2145 | return; |
2146 | if (Seen.insert(V: DV).second) |
2147 | FragmentMap[DA].push_back(Elt: DV); |
2148 | }; |
2149 | for (auto &BB : Fn) { |
2150 | for (auto &I : BB) { |
2151 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) |
2152 | ProcessDbgRecord(&DVR); |
2153 | if (auto Info = getUntaggedStoreAssignmentInfo(I, Layout: Fn.getDataLayout())) { |
2154 | // Find markers linked to this alloca. |
2155 | auto HandleDbgAssignForStore = [&](DbgVariableRecord *Assign) { |
2156 | std::optional<DIExpression::FragmentInfo> FragInfo; |
2157 | |
2158 | // Skip this assignment if the affected bits are outside of the |
2159 | // variable fragment. |
2160 | if (!at::calculateFragmentIntersect( |
2161 | DL: I.getDataLayout(), Dest: Info->Base, |
2162 | SliceOffsetInBits: Info->OffsetInBits, SliceSizeInBits: Info->SizeInBits, DVRAssign: Assign, Result&: FragInfo) || |
2163 | (FragInfo && FragInfo->SizeInBits == 0)) |
2164 | return; |
2165 | |
// FragInfo from calculateFragmentIntersect is nullopt if the
// resultant fragment matches Assign's fragment or the entire variable -
// in which case copy the fragment info from Assign. If FragInfo is still
// nullopt after the copy it means "no fragment info" instead, which
// is how it is usually interpreted.
2171 | if (!FragInfo) |
2172 | FragInfo = Assign->getExpression()->getFragmentInfo(); |
2173 | |
2174 | DebugVariable DV = |
2175 | DebugVariable(Assign->getVariable(), FragInfo, |
2176 | Assign->getDebugLoc().getInlinedAt()); |
2177 | DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()}; |
2178 | if (!VarsWithStackSlot.contains(V: DA)) |
2179 | return; |
2180 | |
2181 | // Cache this info for later. |
2182 | UntaggedStoreVars[&I].push_back( |
2183 | Elt: {FnVarLocs->insertVariable(V: DV), *Info}); |
2184 | |
2185 | if (Seen.insert(V: DV).second) |
2186 | FragmentMap[DA].push_back(Elt: DV); |
2187 | }; |
2188 | for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Inst: Info->Base)) |
2189 | HandleDbgAssignForStore(DVR); |
2190 | } else if (auto *AI = getUnknownStore(I, Layout: Fn.getDataLayout())) { |
2191 | // Find markers linked to this alloca. |
2192 | auto HandleDbgAssignForUnknownStore = [&](DbgVariableRecord *Assign) { |
2193 | // Because we can't currently represent the fragment info for this |
2194 | // store, we treat it as an unusable store to the whole variable. |
2195 | DebugVariable DV = |
2196 | DebugVariable(Assign->getVariable(), std::nullopt, |
2197 | Assign->getDebugLoc().getInlinedAt()); |
2198 | DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()}; |
2199 | if (!VarsWithStackSlot.contains(V: DA)) |
2200 | return; |
2201 | |
2202 | // Cache this info for later. |
2203 | UnknownStoreVars[&I].push_back(Elt: FnVarLocs->insertVariable(V: DV)); |
2204 | }; |
2205 | for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Inst: AI)) |
2206 | HandleDbgAssignForUnknownStore(DVR); |
2207 | } |
2208 | } |
2209 | } |
2210 | |
2211 | // Sort the fragment map for each DebugAggregate in ascending |
2212 | // order of fragment size - there should be no duplicates. |
2213 | for (auto &Pair : FragmentMap) { |
2214 | SmallVector<DebugVariable, 8> &Frags = Pair.second; |
2215 | std::sort(first: Frags.begin(), last: Frags.end(), |
2216 | comp: [](const DebugVariable &Next, const DebugVariable &Elmt) { |
2217 | return Elmt.getFragmentOrDefault().SizeInBits > |
2218 | Next.getFragmentOrDefault().SizeInBits; |
2219 | }); |
2220 | // Check for duplicates. |
2221 | assert(std::adjacent_find(Frags.begin(), Frags.end()) == Frags.end()); |
2222 | } |
2223 | |
2224 | // Build the map. |
2225 | AssignmentTrackingLowering::OverlapMap Map; |
2226 | for (auto &Pair : FragmentMap) { |
2227 | auto &Frags = Pair.second; |
2228 | for (auto It = Frags.begin(), IEnd = Frags.end(); It != IEnd; ++It) { |
2229 | DIExpression::FragmentInfo Frag = It->getFragmentOrDefault(); |
2230 | // Find the frags that this is contained within. |
2231 | // |
2232 | // Because Frags is sorted by size and none have the same offset and |
2233 | // size, we know that this frag can only be contained by subsequent |
2234 | // elements. |
2235 | SmallVector<DebugVariable, 8>::iterator OtherIt = It; |
2236 | ++OtherIt; |
2237 | VariableID ThisVar = FnVarLocs->insertVariable(V: *It); |
2238 | for (; OtherIt != IEnd; ++OtherIt) { |
2239 | DIExpression::FragmentInfo OtherFrag = OtherIt->getFragmentOrDefault(); |
2240 | VariableID OtherVar = FnVarLocs->insertVariable(V: *OtherIt); |
2241 | if (fullyContains(A: OtherFrag, B: Frag)) |
2242 | Map[OtherVar].push_back(Elt: ThisVar); |
2243 | } |
2244 | } |
2245 | } |
2246 | |
2247 | // VariableIDs are 1-based so the variable-tracking bitvector needs |
2248 | // NumVariables plus 1 bits. |
2249 | TrackedVariablesVectorSize = FnVarLocs->getNumVariables() + 1; |
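// e.g. with 3 interesting variables the IDs are 1, 2 and 3, so the
// BitVector needs 4 bits (bit 0 stays unused).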
2250 | |
2251 | // Finally, insert the declares afterwards, so the first IDs are all |
2252 | // partially stack homed vars. |
2253 | for (auto *DVR : DPDeclares) |
2254 | FnVarLocs->addSingleLocVar(Var: DebugVariable(DVR), Expr: DVR->getExpression(), |
2255 | DL: DVR->getDebugLoc(), |
2256 | R: RawLocationWrapper(DVR->getRawLocation())); |
2257 | return Map; |
2258 | } |
2259 | |
2260 | bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) { |
2261 | if (Fn.size() > MaxNumBlocks) { |
2262 | LLVM_DEBUG(dbgs() << "[AT] Dropping var locs in: " << Fn.getName() |
2263 | << ": too many blocks (" << Fn.size() << ")\n" ); |
2264 | at::deleteAll(F: &Fn); |
2265 | return false; |
2266 | } |
2267 | |
2268 | FnVarLocs = FnVarLocsBuilder; |
2269 | |
2270 | // The general structure here is inspired by VarLocBasedImpl.cpp |
2271 | // (LiveDebugValues). |
2272 | |
2273 | // Build the variable fragment overlap map. |
2274 | // Note that this pass doesn't handle partial overlaps correctly (FWIW |
2275 | // neither does LiveDebugVariables) because that is difficult to do and |
// appears to be a rare occurrence.
2277 | VarContains = buildOverlapMapAndRecordDeclares( |
2278 | Fn, FnVarLocs, VarsWithStackSlot: *VarsWithStackSlot, UntaggedStoreVars, UnknownStoreVars, |
2279 | TrackedVariablesVectorSize); |
2280 | |
2281 | // Prepare for traversal. |
2282 | ReversePostOrderTraversal<Function *> RPOT(&Fn); |
2283 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
2284 | std::greater<unsigned int>> |
2285 | Worklist; |
2286 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
2287 | std::greater<unsigned int>> |
2288 | Pending; |
2289 | DenseMap<unsigned int, BasicBlock *> OrderToBB; |
2290 | DenseMap<BasicBlock *, unsigned int> BBToOrder; |
2291 | { // Init OrderToBB and BBToOrder. |
2292 | unsigned int RPONumber = 0; |
2293 | for (BasicBlock *BB : RPOT) { |
2294 | OrderToBB[RPONumber] = BB; |
2295 | BBToOrder[BB] = RPONumber; |
2296 | Worklist.push(x: RPONumber); |
2297 | ++RPONumber; |
2298 | } |
2299 | LiveIn.reserve(NumEntries: RPONumber); |
2300 | LiveOut.reserve(NumEntries: RPONumber); |
2301 | } |
2302 | |
2303 | // Perform the traversal. |
2304 | // |
2305 | // This is a standard "union of predecessor outs" dataflow problem. To solve |
2306 | // it, we perform join() and process() using the two worklist method until |
2307 | // the LiveIn data for each block becomes unchanging. The "proof" that this |
2308 | // terminates can be put together by looking at the comments around LocKind, |
2309 | // Assignment, and the various join methods, which show that all the elements |
2310 | // involved are made up of join-semilattices; LiveIn(n) can only |
2311 | // monotonically increase in value throughout the dataflow. |
2312 | // |
2313 | SmallPtrSet<BasicBlock *, 16> Visited; |
2314 | while (!Worklist.empty()) { |
2315 | // We track what is on the pending worklist to avoid inserting the same |
2316 | // thing twice. |
2317 | SmallPtrSet<BasicBlock *, 16> OnPending; |
2318 | LLVM_DEBUG(dbgs() << "Processing Worklist\n" ); |
2319 | while (!Worklist.empty()) { |
2320 | BasicBlock *BB = OrderToBB[Worklist.top()]; |
2321 | LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n" ); |
2322 | Worklist.pop(); |
2323 | bool InChanged = join(BB: *BB, Visited); |
2324 | // Always consider LiveIn changed on the first visit. |
2325 | InChanged |= Visited.insert(Ptr: BB).second; |
2326 | if (InChanged) { |
2327 | LLVM_DEBUG(dbgs() << BB->getName() << " has new InLocs, process it\n" ); |
2328 | // Mutate a copy of LiveIn while processing BB. After calling process |
2329 | // LiveSet is the LiveOut set for BB. |
2330 | BlockInfo LiveSet = LiveIn[BB]; |
2331 | |
2332 | // Process the instructions in the block. |
2333 | process(BB&: *BB, LiveSet: &LiveSet); |
2334 | |
2335 | // Relatively expensive check: has anything changed in LiveOut for BB? |
2336 | if (LiveOut[BB] != LiveSet) { |
2337 | LLVM_DEBUG(dbgs() << BB->getName() |
2338 | << " has new OutLocs, add succs to worklist: [ " ); |
2339 | LiveOut[BB] = std::move(LiveSet); |
2340 | for (BasicBlock *Succ : successors(BB)) { |
2341 | if (OnPending.insert(Ptr: Succ).second) { |
2342 | LLVM_DEBUG(dbgs() << Succ->getName() << " " ); |
2343 | Pending.push(x: BBToOrder[Succ]); |
2344 | } |
2345 | } |
2346 | LLVM_DEBUG(dbgs() << "]\n" ); |
2347 | } |
2348 | } |
2349 | } |
2350 | Worklist.swap(pq&: Pending); |
// At this point Pending must be empty: the inner loop drained Worklist, so
// the swap moved an empty queue into Pending.
2353 | assert(Pending.empty() && "Pending should be empty" ); |
2354 | } |
2355 | |
2356 | // That's the hard part over. Now we just have some admin to do. |
2357 | |
2358 | // Record whether we inserted any intrinsics. |
2359 | bool InsertedAnyIntrinsics = false; |
2360 | |
2361 | // Identify and add defs for single location variables. |
2362 | // |
2363 | // Go through all of the defs that we plan to add. If the aggregate variable |
2364 | // it's a part of is not in the NotAlwaysStackHomed set we can emit a single |
2365 | // location def and omit the rest. Add an entry to AlwaysStackHomed so that |
// we can identify those unneeded defs later.
2367 | DenseSet<DebugAggregate> AlwaysStackHomed; |
2368 | for (const auto &Pair : InsertBeforeMap) { |
2369 | auto &Vec = Pair.second; |
2370 | for (VarLocInfo VarLoc : Vec) { |
2371 | DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID); |
2372 | DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()}; |
2373 | |
2374 | // Skip this Var if it's not always stack homed. |
2375 | if (NotAlwaysStackHomed.contains(V: Aggr)) |
2376 | continue; |
2377 | |
2378 | // Skip complex cases such as when different fragments of a variable have |
2379 | // been split into different allocas. Skipping in this case means falling |
2380 | // back to using a list of defs (which could reduce coverage, but is no |
2381 | // less correct). |
2382 | bool Simple = |
2383 | VarLoc.Expr->getNumElements() == 1 && VarLoc.Expr->startsWithDeref(); |
2384 | if (!Simple) { |
2385 | NotAlwaysStackHomed.insert(V: Aggr); |
2386 | continue; |
2387 | } |
2388 | |
2389 | // All source assignments to this variable remain and all stores to any |
2390 | // part of the variable store to the same address (with varying |
2391 | // offsets). We can just emit a single location for the whole variable. |
2392 | // |
2393 | // Unless we've already done so, create the single location def now. |
2394 | if (AlwaysStackHomed.insert(V: Aggr).second) { |
2395 | assert(!VarLoc.Values.hasArgList()); |
2396 | // TODO: When more complex cases are handled VarLoc.Expr should be |
2397 | // built appropriately rather than always using an empty DIExpression. |
2398 | // The assert below is a reminder. |
2399 | assert(Simple); |
2400 | VarLoc.Expr = DIExpression::get(Context&: Fn.getContext(), Elements: {}); |
2401 | DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID); |
2402 | FnVarLocs->addSingleLocVar(Var, Expr: VarLoc.Expr, DL: VarLoc.DL, R: VarLoc.Values); |
2403 | InsertedAnyIntrinsics = true; |
2404 | } |
2405 | } |
2406 | } |
2407 | |
2408 | // Insert the other DEFs. |
2409 | for (const auto &[InsertBefore, Vec] : InsertBeforeMap) { |
2410 | SmallVector<VarLocInfo> NewDefs; |
2411 | for (const VarLocInfo &VarLoc : Vec) { |
2412 | DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID); |
2413 | DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()}; |
2414 | // If this variable is always stack homed then we have already inserted a |
2415 | // dbg.declare and deleted this dbg.value. |
2416 | if (AlwaysStackHomed.contains(V: Aggr)) |
2417 | continue; |
2418 | NewDefs.push_back(Elt: VarLoc); |
2419 | InsertedAnyIntrinsics = true; |
2420 | } |
2421 | |
2422 | FnVarLocs->setWedge(Before: InsertBefore, Wedge: std::move(NewDefs)); |
2423 | } |
2424 | |
2425 | InsertedAnyIntrinsics |= emitPromotedVarLocs(FnVarLocs); |
2426 | |
2427 | return InsertedAnyIntrinsics; |
2428 | } |
2429 | |
2430 | bool AssignmentTrackingLowering::emitPromotedVarLocs( |
2431 | FunctionVarLocsBuilder *FnVarLocs) { |
2432 | bool InsertedAnyIntrinsics = false; |
2433 | // Go through every block, translating debug intrinsics for fully promoted |
2434 | // variables into FnVarLocs location defs. No analysis required for these. |
2435 | auto TranslateDbgRecord = [&](DbgVariableRecord *Record) { |
2436 | // Skip variables that haven't been promoted - we've dealt with those |
2437 | // already. |
2438 | if (VarsWithStackSlot->contains(V: getAggregate(Var: Record))) |
2439 | return; |
2440 | auto InsertBefore = getNextNode(DVR: Record); |
2441 | assert(InsertBefore && "Unexpected: debug intrinsics after a terminator" ); |
2442 | FnVarLocs->addVarLoc(Before: InsertBefore, Var: DebugVariable(Record), |
2443 | Expr: Record->getExpression(), DL: Record->getDebugLoc(), |
2444 | R: RawLocationWrapper(Record->getRawLocation())); |
2445 | InsertedAnyIntrinsics = true; |
2446 | }; |
2447 | for (auto &BB : Fn) { |
2448 | for (auto &I : BB) { |
2449 | // Skip instructions other than dbg.values and dbg.assigns. |
2450 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) |
2451 | if (DVR.isDbgValue() || DVR.isDbgAssign()) |
2452 | TranslateDbgRecord(&DVR); |
2453 | } |
2454 | } |
2455 | return InsertedAnyIntrinsics; |
2456 | } |
2457 | |
2458 | /// Remove redundant definitions within sequences of consecutive location defs. |
2459 | /// This is done using a backward scan to keep the last def describing a |
2460 | /// specific variable/fragment. |
2461 | /// |
2462 | /// This implements removeRedundantDbgInstrsUsingBackwardScan from |
2463 | /// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with |
2464 | /// FunctionVarLocsBuilder instead of with intrinsics. |
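/// For example (hypothetical wedge): scanning the defs
///   { "x" frag(0, 32) = 0,  "x" (whole variable) = 1 }
/// backwards keeps the whole-variable def and then drops the fragment def,
/// because the later def has already covered its bytes.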
static bool
removeRedundantDbgLocsUsingBackwardScan(const BasicBlock *BB,
                                        FunctionVarLocsBuilder &FnVarLocs) {
  bool Changed = false;
  SmallDenseMap<DebugAggregate, BitVector> VariableDefinedBytes;
  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
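  // (A wedge may be attached to a DbgVariableRecord as well as to an
  // Instruction, so two wedges can be adjacent without any non-debug
  // instruction between them.)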
  for (const Instruction &I : reverse(*BB)) {
    // Sequence of consecutive defs ended. Clear map for the next one.
    VariableDefinedBytes.clear();

    auto HandleLocsForWedge = [&](auto *WedgePosition) {
      // Get the location defs that start just before this instruction.
      const auto *Locs = FnVarLocs.getWedge(WedgePosition);
      if (!Locs)
        return;

      NumWedgesScanned++;
      bool ChangedThisWedge = false;
      // The new pruned set of defs, reversed because we're scanning backwards.
      SmallVector<VarLocInfo> NewDefsReversed;

      // Iterate over the existing defs in reverse.
      for (auto RIt = Locs->rbegin(), REnd = Locs->rend(); RIt != REnd; ++RIt) {
        NumDefsScanned++;
        DebugAggregate Aggr =
            getAggregate(FnVarLocs.getVariable(RIt->VariableID));
        uint64_t SizeInBits = Aggr.first->getSizeInBits().value_or(0);
        uint64_t SizeInBytes = divideCeil(SizeInBits, 8);

        // Cutoff for large variables to prevent expensive bitvector
        // operations.
        const uint64_t MaxSizeBytes = 2048;

        if (SizeInBytes == 0 || SizeInBytes > MaxSizeBytes) {
          // If the size is unknown (0) then keep this location def to be safe.
          // Do the same for defs of large variables, which would be expensive
          // to represent with a BitVector.
          NewDefsReversed.push_back(*RIt);
          continue;
        }

        // Only keep this location definition if it is not fully eclipsed by
        // other definitions in this wedge that come after it.

        // Insert the bytes the location definition defines.
        auto InsertResult =
            VariableDefinedBytes.try_emplace(Aggr, BitVector(SizeInBytes));
        bool FirstDefinition = InsertResult.second;
        BitVector &DefinedBytes = InsertResult.first->second;

        DIExpression::FragmentInfo Fragment =
            RIt->Expr->getFragmentInfo().value_or(
                DIExpression::FragmentInfo(SizeInBits, 0));
        bool InvalidFragment = Fragment.endInBits() > SizeInBits;
        uint64_t StartInBytes = Fragment.startInBits() / 8;
        uint64_t EndInBytes = divideCeil(Fragment.endInBits(), 8);
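        // For example, a fragment covering bits [8, 20) touches bytes [1, 3):
        // StartInBytes = 8 / 8 = 1 and EndInBytes = ceil(20 / 8) = 3, so a
        // partially covered trailing byte counts as defined.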

        // If this defines any previously undefined bytes, keep it.
        if (FirstDefinition || InvalidFragment ||
            DefinedBytes.find_first_unset_in(StartInBytes, EndInBytes) != -1) {
          if (!InvalidFragment)
            DefinedBytes.set(StartInBytes, EndInBytes);
          NewDefsReversed.push_back(*RIt);
          continue;
        }

        // Redundant def found: throw it away. Since the wedge of defs is being
        // rebuilt, doing nothing is the same as deleting an entry.
        ChangedThisWedge = true;
        NumDefsRemoved++;
      }

      // Un-reverse the defs and replace the wedge with the pruned version.
      if (ChangedThisWedge) {
        std::reverse(NewDefsReversed.begin(), NewDefsReversed.end());
        FnVarLocs.setWedge(WedgePosition, std::move(NewDefsReversed));
        NumWedgesChanged++;
        Changed = true;
      }
    };
    HandleLocsForWedge(&I);
    for (DbgVariableRecord &DVR : reverse(filterDbgVars(I.getDbgRecordRange())))
      HandleLocsForWedge(&DVR);
  }

  return Changed;
}

/// Remove redundant location defs using a forward scan. A def is redundant if
/// it specifies the same value and expression for a variable as the most
/// recent preceding def for that variable.
///
/// This implements removeRedundantDbgInstrsUsingForwardScan from
/// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with
/// FunctionVarLocsBuilder instead of with intrinsics.
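///
/// For example, in the sequence
///   DEF x = %v, DEF y = %w, DEF x = %v
/// (with identical expressions), the second def of x is redundant and is
/// removed.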
static bool
removeRedundantDbgLocsUsingForwardScan(const BasicBlock *BB,
                                       FunctionVarLocsBuilder &FnVarLocs) {
  bool Changed = false;
  DenseMap<DebugVariable, std::pair<RawLocationWrapper, DIExpression *>>
      VariableMap;

  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : *BB) {
    // Get the defs that come just before this instruction.
    auto HandleLocsForWedge = [&](auto *WedgePosition) {
      const auto *Locs = FnVarLocs.getWedge(WedgePosition);
      if (!Locs)
        return;

      NumWedgesScanned++;
      bool ChangedThisWedge = false;
      // The new pruned set of defs.
      SmallVector<VarLocInfo> NewDefs;

      // Iterate over the existing defs.
      for (const VarLocInfo &Loc : *Locs) {
        NumDefsScanned++;
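        // The key deliberately omits the fragment: any fragment info lives in
        // Loc.Expr, which is compared against the mapped expression below, so
        // a def of a different fragment still registers as a change.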
        DebugVariable Key(FnVarLocs.getVariable(Loc.VariableID).getVariable(),
                          std::nullopt, Loc.DL.getInlinedAt());
        auto [VMI, Inserted] = VariableMap.try_emplace(Key);

        // Update the map if we found a new value/expression describing the
        // variable, or if the variable wasn't mapped already.
        if (Inserted || VMI->second.first != Loc.Values ||
            VMI->second.second != Loc.Expr) {
          VMI->second = {Loc.Values, Loc.Expr};
          NewDefs.push_back(Loc);
          continue;
        }

        // Did not insert this Loc, which is the same as removing it.
        ChangedThisWedge = true;
        NumDefsRemoved++;
      }

      // Replace the existing wedge with the pruned version.
      if (ChangedThisWedge) {
        FnVarLocs.setWedge(WedgePosition, std::move(NewDefs));
        NumWedgesChanged++;
        Changed = true;
      }
    };

    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
      HandleLocsForWedge(&DVR);
    HandleLocsForWedge(&I);
  }

  return Changed;
}

static bool
removeUndefDbgLocsFromEntryBlock(const BasicBlock *BB,
                                 FunctionVarLocsBuilder &FnVarLocs) {
  assert(BB->isEntryBlock());
  // Do extra work to ensure that we remove semantically unimportant undefs.
  //
  // This is to work around the fact that SelectionDAG will hoist dbg.values
  // using argument values to the top of the entry block. That can move arg
  // dbg.values before undef and constant dbg.values which they previously
  // followed. The easiest thing to do is to just try to feed SelectionDAG
  // input it's happy with.
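  //
  // Schematically, that hoist can turn the entry-block order
  //   DEF x = undef, DEF x = %arg
  // into
  //   DEF x = %arg, DEF x = undef
  // which would make x appear optimized-out on entry.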
  //
  // Map of {Variable x: Fragments y} where the fragments y of variable x have
  // at least one non-undef location defined already. Don't use directly,
  // instead call DefineBits and HasDefinedBits.
  SmallDenseMap<DebugAggregate, SmallDenseSet<DIExpression::FragmentInfo>>
      VarsWithDef;
  // Specify that V (a fragment of A) has a non-undef location.
  auto DefineBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
    VarsWithDef[A].insert(V.getFragmentOrDefault());
  };
  // Return true if a non-undef location has been defined for V (a fragment of
  // A). Doesn't imply that the location is currently non-undef, just that a
  // non-undef location has been seen previously.
  auto HasDefinedBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
    auto FragsIt = VarsWithDef.find(A);
    if (FragsIt == VarsWithDef.end())
      return false;
    return llvm::any_of(FragsIt->second, [V](auto Frag) {
      return DIExpression::fragmentsOverlap(Frag, V.getFragmentOrDefault());
    });
  };
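  // For example, an undef (kill) def of x seen before any non-undef def of an
  // overlapping fragment of x is dropped below; once DefineBits has recorded a
  // non-undef def, later undef defs of x are kept.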

  bool Changed = false;

  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : *BB) {
    // Get the defs that come just before this instruction.
    auto HandleLocsForWedge = [&](auto *WedgePosition) {
      const auto *Locs = FnVarLocs.getWedge(WedgePosition);
      if (!Locs)
        return;

      NumWedgesScanned++;
      bool ChangedThisWedge = false;
      // The new pruned set of defs.
      SmallVector<VarLocInfo> NewDefs;

      // Iterate over the existing defs.
      for (const VarLocInfo &Loc : *Locs) {
        NumDefsScanned++;
        DebugAggregate Aggr{FnVarLocs.getVariable(Loc.VariableID).getVariable(),
                            Loc.DL.getInlinedAt()};
        DebugVariable Var = FnVarLocs.getVariable(Loc.VariableID);

        // Remove undef entries that are encountered before any non-undef
        // location defs in the entry block.
        if (Loc.Values.isKillLocation(Loc.Expr) && !HasDefinedBits(Aggr, Var)) {
          // Did not insert this Loc, which is the same as removing it.
          NumDefsRemoved++;
          ChangedThisWedge = true;
          continue;
        }

        DefineBits(Aggr, Var);
        NewDefs.push_back(Loc);
      }

      // Replace the existing wedge with the pruned version.
      if (ChangedThisWedge) {
        FnVarLocs.setWedge(WedgePosition, std::move(NewDefs));
        NumWedgesChanged++;
        Changed = true;
      }
    };
    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
      HandleLocsForWedge(&DVR);
    HandleLocsForWedge(&I);
  }

  return Changed;
}
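/// Remove redundant location defs from \p BB: the backward scan, then (for
/// the entry block only) undef removal, then the forward scan.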
static bool removeRedundantDbgLocs(const BasicBlock *BB,
                                   FunctionVarLocsBuilder &FnVarLocs) {
  bool MadeChanges = false;
  MadeChanges |= removeRedundantDbgLocsUsingBackwardScan(BB, FnVarLocs);
  if (BB->isEntryBlock())
    MadeChanges |= removeUndefDbgLocsFromEntryBlock(BB, FnVarLocs);
  MadeChanges |= removeRedundantDbgLocsUsingForwardScan(BB, FnVarLocs);

  if (MadeChanges)
    LLVM_DEBUG(dbgs() << "Removed redundant dbg locs from: " << BB->getName()
                      << "\n");
  return MadeChanges;
}

static DenseSet<DebugAggregate> findVarsWithStackSlot(Function &Fn) {
  DenseSet<DebugAggregate> Result;
  for (auto &BB : Fn) {
    for (auto &I : BB) {
      // Any variable linked to an instruction is considered interesting.
      // Ideally we would only need to check allocas; however, a DIAssignID
      // might get dropped from an alloca but not from its stores. In that
      // case, we still need to consider the variable interesting for NFC
      // behaviour with this change. TODO: Consider only looking at allocas.
      for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(&I)) {
        Result.insert({DVR->getVariable(), DVR->getDebugLoc().getInlinedAt()});
      }
    }
  }
  return Result;
}

static void analyzeFunction(Function &Fn, const DataLayout &Layout,
                            FunctionVarLocsBuilder *FnVarLocs) {
  // The analysis will generate location definitions for all variables, but we
  // only need to perform a dataflow on the set of variables which have a stack
  // slot. Find those now.
  DenseSet<DebugAggregate> VarsWithStackSlot = findVarsWithStackSlot(Fn);

  bool Changed = false;

  // Use a scope block to clean up AssignmentTrackingLowering before running
  // MemLocFragmentFill to reduce peak memory consumption.
  {
    AssignmentTrackingLowering Pass(Fn, Layout, &VarsWithStackSlot);
    Changed = Pass.run(FnVarLocs);
  }

  if (Changed) {
    MemLocFragmentFill Pass(Fn, &VarsWithStackSlot,
                            shouldCoalesceFragments(Fn));
    Pass.run(FnVarLocs);

    // Remove redundant entries. As well as reducing memory consumption and
    // spending some cycles now to save more later, this has another important
    // job: working around some SelectionDAG quirks. See the comments in
    // removeUndefDbgLocsFromEntryBlock for more info on that.
    for (auto &BB : Fn)
      removeRedundantDbgLocs(&BB, *FnVarLocs);
  }
}

FunctionVarLocs
DebugAssignmentTrackingAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  if (!isAssignmentTrackingEnabled(*F.getParent()))
    return FunctionVarLocs();

  auto &DL = F.getDataLayout();

  FunctionVarLocsBuilder Builder;
  analyzeFunction(F, DL, &Builder);

  // Save these results.
  FunctionVarLocs Results;
  Results.init(Builder);
  return Results;
}

AnalysisKey DebugAssignmentTrackingAnalysis::Key;

PreservedAnalyses
DebugAssignmentTrackingPrinterPass::run(Function &F,
                                        FunctionAnalysisManager &FAM) {
  FAM.getResult<DebugAssignmentTrackingAnalysis>(F).print(OS, F);
  return PreservedAnalyses::all();
}
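
// Note: assuming the usual new-PM registration in PassRegistry.def, this
// printer can be invoked from opt with -passes='print<debug-ata>'.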

bool AssignmentTrackingAnalysis::runOnFunction(Function &F) {
  if (!isAssignmentTrackingEnabled(*F.getParent()))
    return false;

  LLVM_DEBUG(dbgs() << "AssignmentTrackingAnalysis run on " << F.getName()
                    << "\n");

  // Clear previous results.
  Results->clear();

  FunctionVarLocsBuilder Builder;
  analyzeFunction(F, F.getDataLayout(), &Builder);

  // Save these results.
  Results->init(Builder);

  if (PrintResults && isFunctionInPrintList(F.getName()))
    Results->print(errs(), F);

  // Return false because this pass does not modify the function.
  return false;
}

AssignmentTrackingAnalysis::AssignmentTrackingAnalysis()
    : FunctionPass(ID), Results(std::make_unique<FunctionVarLocs>()) {}

char AssignmentTrackingAnalysis::ID = 0;

INITIALIZE_PASS(AssignmentTrackingAnalysis, DEBUG_TYPE,
                "Assignment Tracking Analysis", false, true)