//===-- AssignmentTrackingAnalysis.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "LiveDebugValues/LiveDebugValues.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugProgramInstruction.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PrintPasses.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <assert.h>
#include <cstdint>
#include <optional>
#include <queue>
#include <sstream>
#include <unordered_map>

using namespace llvm;
#define DEBUG_TYPE "debug-ata"

STATISTIC(NumDefsScanned, "Number of dbg locs that get scanned for removal");
STATISTIC(NumDefsRemoved, "Number of dbg locs removed");
STATISTIC(NumWedgesScanned, "Number of dbg wedges scanned");
STATISTIC(NumWedgesChanged, "Number of dbg wedges changed");

static cl::opt<unsigned>
    MaxNumBlocks("debug-ata-max-blocks", cl::init(10000),
                 cl::desc("Maximum num basic blocks before debug info dropped"),
                 cl::Hidden);
/// Option for debugging the pass, determines if the memory location fragment
/// filling happens after generating the variable locations.
static cl::opt<bool> EnableMemLocFragFill("mem-loc-frag-fill", cl::init(true),
                                          cl::Hidden);
/// Print the results of the analysis. Respects -filter-print-funcs.
static cl::opt<bool> PrintResults("print-debug-ata", cl::init(false),
                                  cl::Hidden);

/// Coalesce adjacent dbg locs describing memory locations that have contiguous
/// fragments. This reduces the cost of LiveDebugValues which does SSA
/// construction for each explicitly stated variable fragment.
static cl::opt<cl::boolOrDefault>
    CoalesceAdjacentFragmentsOpt("debug-ata-coalesce-frags", cl::Hidden);

// Implicit conversions are disabled for enum class types, so unfortunately we
// need to create a DenseMapInfo wrapper around the specified underlying type.
template <> struct llvm::DenseMapInfo<VariableID> {
  using Wrapped = DenseMapInfo<unsigned>;
  static inline VariableID getEmptyKey() {
    return static_cast<VariableID>(Wrapped::getEmptyKey());
  }
  static inline VariableID getTombstoneKey() {
    return static_cast<VariableID>(Wrapped::getTombstoneKey());
  }
  static unsigned getHashValue(const VariableID &Val) {
    return Wrapped::getHashValue(static_cast<unsigned>(Val));
  }
  static bool isEqual(const VariableID &LHS, const VariableID &RHS) {
    return LHS == RHS;
  }
};
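
// With this specialization in place, VariableID can be used directly as a
// DenseMap key, e.g. (illustrative only, not a map used in this file):
//   DenseMap<VariableID, unsigned> UseCounts;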

using VarLocInsertPt = PointerUnion<const Instruction *, const DbgRecord *>;

template <> struct std::hash<VarLocInsertPt> {
  std::size_t operator()(const VarLocInsertPt &Arg) const {
    return std::hash<void *>()(Arg.getOpaqueValue());
  }
};

/// Helper class to build FunctionVarLocs, since that class isn't easy to
/// modify. TODO: There's not a great deal of value in the split, it could be
/// worth merging the two classes.
class FunctionVarLocsBuilder {
  friend FunctionVarLocs;
  UniqueVector<DebugVariable> Variables;
  // Use an unordered_map so we don't invalidate iterators after
  // insert/modifications.
  std::unordered_map<VarLocInsertPt, SmallVector<VarLocInfo>> VarLocsBeforeInst;

  SmallVector<VarLocInfo> SingleLocVars;

public:
  unsigned getNumVariables() const { return Variables.size(); }

  /// Find or insert \p V and return the ID.
  VariableID insertVariable(DebugVariable V) {
    return static_cast<VariableID>(Variables.insert(V));
  }

  /// Get a variable from its \p ID.
  const DebugVariable &getVariable(VariableID ID) const {
    return Variables[static_cast<unsigned>(ID)];
  }

  /// Return ptr to wedge of defs or nullptr if no defs come just before \p
  /// Before.
  const SmallVectorImpl<VarLocInfo> *getWedge(VarLocInsertPt Before) const {
    auto R = VarLocsBeforeInst.find(Before);
    if (R == VarLocsBeforeInst.end())
      return nullptr;
    return &R->second;
  }

  /// Replace the defs that come just before \p Before with \p Wedge.
  void setWedge(VarLocInsertPt Before, SmallVector<VarLocInfo> &&Wedge) {
    VarLocsBeforeInst[Before] = std::move(Wedge);
  }

  /// Add a def for a variable that is valid for its lifetime.
  void addSingleLocVar(DebugVariable Var, DIExpression *Expr, DebugLoc DL,
                       RawLocationWrapper R) {
    VarLocInfo VarLoc;
    VarLoc.VariableID = insertVariable(Var);
    VarLoc.Expr = Expr;
    VarLoc.DL = std::move(DL);
    VarLoc.Values = R;
    SingleLocVars.emplace_back(VarLoc);
  }

  /// Add a def to the wedge of defs just before \p Before.
  void addVarLoc(VarLocInsertPt Before, DebugVariable Var, DIExpression *Expr,
                 DebugLoc DL, RawLocationWrapper R) {
    VarLocInfo VarLoc;
    VarLoc.VariableID = insertVariable(Var);
    VarLoc.Expr = Expr;
    VarLoc.DL = std::move(DL);
    VarLoc.Values = R;
    VarLocsBeforeInst[Before].emplace_back(VarLoc);
  }
};
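
// Illustrative usage sketch (Var, Expr, DL, Values, and I are placeholder
// names, not taken from this file): a producer registers variables and defs
// against insert points, and FunctionVarLocs::init (below) then flattens the
// builder for fast lookup.
//
//   FunctionVarLocsBuilder Builder;
//   VariableID ID = Builder.insertVariable(Var);   // find-or-insert, 1-based
//   Builder.addVarLoc(&I, Var, Expr, DL, Values);  // def in wedge before I
//   Builder.addSingleLocVar(Var, Expr, DL, Values); // valid for its lifetime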

void FunctionVarLocs::print(raw_ostream &OS, const Function &Fn) const {
  // Print the variable table first. TODO: Sorting by variable could make the
  // output more stable?
  unsigned Counter = -1;
  OS << "=== Variables ===\n";
  for (const DebugVariable &V : Variables) {
    ++Counter;
    // Skip first entry because it is a dummy entry.
    if (Counter == 0) {
      continue;
    }
    OS << "[" << Counter << "] " << V.getVariable()->getName();
    if (auto F = V.getFragment())
      OS << " bits [" << F->OffsetInBits << ", "
         << F->OffsetInBits + F->SizeInBits << ")";
    if (const auto *IA = V.getInlinedAt())
      OS << " inlined-at " << *IA;
    OS << "\n";
  }

  auto PrintLoc = [&OS](const VarLocInfo &Loc) {
    OS << "DEF Var=[" << (unsigned)Loc.VariableID << "]"
       << " Expr=" << *Loc.Expr << " Values=(";
    for (auto *Op : Loc.Values.location_ops()) {
      OS << Op->getName() << " ";
    }
    OS << ")\n";
  };

  // Print the single location variables.
  OS << "=== Single location vars ===\n";
  for (auto It = single_locs_begin(), End = single_locs_end(); It != End;
       ++It) {
    PrintLoc(*It);
  }

  // Print the non-single-location defs in line with IR.
  OS << "=== In-line variable defs ===";
  for (const BasicBlock &BB : Fn) {
    OS << "\n" << BB.getName() << ":\n";
    for (const Instruction &I : BB) {
      for (auto It = locs_begin(&I), End = locs_end(&I); It != End; ++It) {
        PrintLoc(*It);
      }
      OS << I << "\n";
    }
  }
}

void FunctionVarLocs::init(FunctionVarLocsBuilder &Builder) {
  // Add the single-location variables first.
  for (const auto &VarLoc : Builder.SingleLocVars)
    VarLocRecords.emplace_back(VarLoc);
  // Mark the end of the section.
  SingleVarLocEnd = VarLocRecords.size();

  // Insert a contiguous block of VarLocInfos for each instruction, mapping it
  // to the start and end position in the vector with VarLocsBeforeInst. This
  // block includes VarLocs for any DbgVariableRecords attached to that
  // instruction.
  for (auto &P : Builder.VarLocsBeforeInst) {
    // Process VarLocs attached to a DbgRecord alongside their marker
    // Instruction.
    if (isa<const DbgRecord *>(P.first))
      continue;
    const Instruction *I = cast<const Instruction *>(P.first);
    unsigned BlockStart = VarLocRecords.size();
    // Any VarLocInfos attached to a DbgRecord should now be remapped to their
    // marker Instruction, in order of DbgRecord appearance and prior to any
    // VarLocInfos attached directly to that instruction.
    for (const DbgVariableRecord &DVR : filterDbgVars(I->getDbgRecordRange())) {
      // Even though DVR defines a variable location, VarLocsBeforeInst can
      // still be empty if that VarLoc was redundant.
      auto It = Builder.VarLocsBeforeInst.find(&DVR);
      if (It == Builder.VarLocsBeforeInst.end())
        continue;
      for (const VarLocInfo &VarLoc : It->second)
        VarLocRecords.emplace_back(VarLoc);
    }
    for (const VarLocInfo &VarLoc : P.second)
      VarLocRecords.emplace_back(VarLoc);
    unsigned BlockEnd = VarLocRecords.size();
    // Record the start and end indices.
    if (BlockEnd != BlockStart)
      VarLocsBeforeInst[I] = {BlockStart, BlockEnd};
  }

  // Copy the Variables vector from the builder's UniqueVector.
  assert(Variables.empty() && "Expect clear before init");
  // UniqueVector's IDs are one-based (which means the VarLocInfo VarID values
  // are one-based) so reserve an extra and insert a dummy.
  Variables.reserve(Builder.Variables.size() + 1);
  Variables.push_back(DebugVariable(nullptr, std::nullopt, nullptr));
  Variables.append(Builder.Variables.begin(), Builder.Variables.end());
}

void FunctionVarLocs::clear() {
  Variables.clear();
  VarLocRecords.clear();
  VarLocsBeforeInst.clear();
  SingleVarLocEnd = 0;
}

/// Walk backwards along constant GEPs and bitcasts to the base storage from \p
/// Start as far as possible. Prepend \p Expression with the offset and append
/// it with a DW_OP_deref that has been implicit until now. Returns the
/// walked-to value and modified expression.
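/// For example (illustrative): starting from a constant GEP 8 bytes into an
/// alloca with an empty input expression, the walk returns the alloca and the
/// expression DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref).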
static std::pair<Value *, DIExpression *>
walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start,
                                  DIExpression *Expression) {
  APInt OffsetInBytes(DL.getTypeSizeInBits(Start->getType()), false);
  Value *End =
      Start->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetInBytes);
  SmallVector<uint64_t, 3> Ops;
  if (OffsetInBytes.getBoolValue()) {
    Ops = {dwarf::DW_OP_plus_uconst, OffsetInBytes.getZExtValue()};
    Expression = DIExpression::prependOpcodes(
        Expression, Ops, /*StackValue=*/false, /*EntryValue=*/false);
  }
  Expression = DIExpression::append(Expression, {dwarf::DW_OP_deref});
  return {End, Expression};
}

/// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression
/// doesn't explicitly describe a memory location with DW_OP_deref or if the
/// expression is too complex to interpret.
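/// Worked examples, derived from the cases handled below:
///   DIExpression(DW_OP_deref)                               -> 0
///   DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref)         -> 8
///   DIExpression(DW_OP_constu, 8, DW_OP_minus, DW_OP_deref) -> -8
///   DIExpression(DW_OP_plus_uconst, 8)                      -> std::nullopt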
static std::optional<int64_t>
getDerefOffsetInBytes(const DIExpression *DIExpr) {
  int64_t Offset = 0;
  const unsigned NumElements = DIExpr->getNumElements();
  const auto Elements = DIExpr->getElements();
  unsigned ExpectedDerefIdx = 0;
  // Extract the offset.
  if (NumElements > 2 && Elements[0] == dwarf::DW_OP_plus_uconst) {
    Offset = Elements[1];
    ExpectedDerefIdx = 2;
  } else if (NumElements > 3 && Elements[0] == dwarf::DW_OP_constu) {
    ExpectedDerefIdx = 3;
    if (Elements[2] == dwarf::DW_OP_plus)
      Offset = Elements[1];
    else if (Elements[2] == dwarf::DW_OP_minus)
      Offset = -Elements[1];
    else
      return std::nullopt;
  }

  // If that's all there is it means there's no deref.
  if (ExpectedDerefIdx >= NumElements)
    return std::nullopt;

  // Check the next element is DW_OP_deref - otherwise this is too complex or
  // isn't a deref expression.
  if (Elements[ExpectedDerefIdx] != dwarf::DW_OP_deref)
    return std::nullopt;

  // Check the final operation is either the DW_OP_deref or is a fragment.
  if (NumElements == ExpectedDerefIdx + 1)
    return Offset; // Ends with deref.
  unsigned ExpectedFragFirstIdx = ExpectedDerefIdx + 1;
  unsigned ExpectedFragFinalIdx = ExpectedFragFirstIdx + 2;
  if (NumElements == ExpectedFragFinalIdx + 1 &&
      Elements[ExpectedFragFirstIdx] == dwarf::DW_OP_LLVM_fragment)
    return Offset; // Ends with deref + fragment.

  // Don't bother trying to interpret anything more complex.
  return std::nullopt;
}

/// A whole (unfragmented) source variable.
using DebugAggregate = std::pair<const DILocalVariable *, const DILocation *>;
static DebugAggregate getAggregate(const DebugVariable &Var) {
  return DebugAggregate(Var.getVariable(), Var.getInlinedAt());
}

static bool shouldCoalesceFragments(Function &F) {
  // Enabling fragment coalescing reduces compiler run time when instruction
  // referencing is enabled. However, it may cause LiveDebugVariables to create
  // incorrect locations. Since instruction-referencing mode effectively
  // bypasses LiveDebugVariables we only enable coalescing if the cl::opt flag
  // has not been explicitly set and instruction-referencing is turned on.
  switch (CoalesceAdjacentFragmentsOpt) {
  case cl::boolOrDefault::BOU_UNSET:
    return debuginfoShouldUseDebugInstrRef(F.getParent()->getTargetTriple());
  case cl::boolOrDefault::BOU_TRUE:
    return true;
  case cl::boolOrDefault::BOU_FALSE:
    return false;
  }
  llvm_unreachable("Unknown boolOrDefault value");
}

namespace {
/// In dwarf emission, the following sequence
///    1. dbg.value ... Fragment(0, 64)
///    2. dbg.value ... Fragment(0, 32)
/// effectively sets Fragment(32, 32) to undef (each def sets all bits not in
/// the intersection of the fragments to having "no location"). This makes
/// sense for implicit location values because splitting the computed values
/// could be troublesome, and is probably quite uncommon. When we convert
/// dbg.assigns to dbg.value+deref this kind of thing is common, and describing
/// a location (memory) rather than a value means we don't need to worry about
/// splitting any values, so we try to recover the rest of the fragment
/// location here.
/// This class performs a(nother) dataflow analysis over the function, adding
/// variable locations so that any bits of a variable with a memory location
/// have that location explicitly reinstated at each subsequent variable
/// location definition that doesn't overwrite those bits. i.e. after a
/// variable location def, insert new defs for the memory location with
/// fragments for the difference of "all bits currently in memory" and "the
/// fragment of the second def".
class MemLocFragmentFill {
  Function &Fn;
  FunctionVarLocsBuilder *FnVarLocs;
  const DenseSet<DebugAggregate> *VarsWithStackSlot;
  bool CoalesceAdjacentFragments;

  // 0 = no memory location.
  using BaseAddress = unsigned;
  using OffsetInBitsTy = unsigned;
  using FragTraits = IntervalMapHalfOpenInfo<OffsetInBitsTy>;
  using FragsInMemMap = IntervalMap<
      OffsetInBitsTy, BaseAddress,
      IntervalMapImpl::NodeSizer<OffsetInBitsTy, BaseAddress>::LeafSize,
      FragTraits>;
  FragsInMemMap::Allocator IntervalMapAlloc;
  using VarFragMap = DenseMap<unsigned, FragsInMemMap>;

  /// IDs for memory location base addresses in maps. Use 0 to indicate that
  /// there's no memory location.
  UniqueVector<RawLocationWrapper> Bases;
  UniqueVector<DebugAggregate> Aggregates;
  DenseMap<const BasicBlock *, VarFragMap> LiveIn;
  DenseMap<const BasicBlock *, VarFragMap> LiveOut;

  struct FragMemLoc {
    unsigned Var;
    unsigned Base;
    unsigned OffsetInBits;
    unsigned SizeInBits;
    DebugLoc DL;
  };
  using InsertMap = MapVector<VarLocInsertPt, SmallVector<FragMemLoc>>;

  /// BBInsertBeforeMap holds a description for the set of location defs to be
  /// inserted after the analysis is complete. It is updated during the
  /// dataflow and the entry for a block is CLEARED each time it is
  /// (re-)visited. After the dataflow is complete, each block entry will
  /// contain the set of defs calculated during the final (fixed-point)
  /// iteration.
  DenseMap<const BasicBlock *, InsertMap> BBInsertBeforeMap;

  static bool intervalMapsAreEqual(const FragsInMemMap &A,
                                   const FragsInMemMap &B) {
    auto AIt = A.begin(), AEnd = A.end();
    auto BIt = B.begin(), BEnd = B.end();
    for (; AIt != AEnd; ++AIt, ++BIt) {
      if (BIt == BEnd)
        return false; // B has fewer elements than A.
      if (AIt.start() != BIt.start() || AIt.stop() != BIt.stop())
        return false; // Interval is different.
      if (*AIt != *BIt)
        return false; // Value at interval is different.
    }
    // AIt == AEnd. Check BIt is also now at end.
    return BIt == BEnd;
  }

  static bool varFragMapsAreEqual(const VarFragMap &A, const VarFragMap &B) {
    if (A.size() != B.size())
      return false;
    for (const auto &APair : A) {
      auto BIt = B.find(APair.first);
      if (BIt == B.end())
        return false;
      if (!intervalMapsAreEqual(APair.second, BIt->second))
        return false;
    }
    return true;
  }

  /// Return a string for the value that \p BaseID represents.
  std::string toString(unsigned BaseID) {
    if (BaseID)
      return Bases[BaseID].getVariableLocationOp(0)->getName().str();
    else
      return "None";
  }

  /// Format string describing an FragsInMemMap (IntervalMap) interval.
  std::string toString(FragsInMemMap::const_iterator It, bool Newline = true) {
    std::string String;
    std::stringstream S(String);
    if (It.valid()) {
      S << "[" << It.start() << ", " << It.stop()
        << "): " << toString(It.value());
    } else {
      S << "invalid iterator (end)";
    }
    if (Newline)
      S << "\n";
    return S.str();
  }

  FragsInMemMap meetFragments(const FragsInMemMap &A, const FragsInMemMap &B) {
    FragsInMemMap Result(IntervalMapAlloc);
    for (auto AIt = A.begin(), AEnd = A.end(); AIt != AEnd; ++AIt) {
      LLVM_DEBUG(dbgs() << "a " << toString(AIt));
      // This is basically copied from process() and inverted (process is
      // performing something like a union whereas this is more of an
      // intersect).

      // There's no work to do if interval `a` overlaps no fragments in map `B`.
      if (!B.overlaps(AIt.start(), AIt.stop()))
        continue;

      // Does StartBit intersect an existing fragment?
      auto FirstOverlap = B.find(AIt.start());
      assert(FirstOverlap != B.end());
      bool IntersectStart = FirstOverlap.start() < AIt.start();
      LLVM_DEBUG(dbgs() << "- FirstOverlap " << toString(FirstOverlap, false)
                        << ", IntersectStart: " << IntersectStart << "\n");

      // Does EndBit intersect an existing fragment?
      auto LastOverlap = B.find(AIt.stop());
      bool IntersectEnd =
          LastOverlap != B.end() && LastOverlap.start() < AIt.stop();
      LLVM_DEBUG(dbgs() << "- LastOverlap " << toString(LastOverlap, false)
                        << ", IntersectEnd: " << IntersectEnd << "\n");

      // Check if both ends of `a` intersect the same interval `b`.
      if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
        // Insert `a` (`a` is contained in `b`) if the values match.
        //   [ a ]
        // [ - b - ]
        // -
        //   [ r ]
        LLVM_DEBUG(dbgs() << "- a is contained within "
                          << toString(FirstOverlap));
        if (*AIt && *AIt == *FirstOverlap)
          Result.insert(AIt.start(), AIt.stop(), *AIt);
      } else {
        // There's an overlap but `a` is not fully contained within
        // `b`. Shorten any end-point intersections.
        //     [ - a - ]
        // [ - b - ]
        // -
        //     [ r ]
        auto Next = FirstOverlap;
        if (IntersectStart) {
          LLVM_DEBUG(dbgs() << "- insert intersection of a and "
                            << toString(FirstOverlap));
          if (*AIt && *AIt == *FirstOverlap)
            Result.insert(AIt.start(), FirstOverlap.stop(), *AIt);
          ++Next;
        }
        // [ - a - ]
        //     [ - b - ]
        // -
        //     [ r ]
        if (IntersectEnd) {
          LLVM_DEBUG(dbgs() << "- insert intersection of a and "
                            << toString(LastOverlap));
          if (*AIt && *AIt == *LastOverlap)
            Result.insert(LastOverlap.start(), AIt.stop(), *AIt);
        }

        // Insert all intervals in map `B` that are contained within interval
        // `a` where the values match.
        // [ -  - a -  - ]
        // [ b1 ]   [ b2 ]
        // -
        // [ r1 ]   [ r2 ]
        while (Next != B.end() && Next.start() < AIt.stop() &&
               Next.stop() <= AIt.stop()) {
          LLVM_DEBUG(dbgs()
                     << "- insert intersection of a and " << toString(Next));
          if (*AIt && *AIt == *Next)
            Result.insert(Next.start(), Next.stop(), *Next);
          ++Next;
        }
      }
    }
    return Result;
  }

  /// Meet \p A and \p B, storing the result in \p A.
  void meetVars(VarFragMap &A, const VarFragMap &B) {
    // Meet A and B.
    //
    // Result = meet(a, b) for a in A, b in B where Var(a) == Var(b)
    for (auto It = A.begin(), End = A.end(); It != End; ++It) {
      unsigned AVar = It->first;
      FragsInMemMap &AFrags = It->second;
      auto BIt = B.find(AVar);
      if (BIt == B.end()) {
        A.erase(It);
        continue; // Var has no bits defined in B.
      }
      LLVM_DEBUG(dbgs() << "meet fragment maps for "
                        << Aggregates[AVar].first->getName() << "\n");
      AFrags = meetFragments(AFrags, BIt->second);
    }
  }

  bool meet(const BasicBlock &BB,
            const SmallPtrSet<BasicBlock *, 16> &Visited) {
    LLVM_DEBUG(dbgs() << "meet block info from preds of " << BB.getName()
                      << "\n");

    VarFragMap BBLiveIn;
    bool FirstMeet = true;
    // LiveIn locs for BB is the meet of the already-processed preds' LiveOut
    // locs.
    for (const BasicBlock *Pred : predecessors(&BB)) {
      // Ignore preds that haven't been processed yet. This is essentially the
      // same as initialising all variables to implicit top value (⊤) which is
      // the identity value for the meet operation.
      if (!Visited.count(Pred))
        continue;

      auto PredLiveOut = LiveOut.find(Pred);
      assert(PredLiveOut != LiveOut.end());

      if (FirstMeet) {
        LLVM_DEBUG(dbgs() << "BBLiveIn = " << Pred->getName() << "\n");
        BBLiveIn = PredLiveOut->second;
        FirstMeet = false;
      } else {
        LLVM_DEBUG(dbgs() << "BBLiveIn = meet BBLiveIn, " << Pred->getName()
                          << "\n");
        meetVars(BBLiveIn, PredLiveOut->second);
      }

      // An empty set is ⊥ for the intersect-like meet operation. If we've
      // already got ⊥ there's no need to run the code - we know the result is
      // ⊥ since `meet(a, ⊥) = ⊥`.
      if (BBLiveIn.size() == 0)
        break;
    }

    // If there's no LiveIn entry for the block yet, add it.
    auto [CurrentLiveInEntry, Inserted] = LiveIn.try_emplace(&BB);
    if (Inserted) {
      LLVM_DEBUG(dbgs() << "change=true (first) on meet on " << BB.getName()
                        << "\n");
      CurrentLiveInEntry->second = std::move(BBLiveIn);
      return /*Changed=*/true;
    }

    // If the LiveIn set has changed (expensive check) update it and return
    // true.
    if (!varFragMapsAreEqual(BBLiveIn, CurrentLiveInEntry->second)) {
      LLVM_DEBUG(dbgs() << "change=true on meet on " << BB.getName() << "\n");
      CurrentLiveInEntry->second = std::move(BBLiveIn);
      return /*Changed=*/true;
    }

    LLVM_DEBUG(dbgs() << "change=false on meet on " << BB.getName() << "\n");
    return /*Changed=*/false;
  }

  void insertMemLoc(BasicBlock &BB, VarLocInsertPt Before, unsigned Var,
                    unsigned StartBit, unsigned EndBit, unsigned Base,
                    DebugLoc DL) {
    assert(StartBit < EndBit && "Cannot create fragment of size <= 0");
    if (!Base)
      return;
    FragMemLoc Loc;
    Loc.Var = Var;
    Loc.OffsetInBits = StartBit;
    Loc.SizeInBits = EndBit - StartBit;
    assert(Base && "Expected a non-zero ID for Base address");
    Loc.Base = Base;
    Loc.DL = DL;
    BBInsertBeforeMap[&BB][Before].push_back(Loc);
    LLVM_DEBUG(dbgs() << "Add mem def for " << Aggregates[Var].first->getName()
                      << " bits [" << StartBit << ", " << EndBit << ")\n");
  }

  /// Inserts a new dbg def if the interval found when looking up \p StartBit
  /// in \p FragMap starts before \p StartBit or ends after \p EndBit (which
  /// indicates - assuming StartBit->EndBit has just been inserted - that the
  /// slice has been coalesced in the map).
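  /// For example (illustrative values): if the map already held [0, 32) -> B
  /// and [32, 64) -> B has just been inserted, IntervalMap coalesces them into
  /// [0, 64) -> B; looking up StartBit=32 then yields an interval starting at
  /// 0, so a single def covering [0, 64) is emitted.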
  void coalesceFragments(BasicBlock &BB, VarLocInsertPt Before, unsigned Var,
                         unsigned StartBit, unsigned EndBit, unsigned Base,
                         DebugLoc DL, const FragsInMemMap &FragMap) {
    if (!CoalesceAdjacentFragments)
      return;
    // We've inserted the location into the map. The map will have coalesced
    // adjacent intervals (variable fragments) that describe the same memory
    // location. Use this knowledge to insert a debug location that describes
    // that coalesced fragment. This may eclipse other locs we've just
    // inserted. This is okay as redundant locs will be cleaned up later.
    auto CoalescedFrag = FragMap.find(StartBit);
    // Bail if no coalescing has taken place.
    if (CoalescedFrag.start() == StartBit && CoalescedFrag.stop() == EndBit)
      return;

    LLVM_DEBUG(dbgs() << "- Insert loc for bits " << CoalescedFrag.start()
                      << " to " << CoalescedFrag.stop() << "\n");
    insertMemLoc(BB, Before, Var, CoalescedFrag.start(), CoalescedFrag.stop(),
                 Base, DL);
  }

  void addDef(const VarLocInfo &VarLoc, VarLocInsertPt Before, BasicBlock &BB,
              VarFragMap &LiveSet) {
    DebugVariable DbgVar = FnVarLocs->getVariable(VarLoc.VariableID);
    if (skipVariable(DbgVar.getVariable()))
      return;
    // Don't bother doing anything for this variable if we know it's fully
    // promoted. We're only interested in variables that (sometimes) live on
    // the stack here.
    if (!VarsWithStackSlot->count(getAggregate(DbgVar)))
      return;
    unsigned Var = Aggregates.insert(
        DebugAggregate(DbgVar.getVariable(), VarLoc.DL.getInlinedAt()));

    // [StartBit: EndBit) are the bits affected by this def.
    const DIExpression *DIExpr = VarLoc.Expr;
    unsigned StartBit;
    unsigned EndBit;
    if (auto Frag = DIExpr->getFragmentInfo()) {
      StartBit = Frag->OffsetInBits;
      EndBit = StartBit + Frag->SizeInBits;
    } else {
      assert(static_cast<bool>(DbgVar.getVariable()->getSizeInBits()));
      StartBit = 0;
      EndBit = *DbgVar.getVariable()->getSizeInBits();
    }

    // We will only fill fragments for simple memory-describing dbg.value
    // intrinsics. If the fragment offset is the same as the offset from the
    // base pointer, do The Thing, otherwise fall back to normal dbg.value
    // behaviour. AssignmentTrackingLowering has generated DIExpressions
    // written in terms of the base pointer.
    // TODO: Remove this condition since the fragment offset doesn't always
    // equal the offset from base pointer (e.g. for a SROA-split variable).
    const auto DerefOffsetInBytes = getDerefOffsetInBytes(DIExpr);
    const unsigned Base =
        DerefOffsetInBytes && *DerefOffsetInBytes * 8 == StartBit
            ? Bases.insert(VarLoc.Values)
            : 0;
    LLVM_DEBUG(dbgs() << "DEF " << DbgVar.getVariable()->getName() << " ["
                      << StartBit << ", " << EndBit << "): " << toString(Base)
                      << "\n");

    // First of all, any locs that use mem that are disrupted need reinstating.
    // Unfortunately, IntervalMap doesn't let us insert intervals that overlap
    // with existing intervals so this code involves a lot of fiddling around
    // with intervals to do that manually.
    auto FragIt = LiveSet.find(Var);

    // Check if the variable does not exist in the map.
    if (FragIt == LiveSet.end()) {
      // Add this variable to the BB map.
      auto P = LiveSet.try_emplace(Var, FragsInMemMap(IntervalMapAlloc));
      assert(P.second && "Var already in map?");
      // Add the interval to the fragment map.
      P.first->second.insert(StartBit, EndBit, Base);
      return;
    }
    // The variable has an entry in the map.

    FragsInMemMap &FragMap = FragIt->second;
    // First check the easy case: the new fragment `f` doesn't overlap with any
    // intervals.
    if (!FragMap.overlaps(StartBit, EndBit)) {
      LLVM_DEBUG(dbgs() << "- No overlaps\n");
      FragMap.insert(StartBit, EndBit, Base);
      coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
                        FragMap);
      return;
    }
    // There is at least one overlap.

    // Does StartBit intersect an existing fragment?
    auto FirstOverlap = FragMap.find(StartBit);
    assert(FirstOverlap != FragMap.end());
    bool IntersectStart = FirstOverlap.start() < StartBit;

    // Does EndBit intersect an existing fragment?
    auto LastOverlap = FragMap.find(EndBit);
    bool IntersectEnd = LastOverlap.valid() && LastOverlap.start() < EndBit;

    // Check if both ends of `f` intersect the same interval `i`.
    if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
      LLVM_DEBUG(dbgs() << "- Intersect single interval @ both ends\n");
      // Shorten `i` so that there's space to insert `f`.
      //      [ f ]
      // [ - i - ]
      // +
      // [ i ][ f ][ i ]

      // Save values for use after inserting a new interval.
      auto EndBitOfOverlap = FirstOverlap.stop();
      unsigned OverlapValue = FirstOverlap.value();

      // Shorten the overlapping interval.
      FirstOverlap.setStop(StartBit);
      insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
                   OverlapValue, VarLoc.DL);

      // Insert a new interval to represent the end part.
      FragMap.insert(EndBit, EndBitOfOverlap, OverlapValue);
      insertMemLoc(BB, Before, Var, EndBit, EndBitOfOverlap, OverlapValue,
                   VarLoc.DL);

      // Insert the new (middle) fragment now there is space.
      FragMap.insert(StartBit, EndBit, Base);
    } else {
      // There's an overlap but `f` may not be fully contained within
      // `i`. Shorten any end-point intersections so that we can then
      // insert `f`.
      //      [ - f - ]
      // [ - i - ]
      // |   |
      // [ i ]
      // Shorten any end-point intersections.
      if (IntersectStart) {
        LLVM_DEBUG(dbgs() << "- Intersect interval at start\n");
        // Split off at the intersection.
        FirstOverlap.setStop(StartBit);
        insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
                     *FirstOverlap, VarLoc.DL);
      }
      // [ - f - ]
      //      [ - i - ]
      //          |   |
      //          [ i ]
      if (IntersectEnd) {
        LLVM_DEBUG(dbgs() << "- Intersect interval at end\n");
        // Split off at the intersection.
        LastOverlap.setStart(EndBit);
        insertMemLoc(BB, Before, Var, EndBit, LastOverlap.stop(), *LastOverlap,
                     VarLoc.DL);
      }

      LLVM_DEBUG(dbgs() << "- Erase intervals contained within\n");
      // FirstOverlap and LastOverlap have been shortened such that they're
      // no longer overlapping with [StartBit, EndBit). Delete any overlaps
      // that remain (these will be fully contained within `f`).
      //      [ - f - ]      }
      // [ - i - ]           } Intersection shortening that has happened above.
      // |   |               }
      // [ i ]               }
      // -----------------
      //      [i2 ]          } Intervals fully contained within `f` get erased.
      // -----------------
      // [ - f - ][ i ]      } Completed insertion.
      auto It = FirstOverlap;
      if (IntersectStart)
        ++It; // IntersectStart: first overlap has been shortened.
      while (It.valid() && It.start() >= StartBit && It.stop() <= EndBit) {
        LLVM_DEBUG(dbgs() << "- Erase " << toString(It));
        It.erase(); // This increments It after removing the interval.
      }
      // We've dealt with all the overlaps now!
      assert(!FragMap.overlaps(StartBit, EndBit));
      LLVM_DEBUG(dbgs() << "- Insert DEF into now-empty space\n");
      FragMap.insert(StartBit, EndBit, Base);
    }

    coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
                      FragMap);
  }

  bool skipVariable(const DILocalVariable *V) { return !V->getSizeInBits(); }

  void process(BasicBlock &BB, VarFragMap &LiveSet) {
    BBInsertBeforeMap[&BB].clear();
    for (auto &I : BB) {
      for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
        if (const auto *Locs = FnVarLocs->getWedge(&DVR)) {
          for (const VarLocInfo &Loc : *Locs) {
            addDef(Loc, &DVR, *I.getParent(), LiveSet);
          }
        }
      }
      if (const auto *Locs = FnVarLocs->getWedge(&I)) {
        for (const VarLocInfo &Loc : *Locs) {
          addDef(Loc, &I, *I.getParent(), LiveSet);
        }
      }
    }
  }

public:
  MemLocFragmentFill(Function &Fn,
                     const DenseSet<DebugAggregate> *VarsWithStackSlot,
                     bool CoalesceAdjacentFragments)
      : Fn(Fn), VarsWithStackSlot(VarsWithStackSlot),
        CoalesceAdjacentFragments(CoalesceAdjacentFragments) {}

  /// Add variable locations to \p FnVarLocs so that any bits of a variable
  /// with a memory location have that location explicitly reinstated at each
  /// subsequent variable location definition that doesn't overwrite those
  /// bits. i.e. after a variable location def, insert new defs for the memory
  /// location with fragments for the difference of "all bits currently in
  /// memory" and "the fragment of the second def". e.g.
  ///
  /// Before:
  ///
  /// var x bits 0 to 63: value in memory
  /// more instructions
  /// var x bits 0 to 31: value is %0
  ///
  /// After:
  ///
  /// var x bits 0 to 63: value in memory
  /// more instructions
  /// var x bits 0 to 31: value is %0
  /// var x bits 32 to 63: value in memory ; <-- new loc def
  ///
  void run(FunctionVarLocsBuilder *FnVarLocs) {
    if (!EnableMemLocFragFill)
      return;

    this->FnVarLocs = FnVarLocs;

    // Prepare for traversal.
    //
    ReversePostOrderTraversal<Function *> RPOT(&Fn);
    std::priority_queue<unsigned int, std::vector<unsigned int>,
                        std::greater<unsigned int>>
        Worklist;
    std::priority_queue<unsigned int, std::vector<unsigned int>,
                        std::greater<unsigned int>>
        Pending;
    DenseMap<unsigned int, BasicBlock *> OrderToBB;
    DenseMap<BasicBlock *, unsigned int> BBToOrder;
    { // Init OrderToBB and BBToOrder.
      unsigned int RPONumber = 0;
      for (BasicBlock *BB : RPOT) {
        OrderToBB[RPONumber] = BB;
        BBToOrder[BB] = RPONumber;
        Worklist.push(RPONumber);
        ++RPONumber;
      }
      LiveIn.reserve(RPONumber);
      LiveOut.reserve(RPONumber);
    }

    // Perform the traversal.
    //
    // This is a standard "intersect of predecessor outs" dataflow problem. To
    // solve it, we perform meet() and process() using the two worklist method
    // until the LiveIn data for each block becomes unchanging.
    //
    // This dataflow is essentially working on maps of sets and at each meet we
    // intersect the maps and the mapped sets. So, initialized live-in maps
    // monotonically decrease in value throughout the dataflow.
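    //
    // For example (illustrative): if one processed pred ends with x bits
    // [0, 64) in memory and another with only x bits [0, 32) in memory, the
    // meet leaves this block with x bits [0, 32) in memory - an interval
    // (with a matching base) must appear in every processed pred to survive.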
    SmallPtrSet<BasicBlock *, 16> Visited;
    while (!Worklist.empty() || !Pending.empty()) {
      // We track what is on the pending worklist to avoid inserting the same
      // thing twice. We could avoid this with a custom priority queue, but
      // this is probably not worth it.
      SmallPtrSet<BasicBlock *, 16> OnPending;
      LLVM_DEBUG(dbgs() << "Processing Worklist\n");
      while (!Worklist.empty()) {
        BasicBlock *BB = OrderToBB[Worklist.top()];
        LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
        Worklist.pop();
        bool InChanged = meet(*BB, Visited);
        // Always consider LiveIn changed on the first visit.
        InChanged |= Visited.insert(BB).second;
        if (InChanged) {
          LLVM_DEBUG(dbgs()
                     << BB->getName() << " has new InLocs, process it\n");
          // Mutate a copy of LiveIn while processing BB. Once we've processed
          // the terminator LiveSet is the LiveOut set for BB.
          // This is an expensive copy!
          VarFragMap LiveSet = LiveIn[BB];

          // Process the instructions in the block.
          process(*BB, LiveSet);

          // Relatively expensive check: has anything changed in LiveOut for
          // BB?
          if (!varFragMapsAreEqual(LiveOut[BB], LiveSet)) {
            LLVM_DEBUG(dbgs() << BB->getName()
                              << " has new OutLocs, add succs to worklist: [ ");
            LiveOut[BB] = std::move(LiveSet);
            for (BasicBlock *Succ : successors(BB)) {
              if (OnPending.insert(Succ).second) {
                LLVM_DEBUG(dbgs() << Succ->getName() << " ");
                Pending.push(BBToOrder[Succ]);
              }
            }
            LLVM_DEBUG(dbgs() << "]\n");
          }
        }
      }
      Worklist.swap(Pending);
      // At this point, pending must be empty, since it was just the empty
      // worklist.
      assert(Pending.empty() && "Pending should be empty");
    }

    // Insert new location defs.
    for (auto &Pair : BBInsertBeforeMap) {
      InsertMap &Map = Pair.second;
      for (auto &Pair : Map) {
        auto InsertBefore = Pair.first;
        assert(InsertBefore && "should never be null");
        auto FragMemLocs = Pair.second;
        auto &Ctx = Fn.getContext();

        for (auto &FragMemLoc : FragMemLocs) {
          DIExpression *Expr = DIExpression::get(Ctx, {});
          if (FragMemLoc.SizeInBits !=
              *Aggregates[FragMemLoc.Var].first->getSizeInBits())
            Expr = *DIExpression::createFragmentExpression(
                Expr, FragMemLoc.OffsetInBits, FragMemLoc.SizeInBits);
          Expr = DIExpression::prepend(Expr, DIExpression::DerefAfter,
                                       FragMemLoc.OffsetInBits / 8);
          DebugVariable Var(Aggregates[FragMemLoc.Var].first, Expr,
                            FragMemLoc.DL.getInlinedAt());
          FnVarLocs->addVarLoc(InsertBefore, Var, Expr, FragMemLoc.DL,
                               Bases[FragMemLoc.Base]);
        }
      }
    }
  }
};

/// AssignmentTrackingLowering encapsulates a dataflow analysis over a function
/// that interprets assignment tracking debug info metadata and stores in IR to
/// create a map of variable locations.
class AssignmentTrackingLowering {
public:
  /// The kind of location in use for a variable, where Mem is the stack home,
  /// Val is an SSA value or const, and None means that there is not one single
  /// kind (either because there are multiple or because there is none; it may
  /// prove useful to split this into two values in the future).
  ///
  /// LocKind is a join-semilattice with the partial order:
  /// None > Mem, Val
  ///
  /// i.e.
  /// join(Mem, Mem) = Mem
  /// join(Val, Val) = Val
  /// join(Mem, Val) = None
  /// join(None, Mem) = None
  /// join(None, Val) = None
  /// join(None, None) = None
  ///
  /// Note: the order is not `None > Val > Mem` because we're using DIAssignID
  /// to name assignments and are not tracking the actual stored values.
  /// Therefore currently there's no way to ensure that Mem values and Val
  /// values are the same. This could be a future extension, though it's not
  /// clear that many additional locations would be recovered that way in
  /// practice as the likelihood of this situation arising naturally seems
  /// incredibly low.
  enum class LocKind { Mem, Val, None };

  /// An abstraction of the assignment of a value to a variable or memory
  /// location.
  ///
  /// An Assignment is Known or NoneOrPhi. A Known Assignment means we have a
  /// DIAssignID ptr that represents it. NoneOrPhi means that we don't (or
  /// can't) know the ID of the last assignment that took place.
  ///
  /// The Status of the Assignment (Known or NoneOrPhi) is another
  /// join-semilattice. The partial order is:
  /// NoneOrPhi > Known {id_0, id_1, ...id_N}
  ///
  /// i.e. for all values x and y where x != y:
  /// join(x, x) = x
  /// join(x, y) = NoneOrPhi
  struct Assignment {
    enum S { Known, NoneOrPhi } Status;
    /// ID of the assignment. nullptr if Status is not Known.
    DIAssignID *ID;
    /// The dbg.assign that marks this dbg-def. Mem-defs don't use this field.
    /// May be nullptr.
    DbgVariableRecord *Source = nullptr;

    bool isSameSourceAssignment(const Assignment &Other) const {
      // Don't include Source in the equality check. Assignments are
      // defined by their ID, not debug intrinsic(s).
      return std::tie(Status, ID) == std::tie(Other.Status, Other.ID);
    }
    void dump(raw_ostream &OS) {
      static const char *LUT[] = {"Known", "NoneOrPhi"};
      OS << LUT[Status] << "(id=";
      if (ID)
        OS << ID;
      else
        OS << "null";
      OS << ", s=";
      if (!Source)
        OS << "null";
      else
        OS << Source;
      OS << ")";
    }

    static Assignment make(DIAssignID *ID, DbgVariableRecord *Source) {
      assert((!Source || Source->isDbgAssign()) &&
             "Cannot make an assignment from a non-assign DbgVariableRecord");
      return Assignment(Known, ID, Source);
    }
    static Assignment makeFromMemDef(DIAssignID *ID) {
      return Assignment(Known, ID);
    }
    static Assignment makeNoneOrPhi() { return Assignment(NoneOrPhi, nullptr); }
    // Again, need a Top value?
    Assignment() : Status(NoneOrPhi), ID(nullptr) {} // Can we delete this?
    Assignment(S Status, DIAssignID *ID) : Status(Status), ID(ID) {
      // If the Status is Known then we expect there to be an assignment ID.
      assert(Status == NoneOrPhi || ID);
    }
    Assignment(S Status, DIAssignID *ID, DbgVariableRecord *Source)
        : Status(Status), ID(ID), Source(Source) {
      // If the Status is Known then we expect there to be an assignment ID.
      assert(Status == NoneOrPhi || ID);
    }
  };

  using AssignmentMap = SmallVector<Assignment>;
  using LocMap = SmallVector<LocKind>;
  using OverlapMap = DenseMap<VariableID, SmallVector<VariableID>>;
  using UntaggedStoreAssignmentMap =
      DenseMap<const Instruction *,
               SmallVector<std::pair<VariableID, at::AssignmentInfo>>>;
  using UnknownStoreAssignmentMap =
      DenseMap<const Instruction *, SmallVector<VariableID>>;

private:
  /// The highest numbered VariableID for partially promoted variables plus 1,
  /// the values for which start at 1.
  unsigned TrackedVariablesVectorSize = 0;
  /// Map a variable to the set of variables that it fully contains.
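  /// E.g. (illustrative): the entry for the unfragmented variable "x" would
  /// list the VariableIDs for "x bits [0, 32)" and "x bits [32, 64)".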
  OverlapMap VarContains;
  /// Map untagged stores to the variable fragments they assign to. Used by
  /// processUntaggedInstruction.
  UntaggedStoreAssignmentMap UntaggedStoreVars;
  /// Map untagged unknown stores (e.g. strided/masked store intrinsics)
  /// to the variables they may assign to. Used by processUntaggedInstruction.
  UnknownStoreAssignmentMap UnknownStoreVars;

  // Machinery to defer inserting dbg.values.
  using InstInsertMap = MapVector<VarLocInsertPt, SmallVector<VarLocInfo>>;
  InstInsertMap InsertBeforeMap;
  /// Clear the location definitions currently cached for insertion after \p
  /// After.
  void resetInsertionPoint(Instruction &After);
  void resetInsertionPoint(DbgVariableRecord &After);

  void emitDbgValue(LocKind Kind, DbgVariableRecord *, VarLocInsertPt After);

  static bool mapsAreEqual(const BitVector &Mask, const AssignmentMap &A,
                           const AssignmentMap &B) {
    return llvm::all_of(Mask.set_bits(), [&](unsigned VarID) {
      return A[VarID].isSameSourceAssignment(B[VarID]);
    });
  }

  /// Represents the stack and debug assignments in a block. Used to describe
  /// the live-in and live-out values for blocks, as well as the "current"
  /// value as we process each instruction in a block.
  struct BlockInfo {
    /// The set of variables (VariableID) being tracked in this block.
    BitVector VariableIDsInBlock;
    /// Dominating assignment to memory for each variable, indexed by
    /// VariableID.
    AssignmentMap StackHomeValue;
    /// Dominating assignment to each variable, indexed by VariableID.
    AssignmentMap DebugValue;
    /// Location kind for each variable. LiveLoc indicates whether the
    /// dominating assignment in StackHomeValue (LocKind::Mem), DebugValue
    /// (LocKind::Val), or neither (LocKind::None) is valid, in that order of
    /// preference. This cannot be derived by inspecting DebugValue and
    /// StackHomeValue due to the fact that there's no distinction in
    /// Assignment (the class) between whether an assignment is unknown or a
    /// merge of multiple assignments (both are Status::NoneOrPhi). In other
    /// words, the memory location may well be valid while both DebugValue and
    /// StackHomeValue contain Assignments that have a Status of NoneOrPhi.
    /// Indexed by VariableID.
    LocMap LiveLoc;

  public:
    enum AssignmentKind { Stack, Debug };
    const AssignmentMap &getAssignmentMap(AssignmentKind Kind) const {
      switch (Kind) {
      case Stack:
        return StackHomeValue;
      case Debug:
        return DebugValue;
      }
      llvm_unreachable("Unknown AssignmentKind");
    }
    AssignmentMap &getAssignmentMap(AssignmentKind Kind) {
      return const_cast<AssignmentMap &>(
          const_cast<const BlockInfo *>(this)->getAssignmentMap(Kind));
    }

    bool isVariableTracked(VariableID Var) const {
      return VariableIDsInBlock[static_cast<unsigned>(Var)];
    }

    const Assignment &getAssignment(AssignmentKind Kind, VariableID Var) const {
      assert(isVariableTracked(Var) && "Var not tracked in block");
      return getAssignmentMap(Kind)[static_cast<unsigned>(Var)];
    }

    LocKind getLocKind(VariableID Var) const {
      assert(isVariableTracked(Var) && "Var not tracked in block");
      return LiveLoc[static_cast<unsigned>(Var)];
    }

    /// Set LocKind for \p Var only: does not set LocKind for VariableIDs of
    /// fragments contained within \p Var.
    void setLocKind(VariableID Var, LocKind K) {
      VariableIDsInBlock.set(static_cast<unsigned>(Var));
      LiveLoc[static_cast<unsigned>(Var)] = K;
    }

    /// Set the assignment in the \p Kind assignment map for \p Var only: does
    /// not set the assignment for VariableIDs of fragments contained within
    /// \p Var.
    void setAssignment(AssignmentKind Kind, VariableID Var,
                       const Assignment &AV) {
      VariableIDsInBlock.set(static_cast<unsigned>(Var));
      getAssignmentMap(Kind)[static_cast<unsigned>(Var)] = AV;
    }

    /// Return true if there is an assignment matching \p AV in the \p Kind
    /// assignment map. Does not consider assignments for VariableIDs of
    /// fragments contained within \p Var.
    bool hasAssignment(AssignmentKind Kind, VariableID Var,
                       const Assignment &AV) const {
      if (!isVariableTracked(Var))
        return false;
      return AV.isSameSourceAssignment(getAssignment(Kind, Var));
    }

    /// Compare every element in each map to determine structural equality
    /// (slow).
    bool operator==(const BlockInfo &Other) const {
      return VariableIDsInBlock == Other.VariableIDsInBlock &&
             LiveLoc == Other.LiveLoc &&
             mapsAreEqual(VariableIDsInBlock, StackHomeValue,
                          Other.StackHomeValue) &&
             mapsAreEqual(VariableIDsInBlock, DebugValue, Other.DebugValue);
    }
    bool operator!=(const BlockInfo &Other) const { return !(*this == Other); }
    bool isValid() {
      return LiveLoc.size() == DebugValue.size() &&
             LiveLoc.size() == StackHomeValue.size();
    }

    /// Clear everything and initialise with ⊤-values for all variables.
    void init(int NumVars) {
      StackHomeValue.clear();
      DebugValue.clear();
      LiveLoc.clear();
      VariableIDsInBlock = BitVector(NumVars);
      StackHomeValue.insert(StackHomeValue.begin(), NumVars,
                            Assignment::makeNoneOrPhi());
      DebugValue.insert(DebugValue.begin(), NumVars,
                        Assignment::makeNoneOrPhi());
      LiveLoc.insert(LiveLoc.begin(), NumVars, LocKind::None);
    }

    /// Helper for join.
    template <typename ElmtType, typename FnInputType>
    static void joinElmt(int Index, SmallVector<ElmtType> &Target,
                         const SmallVector<ElmtType> &A,
                         const SmallVector<ElmtType> &B,
                         ElmtType (*Fn)(FnInputType, FnInputType)) {
      Target[Index] = Fn(A[Index], B[Index]);
    }

    /// See comment for AssignmentTrackingLowering::joinBlockInfo.
    static BlockInfo join(const BlockInfo &A, const BlockInfo &B, int NumVars) {
      // Join A and B.
      //
      // Intersect = join(a, b) for a in A, b in B where Var(a) == Var(b)
      // Difference = join(x, ⊤) for x where Var(x) is in A xor B
      // Join = Intersect ∪ Difference
      //
      // This is achieved by performing a join on elements from A and B with
      // variables common to both A and B (join elements indexed by var
      // intersect), then adding ⊤-value elements for vars in A xor B. The
      // latter part is equivalent to performing join on elements with
      // variables in A xor B with the ⊤-value for the map element since
      // join(x, ⊤) = ⊤. BlockInfo::init initializes all variable entries to
      // the ⊤ value so we don't need to explicitly perform that step as
      // Join.VariableIDsInBlock is set to the union of the variables in A and
      // B at the end of this function.
      BlockInfo Join;
      Join.init(NumVars);

      BitVector Intersect = A.VariableIDsInBlock;
      Intersect &= B.VariableIDsInBlock;

      for (auto VarID : Intersect.set_bits()) {
        joinElmt(VarID, Join.LiveLoc, A.LiveLoc, B.LiveLoc, joinKind);
        joinElmt(VarID, Join.DebugValue, A.DebugValue, B.DebugValue,
                 joinAssignment);
        joinElmt(VarID, Join.StackHomeValue, A.StackHomeValue, B.StackHomeValue,
                 joinAssignment);
      }

      Join.VariableIDsInBlock = A.VariableIDsInBlock;
      Join.VariableIDsInBlock |= B.VariableIDsInBlock;
      assert(Join.isValid());
      return Join;
    }
  };

  Function &Fn;
  const DataLayout &Layout;
  const DenseSet<DebugAggregate> *VarsWithStackSlot;
  FunctionVarLocsBuilder *FnVarLocs;
  DenseMap<const BasicBlock *, BlockInfo> LiveIn;
  DenseMap<const BasicBlock *, BlockInfo> LiveOut;

  /// Helper for process methods to track variables touched each frame.
  DenseSet<VariableID> VarsTouchedThisFrame;

  /// The set of variables that sometimes are not located in their stack home.
  DenseSet<DebugAggregate> NotAlwaysStackHomed;

  VariableID getVariableID(const DebugVariable &Var) {
    return FnVarLocs->insertVariable(Var);
  }

  /// Join the LiveOut values of preds that are contained in \p Visited into
  /// LiveIn[BB]. Return true if LiveIn[BB] has changed as a result. LiveIn[BB]
  /// values monotonically increase. See the @link joinMethods join methods
  /// @endlink documentation for more info.
  bool join(const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited);
  ///@name joinMethods
  /// Functions that implement `join` (the least upper bound) for the
  /// join-semilattice types used in the dataflow. There is an explicit bottom
  /// value (⊥) for some types and an explicit top value (⊤) for all types.
  /// By definition:
  ///
  /// Join(A, B) >= A && Join(A, B) >= B
  /// Join(A, ⊥) = A
  /// Join(A, ⊤) = ⊤
  ///
  /// These invariants are important for monotonicity.
  ///
  /// For the map-type functions, all unmapped keys in an empty map are
  /// associated with a bottom value (⊥). This represents their values being
  /// unknown. Unmapped keys in non-empty maps (joining two maps with a key
  /// only present in one) represents either a variable going out of scope or
  /// dropped debug info. It is assumed the key is associated with a top value
  /// (⊤) in this case (unknown location / assignment).
  ///@{
  static LocKind joinKind(LocKind A, LocKind B);
  static Assignment joinAssignment(const Assignment &A, const Assignment &B);
  BlockInfo joinBlockInfo(const BlockInfo &A, const BlockInfo &B);
  ///@}

  /// Process the instructions in \p BB updating \p LiveSet along the way. \p
  /// LiveSet must be initialized with the current live-in locations before
  /// calling this.
  void process(BasicBlock &BB, BlockInfo *LiveSet);
  ///@name processMethods
  /// Methods to process instructions in order to update the LiveSet (current
  /// location information).
  ///@{
  void processNonDbgInstruction(Instruction &I, BlockInfo *LiveSet);
  /// Update \p LiveSet after encountering an instruction with a DIAssignID
  /// attachment, \p I.
  void processTaggedInstruction(Instruction &I, BlockInfo *LiveSet);
  /// Update \p LiveSet after encountering an instruction without a DIAssignID
  /// attachment, \p I.
  void processUntaggedInstruction(Instruction &I, BlockInfo *LiveSet);
  void processUnknownStoreToVariable(Instruction &I, VariableID &Var,
                                     BlockInfo *LiveSet);
  void processDbgAssign(DbgVariableRecord *Assign, BlockInfo *LiveSet);
  void processDbgVariableRecord(DbgVariableRecord &DVR, BlockInfo *LiveSet);
  void processDbgValue(DbgVariableRecord *DbgValue, BlockInfo *LiveSet);
  /// Add an assignment to memory for the variable \p Var.
  void addMemDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV);
  /// Add an assignment to the variable \p Var.
  void addDbgDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV);
  ///@}

  /// Set the LocKind for \p Var.
  void setLocKind(BlockInfo *LiveSet, VariableID Var, LocKind K);
  /// Get the live LocKind for a \p Var. Requires addMemDef or addDbgDef to
  /// have been called for \p Var first.
  LocKind getLocKind(BlockInfo *LiveSet, VariableID Var);
  /// Return true if \p Var has an assignment matching \p AV in the \p Kind
  /// assignment map.
  bool hasVarWithAssignment(BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind,
                            VariableID Var, const Assignment &AV);
  /// Return the set of VariableIDs corresponding to the fragments contained
  /// fully within the variable/fragment \p Var.
  ArrayRef<VariableID> getContainedFragments(VariableID Var) const;

  /// Mark \p Var as having been touched this frame. Note, this applies only
  /// to the exact fragment \p Var and not to any fragments contained within.
  void touchFragment(VariableID Var);

  /// Emit info for variables that are fully promoted.
  bool emitPromotedVarLocs(FunctionVarLocsBuilder *FnVarLocs);

public:
  AssignmentTrackingLowering(Function &Fn, const DataLayout &Layout,
                             const DenseSet<DebugAggregate> *VarsWithStackSlot)
      : Fn(Fn), Layout(Layout), VarsWithStackSlot(VarsWithStackSlot) {}
  /// Run the analysis, adding variable location info to \p FnVarLocs. Returns
  /// true if any variable locations have been added to FnVarLocs.
  bool run(FunctionVarLocsBuilder *FnVarLocs);
};
} // namespace

1367ArrayRef<VariableID>
1368AssignmentTrackingLowering::getContainedFragments(VariableID Var) const {
1369 auto R = VarContains.find(Val: Var);
1370 if (R == VarContains.end())
1371 return {};
1372 return R->second;
1373}
1374
1375void AssignmentTrackingLowering::touchFragment(VariableID Var) {
1376 VarsTouchedThisFrame.insert(V: Var);
1377}
1378
1379void AssignmentTrackingLowering::setLocKind(BlockInfo *LiveSet, VariableID Var,
1380 LocKind K) {
1381 auto SetKind = [this](BlockInfo *LiveSet, VariableID Var, LocKind K) {
1382 LiveSet->setLocKind(Var, K);
1383 touchFragment(Var);
1384 };
1385 SetKind(LiveSet, Var, K);
1386
1387 // Update the LocKind for all fragments contained within Var.
1388 for (VariableID Frag : getContainedFragments(Var))
1389 SetKind(LiveSet, Frag, K);
1390}
1391
1392AssignmentTrackingLowering::LocKind
1393AssignmentTrackingLowering::getLocKind(BlockInfo *LiveSet, VariableID Var) {
1394 return LiveSet->getLocKind(Var);
1395}
1396
1397void AssignmentTrackingLowering::addMemDef(BlockInfo *LiveSet, VariableID Var,
1398 const Assignment &AV) {
1399 LiveSet->setAssignment(Kind: BlockInfo::Stack, Var, AV);
1400
1401 // Use this assignment for all fragments contained within Var, but do not
1402 // provide a Source because we cannot convert Var's value to a value for the
1403 // fragment.
1404 Assignment FragAV = AV;
1405 FragAV.Source = nullptr;
1406 for (VariableID Frag : getContainedFragments(Var))
1407 LiveSet->setAssignment(Kind: BlockInfo::Stack, Var: Frag, AV: FragAV);
1408}
1409
1410void AssignmentTrackingLowering::addDbgDef(BlockInfo *LiveSet, VariableID Var,
1411 const Assignment &AV) {
1412 LiveSet->setAssignment(Kind: BlockInfo::Debug, Var, AV);
1413
1414 // Use this assignment for all fragments contained within Var, but do not
1415 // provide a Source because we cannot convert Var's value to a value for the
1416 // fragment.
1417 Assignment FragAV = AV;
1418 FragAV.Source = nullptr;
1419 for (VariableID Frag : getContainedFragments(Var))
1420 LiveSet->setAssignment(Kind: BlockInfo::Debug, Var: Frag, AV: FragAV);
1421}
1422
1423static DIAssignID *getIDFromInst(const Instruction &I) {
1424 return cast<DIAssignID>(Val: I.getMetadata(KindID: LLVMContext::MD_DIAssignID));
1425}
1426
1427static DIAssignID *getIDFromMarker(const DbgVariableRecord &DVR) {
1428 assert(DVR.isDbgAssign() &&
1429 "Cannot get a DIAssignID from a non-assign DbgVariableRecord!");
1430 return DVR.getAssignID();
1431}
1432
/// Return true if \p Var has an assignment of kind \p Kind in \p LiveSet
/// matching \p AV.
1434bool AssignmentTrackingLowering::hasVarWithAssignment(
1435 BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind, VariableID Var,
1436 const Assignment &AV) {
1437 if (!LiveSet->hasAssignment(Kind, Var, AV))
1438 return false;
1439
1440 // Check all the frags contained within Var as these will have all been
1441 // mapped to AV at the last store to Var.
1442 for (VariableID Frag : getContainedFragments(Var))
1443 if (!LiveSet->hasAssignment(Kind, Var: Frag, AV))
1444 return false;
1445 return true;
1446}
1447
1448#ifndef NDEBUG
1449const char *locStr(AssignmentTrackingLowering::LocKind Loc) {
1450 using LocKind = AssignmentTrackingLowering::LocKind;
1451 switch (Loc) {
1452 case LocKind::Val:
1453 return "Val";
1454 case LocKind::Mem:
1455 return "Mem";
1456 case LocKind::None:
1457 return "None";
  }
1459 llvm_unreachable("unknown LocKind");
1460}
1461#endif
1462
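/// Find the insert position immediately after \p DVR or \p Inst: for a
/// DbgRecord this is the next record in its marker's range, or the marked
/// instruction if it is the last record; for an instruction it is the first
/// DbgRecord attached to the next instruction if there is one, otherwise the
/// next instruction itself.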
1463VarLocInsertPt getNextNode(const DbgRecord *DVR) {
1464 auto NextIt = ++(DVR->getIterator());
1465 if (NextIt == DVR->getMarker()->getDbgRecordRange().end())
1466 return DVR->getMarker()->MarkedInstr;
1467 return &*NextIt;
1468}
1469VarLocInsertPt getNextNode(const Instruction *Inst) {
1470 const Instruction *Next = Inst->getNextNode();
1471 if (!Next->hasDbgRecords())
1472 return Next;
1473 return &*Next->getDbgRecordRange().begin();
1474}
1475VarLocInsertPt getNextNode(VarLocInsertPt InsertPt) {
1476 if (isa<const Instruction *>(Val: InsertPt))
1477 return getNextNode(Inst: cast<const Instruction *>(Val&: InsertPt));
1478 return getNextNode(DVR: cast<const DbgRecord *>(Val&: InsertPt));
1479}
1480
1481void AssignmentTrackingLowering::emitDbgValue(
1482 AssignmentTrackingLowering::LocKind Kind, DbgVariableRecord *Source,
1483 VarLocInsertPt After) {
1484
1485 DILocation *DL = Source->getDebugLoc();
1486 auto Emit = [this, Source, After, DL](Metadata *Val, DIExpression *Expr) {
1487 assert(Expr);
1488 if (!Val)
1489 Val = ValueAsMetadata::get(
1490 V: PoisonValue::get(T: Type::getInt1Ty(C&: Source->getContext())));
1491
1492 // Find a suitable insert point.
1493 auto InsertBefore = getNextNode(InsertPt: After);
1494 assert(InsertBefore && "Shouldn't be inserting after a terminator");
1495
1496 VariableID Var = getVariableID(Var: DebugVariable(Source));
1497 VarLocInfo VarLoc;
1498 VarLoc.VariableID = Var;
1499 VarLoc.Expr = Expr;
1500 VarLoc.Values = RawLocationWrapper(Val);
1501 VarLoc.DL = DL;
1502 // Insert it into the map for later.
1503 InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc);
1504 };
1505
1506 // NOTE: This block can mutate Kind.
1507 if (Kind == LocKind::Mem) {
1508 assert(Source->isDbgAssign());
1509 const DbgVariableRecord *Assign = Source;
1510 // Check the address hasn't been dropped (e.g. the debug uses may not have
1511 // been replaced before deleting a Value).
1512 if (Assign->isKillAddress()) {
1513 // The address isn't valid so treat this as a non-memory def.
1514 Kind = LocKind::Val;
1515 } else {
1516 Value *Val = Assign->getAddress();
1517 DIExpression *Expr = Assign->getAddressExpression();
1518 assert(!Expr->getFragmentInfo() &&
1519 "fragment info should be stored in value-expression only");
1520 // Copy the fragment info over from the value-expression to the new
1521 // DIExpression.
1522 if (auto OptFragInfo = Source->getExpression()->getFragmentInfo()) {
1523 auto FragInfo = *OptFragInfo;
1524 Expr = *DIExpression::createFragmentExpression(
1525 Expr, OffsetInBits: FragInfo.OffsetInBits, SizeInBits: FragInfo.SizeInBits);
1526 }
1527 // The address-expression has an implicit deref, add it now.
1528 std::tie(args&: Val, args&: Expr) =
1529 walkToAllocaAndPrependOffsetDeref(DL: Layout, Start: Val, Expression: Expr);
1530 Emit(ValueAsMetadata::get(V: Val), Expr);
1531 return;
1532 }
1533 }
1534
1535 if (Kind == LocKind::Val) {
1536 Emit(Source->getRawLocation(), Source->getExpression());
1537 return;
1538 }
1539
1540 if (Kind == LocKind::None) {
1541 Emit(nullptr, Source->getExpression());
1542 return;
1543 }
1544}
1545
1546void AssignmentTrackingLowering::processNonDbgInstruction(
1547 Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1548 if (I.hasMetadata(KindID: LLVMContext::MD_DIAssignID))
1549 processTaggedInstruction(I, LiveSet);
1550 else
1551 processUntaggedInstruction(I, LiveSet);
1552}
1553
1554void AssignmentTrackingLowering::processUnknownStoreToVariable(
1555 Instruction &I, VariableID &Var, BlockInfo *LiveSet) {
1556 // We may have assigned to some unknown fragment of the variable, so
1557 // treat the memory assignment as unknown for now.
1558 addMemDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi());
1559 // If we weren't already using a memory location, we don't need to do
1560 // anything more.
1561 if (getLocKind(LiveSet, Var) != LocKind::Mem)
1562 return;
1563 // If there is a live debug value for this variable, fall back to using
1564 // that.
1565 Assignment DbgAV = LiveSet->getAssignment(Kind: BlockInfo::Debug, Var);
1566 if (DbgAV.Status != Assignment::NoneOrPhi && DbgAV.Source) {
1567 LLVM_DEBUG(dbgs() << "Switching to fallback debug value: ";
1568 DbgAV.dump(dbgs()); dbgs() << "\n");
1569 setLocKind(LiveSet, Var, K: LocKind::Val);
1570 emitDbgValue(Kind: LocKind::Val, Source: DbgAV.Source, After: &I);
1571 return;
1572 }
1573 // Otherwise, find a suitable insert point, before the next instruction or
1574 // DbgRecord after I.
1575 auto InsertBefore = getNextNode(Inst: &I);
1576 assert(InsertBefore && "Shouldn't be inserting after a terminator");
1577
1578 // Get DILocation for this assignment.
1579 DebugVariable V = FnVarLocs->getVariable(ID: Var);
1580 DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt());
1581 const DILocation *DILoc = DILocation::get(
1582 Context&: Fn.getContext(), Line: 0, Column: 0, Scope: V.getVariable()->getScope(), InlinedAt);
1583
1584 VarLocInfo VarLoc;
1585 VarLoc.VariableID = Var;
1586 VarLoc.Expr = DIExpression::get(Context&: I.getContext(), Elements: {});
1587 VarLoc.Values = RawLocationWrapper(
1588 ValueAsMetadata::get(V: PoisonValue::get(T: Type::getInt1Ty(C&: I.getContext()))));
1589 VarLoc.DL = DILoc;
1590 InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc);
1591}
1592
1593void AssignmentTrackingLowering::processUntaggedInstruction(
1594 Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1595 // Interpret stack stores that are not tagged as an assignment in memory for
1596 // the variables associated with that address. These stores may not be tagged
1597 // because a) the store cannot be represented using dbg.assigns (non-const
1598 // length or offset) or b) the tag was accidentally dropped during
1599 // optimisations. For these stores we fall back to assuming that the stack
1600 // home is a valid location for the variables. The benefit is that this
1601 // prevents us missing an assignment and therefore incorrectly maintaining
1602 // earlier location definitions, and in many cases it should be a reasonable
1603 // assumption. However, this will occasionally lead to slight
1604 // inaccuracies. The value of a hoisted untagged store will be visible
1605 // "early", for example.
1606 assert(!I.hasMetadata(LLVMContext::MD_DIAssignID));
1607 auto It = UntaggedStoreVars.find(Val: &I);
1608 if (It == UntaggedStoreVars.end()) {
1609 // It is possible that we have an untagged unknown store, i.e. one that
1610 // cannot be represented as a simple (base, offset, size) - in this case we
1611 // should undef the memory location of the variable, as if we had a tagged
1612 // store that did not match the current assignment.
1613 // FIXME: It should be possible to support these stores, but it would
1614 // require more extensive changes to our representation of assignments.
1615 if (auto UnhandledStoreIt = UnknownStoreVars.find(Val: &I);
1616 UnhandledStoreIt != UnknownStoreVars.end()) {
1617 LLVM_DEBUG(dbgs() << "Processing untagged unknown store " << I << "\n");
1618 for (auto &Var : UnhandledStoreIt->second)
1619 processUnknownStoreToVariable(I, Var, LiveSet);
1620 }
1621 return; // No variables associated with the store destination.
1622 }
1623
1624 LLVM_DEBUG(dbgs() << "processUntaggedInstruction on UNTAGGED INST " << I
1625 << "\n");
  // Iterate over the variables that this store affects, add a NoneOrPhi dbg
  // and mem def, set the LocKind to Mem, and emit a location def for each.
1628 for (auto [Var, Info] : It->second) {
1629 // This instruction is treated as both a debug and memory assignment,
1630 // meaning the memory location should be used. We don't have an assignment
1631 // ID though so use Assignment::makeNoneOrPhi() to create an imaginary one.
1632 addMemDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi());
1633 addDbgDef(LiveSet, Var, AV: Assignment::makeNoneOrPhi());
1634 setLocKind(LiveSet, Var, K: LocKind::Mem);
1635 LLVM_DEBUG(dbgs() << " setting Stack LocKind to: " << locStr(LocKind::Mem)
1636 << "\n");
1637 // Build the dbg location def to insert.
1638 //
1639 // DIExpression: Add fragment and offset.
1640 DebugVariable V = FnVarLocs->getVariable(ID: Var);
1641 DIExpression *DIE = DIExpression::get(Context&: I.getContext(), Elements: {});
1642 if (auto Frag = V.getFragment()) {
1643 auto R = DIExpression::createFragmentExpression(Expr: DIE, OffsetInBits: Frag->OffsetInBits,
1644 SizeInBits: Frag->SizeInBits);
1645 assert(R && "unexpected createFragmentExpression failure");
1646 DIE = *R;
1647 }
1648 SmallVector<uint64_t, 3> Ops;
1649 if (Info.OffsetInBits)
1650 Ops = {dwarf::DW_OP_plus_uconst, Info.OffsetInBits / 8};
1651 Ops.push_back(Elt: dwarf::DW_OP_deref);
1652 DIE = DIExpression::prependOpcodes(Expr: DIE, Ops, /*StackValue=*/false,
1653 /*EntryValue=*/false);
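    // E.g. (illustrative) for an untagged store at byte offset 4, the ops
    // DW_OP_plus_uconst 4, DW_OP_deref are prepended to the fragment
    // expression, to be evaluated against the alloca base below.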
1654 // Find a suitable insert point, before the next instruction or DbgRecord
1655 // after I.
1656 auto InsertBefore = getNextNode(Inst: &I);
1657 assert(InsertBefore && "Shouldn't be inserting after a terminator");
1658
1659 // Get DILocation for this unrecorded assignment.
1660 DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt());
1661 const DILocation *DILoc = DILocation::get(
1662 Context&: Fn.getContext(), Line: 0, Column: 0, Scope: V.getVariable()->getScope(), InlinedAt);
1663
1664 VarLocInfo VarLoc;
1665 VarLoc.VariableID = static_cast<VariableID>(Var);
1666 VarLoc.Expr = DIE;
1667 VarLoc.Values = RawLocationWrapper(
1668 ValueAsMetadata::get(V: const_cast<AllocaInst *>(Info.Base)));
1669 VarLoc.DL = DILoc;
    // Insert it into the map for later.
1671 InsertBeforeMap[InsertBefore].push_back(Elt: VarLoc);
1672 }
1673}
1674
1675void AssignmentTrackingLowering::processTaggedInstruction(
1676 Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1677 auto LinkedDPAssigns = at::getDVRAssignmentMarkers(Inst: &I);
1678 // No dbg.assign intrinsics linked.
1679 // FIXME: All vars that have a stack slot this store modifies that don't have
1680 // a dbg.assign linked to it should probably treat this like an untagged
1681 // store.
1682 if (LinkedDPAssigns.empty())
1683 return;
1684
1685 LLVM_DEBUG(dbgs() << "processTaggedInstruction on " << I << "\n");
1686 for (DbgVariableRecord *Assign : LinkedDPAssigns) {
1687 VariableID Var = getVariableID(Var: DebugVariable(Assign));
1688 // Something has gone wrong if VarsWithStackSlot doesn't contain a variable
1689 // that is linked to a store.
1690 assert(VarsWithStackSlot->count(getAggregate(Assign)) &&
1691 "expected Assign's variable to have stack slot");
1692
1693 Assignment AV = Assignment::makeFromMemDef(ID: getIDFromInst(I));
1694 addMemDef(LiveSet, Var, AV);
1695
1696 LLVM_DEBUG(dbgs() << " linked to " << *Assign << "\n");
1697 LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
1698 << " -> ");
1699
1700 // The last assignment to the stack is now AV. Check if the last debug
1701 // assignment has a matching Assignment.
1702 if (hasVarWithAssignment(LiveSet, Kind: BlockInfo::Debug, Var, AV)) {
1703 // The StackHomeValue and DebugValue for this variable match so we can
1704 // emit a stack home location here.
1705 LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n";);
1706 LLVM_DEBUG(dbgs() << " Stack val: "; AV.dump(dbgs()); dbgs() << "\n");
1707 LLVM_DEBUG(dbgs() << " Debug val: ";
1708 LiveSet->DebugValue[static_cast<unsigned>(Var)].dump(dbgs());
1709 dbgs() << "\n");
1710 setLocKind(LiveSet, Var, K: LocKind::Mem);
1711 emitDbgValue(Kind: LocKind::Mem, Source: Assign, After: &I);
      continue;
1713 }
1714
    // The StackHomeValue and DebugValue for this variable do not match, i.e.
    // the value currently stored in the stack is not what we'd expect to see,
    // so we cannot emit a stack home location here. Now we will look at the
    // live LocKind for the variable and determine an appropriate dbg.value to
    // emit.
1720 LocKind PrevLoc = getLocKind(LiveSet, Var);
1721 switch (PrevLoc) {
1722 case LocKind::Val: {
      // The value in memory has changed but we're not currently using the
      // memory location. Do nothing.
1725 LLVM_DEBUG(dbgs() << "Val, (unchanged)\n";);
1726 setLocKind(LiveSet, Var, K: LocKind::Val);
1727 } break;
1728 case LocKind::Mem: {
1729 // There's been an assignment to memory that we were using as a
1730 // location for this variable, and the Assignment doesn't match what
1731 // we'd expect to see in memory.
1732 Assignment DbgAV = LiveSet->getAssignment(Kind: BlockInfo::Debug, Var);
1733 if (DbgAV.Status == Assignment::NoneOrPhi) {
1734 // We need to terminate any previously open location now.
1735 LLVM_DEBUG(dbgs() << "None, No Debug value available\n";);
1736 setLocKind(LiveSet, Var, K: LocKind::None);
1737 emitDbgValue(Kind: LocKind::None, Source: Assign, After: &I);
1738 } else {
1739 // The previous DebugValue Value can be used here.
1740 LLVM_DEBUG(dbgs() << "Val, Debug value is Known\n";);
1741 setLocKind(LiveSet, Var, K: LocKind::Val);
1742 if (DbgAV.Source) {
1743 emitDbgValue(Kind: LocKind::Val, Source: DbgAV.Source, After: &I);
1744 } else {
          // DbgAV.Source is nullptr so we must emit undef here.
1746 emitDbgValue(Kind: LocKind::None, Source: Assign, After: &I);
1747 }
1748 }
1749 } break;
1750 case LocKind::None: {
1751 // There's been an assignment to memory and we currently are
1752 // not tracking a location for the variable. Do not emit anything.
1753 LLVM_DEBUG(dbgs() << "None, (unchanged)\n";);
1754 setLocKind(LiveSet, Var, K: LocKind::None);
1755 } break;
1756 }
1757 }
1758}
1759
1760void AssignmentTrackingLowering::processDbgAssign(DbgVariableRecord *DbgAssign,
1761 BlockInfo *LiveSet) {
1762 // Only bother tracking variables that are at some point stack homed. Other
1763 // variables can be dealt with trivially later.
1764 if (!VarsWithStackSlot->count(V: getAggregate(Var: DbgAssign)))
1765 return;
1766
1767 VariableID Var = getVariableID(Var: DebugVariable(DbgAssign));
1768 Assignment AV = Assignment::make(ID: getIDFromMarker(DVR: *DbgAssign), Source: DbgAssign);
1769 addDbgDef(LiveSet, Var, AV);
1770
1771 LLVM_DEBUG(dbgs() << "processDbgAssign on " << *DbgAssign << "\n";);
1772 LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
1773 << " -> ");
1774
1775 // Check if the DebugValue and StackHomeValue both hold the same
1776 // Assignment.
1777 if (hasVarWithAssignment(LiveSet, Kind: BlockInfo::Stack, Var, AV)) {
1778 // They match. We can use the stack home because the debug intrinsics
1779 // state that an assignment happened here, and we know that specific
1780 // assignment was the last one to take place in memory for this variable.
1781 LocKind Kind;
1782 if (DbgAssign->isKillAddress()) {
1783 LLVM_DEBUG(
1784 dbgs()
1785 << "Val, Stack matches Debug program but address is killed\n";);
1786 Kind = LocKind::Val;
1787 } else {
1788 LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n";);
1789 Kind = LocKind::Mem;
    }
1791 setLocKind(LiveSet, Var, K: Kind);
1792 emitDbgValue(Kind, Source: DbgAssign, After: DbgAssign);
1793 } else {
1794 // The last assignment to the memory location isn't the one that we want
1795 // to show to the user so emit a dbg.value(Value). Value may be undef.
1796 LLVM_DEBUG(dbgs() << "Val, Stack contents is unknown\n";);
1797 setLocKind(LiveSet, Var, K: LocKind::Val);
1798 emitDbgValue(Kind: LocKind::Val, Source: DbgAssign, After: DbgAssign);
1799 }
1800}
1801
1802void AssignmentTrackingLowering::processDbgValue(DbgVariableRecord *DbgValue,
1803 BlockInfo *LiveSet) {
  // Only bother tracking variables that are at some point stack homed. Other
  // variables can be dealt with trivially later.
1806 if (!VarsWithStackSlot->count(V: getAggregate(Var: DbgValue)))
1807 return;
1808
1809 VariableID Var = getVariableID(Var: DebugVariable(DbgValue));
1810 // We have no ID to create an Assignment with so we mark this assignment as
1811 // NoneOrPhi. Note that the dbg.value still exists, we just cannot determine
1812 // the assignment responsible for setting this value.
  // This is fine; dbg.values are essentially interchangeable with unlinked
1814 // dbg.assigns, and some passes such as mem2reg and instcombine add them to
1815 // PHIs for promoted variables.
1816 Assignment AV = Assignment::makeNoneOrPhi();
1817 addDbgDef(LiveSet, Var, AV);
1818
1819 LLVM_DEBUG(dbgs() << "processDbgValue on " << *DbgValue << "\n";);
1820 LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
1821 << " -> Val, dbg.value override");
1822
1823 setLocKind(LiveSet, Var, K: LocKind::Val);
1824 emitDbgValue(Kind: LocKind::Val, Source: DbgValue, After: DbgValue);
1825}
1826
1827static bool hasZeroSizedFragment(DbgVariableRecord &DbgValue) {
1828 if (auto F = DbgValue.getExpression()->getFragmentInfo())
1829 return F->SizeInBits == 0;
1830 return false;
1831}
1832
1833void AssignmentTrackingLowering::processDbgVariableRecord(
1834 DbgVariableRecord &DVR, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1835 // Ignore assignments to zero bits of the variable.
1836 if (hasZeroSizedFragment(DbgValue&: DVR))
1837 return;
1838
1839 if (DVR.isDbgAssign())
1840 processDbgAssign(DbgAssign: &DVR, LiveSet);
1841 else if (DVR.isDbgValue())
1842 processDbgValue(DbgValue: &DVR, LiveSet);
1843}
1844
1845void AssignmentTrackingLowering::resetInsertionPoint(Instruction &After) {
1846 assert(!After.isTerminator() && "Can't insert after a terminator");
1847 auto *R = InsertBeforeMap.find(Key: getNextNode(Inst: &After));
1848 if (R == InsertBeforeMap.end())
1849 return;
1850 R->second.clear();
1851}
1852void AssignmentTrackingLowering::resetInsertionPoint(DbgVariableRecord &After) {
1853 auto *R = InsertBeforeMap.find(Key: getNextNode(DVR: &After));
1854 if (R == InsertBeforeMap.end())
1855 return;
1856 R->second.clear();
1857}
1858
1859void AssignmentTrackingLowering::process(BasicBlock &BB, BlockInfo *LiveSet) {
1860 // If the block starts with DbgRecords, we need to process those DbgRecords as
1861 // their own frame without processing any instructions first.
1862 bool ProcessedLeadingDbgRecords = !BB.begin()->hasDbgRecords();
1863 for (auto II = BB.begin(), EI = BB.end(); II != EI;) {
1864 assert(VarsTouchedThisFrame.empty());
1865 // Process the instructions in "frames". A "frame" includes a single
    // non-debug instruction followed by any debug instructions before the
1867 // next non-debug instruction.
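    // For example (illustrative), the frame below consists of the store plus
    // the two debug records that follow it; the add begins the next frame:
    //   store i32 0, ptr %x, !DIAssignID !1
    //     #dbg_assign(...)
    //     #dbg_value(...)
    //   %y = add i32 ...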
1868
1869 // Skip the current instruction if it has unprocessed DbgRecords attached
1870 // (see comment above `ProcessedLeadingDbgRecords`).
1871 if (ProcessedLeadingDbgRecords) {
1872 // II is now either a debug intrinsic, a non-debug instruction with no
1873 // attached DbgRecords, or a non-debug instruction with attached processed
1874 // DbgRecords.
1875 // II has not been processed.
1876 if (II->isTerminator())
1877 break;
1878 resetInsertionPoint(After&: *II);
1879 processNonDbgInstruction(I&: *II, LiveSet);
1880 assert(LiveSet->isValid());
1881 ++II;
1882 }
1883 // II is now either a debug intrinsic, a non-debug instruction with no
1884 // attached DbgRecords, or a non-debug instruction with attached unprocessed
1885 // DbgRecords.
1886 if (II != EI && II->hasDbgRecords()) {
1887 // Skip over non-variable debug records (i.e., labels). They're going to
1888 // be read from IR (possibly re-ordering them within the debug record
1889 // range) rather than from the analysis results.
1890 for (DbgVariableRecord &DVR : filterDbgVars(R: II->getDbgRecordRange())) {
1891 resetInsertionPoint(After&: DVR);
1892 processDbgVariableRecord(DVR, LiveSet);
1893 assert(LiveSet->isValid());
1894 }
1895 }
1896 ProcessedLeadingDbgRecords = true;
1897 // II is now a non-debug instruction either with no attached DbgRecords, or
1898 // with attached processed DbgRecords. II has not been processed, and all
1899 // debug instructions or DbgRecords in the frame preceding II have been
1900 // processed.
1901
1902 // We've processed everything in the "frame". Now determine which variables
1903 // cannot be represented by a dbg.declare.
1904 for (auto Var : VarsTouchedThisFrame) {
1905 LocKind Loc = getLocKind(LiveSet, Var);
1906 // If a variable's LocKind is anything other than LocKind::Mem then we
1907 // must note that it cannot be represented with a dbg.declare.
1908 // Note that this check is enough without having to check the result of
1909 // joins() because for join to produce anything other than Mem after
1910 // we've already seen a Mem we'd be joining None or Val with Mem. In that
1911 // case, we've already hit this codepath when we set the LocKind to Val
1912 // or None in that block.
1913 if (Loc != LocKind::Mem) {
1914 DebugVariable DbgVar = FnVarLocs->getVariable(ID: Var);
1915 DebugAggregate Aggr{DbgVar.getVariable(), DbgVar.getInlinedAt()};
1916 NotAlwaysStackHomed.insert(V: Aggr);
1917 }
1918 }
1919 VarsTouchedThisFrame.clear();
1920 }
1921}
1922
1923AssignmentTrackingLowering::LocKind
1924AssignmentTrackingLowering::joinKind(LocKind A, LocKind B) {
1925 // Partial order:
1926 // None > Mem, Val
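  // E.g. joinKind(Mem, Mem) == Mem and joinKind(Val, Val) == Val, while any
  // mismatch, such as joinKind(Mem, Val), joins to None.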
1927 return A == B ? A : LocKind::None;
1928}
1929
1930AssignmentTrackingLowering::Assignment
1931AssignmentTrackingLowering::joinAssignment(const Assignment &A,
1932 const Assignment &B) {
1933 // Partial order:
1934 // NoneOrPhi(null, null) > Known(v, ?s)
1935
1936 // If either are NoneOrPhi the join is NoneOrPhi.
1937 // If either value is different then the result is
1938 // NoneOrPhi (joining two values is a Phi).
1939 if (!A.isSameSourceAssignment(Other: B))
1940 return Assignment::makeNoneOrPhi();
1941 if (A.Status == Assignment::NoneOrPhi)
1942 return Assignment::makeNoneOrPhi();
1943
1944 // Source is used to lookup the value + expression in the debug program if
1945 // the stack slot gets assigned a value earlier than expected. Because
1946 // we're only tracking the one dbg.assign, we can't capture debug PHIs.
1947 // It's unlikely that we're losing out on much coverage by avoiding that
1948 // extra work.
1949 // The Source may differ in this situation:
1950 // Pred.1:
1951 // dbg.assign i32 0, ..., !1, ...
1952 // Pred.2:
1953 // dbg.assign i32 1, ..., !1, ...
1954 // Here the same assignment (!1) was performed in both preds in the source,
  // but we can't use either one unless they are identical (e.g. we don't
  // want to arbitrarily pick between constant values).
1957 auto JoinSource = [&]() -> DbgVariableRecord * {
1958 if (A.Source == B.Source)
1959 return A.Source;
1960 if (!A.Source || !B.Source)
1961 return nullptr;
1962 if (A.Source->isEquivalentTo(Other: *B.Source))
1963 return A.Source;
1964 return nullptr;
1965 };
1966 DbgVariableRecord *Source = JoinSource();
1967 assert(A.Status == B.Status && A.Status == Assignment::Known);
1968 assert(A.ID == B.ID);
1969 return Assignment::make(ID: A.ID, Source);
1970}
1971
1972AssignmentTrackingLowering::BlockInfo
1973AssignmentTrackingLowering::joinBlockInfo(const BlockInfo &A,
1974 const BlockInfo &B) {
1975 return BlockInfo::join(A, B, NumVars: TrackedVariablesVectorSize);
1976}
1977
1978bool AssignmentTrackingLowering::join(
1979 const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited) {
1980
1981 SmallVector<const BasicBlock *> VisitedPreds;
1982 // Ignore backedges if we have not visited the predecessor yet. As the
1983 // predecessor hasn't yet had locations propagated into it, most locations
1984 // will not yet be valid, so treat them as all being uninitialized and
1985 // potentially valid. If a location guessed to be correct here is
1986 // invalidated later, we will remove it when we revisit this block. This
1987 // is essentially the same as initialising all LocKinds and Assignments to
1988 // an implicit ⊥ value which is the identity value for the join operation.
1989 for (const BasicBlock *Pred : predecessors(BB: &BB)) {
1990 if (Visited.count(Ptr: Pred))
1991 VisitedPreds.push_back(Elt: Pred);
1992 }
1993
1994 // No preds visited yet.
1995 if (VisitedPreds.empty()) {
1996 auto It = LiveIn.try_emplace(Key: &BB, Args: BlockInfo());
1997 bool DidInsert = It.second;
1998 if (DidInsert)
1999 It.first->second.init(NumVars: TrackedVariablesVectorSize);
2000 return /*Changed*/ DidInsert;
2001 }
2002
2003 // Exactly one visited pred. Copy the LiveOut from that pred into BB LiveIn.
2004 if (VisitedPreds.size() == 1) {
2005 const BlockInfo &PredLiveOut = LiveOut.find(Val: VisitedPreds[0])->second;
2006
2007 // Check if there isn't an entry, or there is but the LiveIn set has
2008 // changed (expensive check).
2009 auto [CurrentLiveInEntry, Inserted] = LiveIn.try_emplace(Key: &BB, Args: PredLiveOut);
2010 if (Inserted)
2011 return /*Changed*/ true;
2012 if (PredLiveOut != CurrentLiveInEntry->second) {
2013 CurrentLiveInEntry->second = PredLiveOut;
2014 return /*Changed*/ true;
2015 }
2016 return /*Changed*/ false;
2017 }
2018
  // More than one pred. Join the LiveOuts of the first two visited preds.
2020 assert(VisitedPreds.size() > 1);
2021 const BlockInfo &PredLiveOut0 = LiveOut.find(Val: VisitedPreds[0])->second;
2022 const BlockInfo &PredLiveOut1 = LiveOut.find(Val: VisitedPreds[1])->second;
2023 BlockInfo BBLiveIn = joinBlockInfo(A: PredLiveOut0, B: PredLiveOut1);
2024
2025 // Join the LiveOuts of subsequent blocks.
2026 ArrayRef Tail = ArrayRef(VisitedPreds).drop_front(N: 2);
2027 for (const BasicBlock *Pred : Tail) {
2028 const auto &PredLiveOut = LiveOut.find(Val: Pred);
2029 assert(PredLiveOut != LiveOut.end() &&
2030 "block should have been processed already");
2031 BBLiveIn = joinBlockInfo(A: std::move(BBLiveIn), B: PredLiveOut->second);
2032 }
2033
2034 // Save the joined result for BB.
2035 auto CurrentLiveInEntry = LiveIn.find(Val: &BB);
2036 // Check if there isn't an entry, or there is but the LiveIn set has changed
2037 // (expensive check).
2038 if (CurrentLiveInEntry == LiveIn.end())
2039 LiveIn.try_emplace(Key: &BB, Args: std::move(BBLiveIn));
2040 else if (BBLiveIn != CurrentLiveInEntry->second)
2041 CurrentLiveInEntry->second = std::move(BBLiveIn);
2042 else
2043 return /*Changed*/ false;
2044 return /*Changed*/ true;
2045}
2046
2047/// Return true if A fully contains B.
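/// E.g. a 32-bit fragment at offset 0 fully contains a 16-bit fragment at
/// offset 8, but not one at offset 24, which straddles A's upper bound.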
2048static bool fullyContains(DIExpression::FragmentInfo A,
2049 DIExpression::FragmentInfo B) {
2050 auto ALeft = A.OffsetInBits;
2051 auto BLeft = B.OffsetInBits;
2052 if (BLeft < ALeft)
2053 return false;
2054
2055 auto ARight = ALeft + A.SizeInBits;
2056 auto BRight = BLeft + B.SizeInBits;
2057 if (BRight > ARight)
2058 return false;
2059 return true;
2060}
2061
2062static std::optional<at::AssignmentInfo>
2063getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout) {
2064 // Don't bother checking if this is an AllocaInst. We know this
2065 // instruction has no tag which means there are no variables associated
2066 // with it.
2067 if (const auto *SI = dyn_cast<StoreInst>(Val: &I))
2068 return at::getAssignmentInfo(DL: Layout, SI);
2069 if (const auto *MI = dyn_cast<MemIntrinsic>(Val: &I))
2070 return at::getAssignmentInfo(DL: Layout, I: MI);
2071 // Alloca or non-store-like inst.
2072 return std::nullopt;
2073}
2074
2075AllocaInst *getUnknownStore(const Instruction &I, const DataLayout &Layout) {
2076 auto *II = dyn_cast<IntrinsicInst>(Val: &I);
2077 if (!II)
2078 return nullptr;
2079 Intrinsic::ID ID = II->getIntrinsicID();
2080 if (ID != Intrinsic::experimental_vp_strided_store &&
2081 ID != Intrinsic::masked_store && ID != Intrinsic::vp_scatter &&
2082 ID != Intrinsic::masked_scatter && ID != Intrinsic::vp_store &&
2083 ID != Intrinsic::masked_compressstore)
2084 return nullptr;
2085 Value *MemOp = II->getArgOperand(i: 1);
2086 // We don't actually use the constant offset for now, but we may in future,
2087 // and the non-accumulating versions do not support a vector of pointers.
2088 APInt Offset(Layout.getIndexTypeSizeInBits(Ty: MemOp->getType()), 0);
2089 Value *Base = MemOp->stripAndAccumulateConstantOffsets(DL: Layout, Offset, AllowNonInbounds: true);
2090 // For Base pointers that are not an alloca instruction we don't need to do
2091 // anything, and simply return nullptr.
2092 return dyn_cast<AllocaInst>(Val: Base);
2093}
2094
2095/// Build a map of {Variable x: Variables y} where all variable fragments
2096/// contained within the variable fragment x are in set y. This means that
2097/// y does not contain all overlaps because partial overlaps are excluded.
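/// For example (illustrative): given fragments x=[0,64), y=[0,32) and
/// z=[16,32) of one aggregate, the map contains x -> {y, z} and y -> {z}; a
/// fragment [16,48) would be recorded under x only, as it merely partially
/// overlaps y.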
2098///
2099/// While we're iterating over the function, add single location defs for
2100/// dbg.declares to \p FnVarLocs.
2101///
/// Variables that are interesting to this pass are added to
/// FnVarLocs->Variables first. TrackedVariablesVectorSize is set to the ID of
2104/// the last interesting variable plus 1, meaning variables with ID 1
2105/// (inclusive) to TrackedVariablesVectorSize (exclusive) are interesting. The
2106/// subsequent variables are either stack homed or fully promoted.
2107///
2108/// Finally, populate UntaggedStoreVars with a mapping of untagged stores to
2109/// the stored-to variable fragments, and UnknownStoreVars with a mapping
2110/// of untagged unknown stores to the stored-to variable aggregates.
2111///
2112/// These tasks are bundled together to reduce the number of times we need
2113/// to iterate over the function as they can be achieved together in one pass.
2114static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
2115 Function &Fn, FunctionVarLocsBuilder *FnVarLocs,
2116 const DenseSet<DebugAggregate> &VarsWithStackSlot,
2117 AssignmentTrackingLowering::UntaggedStoreAssignmentMap &UntaggedStoreVars,
2118 AssignmentTrackingLowering::UnknownStoreAssignmentMap &UnknownStoreVars,
2119 unsigned &TrackedVariablesVectorSize) {
2120 DenseSet<DebugVariable> Seen;
2121 // Map of Variable: [Fragments].
2122 DenseMap<DebugAggregate, SmallVector<DebugVariable, 8>> FragmentMap;
2123 // Iterate over all instructions:
2124 // - dbg.declare -> add single location variable record
2125 // - dbg.* -> Add fragments to FragmentMap
2126 // - untagged store -> Add fragments to FragmentMap and update
2127 // UntaggedStoreVars, or add to UnknownStoreVars if
2128 // we can't determine the fragment overlap.
2129 // We need to add fragments for untagged stores too so that we can correctly
2130 // clobber overlapped fragment locations later.
2131 SmallVector<DbgVariableRecord *> DPDeclares;
2132 auto ProcessDbgRecord = [&](DbgVariableRecord *Record) {
2133 if (Record->isDbgDeclare()) {
2134 DPDeclares.push_back(Elt: Record);
2135 return;
2136 }
2137 DebugVariable DV = DebugVariable(Record);
2138 DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
2139 if (!VarsWithStackSlot.contains(V: DA))
2140 return;
2141 if (Seen.insert(V: DV).second)
2142 FragmentMap[DA].push_back(Elt: DV);
2143 };
2144 for (auto &BB : Fn) {
2145 for (auto &I : BB) {
2146 for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange()))
2147 ProcessDbgRecord(&DVR);
2148 if (auto Info = getUntaggedStoreAssignmentInfo(I, Layout: Fn.getDataLayout())) {
2149 // Find markers linked to this alloca.
2150 auto HandleDbgAssignForStore = [&](DbgVariableRecord *Assign) {
2151 std::optional<DIExpression::FragmentInfo> FragInfo;
2152
2153 // Skip this assignment if the affected bits are outside of the
2154 // variable fragment.
2155 if (!at::calculateFragmentIntersect(
2156 DL: I.getDataLayout(), Dest: Info->Base,
2157 SliceOffsetInBits: Info->OffsetInBits, SliceSizeInBits: Info->SizeInBits, DVRAssign: Assign, Result&: FragInfo) ||
2158 (FragInfo && FragInfo->SizeInBits == 0))
2159 return;
2160
          // FragInfo from calculateFragmentIntersect is nullopt if the
          // resultant fragment matches Assign's fragment or the entire
          // variable - in which case copy the fragment info from Assign. If
          // FragInfo is still nullopt after the copy it means "no fragment
          // info" instead, which is how it is usually interpreted.
2166 if (!FragInfo)
2167 FragInfo = Assign->getExpression()->getFragmentInfo();
2168
2169 DebugVariable DV =
2170 DebugVariable(Assign->getVariable(), FragInfo,
2171 Assign->getDebugLoc().getInlinedAt());
2172 DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
2173 if (!VarsWithStackSlot.contains(V: DA))
2174 return;
2175
2176 // Cache this info for later.
2177 UntaggedStoreVars[&I].push_back(
2178 Elt: {FnVarLocs->insertVariable(V: DV), *Info});
2179
2180 if (Seen.insert(V: DV).second)
2181 FragmentMap[DA].push_back(Elt: DV);
2182 };
2183 for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Inst: Info->Base))
2184 HandleDbgAssignForStore(DVR);
2185 } else if (auto *AI = getUnknownStore(I, Layout: Fn.getDataLayout())) {
2186 // Find markers linked to this alloca.
2187 auto HandleDbgAssignForUnknownStore = [&](DbgVariableRecord *Assign) {
2188 // Because we can't currently represent the fragment info for this
2189 // store, we treat it as an unusable store to the whole variable.
2190 DebugVariable DV =
2191 DebugVariable(Assign->getVariable(), std::nullopt,
2192 Assign->getDebugLoc().getInlinedAt());
2193 DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
2194 if (!VarsWithStackSlot.contains(V: DA))
2195 return;
2196
2197 // Cache this info for later.
2198 UnknownStoreVars[&I].push_back(Elt: FnVarLocs->insertVariable(V: DV));
2199 };
2200 for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Inst: AI))
2201 HandleDbgAssignForUnknownStore(DVR);
2202 }
2203 }
2204 }
2205
2206 // Sort the fragment map for each DebugAggregate in ascending
2207 // order of fragment size - there should be no duplicates.
2208 for (auto &Pair : FragmentMap) {
2209 SmallVector<DebugVariable, 8> &Frags = Pair.second;
2210 std::sort(first: Frags.begin(), last: Frags.end(),
2211 comp: [](const DebugVariable &Next, const DebugVariable &Elmt) {
2212 return Elmt.getFragmentOrDefault().SizeInBits >
2213 Next.getFragmentOrDefault().SizeInBits;
2214 });
2215 // Check for duplicates.
2216 assert(std::adjacent_find(Frags.begin(), Frags.end()) == Frags.end());
2217 }
2218
2219 // Build the map.
2220 AssignmentTrackingLowering::OverlapMap Map;
2221 for (auto &Pair : FragmentMap) {
2222 auto &Frags = Pair.second;
2223 for (auto It = Frags.begin(), IEnd = Frags.end(); It != IEnd; ++It) {
2224 DIExpression::FragmentInfo Frag = It->getFragmentOrDefault();
2225 // Find the frags that this is contained within.
2226 //
2227 // Because Frags is sorted by size and none have the same offset and
2228 // size, we know that this frag can only be contained by subsequent
2229 // elements.
2230 SmallVector<DebugVariable, 8>::iterator OtherIt = It;
2231 ++OtherIt;
2232 VariableID ThisVar = FnVarLocs->insertVariable(V: *It);
2233 for (; OtherIt != IEnd; ++OtherIt) {
2234 DIExpression::FragmentInfo OtherFrag = OtherIt->getFragmentOrDefault();
2235 VariableID OtherVar = FnVarLocs->insertVariable(V: *OtherIt);
2236 if (fullyContains(A: OtherFrag, B: Frag))
2237 Map[OtherVar].push_back(Elt: ThisVar);
2238 }
2239 }
2240 }
2241
2242 // VariableIDs are 1-based so the variable-tracking bitvector needs
2243 // NumVariables plus 1 bits.
2244 TrackedVariablesVectorSize = FnVarLocs->getNumVariables() + 1;
2245
2246 // Finally, insert the declares afterwards, so the first IDs are all
2247 // partially stack homed vars.
2248 for (auto *DVR : DPDeclares)
2249 FnVarLocs->addSingleLocVar(Var: DebugVariable(DVR), Expr: DVR->getExpression(),
2250 DL: DVR->getDebugLoc(),
2251 R: RawLocationWrapper(DVR->getRawLocation()));
2252 return Map;
2253}
2254
2255bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) {
2256 if (Fn.size() > MaxNumBlocks) {
2257 LLVM_DEBUG(dbgs() << "[AT] Dropping var locs in: " << Fn.getName()
2258 << ": too many blocks (" << Fn.size() << ")\n");
2259 at::deleteAll(F: &Fn);
2260 return false;
2261 }
2262
2263 FnVarLocs = FnVarLocsBuilder;
2264
2265 // The general structure here is inspired by VarLocBasedImpl.cpp
2266 // (LiveDebugValues).
2267
2268 // Build the variable fragment overlap map.
2269 // Note that this pass doesn't handle partial overlaps correctly (FWIW
2270 // neither does LiveDebugVariables) because that is difficult to do and
  // appears to be a rare occurrence.
2272 VarContains = buildOverlapMapAndRecordDeclares(
2273 Fn, FnVarLocs, VarsWithStackSlot: *VarsWithStackSlot, UntaggedStoreVars, UnknownStoreVars,
2274 TrackedVariablesVectorSize);
2275
2276 // Prepare for traversal.
2277 ReversePostOrderTraversal<Function *> RPOT(&Fn);
2278 std::priority_queue<unsigned int, std::vector<unsigned int>,
2279 std::greater<unsigned int>>
2280 Worklist;
2281 std::priority_queue<unsigned int, std::vector<unsigned int>,
2282 std::greater<unsigned int>>
2283 Pending;
2284 DenseMap<unsigned int, BasicBlock *> OrderToBB;
2285 DenseMap<BasicBlock *, unsigned int> BBToOrder;
2286 { // Init OrderToBB and BBToOrder.
2287 unsigned int RPONumber = 0;
2288 for (BasicBlock *BB : RPOT) {
2289 OrderToBB[RPONumber] = BB;
2290 BBToOrder[BB] = RPONumber;
2291 Worklist.push(x: RPONumber);
2292 ++RPONumber;
2293 }
2294 LiveIn.reserve(NumEntries: RPONumber);
2295 LiveOut.reserve(NumEntries: RPONumber);
2296 }
2297
2298 // Perform the traversal.
2299 //
2300 // This is a standard "union of predecessor outs" dataflow problem. To solve
2301 // it, we perform join() and process() using the two worklist method until
2302 // the LiveIn data for each block becomes unchanging. The "proof" that this
2303 // terminates can be put together by looking at the comments around LocKind,
2304 // Assignment, and the various join methods, which show that all the elements
2305 // involved are made up of join-semilattices; LiveIn(n) can only
2306 // monotonically increase in value throughout the dataflow.
2307 //
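  // Using RPO-number priority queues means that, within each sweep, a block's
  // visited predecessors are usually processed before the block itself, which
  // keeps the number of revisits low.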
2308 SmallPtrSet<BasicBlock *, 16> Visited;
2309 while (!Worklist.empty()) {
2310 // We track what is on the pending worklist to avoid inserting the same
2311 // thing twice.
2312 SmallPtrSet<BasicBlock *, 16> OnPending;
2313 LLVM_DEBUG(dbgs() << "Processing Worklist\n");
2314 while (!Worklist.empty()) {
2315 BasicBlock *BB = OrderToBB[Worklist.top()];
2316 LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
2317 Worklist.pop();
2318 bool InChanged = join(BB: *BB, Visited);
2319 // Always consider LiveIn changed on the first visit.
2320 InChanged |= Visited.insert(Ptr: BB).second;
2321 if (InChanged) {
2322 LLVM_DEBUG(dbgs() << BB->getName() << " has new InLocs, process it\n");
2323 // Mutate a copy of LiveIn while processing BB. After calling process
2324 // LiveSet is the LiveOut set for BB.
2325 BlockInfo LiveSet = LiveIn[BB];
2326
2327 // Process the instructions in the block.
2328 process(BB&: *BB, LiveSet: &LiveSet);
2329
2330 // Relatively expensive check: has anything changed in LiveOut for BB?
2331 if (LiveOut[BB] != LiveSet) {
2332 LLVM_DEBUG(dbgs() << BB->getName()
2333 << " has new OutLocs, add succs to worklist: [ ");
2334 LiveOut[BB] = std::move(LiveSet);
2335 for (BasicBlock *Succ : successors(BB)) {
2336 if (OnPending.insert(Ptr: Succ).second) {
2337 LLVM_DEBUG(dbgs() << Succ->getName() << " ");
2338 Pending.push(x: BBToOrder[Succ]);
2339 }
2340 }
2341 LLVM_DEBUG(dbgs() << "]\n");
2342 }
2343 }
2344 }
2345 Worklist.swap(pq&: Pending);
    // At this point Pending must be empty, since it was just swapped with the
    // now-drained worklist.
2348 assert(Pending.empty() && "Pending should be empty");
2349 }
2350
2351 // That's the hard part over. Now we just have some admin to do.
2352
2353 // Record whether we inserted any intrinsics.
2354 bool InsertedAnyIntrinsics = false;
2355
2356 // Identify and add defs for single location variables.
2357 //
2358 // Go through all of the defs that we plan to add. If the aggregate variable
2359 // it's a part of is not in the NotAlwaysStackHomed set we can emit a single
2360 // location def and omit the rest. Add an entry to AlwaysStackHomed so that
  // we can identify those unneeded defs later.
2362 DenseSet<DebugAggregate> AlwaysStackHomed;
2363 for (const auto &Pair : InsertBeforeMap) {
2364 auto &Vec = Pair.second;
2365 for (VarLocInfo VarLoc : Vec) {
2366 DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID);
2367 DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()};
2368
2369 // Skip this Var if it's not always stack homed.
2370 if (NotAlwaysStackHomed.contains(V: Aggr))
2371 continue;
2372
2373 // Skip complex cases such as when different fragments of a variable have
2374 // been split into different allocas. Skipping in this case means falling
2375 // back to using a list of defs (which could reduce coverage, but is no
2376 // less correct).
2377 bool Simple =
2378 VarLoc.Expr->getNumElements() == 1 && VarLoc.Expr->startsWithDeref();
2379 if (!Simple) {
2380 NotAlwaysStackHomed.insert(V: Aggr);
2381 continue;
2382 }
2383
2384 // All source assignments to this variable remain and all stores to any
2385 // part of the variable store to the same address (with varying
2386 // offsets). We can just emit a single location for the whole variable.
2387 //
2388 // Unless we've already done so, create the single location def now.
2389 if (AlwaysStackHomed.insert(V: Aggr).second) {
2390 assert(!VarLoc.Values.hasArgList());
2391 // TODO: When more complex cases are handled VarLoc.Expr should be
2392 // built appropriately rather than always using an empty DIExpression.
2393 // The assert below is a reminder.
2394 assert(Simple);
2395 VarLoc.Expr = DIExpression::get(Context&: Fn.getContext(), Elements: {});
2396 DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID);
2397 FnVarLocs->addSingleLocVar(Var, Expr: VarLoc.Expr, DL: VarLoc.DL, R: VarLoc.Values);
2398 InsertedAnyIntrinsics = true;
2399 }
2400 }
2401 }
2402
2403 // Insert the other DEFs.
2404 for (const auto &[InsertBefore, Vec] : InsertBeforeMap) {
2405 SmallVector<VarLocInfo> NewDefs;
2406 for (const VarLocInfo &VarLoc : Vec) {
2407 DebugVariable Var = FnVarLocs->getVariable(ID: VarLoc.VariableID);
2408 DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()};
2409 // If this variable is always stack homed then we have already inserted a
2410 // dbg.declare and deleted this dbg.value.
2411 if (AlwaysStackHomed.contains(V: Aggr))
2412 continue;
2413 NewDefs.push_back(Elt: VarLoc);
2414 InsertedAnyIntrinsics = true;
2415 }
2416
2417 FnVarLocs->setWedge(Before: InsertBefore, Wedge: std::move(NewDefs));
2418 }
2419
2420 InsertedAnyIntrinsics |= emitPromotedVarLocs(FnVarLocs);
2421
2422 return InsertedAnyIntrinsics;
2423}
2424
2425bool AssignmentTrackingLowering::emitPromotedVarLocs(
2426 FunctionVarLocsBuilder *FnVarLocs) {
2427 bool InsertedAnyIntrinsics = false;
2428 // Go through every block, translating debug intrinsics for fully promoted
2429 // variables into FnVarLocs location defs. No analysis required for these.
2430 auto TranslateDbgRecord = [&](DbgVariableRecord *Record) {
2431 // Skip variables that haven't been promoted - we've dealt with those
2432 // already.
2433 if (VarsWithStackSlot->contains(V: getAggregate(Var: Record)))
2434 return;
2435 auto InsertBefore = getNextNode(DVR: Record);
2436 assert(InsertBefore && "Unexpected: debug intrinsics after a terminator");
2437 FnVarLocs->addVarLoc(Before: InsertBefore, Var: DebugVariable(Record),
2438 Expr: Record->getExpression(), DL: Record->getDebugLoc(),
2439 R: RawLocationWrapper(Record->getRawLocation()));
2440 InsertedAnyIntrinsics = true;
2441 };
2442 for (auto &BB : Fn) {
2443 for (auto &I : BB) {
2444 // Skip instructions other than dbg.values and dbg.assigns.
2445 for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange()))
2446 if (DVR.isDbgValue() || DVR.isDbgAssign())
2447 TranslateDbgRecord(&DVR);
2448 }
2449 }
2450 return InsertedAnyIntrinsics;
2451}
2452
2453/// Remove redundant definitions within sequences of consecutive location defs.
2454/// This is done using a backward scan to keep the last def describing a
2455/// specific variable/fragment.
2456///
2457/// This implements removeRedundantDbgInstrsUsingBackwardScan from
2458/// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with
2459/// FunctionVarLocsBuilder instead of with intrinsics.
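/// For example (illustrative): if a wedge defines fragment [0, 32) of x and a
/// later def in the same wedge covers fragment [0, 64) of x, the earlier def
/// is fully eclipsed and is removed.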
2460static bool
2461removeRedundantDbgLocsUsingBackwardScan(const BasicBlock *BB,
2462 FunctionVarLocsBuilder &FnVarLocs) {
2463 bool Changed = false;
2464 SmallDenseMap<DebugAggregate, BitVector> VariableDefinedBytes;
2465 // Scan over the entire block, not just over the instructions mapped by
2466 // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
2467 // instructions.
2468 for (const Instruction &I : reverse(C: *BB)) {
2469 // Sequence of consecutive defs ended. Clear map for the next one.
2470 VariableDefinedBytes.clear();
2471
2472 auto HandleLocsForWedge = [&](auto *WedgePosition) {
2473 // Get the location defs that start just before this instruction.
2474 const auto *Locs = FnVarLocs.getWedge(Before: WedgePosition);
2475 if (!Locs)
2476 return;
2477
2478 NumWedgesScanned++;
2479 bool ChangedThisWedge = false;
2480 // The new pruned set of defs, reversed because we're scanning backwards.
2481 SmallVector<VarLocInfo> NewDefsReversed;
2482
2483 // Iterate over the existing defs in reverse.
2484 for (auto RIt = Locs->rbegin(), REnd = Locs->rend(); RIt != REnd; ++RIt) {
2485 NumDefsScanned++;
2486 DebugAggregate Aggr =
2487 getAggregate(FnVarLocs.getVariable(ID: RIt->VariableID));
2488 uint64_t SizeInBits = Aggr.first->getSizeInBits().value_or(u: 0);
2489 uint64_t SizeInBytes = divideCeil(Numerator: SizeInBits, Denominator: 8);
2490
2491 // Cutoff for large variables to prevent expensive bitvector operations.
2492 const uint64_t MaxSizeBytes = 2048;
2493
2494 if (SizeInBytes == 0 || SizeInBytes > MaxSizeBytes) {
2495 // If the size is unknown (0) then keep this location def to be safe.
2496 // Do the same for defs of large variables, which would be expensive
2497 // to represent with a BitVector.
2498 NewDefsReversed.push_back(*RIt);
2499 continue;
2500 }
2501
        // Only keep this location definition if it is not fully eclipsed by
        // other definitions in this wedge that come after it.

        // Look up the bytes defined so far for this aggregate, inserting an
        // empty record if this is the first definition seen.
2506 auto InsertResult =
2507 VariableDefinedBytes.try_emplace(Key: Aggr, Args: BitVector(SizeInBytes));
2508 bool FirstDefinition = InsertResult.second;
2509 BitVector &DefinedBytes = InsertResult.first->second;
2510
2511 DIExpression::FragmentInfo Fragment =
2512 RIt->Expr->getFragmentInfo().value_or(
2513 DIExpression::FragmentInfo(SizeInBits, 0));
2514 bool InvalidFragment = Fragment.endInBits() > SizeInBits;
2515 uint64_t StartInBytes = Fragment.startInBits() / 8;
2516 uint64_t EndInBytes = divideCeil(Numerator: Fragment.endInBits(), Denominator: 8);
2517
2518 // If this defines any previously undefined bytes, keep it.
2519 if (FirstDefinition || InvalidFragment ||
2520 DefinedBytes.find_first_unset_in(Begin: StartInBytes, End: EndInBytes) != -1) {
2521 if (!InvalidFragment)
2522 DefinedBytes.set(I: StartInBytes, E: EndInBytes);
2523 NewDefsReversed.push_back(*RIt);
2524 continue;
2525 }
2526
2527 // Redundant def found: throw it away. Since the wedge of defs is being
2528 // rebuilt, doing nothing is the same as deleting an entry.
2529 ChangedThisWedge = true;
2530 NumDefsRemoved++;
2531 }
2532
2533 // Un-reverse the defs and replace the wedge with the pruned version.
2534 if (ChangedThisWedge) {
2535 std::reverse(first: NewDefsReversed.begin(), last: NewDefsReversed.end());
2536 FnVarLocs.setWedge(Before: WedgePosition, Wedge: std::move(NewDefsReversed));
2537 NumWedgesChanged++;
2538 Changed = true;
2539 }
2540 };
2541 HandleLocsForWedge(&I);
2542 for (DbgVariableRecord &DVR : reverse(C: filterDbgVars(R: I.getDbgRecordRange())))
2543 HandleLocsForWedge(&DVR);
2544 }
2545
2546 return Changed;
2547}
2548
/// Remove redundant location defs using a forward scan. This can remove a
/// location definition that is redundant because an earlier def already
/// indicates that the variable holds the same value.
2552///
2553/// This implements removeRedundantDbgInstrsUsingForwardScan from
2554/// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with
/// FunctionVarLocsBuilder instead of with intrinsics.
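/// For example (illustrative): given two consecutive defs of x with identical
/// location values and expressions, the second adds no information and is
/// removed.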
2556static bool
2557removeRedundantDbgLocsUsingForwardScan(const BasicBlock *BB,
2558 FunctionVarLocsBuilder &FnVarLocs) {
2559 bool Changed = false;
2560 DenseMap<DebugVariable, std::pair<RawLocationWrapper, DIExpression *>>
2561 VariableMap;
2562
2563 // Scan over the entire block, not just over the instructions mapped by
2564 // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
2565 // instructions.
2566 for (const Instruction &I : *BB) {
2567 // Get the defs that come just before this instruction.
2568 auto HandleLocsForWedge = [&](auto *WedgePosition) {
2569 const auto *Locs = FnVarLocs.getWedge(Before: WedgePosition);
2570 if (!Locs)
2571 return;
2572
2573 NumWedgesScanned++;
2574 bool ChangedThisWedge = false;
2575 // The new pruned set of defs.
2576 SmallVector<VarLocInfo> NewDefs;
2577
2578 // Iterate over the existing defs.
2579 for (const VarLocInfo &Loc : *Locs) {
2580 NumDefsScanned++;
2581 DebugVariable Key(FnVarLocs.getVariable(ID: Loc.VariableID).getVariable(),
2582 std::nullopt, Loc.DL.getInlinedAt());
2583 auto [VMI, Inserted] = VariableMap.try_emplace(Key);
2584
2585 // Update the map if we found a new value/expression describing the
2586 // variable, or if the variable wasn't mapped already.
2587 if (Inserted || VMI->second.first != Loc.Values ||
2588 VMI->second.second != Loc.Expr) {
2589 VMI->second = {Loc.Values, Loc.Expr};
2590 NewDefs.push_back(Elt: Loc);
2591 continue;
2592 }
2593
2594 // Did not insert this Loc, which is the same as removing it.
2595 ChangedThisWedge = true;
2596 NumDefsRemoved++;
2597 }
2598
2599 // Replace the existing wedge with the pruned version.
2600 if (ChangedThisWedge) {
2601 FnVarLocs.setWedge(Before: WedgePosition, Wedge: std::move(NewDefs));
2602 NumWedgesChanged++;
2603 Changed = true;
2604 }
2605 };
2606
2607 for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange()))
2608 HandleLocsForWedge(&DVR);
2609 HandleLocsForWedge(&I);
2610 }
2611
2612 return Changed;
2613}
2614
2615static bool
2616removeUndefDbgLocsFromEntryBlock(const BasicBlock *BB,
2617 FunctionVarLocsBuilder &FnVarLocs) {
2618 assert(BB->isEntryBlock());
2619 // Do extra work to ensure that we remove semantically unimportant undefs.
2620 //
2621 // This is to work around the fact that SelectionDAG will hoist dbg.values
2622 // using argument values to the top of the entry block. That can move arg
2623 // dbg.values before undef and constant dbg.values which they previously
  // followed. The easiest thing to do is to just feed SelectionDAG input that
  // it's happy with.
2626 //
  // Map of {Variable x: Fragments y} where the fragments y of variable x
  // have at least one non-undef location defined already. Don't use directly,
2629 // instead call DefineBits and HasDefinedBits.
2630 SmallDenseMap<DebugAggregate, SmallDenseSet<DIExpression::FragmentInfo>>
2631 VarsWithDef;
2632 // Specify that V (a fragment of A) has a non-undef location.
2633 auto DefineBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
2634 VarsWithDef[A].insert(V: V.getFragmentOrDefault());
2635 };
2636 // Return true if a non-undef location has been defined for V (a fragment of
2637 // A). Doesn't imply that the location is currently non-undef, just that a
2638 // non-undef location has been seen previously.
2639 auto HasDefinedBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
2640 auto FragsIt = VarsWithDef.find(Val: A);
2641 if (FragsIt == VarsWithDef.end())
2642 return false;
2643 return llvm::any_of(Range&: FragsIt->second, P: [V](auto Frag) {
2644 return DIExpression::fragmentsOverlap(Frag, V.getFragmentOrDefault());
2645 });
2646 };
2647
2648 bool Changed = false;
2649
2650 // Scan over the entire block, not just over the instructions mapped by
2651 // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
2652 // instructions.
2653 for (const Instruction &I : *BB) {
2654 // Get the defs that come just before this instruction.
2655 auto HandleLocsForWedge = [&](auto *WedgePosition) {
2656 const auto *Locs = FnVarLocs.getWedge(Before: WedgePosition);
2657 if (!Locs)
2658 return;
2659
2660 NumWedgesScanned++;
2661 bool ChangedThisWedge = false;
2662 // The new pruned set of defs.
2663 SmallVector<VarLocInfo> NewDefs;
2664
2665 // Iterate over the existing defs.
2666 for (const VarLocInfo &Loc : *Locs) {
2667 NumDefsScanned++;
2668 DebugAggregate Aggr{FnVarLocs.getVariable(ID: Loc.VariableID).getVariable(),
2669 Loc.DL.getInlinedAt()};
2670 DebugVariable Var = FnVarLocs.getVariable(ID: Loc.VariableID);
2671
2672 // Remove undef entries that are encountered before any non-undef
2673 // intrinsics from the entry block.
2674 if (Loc.Values.isKillLocation(Expression: Loc.Expr) && !HasDefinedBits(Aggr, Var)) {
2675 // Did not insert this Loc, which is the same as removing it.
2676 NumDefsRemoved++;
2677 ChangedThisWedge = true;
2678 continue;
2679 }
2680
2681 DefineBits(Aggr, Var);
2682 NewDefs.push_back(Elt: Loc);
2683 }
2684
2685 // Replace the existing wedge with the pruned version.
2686 if (ChangedThisWedge) {
2687 FnVarLocs.setWedge(Before: WedgePosition, Wedge: std::move(NewDefs));
2688 NumWedgesChanged++;
2689 Changed = true;
2690 }
2691 };
2692 for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange()))
2693 HandleLocsForWedge(&DVR);
2694 HandleLocsForWedge(&I);
2695 }
2696
2697 return Changed;
2698}
2699
2700static bool removeRedundantDbgLocs(const BasicBlock *BB,
2701 FunctionVarLocsBuilder &FnVarLocs) {
2702 bool MadeChanges = false;
2703 MadeChanges |= removeRedundantDbgLocsUsingBackwardScan(BB, FnVarLocs);
2704 if (BB->isEntryBlock())
2705 MadeChanges |= removeUndefDbgLocsFromEntryBlock(BB, FnVarLocs);
2706 MadeChanges |= removeRedundantDbgLocsUsingForwardScan(BB, FnVarLocs);
2707
2708 if (MadeChanges)
2709 LLVM_DEBUG(dbgs() << "Removed redundant dbg locs from: " << BB->getName()
2710 << "\n");
2711 return MadeChanges;
2712}
2713
2714static DenseSet<DebugAggregate> findVarsWithStackSlot(Function &Fn) {
2715 DenseSet<DebugAggregate> Result;
2716 for (auto &BB : Fn) {
2717 for (auto &I : BB) {
2718 // Any variable linked to an instruction is considered
2719 // interesting. Ideally we only need to check Allocas, however, a
2720 // DIAssignID might get dropped from an alloca but not stores. In that
2721 // case, we need to consider the variable interesting for NFC behaviour
2722 // with this change. TODO: Consider only looking at allocas.
2723 for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Inst: &I)) {
2724 Result.insert(V: {DVR->getVariable(), DVR->getDebugLoc().getInlinedAt()});
2725 }
2726 }
2727 }
2728 return Result;
2729}
2730
2731static void analyzeFunction(Function &Fn, const DataLayout &Layout,
2732 FunctionVarLocsBuilder *FnVarLocs) {
2733 // The analysis will generate location definitions for all variables, but we
2734 // only need to perform a dataflow on the set of variables which have a stack
2735 // slot. Find those now.
2736 DenseSet<DebugAggregate> VarsWithStackSlot = findVarsWithStackSlot(Fn);
2737
2738 bool Changed = false;
2739
2740 // Use a scope block to clean up AssignmentTrackingLowering before running
2741 // MemLocFragmentFill to reduce peak memory consumption.
2742 {
2743 AssignmentTrackingLowering Pass(Fn, Layout, &VarsWithStackSlot);
2744 Changed = Pass.run(FnVarLocsBuilder: FnVarLocs);
2745 }
2746
2747 if (Changed) {
2748 MemLocFragmentFill Pass(Fn, &VarsWithStackSlot,
2749 shouldCoalesceFragments(F&: Fn));
2750 Pass.run(FnVarLocs);
2751
    // Remove redundant entries. As well as reducing memory consumption and
    // spending some cycles now to avoid wasted work later, this has another
    // important job: working around some SelectionDAG quirks. See
    // removeRedundantDbgLocsUsingForwardScan comments for more info on that.
2756 for (auto &BB : Fn)
2757 removeRedundantDbgLocs(BB: &BB, FnVarLocs&: *FnVarLocs);
2758 }
2759}
2760
2761FunctionVarLocs
2762DebugAssignmentTrackingAnalysis::run(Function &F,
2763 FunctionAnalysisManager &FAM) {
2764 if (!isAssignmentTrackingEnabled(M: *F.getParent()))
2765 return FunctionVarLocs();
2766
2767 auto &DL = F.getDataLayout();
2768
2769 FunctionVarLocsBuilder Builder;
2770 analyzeFunction(Fn&: F, Layout: DL, FnVarLocs: &Builder);
2771
2772 // Save these results.
2773 FunctionVarLocs Results;
2774 Results.init(Builder);
2775 return Results;
2776}
2777
2778AnalysisKey DebugAssignmentTrackingAnalysis::Key;
2779
2780PreservedAnalyses
2781DebugAssignmentTrackingPrinterPass::run(Function &F,
2782 FunctionAnalysisManager &FAM) {
2783 FAM.getResult<DebugAssignmentTrackingAnalysis>(IR&: F).print(OS, Fn: F);
2784 return PreservedAnalyses::all();
2785}
2786
2787bool AssignmentTrackingAnalysis::runOnFunction(Function &F) {
2788 if (!isAssignmentTrackingEnabled(M: *F.getParent()))
2789 return false;
2790
2791 LLVM_DEBUG(dbgs() << "AssignmentTrackingAnalysis run on " << F.getName()
2792 << "\n");
2793
2794 // Clear previous results.
2795 Results->clear();
2796
2797 FunctionVarLocsBuilder Builder;
2798 analyzeFunction(Fn&: F, Layout: F.getDataLayout(), FnVarLocs: &Builder);
2799
2800 // Save these results.
2801 Results->init(Builder);
2802
2803 if (PrintResults && isFunctionInPrintList(FunctionName: F.getName()))
2804 Results->print(OS&: errs(), Fn: F);
2805
2806 // Return false because this pass does not modify the function.
2807 return false;
2808}
2809
2810AssignmentTrackingAnalysis::AssignmentTrackingAnalysis()
2811 : FunctionPass(ID), Results(std::make_unique<FunctionVarLocs>()) {}
2812
2813char AssignmentTrackingAnalysis::ID = 0;
2814
2815INITIALIZE_PASS(AssignmentTrackingAnalysis, DEBUG_TYPE,
2816 "Assignment Tracking Analysis", false, true)
2817