| 1 | //===- VarLocBasedImpl.cpp - Tracking Debug Value MIs with VarLoc class----===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | /// |
| 9 | /// \file VarLocBasedImpl.cpp |
| 10 | /// |
| 11 | /// LiveDebugValues is an optimistic "available expressions" dataflow |
| 12 | /// algorithm. The set of expressions is the set of machine locations |
| 13 | /// (registers, spill slots, constants, and target indices) at which a variable
| 14 | /// fragment might be located, qualified by a DIExpression and indirect-ness |
| 15 | /// flag, while each variable is identified by a DebugVariable object. The |
| 16 | /// availability of an expression begins when a DBG_VALUE instruction specifies |
| 17 | /// the location of a DebugVariable, and continues until that location is |
| 18 | /// clobbered or re-specified by a different DBG_VALUE for the same |
| 19 | /// DebugVariable. |
| 20 | /// |
| 21 | /// The output of LiveDebugValues is additional DBG_VALUE instructions, |
| 22 | /// placed to extend variable locations as far as they're available. This file
| 23 | /// and the VarLocBasedLDV class are an implementation that explicitly tracks
| 24 | /// locations, using the VarLoc class. |
| 25 | /// |
| 26 | /// The canonical "available expressions" problem doesn't have expression |
| 27 | /// clobbering, instead when a variable is re-assigned, any expressions using |
| 28 | /// that variable get invalidated. LiveDebugValues can map onto "available |
| 29 | /// expressions" by having every register represented by a variable, which is |
| 30 | /// used in an expression that becomes available at a DBG_VALUE instruction. |
| 31 | /// When the register is clobbered, its variable is effectively reassigned, and |
| 32 | /// expressions computed from it become unavailable. A similar construct is |
| 33 | /// needed when a DebugVariable has its location re-specified, to invalidate |
| 34 | /// all other locations for that DebugVariable. |
| 35 | /// |
| 36 | /// Using the dataflow analysis to compute the available expressions, we create |
| 37 | /// a DBG_VALUE at the beginning of each block where the expression is |
| 38 | /// live-in. This propagates variable locations into every basic block where |
| 39 | /// the location can be determined, rather than only having DBG_VALUEs in blocks |
| 40 | /// where locations are specified due to an assignment or some optimization. |
| 41 | /// Movements of values between registers and spill slots are annotated with |
| 42 | /// DBG_VALUEs too, to track variable values between locations. All this allows
| 43 | /// DbgEntityHistoryCalculator to focus on only the locations within individual |
| 44 | /// blocks, facilitating testing and improving modularity. |
| 45 | /// |
| 46 | /// We follow an optimistic dataflow approach, with this lattice:
| 47 | /// |
| 48 | /// \verbatim |
| 49 | /// ┬ "Unknown" |
| 50 | /// | |
| 51 | /// v |
| 52 | /// True |
| 53 | /// | |
| 54 | /// v |
| 55 | /// ⊥ False |
| 56 | /// \endverbatim Here "True" signifies that the expression is available (and
| 57 | /// thus a DebugVariable's location is the corresponding register), while |
| 58 | /// "False" signifies that the expression is unavailable. "Unknown"s never |
| 59 | /// survive to the end of the analysis (see below). |
| 60 | /// |
| 61 | /// Formally, all DebugVariable locations that are live-out of a block are |
| 62 | /// initialized to \top. A block's live-in values take the meet of the lattice
| 63 | /// values of all its predecessors' live-outs, except for the entry block, where
| 64 | /// all live-ins are \bot. The usual dataflow propagation occurs: the transfer |
| 65 | /// function for a block assigns an expression for a DebugVariable to be "True" |
| 66 | /// if a DBG_VALUE in the block specifies it; "False" if the location is |
| 67 | /// clobbered; or the live-in value if it is unaffected by the block. We |
| 68 | /// visit each block in reverse post order until a fixed point is reached. The
| 69 | /// solution produced is maximal. |
| 70 | /// |
| 71 | /// Intuitively, we start by assuming that every expression / variable location |
| 72 | /// is at least "True", and then propagate "False" from the entry block and any |
| 73 | /// clobbers until there are no more changes to make. This gives us an accurate |
| 74 | /// solution because all incorrect locations will have a "False" propagated into |
| 75 | /// them. It also gives us a solution that copes well with loops by assuming |
| 76 | /// that variable locations are live-through every loop, and then using the
| 77 | /// dataflow to remove those that are not.
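///
/// As a purely illustrative example (hypothetical MIR and register names, not
/// taken from any real function), consider a variable "x" given a location in
/// $rbx just before a loop whose body does not clobber $rbx:
///
/// \verbatim
///   bb.0: DBG_VALUE $rbx, !"x"          live-out: {x @ $rbx} = True
///   bb.1 (loop head, preds bb.0, bb.2)  live-in = meet(True, Unknown) = True
///   bb.2 (latch, $rbx not clobbered)    live-out: {x @ $rbx} = True
/// \endverbatim
///
/// Re-visiting bb.1 once bb.2's live-out is known keeps the location "True",
/// so the fixed point leaves "x @ $rbx" live-through the loop. Had bb.2
/// clobbered $rbx, its live-out would become "False" and the next visit of
/// bb.1 would drop the location.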
| 78 | /// |
| 79 | /// Within LiveDebugValues: each variable location is represented by a |
| 80 | /// VarLoc object that identifies the source variable, the set of |
| 81 | /// machine-locations that currently describe it (a single location for |
| 82 | /// DBG_VALUE or multiple for DBG_VALUE_LIST), and the DBG_VALUE inst that |
| 83 | /// specifies the location. Each VarLoc is indexed in the (function-scope) \p |
| 84 | /// VarLocMap, giving each VarLoc a set of unique indexes, each of which |
| 85 | /// corresponds to one of the VarLoc's machine-locations and can be used to |
| 86 | /// lookup the VarLoc in the VarLocMap. Rather than operate directly on machine |
| 87 | /// locations, the dataflow analysis in this pass identifies locations by their |
| 88 | /// indices in the VarLocMap, meaning all the variable locations in a block can |
| 89 | /// be described by a sparse vector of VarLocMap indices. |
| 90 | /// |
| 91 | /// All the storage for the dataflow analysis is local to the ExtendRanges |
| 92 | /// method and passed down to helper methods. "OutLocs" and "InLocs" record the |
| 93 | /// in and out lattice values for each block. "OpenRanges" maintains a list of |
| 94 | /// variable locations and, with the "process" method, evaluates the transfer |
| 95 | /// function of each block. "flushPendingLocs" installs debug value instructions |
| 96 | /// for each live-in location at the start of blocks, while "Transfers" records |
| 97 | /// transfers of values between machine-locations. |
| 98 | /// |
| 99 | /// We avoid explicitly representing the "Unknown" (\top) lattice value in the |
| 100 | /// implementation. Instead, unvisited blocks implicitly have all lattice |
| 101 | /// values set as "Unknown". After being visited, there will be a path back to
| 102 | /// the entry block where the lattice value is "False", and as the transfer |
| 103 | /// function cannot make new "Unknown" locations, there are no scenarios where |
| 104 | /// a block can have an "Unknown" location after being visited. Similarly, we |
| 105 | /// don't enumerate all possible variable locations before exploring the |
| 106 | /// function: when a new location is discovered, all blocks previously explored |
| 107 | /// were implicitly "False" but unrecorded, and become explicitly "False" when |
| 108 | /// a new VarLoc is created with its bit not set in predecessor InLocs or |
| 109 | /// OutLocs. |
| 110 | /// |
| 111 | //===----------------------------------------------------------------------===// |
| 112 | |
| 113 | #include "LiveDebugValues.h" |
| 114 | |
| 115 | #include "llvm/ADT/CoalescingBitVector.h" |
| 116 | #include "llvm/ADT/DenseMap.h" |
| 117 | #include "llvm/ADT/PostOrderIterator.h" |
| 118 | #include "llvm/ADT/SmallPtrSet.h" |
| 119 | #include "llvm/ADT/SmallSet.h" |
| 120 | #include "llvm/ADT/SmallVector.h" |
| 121 | #include "llvm/ADT/Statistic.h" |
| 122 | #include "llvm/BinaryFormat/Dwarf.h" |
| 123 | #include "llvm/CodeGen/LexicalScopes.h" |
| 124 | #include "llvm/CodeGen/MachineBasicBlock.h" |
| 125 | #include "llvm/CodeGen/MachineFunction.h" |
| 126 | #include "llvm/CodeGen/MachineInstr.h" |
| 127 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 128 | #include "llvm/CodeGen/MachineInstrBundle.h" |
| 129 | #include "llvm/CodeGen/MachineMemOperand.h" |
| 130 | #include "llvm/CodeGen/MachineOperand.h" |
| 131 | #include "llvm/CodeGen/PseudoSourceValue.h" |
| 132 | #include "llvm/CodeGen/TargetFrameLowering.h" |
| 133 | #include "llvm/CodeGen/TargetInstrInfo.h" |
| 134 | #include "llvm/CodeGen/TargetLowering.h" |
| 135 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
| 136 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
| 137 | #include "llvm/Config/llvm-config.h" |
| 138 | #include "llvm/IR/DebugInfoMetadata.h" |
| 139 | #include "llvm/IR/DebugLoc.h" |
| 140 | #include "llvm/IR/Function.h" |
| 141 | #include "llvm/MC/MCRegisterInfo.h" |
| 142 | #include "llvm/Support/Casting.h" |
| 143 | #include "llvm/Support/Debug.h" |
| 144 | #include "llvm/Support/TypeSize.h" |
| 145 | #include "llvm/Support/raw_ostream.h" |
| 146 | #include "llvm/Target/TargetMachine.h" |
| 147 | #include <cassert> |
| 148 | #include <cstdint> |
| 149 | #include <functional> |
| 150 | #include <map> |
| 151 | #include <optional> |
| 152 | #include <queue> |
| 153 | #include <tuple> |
| 154 | #include <utility> |
| 155 | #include <vector> |
| 156 | |
| 157 | using namespace llvm; |
| 158 | |
| 159 | #define DEBUG_TYPE "livedebugvalues" |
| 160 | |
| 161 | STATISTIC(NumInserted, "Number of DBG_VALUE instructions inserted");
| 162 | |
| 163 | /// Return true if \p Op is a register other than the stack or frame register,
| 164 | /// otherwise return false. This is used to avoid basing the debug entry values
| 165 | /// on such registers, since we do not support it at the moment.
| 166 | static bool isRegOtherThanSPAndFP(const MachineOperand &Op, |
| 167 | const MachineInstr &MI, |
| 168 | const TargetRegisterInfo *TRI) { |
| 169 | if (!Op.isReg()) |
| 170 | return false; |
| 171 | |
| 172 | const MachineFunction *MF = MI.getParent()->getParent(); |
| 173 | const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); |
| 174 | Register SP = TLI->getStackPointerRegisterToSaveRestore(); |
| 175 | Register FP = TRI->getFrameRegister(*MF);
| 176 | Register Reg = Op.getReg(); |
| 177 | |
| 178 | return Reg && Reg != SP && Reg != FP; |
| 179 | } |
| 180 | |
| 181 | namespace { |
| 182 | |
| 183 | // Max out the number of statically allocated elements in DefinedRegsSet, as |
| 184 | // this prevents fallback to std::set::count() operations. |
| 185 | using DefinedRegsSet = SmallSet<Register, 32>; |
| 186 | |
| 187 | // The IDs in this set correspond to MachineLocs in VarLocs, as well as VarLocs |
| 188 | // that represent Entry Values; every VarLoc in the set will also appear |
| 189 | // exactly once at Location=0. |
| 190 | // As a result, each VarLoc may appear more than once in this "set", but each |
| 191 | // range corresponding to a Reg, SpillLoc, or EntryValue type will still be a |
| 192 | // "true" set (i.e. each VarLoc may appear only once), and the range Location=0 |
| 193 | // is the set of all VarLocs. |
| 194 | using VarLocSet = CoalescingBitVector<uint64_t>; |
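
// Illustrative sketch (hypothetical values): a VarLoc that describes part of a
// variable in register $rbx and another part in a spill slot receives one index
// in the $rbx bucket, one in the kSpillLocation bucket, and one in the
// universal bucket at Location=0, e.g.:
//
//   Location = 0 (universal)  : { ..., ID(VL), ... }  // every VarLoc, once
//   Location = $rbx           : { ..., ID(VL), ... }  // VarLocs using $rbx
//   Location = kSpillLocation : { ..., ID(VL), ... }  // VarLocs using spills
//
// The raw 64-bit value stored in the CoalescingBitVector for each entry is
// (Location << 32) | Index; see LocIndex below.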
| 195 | |
| 196 | /// A type-checked pair of {Register Location (or 0), Index}, used to index |
| 197 | /// into a \ref VarLocMap. This can be efficiently converted to a 64-bit int |
| 198 | /// for insertion into a \ref VarLocSet, and efficiently converted back. The |
| 199 | /// type-checker helps ensure that the conversions aren't lossy. |
| 200 | /// |
| 201 | /// Why encode a location /into/ the VarLocMap index? This makes it possible |
| 202 | /// to find the open VarLocs killed by a register def very quickly. This is a |
| 203 | /// performance-critical operation for LiveDebugValues. |
| 204 | struct LocIndex { |
| 205 | using u32_location_t = uint32_t; |
| 206 | using u32_index_t = uint32_t; |
| 207 | |
| 208 | u32_location_t Location; // Physical registers live in the range [1;2^30) (see |
| 209 | // \ref MCRegister), so we have plenty of range left |
| 210 | // here to encode non-register locations. |
| 211 | u32_index_t Index; |
| 212 | |
| 213 | /// The location that has an entry for every VarLoc in the map. |
| 214 | static constexpr u32_location_t kUniversalLocation = 0; |
| 215 | |
| 216 | /// The first location that is reserved for VarLocs with locations of kind |
| 217 | /// RegisterKind. |
| 218 | static constexpr u32_location_t kFirstRegLocation = 1; |
| 219 | |
| 220 | /// The first location greater than 0 that is not reserved for VarLocs with |
| 221 | /// locations of kind RegisterKind. |
| 222 | static constexpr u32_location_t kFirstInvalidRegLocation = 1 << 30; |
| 223 | |
| 224 | /// A special location reserved for VarLocs with locations of kind |
| 225 | /// SpillLocKind. |
| 226 | static constexpr u32_location_t kSpillLocation = kFirstInvalidRegLocation; |
| 227 | |
| 228 | /// A special location reserved for VarLocs of kind EntryValueBackupKind and |
| 229 | /// EntryValueCopyBackupKind. |
| 230 | static constexpr u32_location_t kEntryValueBackupLocation = |
| 231 | kFirstInvalidRegLocation + 1; |
| 232 | |
| 233 | /// A special location reserved for VarLocs with locations of kind |
| 234 | /// WasmLocKind. |
| 235 | /// TODO Placing all Wasm target index locations in this single kWasmLocation |
| 236 | /// may cause slowdown in compilation time in very large functions. Consider |
| 237 | /// giving each target index/offset pair its own u32_location_t if this
| 238 | /// becomes a problem. |
| 239 | static constexpr u32_location_t kWasmLocation = kFirstInvalidRegLocation + 2; |
| 240 | |
| 241 | /// The first location that is reserved for VarLocs with locations of kind |
| 242 | /// VirtualRegisterKind. |
| 243 | static constexpr u32_location_t kFirstVirtualRegLocation = 1 << 31; |
| 244 | |
| 245 | LocIndex(u32_location_t Location, u32_index_t Index) |
| 246 | : Location(Location), Index(Index) {} |
| 247 | |
| 248 | uint64_t getAsRawInteger() const { |
| 249 | return (static_cast<uint64_t>(Location) << 32) | Index; |
| 250 | } |
| 251 | |
| 252 | template<typename IntT> static LocIndex fromRawInteger(IntT ID) { |
| 253 | static_assert(std::is_unsigned_v<IntT> && sizeof(ID) == sizeof(uint64_t), |
| 254 | "Cannot convert raw integer to LocIndex" ); |
| 255 | return {static_cast<u32_location_t>(ID >> 32), |
| 256 | static_cast<u32_index_t>(ID)}; |
| 257 | } |
| 258 | |
| 259 | /// Get the start of the interval reserved for VarLocs of kind RegisterKind |
| 260 | /// which reside in \p Reg. The end is at rawIndexForReg(Reg+1)-1. |
| 261 | static uint64_t rawIndexForReg(Register Reg) { |
| 262 | return LocIndex(Reg, 0).getAsRawInteger(); |
| 263 | } |
| 264 | |
| 265 | /// Return a range covering all set indices in the interval reserved for |
| 266 | /// \p Location in \p Set. |
| 267 | static auto indexRangeForLocation(const VarLocSet &Set, |
| 268 | u32_location_t Location) { |
| 269 | uint64_t Start = LocIndex(Location, 0).getAsRawInteger(); |
| 270 | uint64_t End = LocIndex(Location + 1, 0).getAsRawInteger(); |
| 271 | return Set.half_open_range(Start, End); |
| 272 | } |
| 273 | }; |
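
// A minimal sketch (illustrative only, not the pass's actual transfer code) of
// why the location is encoded into the index: every open VarLoc that uses a
// clobbered register can be found with a single range query.
//
//   void collectKilledBy(Register ClobberedReg, const VarLocSet &OpenVarLocs) {
//     for (uint64_t RawID :
//          LocIndex::indexRangeForLocation(OpenVarLocs, ClobberedReg)) {
//       LocIndex Idx = LocIndex::fromRawInteger(RawID);
//       // Idx.Location == ClobberedReg; Idx.Index identifies the VarLoc in
//       // the VarLocMap, so the open range for that VarLoc can be closed.
//     }
//   }
//
// `collectKilledBy`, `ClobberedReg`, and `OpenVarLocs` are hypothetical names;
// the real clobber handling is in transferRegisterDef further down.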
| 274 | |
| 275 | // Simple Set for storing all the VarLoc Indices at a Location bucket. |
| 276 | using VarLocsInRange = SmallSet<LocIndex::u32_index_t, 32>; |
| 277 | // Vector of all `LocIndex`s for a given VarLoc; the same Location should not |
| 278 | // appear in any two of these, as each VarLoc appears at most once in any |
| 279 | // Location bucket. |
| 280 | using LocIndices = SmallVector<LocIndex, 2>; |
| 281 | |
| 282 | class VarLocBasedLDV : public LDVImpl { |
| 283 | private: |
| 284 | const TargetRegisterInfo *TRI; |
| 285 | const TargetInstrInfo *TII; |
| 286 | const TargetFrameLowering *TFI; |
| 287 | bool ShouldEmitDebugEntryValues; |
| 288 | BitVector CalleeSavedRegs; |
| 289 | LexicalScopes LS; |
| 290 | VarLocSet::Allocator Alloc; |
| 291 | |
| 292 | const MachineInstr *LastNonDbgMI; |
| 293 | |
| 294 | enum struct TransferKind { TransferCopy, TransferSpill, TransferRestore }; |
| 295 | |
| 296 | using FragmentInfo = DIExpression::FragmentInfo; |
| 297 | using OptFragmentInfo = std::optional<DIExpression::FragmentInfo>; |
| 298 | |
| 299 | /// A pair of debug variable and value location. |
| 300 | struct VarLoc { |
| 301 | // The location at which a spilled variable resides. It consists of a |
| 302 | // register and an offset. |
| 303 | struct SpillLoc { |
| 304 | unsigned SpillBase; |
| 305 | StackOffset SpillOffset; |
| 306 | bool operator==(const SpillLoc &Other) const { |
| 307 | return SpillBase == Other.SpillBase && SpillOffset == Other.SpillOffset; |
| 308 | } |
| 309 | bool operator!=(const SpillLoc &Other) const { |
| 310 | return !(*this == Other); |
| 311 | } |
| 312 | }; |
| 313 | |
| 314 | // Target indices used for wasm-specific locations. |
| 315 | struct WasmLoc { |
| 316 | // One of TargetIndex values defined in WebAssembly.h. We deal with |
| 317 | // local-related TargetIndex in this analysis (TI_LOCAL and |
| 318 | // TI_LOCAL_INDIRECT). Stack operands (TI_OPERAND_STACK) will be handled |
| 319 | // separately by the WebAssemblyDebugFixup pass, and we don't associate debug
| 320 | // info with values in global operands (TI_GLOBAL_RELOC) at the moment. |
| 321 | int Index; |
| 322 | int64_t Offset; |
| 323 | bool operator==(const WasmLoc &Other) const { |
| 324 | return Index == Other.Index && Offset == Other.Offset; |
| 325 | } |
| 326 | bool operator!=(const WasmLoc &Other) const { return !(*this == Other); } |
| 327 | }; |
| 328 | |
| 329 | /// Identity of the variable at this location. |
| 330 | const DebugVariable Var; |
| 331 | |
| 332 | /// The expression applied to this location. |
| 333 | const DIExpression *Expr; |
| 334 | |
| 335 | /// DBG_VALUE to clone var/expr information from if this location |
| 336 | /// is moved. |
| 337 | const MachineInstr &MI; |
| 338 | |
| 339 | enum class MachineLocKind { |
| 340 | InvalidKind = 0, |
| 341 | RegisterKind, |
| 342 | SpillLocKind, |
| 343 | ImmediateKind, |
| 344 | WasmLocKind |
| 345 | }; |
| 346 | |
| 347 | enum class EntryValueLocKind { |
| 348 | NonEntryValueKind = 0, |
| 349 | EntryValueKind, |
| 350 | EntryValueBackupKind, |
| 351 | EntryValueCopyBackupKind |
| 352 | } EVKind = EntryValueLocKind::NonEntryValueKind; |
| 353 | |
| 354 | /// The value location. Stored separately to avoid repeatedly |
| 355 | /// extracting it from MI. |
| 356 | union MachineLocValue { |
| 357 | uint64_t RegNo; |
| 358 | SpillLoc SpillLocation; |
| 359 | uint64_t Hash; |
| 360 | int64_t Immediate; |
| 361 | const ConstantFP *FPImm; |
| 362 | const ConstantInt *CImm; |
| 363 | WasmLoc WasmLocation; |
| 364 | MachineLocValue() : Hash(0) {} |
| 365 | }; |
| 366 | |
| 367 | /// A single machine location; its Kind is either a register, spill |
| 368 | /// location, or immediate value. |
| 369 | /// If the VarLoc is not a NonEntryValueKind, then it will use only a |
| 370 | /// single MachineLoc of RegisterKind. |
| 371 | struct MachineLoc { |
| 372 | MachineLocKind Kind; |
| 373 | MachineLocValue Value; |
| 374 | bool operator==(const MachineLoc &Other) const { |
| 375 | if (Kind != Other.Kind) |
| 376 | return false; |
| 377 | switch (Kind) { |
| 378 | case MachineLocKind::SpillLocKind: |
| 379 | return Value.SpillLocation == Other.Value.SpillLocation; |
| 380 | case MachineLocKind::WasmLocKind: |
| 381 | return Value.WasmLocation == Other.Value.WasmLocation; |
| 382 | case MachineLocKind::RegisterKind: |
| 383 | case MachineLocKind::ImmediateKind: |
| 384 | return Value.Hash == Other.Value.Hash; |
| 385 | default: |
| 386 | llvm_unreachable("Invalid kind" ); |
| 387 | } |
| 388 | } |
| 389 | bool operator<(const MachineLoc &Other) const { |
| 390 | switch (Kind) { |
| 391 | case MachineLocKind::SpillLocKind: |
| 392 | return std::make_tuple(
| 393 | Kind, Value.SpillLocation.SpillBase,
| 394 | Value.SpillLocation.SpillOffset.getFixed(),
| 395 | Value.SpillLocation.SpillOffset.getScalable()) <
| 396 | std::make_tuple(
| 397 | Other.Kind, Other.Value.SpillLocation.SpillBase,
| 398 | Other.Value.SpillLocation.SpillOffset.getFixed(),
| 399 | Other.Value.SpillLocation.SpillOffset.getScalable());
| 400 | case MachineLocKind::WasmLocKind:
| 401 | return std::make_tuple(Kind, Value.WasmLocation.Index,
| 402 | Value.WasmLocation.Offset) <
| 403 | std::make_tuple(Other.Kind, Other.Value.WasmLocation.Index,
| 404 | Other.Value.WasmLocation.Offset);
| 405 | case MachineLocKind::RegisterKind:
| 406 | case MachineLocKind::ImmediateKind:
| 407 | return std::tie(Kind, Value.Hash) <
| 408 | std::tie(Other.Kind, Other.Value.Hash);
| 409 | default:
| 410 | llvm_unreachable("Invalid kind");
| 411 | } |
| 412 | } |
| 413 | }; |
| 414 | |
| 415 | /// The set of machine locations used to determine the variable's value, in |
| 416 | /// conjunction with Expr. Initially populated with MI's debug operands, |
| 417 | /// but may be transformed independently afterwards. |
| 418 | SmallVector<MachineLoc, 8> Locs; |
| 419 | /// Used to map the index of each location in Locs back to the index of its |
| 420 | /// original debug operand in MI. Used when multiple location operands are |
| 421 | /// coalesced and the original MI's operands need to be accessed while |
| 422 | /// emitting a debug value. |
| 423 | SmallVector<unsigned, 8> OrigLocMap; |
| 424 | |
| 425 | VarLoc(const MachineInstr &MI) |
| 426 | : Var(MI.getDebugVariable(), MI.getDebugExpression(), |
| 427 | MI.getDebugLoc()->getInlinedAt()), |
| 428 | Expr(MI.getDebugExpression()), MI(MI) { |
| 429 | assert(MI.isDebugValue() && "not a DBG_VALUE");
| 430 | assert((MI.isDebugValueList() || MI.getNumOperands() == 4) &&
| 431 | "malformed DBG_VALUE");
| 432 | for (const MachineOperand &Op : MI.debug_operands()) {
| 433 | MachineLoc ML = GetLocForOp(Op);
| 434 | auto It = find(Locs, ML);
| 435 | if (It == Locs.end()) {
| 436 | Locs.push_back(ML);
| 437 | OrigLocMap.push_back(MI.getDebugOperandIndex(&Op));
| 438 | } else {
| 439 | // ML duplicates an element in Locs; replace references to Op
| 440 | // with references to the duplicating element.
| 441 | unsigned OpIdx = Locs.size();
| 442 | unsigned DuplicatingIdx = std::distance(Locs.begin(), It);
| 443 | Expr = DIExpression::replaceArg(Expr, OpIdx, DuplicatingIdx);
| 444 | } |
| 445 | } |
| 446 | |
| 447 | // We create the debug entry values from the factory functions rather |
| 448 | // than from this ctor. |
| 449 | assert(EVKind != EntryValueLocKind::EntryValueKind && |
| 450 | !isEntryBackupLoc()); |
| 451 | } |
| 452 | |
| 453 | static MachineLoc GetLocForOp(const MachineOperand &Op) { |
| 454 | MachineLocKind Kind; |
| 455 | MachineLocValue Loc; |
| 456 | if (Op.isReg()) { |
| 457 | Kind = MachineLocKind::RegisterKind; |
| 458 | Loc.RegNo = Op.getReg(); |
| 459 | } else if (Op.isImm()) { |
| 460 | Kind = MachineLocKind::ImmediateKind; |
| 461 | Loc.Immediate = Op.getImm(); |
| 462 | } else if (Op.isFPImm()) { |
| 463 | Kind = MachineLocKind::ImmediateKind; |
| 464 | Loc.FPImm = Op.getFPImm(); |
| 465 | } else if (Op.isCImm()) { |
| 466 | Kind = MachineLocKind::ImmediateKind; |
| 467 | Loc.CImm = Op.getCImm(); |
| 468 | } else if (Op.isTargetIndex()) { |
| 469 | Kind = MachineLocKind::WasmLocKind; |
| 470 | Loc.WasmLocation = {Op.getIndex(), Op.getOffset()};
| 471 | } else
| 472 | llvm_unreachable("Invalid Op kind for MachineLoc.");
| 473 | return {Kind, Loc};
| 474 | } |
| 475 | |
| 476 | /// Take the variable and machine-location in DBG_VALUE MI, and build an |
| 477 | /// entry location using the given expression. |
| 478 | static VarLoc CreateEntryLoc(const MachineInstr &MI, |
| 479 | const DIExpression *EntryExpr, Register Reg) { |
| 480 | VarLoc VL(MI); |
| 481 | assert(VL.Locs.size() == 1 && |
| 482 | VL.Locs[0].Kind == MachineLocKind::RegisterKind); |
| 483 | VL.EVKind = EntryValueLocKind::EntryValueKind; |
| 484 | VL.Expr = EntryExpr; |
| 485 | VL.Locs[0].Value.RegNo = Reg; |
| 486 | return VL; |
| 487 | } |
| 488 | |
| 489 | /// Take the variable and machine-location from the DBG_VALUE (from the |
| 490 | /// function entry), and build an entry value backup location. The backup |
| 491 | /// location will turn into the normal location if the backup is valid at |
| 492 | /// the time of the primary location clobbering. |
| 493 | static VarLoc CreateEntryBackupLoc(const MachineInstr &MI, |
| 494 | const DIExpression *EntryExpr) { |
| 495 | VarLoc VL(MI); |
| 496 | assert(VL.Locs.size() == 1 && |
| 497 | VL.Locs[0].Kind == MachineLocKind::RegisterKind); |
| 498 | VL.EVKind = EntryValueLocKind::EntryValueBackupKind; |
| 499 | VL.Expr = EntryExpr; |
| 500 | return VL; |
| 501 | } |
| 502 | |
| 503 | /// Take the variable and machine-location from the DBG_VALUE (from the |
| 504 | /// function entry), and build a copy of an entry value backup location by |
| 505 | /// setting the register location to NewReg. |
| 506 | static VarLoc CreateEntryCopyBackupLoc(const MachineInstr &MI, |
| 507 | const DIExpression *EntryExpr, |
| 508 | Register NewReg) { |
| 509 | VarLoc VL(MI); |
| 510 | assert(VL.Locs.size() == 1 && |
| 511 | VL.Locs[0].Kind == MachineLocKind::RegisterKind); |
| 512 | VL.EVKind = EntryValueLocKind::EntryValueCopyBackupKind; |
| 513 | VL.Expr = EntryExpr; |
| 514 | VL.Locs[0].Value.RegNo = NewReg; |
| 515 | return VL; |
| 516 | } |
| 517 | |
| 518 | /// Copy the register location in DBG_VALUE MI, updating the register to |
| 519 | /// be NewReg. |
| 520 | static VarLoc CreateCopyLoc(const VarLoc &OldVL, const MachineLoc &OldML, |
| 521 | Register NewReg) { |
| 522 | VarLoc VL = OldVL; |
| 523 | for (MachineLoc &ML : VL.Locs) |
| 524 | if (ML == OldML) { |
| 525 | ML.Kind = MachineLocKind::RegisterKind; |
| 526 | ML.Value.RegNo = NewReg; |
| 527 | return VL; |
| 528 | } |
| 529 | llvm_unreachable("Should have found OldML in new VarLoc." ); |
| 530 | } |
| 531 | |
| 532 | /// Take the variable described by DBG_VALUE* MI, and create a VarLoc |
| 533 | /// locating it in the specified spill location. |
| 534 | static VarLoc CreateSpillLoc(const VarLoc &OldVL, const MachineLoc &OldML, |
| 535 | unsigned SpillBase, StackOffset SpillOffset) { |
| 536 | VarLoc VL = OldVL; |
| 537 | for (MachineLoc &ML : VL.Locs) |
| 538 | if (ML == OldML) { |
| 539 | ML.Kind = MachineLocKind::SpillLocKind; |
| 540 | ML.Value.SpillLocation = {SpillBase, SpillOffset};
| 541 | return VL;
| 542 | }
| 543 | llvm_unreachable("Should have found OldML in new VarLoc.");
| 544 | } |
| 545 | |
| 546 | /// Create a DBG_VALUE representing this VarLoc in the given function. |
| 547 | /// Copies variable-specific information such as DILocalVariable and |
| 548 | /// inlining information from the original DBG_VALUE instruction, which may |
| 549 | /// have been several transfers ago. |
| 550 | MachineInstr *BuildDbgValue(MachineFunction &MF) const { |
| 551 | assert(!isEntryBackupLoc() &&
| 552 | "Tried to produce DBG_VALUE for backup VarLoc");
| 553 | const DebugLoc &DbgLoc = MI.getDebugLoc(); |
| 554 | bool Indirect = MI.isIndirectDebugValue(); |
| 555 | const auto &IID = MI.getDesc(); |
| 556 | const DILocalVariable *Var = MI.getDebugVariable(); |
| 557 | NumInserted++; |
| 558 | |
| 559 | const DIExpression *DIExpr = Expr; |
| 560 | SmallVector<MachineOperand, 8> MOs; |
| 561 | for (unsigned I = 0, E = Locs.size(); I < E; ++I) { |
| 562 | MachineLocKind LocKind = Locs[I].Kind; |
| 563 | MachineLocValue Loc = Locs[I].Value; |
| 564 | const MachineOperand &Orig = MI.getDebugOperand(OrigLocMap[I]);
| 565 | switch (LocKind) { |
| 566 | case MachineLocKind::RegisterKind: |
| 567 | // An entry value is a register location -- but with an updated |
| 568 | // expression. The register location of such DBG_VALUE is always the |
| 569 | // one from the entry DBG_VALUE, it does not matter if the entry value |
| 570 | // was copied in to another register due to some optimizations. |
| 571 | // Non-entry value register locations are like the source |
| 572 | // DBG_VALUE, but with the register number from this VarLoc. |
| 573 | MOs.push_back(MachineOperand::CreateReg(
| 574 | EVKind == EntryValueLocKind::EntryValueKind ? Orig.getReg()
| 575 | : Register(Loc.RegNo),
| 576 | false));
| 577 | break;
| 578 | case MachineLocKind::SpillLocKind: {
| 579 | // Spills are indirect DBG_VALUEs, with a base register and offset.
| 580 | // Use the original DBG_VALUE's expression to build the spill location
| 581 | // on top of. FIXME: spill locations created before this pass runs
| 582 | // are not recognized, and not handled here.
| 583 | unsigned Base = Loc.SpillLocation.SpillBase;
| 584 | auto *TRI = MF.getSubtarget().getRegisterInfo();
| 585 | if (MI.isNonListDebugValue()) {
| 586 | auto Deref = Indirect ? DIExpression::DerefAfter : 0;
| 587 | DIExpr = TRI->prependOffsetExpression(
| 588 | DIExpr, DIExpression::ApplyOffset | Deref,
| 589 | Loc.SpillLocation.SpillOffset);
| 590 | Indirect = true;
| 591 | } else {
| 592 | SmallVector<uint64_t, 4> Ops;
| 593 | TRI->getOffsetOpcodes(Loc.SpillLocation.SpillOffset, Ops);
| 594 | Ops.push_back(dwarf::DW_OP_deref);
| 595 | DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, I);
| 596 | }
| 597 | MOs.push_back(MachineOperand::CreateReg(Base, false));
| 598 | break; |
| 599 | } |
| 600 | case MachineLocKind::ImmediateKind: {
| 601 | MOs.push_back(Orig);
| 602 | break;
| 603 | }
| 604 | case MachineLocKind::WasmLocKind: {
| 605 | MOs.push_back(Orig);
| 606 | break;
| 607 | }
| 608 | case MachineLocKind::InvalidKind:
| 609 | llvm_unreachable("Tried to produce DBG_VALUE for invalid VarLoc");
| 610 | }
| 611 | }
| 612 | return BuildMI(MF, DbgLoc, IID, Indirect, MOs, Var, DIExpr);
| 613 | } |
| 614 | |
| 615 | /// Is the Loc field a constant or constant object? |
| 616 | bool isConstant(MachineLocKind Kind) const { |
| 617 | return Kind == MachineLocKind::ImmediateKind; |
| 618 | } |
| 619 | |
| 620 | /// Check if the Loc field is an entry backup location. |
| 621 | bool isEntryBackupLoc() const { |
| 622 | return EVKind == EntryValueLocKind::EntryValueBackupKind || |
| 623 | EVKind == EntryValueLocKind::EntryValueCopyBackupKind; |
| 624 | } |
| 625 | |
| 626 | /// If this variable is described by register \p Reg holding the entry |
| 627 | /// value, return true. |
| 628 | bool isEntryValueBackupReg(Register Reg) const { |
| 629 | return EVKind == EntryValueLocKind::EntryValueBackupKind && usesReg(Reg); |
| 630 | } |
| 631 | |
| 632 | /// If this variable is described by register \p Reg holding a copy of the |
| 633 | /// entry value, return true. |
| 634 | bool isEntryValueCopyBackupReg(Register Reg) const { |
| 635 | return EVKind == EntryValueLocKind::EntryValueCopyBackupKind && |
| 636 | usesReg(Reg); |
| 637 | } |
| 638 | |
| 639 | /// If this variable is described in whole or part by \p Reg, return true. |
| 640 | bool usesReg(Register Reg) const { |
| 641 | MachineLoc RegML; |
| 642 | RegML.Kind = MachineLocKind::RegisterKind; |
| 643 | RegML.Value.RegNo = Reg; |
| 644 | return is_contained(Locs, RegML);
| 645 | } |
| 646 | |
| 647 | /// Return the index of the location in Locs that describes this variable using \p Reg.
| 648 | unsigned getRegIdx(Register Reg) const { |
| 649 | for (unsigned Idx = 0; Idx < Locs.size(); ++Idx) |
| 650 | if (Locs[Idx].Kind == MachineLocKind::RegisterKind && |
| 651 | Register{static_cast<unsigned>(Locs[Idx].Value.RegNo)} == Reg) |
| 652 | return Idx; |
| 653 | llvm_unreachable("Could not find given Reg in Locs" ); |
| 654 | } |
| 655 | |
| 656 | /// If this variable is described in whole or part by 1 or more registers, |
| 657 | /// add each of them to \p Regs and return true. |
| 658 | bool getDescribingRegs(SmallVectorImpl<uint32_t> &Regs) const { |
| 659 | bool AnyRegs = false; |
| 660 | for (const auto &Loc : Locs) |
| 661 | if (Loc.Kind == MachineLocKind::RegisterKind) { |
| 662 | Regs.push_back(Loc.Value.RegNo);
| 663 | AnyRegs = true; |
| 664 | } |
| 665 | return AnyRegs; |
| 666 | } |
| 667 | |
| 668 | bool containsSpillLocs() const { |
| 669 | return any_of(Locs, [](VarLoc::MachineLoc ML) {
| 670 | return ML.Kind == VarLoc::MachineLocKind::SpillLocKind; |
| 671 | }); |
| 672 | } |
| 673 | |
| 674 | /// If this variable is described in whole or part by \p SpillLocation, |
| 675 | /// return true. |
| 676 | bool usesSpillLoc(SpillLoc SpillLocation) const { |
| 677 | MachineLoc SpillML; |
| 678 | SpillML.Kind = MachineLocKind::SpillLocKind; |
| 679 | SpillML.Value.SpillLocation = SpillLocation; |
| 680 | return is_contained(Locs, SpillML);
| 681 | } |
| 682 | |
| 683 | /// If this variable is described in whole or part by \p SpillLocation, |
| 684 | /// return the index of that location in Locs.
| 685 | unsigned getSpillLocIdx(SpillLoc SpillLocation) const { |
| 686 | for (unsigned Idx = 0; Idx < Locs.size(); ++Idx) |
| 687 | if (Locs[Idx].Kind == MachineLocKind::SpillLocKind && |
| 688 | Locs[Idx].Value.SpillLocation == SpillLocation) |
| 689 | return Idx; |
| 690 | llvm_unreachable("Could not find given SpillLoc in Locs" ); |
| 691 | } |
| 692 | |
| 693 | bool containsWasmLocs() const { |
| 694 | return any_of(Locs, [](VarLoc::MachineLoc ML) {
| 695 | return ML.Kind == VarLoc::MachineLocKind::WasmLocKind; |
| 696 | }); |
| 697 | } |
| 698 | |
| 699 | /// If this variable is described in whole or part by \p WasmLocation, |
| 700 | /// return true. |
| 701 | bool usesWasmLoc(WasmLoc WasmLocation) const { |
| 702 | MachineLoc WasmML; |
| 703 | WasmML.Kind = MachineLocKind::WasmLocKind; |
| 704 | WasmML.Value.WasmLocation = WasmLocation; |
| 705 | return is_contained(Locs, WasmML);
| 706 | } |
| 707 | |
| 708 | /// Determine whether the lexical scope of this value's debug location |
| 709 | /// dominates MBB. |
| 710 | bool dominates(LexicalScopes &LS, MachineBasicBlock &MBB) const { |
| 711 | return LS.dominates(MI.getDebugLoc().get(), &MBB);
| 712 | } |
| 713 | |
| 714 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
| 715 | // TRI and TII can be null. |
| 716 | void dump(const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, |
| 717 | raw_ostream &Out = dbgs()) const { |
| 718 | Out << "VarLoc(" ; |
| 719 | for (const MachineLoc &MLoc : Locs) { |
| 720 | if (Locs.begin() != &MLoc) |
| 721 | Out << ", " ; |
| 722 | switch (MLoc.Kind) { |
| 723 | case MachineLocKind::RegisterKind: |
| 724 | Out << printReg(MLoc.Value.RegNo, TRI); |
| 725 | break; |
| 726 | case MachineLocKind::SpillLocKind: |
| 727 | Out << printReg(MLoc.Value.SpillLocation.SpillBase, TRI); |
| 728 | Out << "[" << MLoc.Value.SpillLocation.SpillOffset.getFixed() << " + " |
| 729 | << MLoc.Value.SpillLocation.SpillOffset.getScalable() |
| 730 | << "x vscale" |
| 731 | << "]" ; |
| 732 | break; |
| 733 | case MachineLocKind::ImmediateKind: |
| 734 | Out << MLoc.Value.Immediate; |
| 735 | break; |
| 736 | case MachineLocKind::WasmLocKind: { |
| 737 | if (TII) { |
| 738 | auto Indices = TII->getSerializableTargetIndices(); |
| 739 | auto Found = |
| 740 | find_if(Indices, [&](const std::pair<int, const char *> &I) { |
| 741 | return I.first == MLoc.Value.WasmLocation.Index; |
| 742 | }); |
| 743 | assert(Found != Indices.end()); |
| 744 | Out << Found->second; |
| 745 | if (MLoc.Value.WasmLocation.Offset > 0) |
| 746 | Out << " + " << MLoc.Value.WasmLocation.Offset; |
| 747 | } else { |
| 748 | Out << "WasmLoc" ; |
| 749 | } |
| 750 | break; |
| 751 | } |
| 752 | case MachineLocKind::InvalidKind: |
| 753 | llvm_unreachable("Invalid VarLoc in dump method" ); |
| 754 | } |
| 755 | } |
| 756 | |
| 757 | Out << ", \"" << Var.getVariable()->getName() << "\", " << *Expr << ", " ; |
| 758 | if (Var.getInlinedAt()) |
| 759 | Out << "!" << Var.getInlinedAt()->getMetadataID() << ")\n" ; |
| 760 | else |
| 761 | Out << "(null))" ; |
| 762 | |
| 763 | if (isEntryBackupLoc()) |
| 764 | Out << " (backup loc)\n" ; |
| 765 | else |
| 766 | Out << "\n" ; |
| 767 | } |
| 768 | #endif |
| 769 | |
| 770 | bool operator==(const VarLoc &Other) const { |
| 771 | return std::tie(EVKind, Var, Expr, Locs) ==
| 772 | std::tie(Other.EVKind, Other.Var, Other.Expr, Other.Locs);
| 773 | } |
| 774 | |
| 775 | /// This operator guarantees that VarLocs are sorted by Variable first. |
| 776 | bool operator<(const VarLoc &Other) const { |
| 777 | return std::tie(Var, EVKind, Locs, Expr) <
| 778 | std::tie(Other.Var, Other.EVKind, Other.Locs, Other.Expr);
| 779 | } |
| 780 | }; |
| 781 | |
| 782 | #ifndef NDEBUG |
| 783 | using VarVec = SmallVector<VarLoc, 32>; |
| 784 | #endif |
| 785 | |
| 786 | /// VarLocMap is used for two things: |
| 787 | /// 1) Assigning LocIndices to a VarLoc. The LocIndices can be used to |
| 788 | /// virtually insert a VarLoc into a VarLocSet. |
| 789 | /// 2) Given a LocIndex, look up the unique associated VarLoc. |
| 790 | class VarLocMap { |
| 791 | /// Map a VarLoc to an index within the vector reserved for its location |
| 792 | /// within Loc2Vars. |
| 793 | std::map<VarLoc, LocIndices> Var2Indices; |
| 794 | |
| 795 | /// Map a location to a vector which holds VarLocs which live in that |
| 796 | /// location. |
| 797 | SmallDenseMap<LocIndex::u32_location_t, std::vector<VarLoc>> Loc2Vars; |
| 798 | |
| 799 | public: |
| 800 | /// Retrieve LocIndices for \p VL. |
| 801 | LocIndices insert(const VarLoc &VL) { |
| 802 | LocIndices &Indices = Var2Indices[VL]; |
| 803 | // If Indices is not empty, VL is already in the map. |
| 804 | if (!Indices.empty()) |
| 805 | return Indices; |
| 806 | SmallVector<LocIndex::u32_location_t, 4> Locations; |
| 807 | // LocIndices are determined by EVKind and MLs; each Register has a |
| 808 | // unique location, while all SpillLocs use a single bucket, and any EV |
| 809 | // VarLocs use only the Backup bucket or none at all (except the |
| 810 | // compulsory entry at the universal location index). LocIndices will |
| 811 | // always have an index at the universal location index as the last index. |
| 812 | if (VL.EVKind == VarLoc::EntryValueLocKind::NonEntryValueKind) { |
| 813 | VL.getDescribingRegs(Locations);
| 814 | assert(all_of(Locations,
| 815 | [](auto RegNo) {
| 816 | return (RegNo < LocIndex::kFirstInvalidRegLocation) ||
| 817 | (LocIndex::kFirstVirtualRegLocation <= RegNo);
| 818 | }) &&
| 819 | "Physical or virtual register out of range?");
| 820 | if (VL.containsSpillLocs())
| 821 | Locations.push_back(LocIndex::kSpillLocation);
| 822 | if (VL.containsWasmLocs())
| 823 | Locations.push_back(LocIndex::kWasmLocation);
| 824 | } else if (VL.EVKind != VarLoc::EntryValueLocKind::EntryValueKind) {
| 825 | LocIndex::u32_location_t Loc = LocIndex::kEntryValueBackupLocation;
| 826 | Locations.push_back(Loc);
| 827 | }
| 828 | Locations.push_back(LocIndex::kUniversalLocation);
| 829 | for (LocIndex::u32_location_t Location : Locations) {
| 830 | auto &Vars = Loc2Vars[Location];
| 831 | Indices.push_back(
| 832 | {Location, static_cast<LocIndex::u32_index_t>(Vars.size())});
| 833 | Vars.push_back(VL);
| 834 | } |
| 835 | return Indices; |
| 836 | } |
| 837 | |
| 838 | LocIndices getAllIndices(const VarLoc &VL) const { |
| 839 | auto IndIt = Var2Indices.find(VL);
| 840 | assert(IndIt != Var2Indices.end() && "VarLoc not tracked");
| 841 | return IndIt->second; |
| 842 | } |
| 843 | |
| 844 | /// Retrieve the unique VarLoc associated with \p ID. |
| 845 | const VarLoc &operator[](LocIndex ID) const { |
| 846 | auto LocIt = Loc2Vars.find(ID.Location);
| 847 | assert(LocIt != Loc2Vars.end() && "Location not tracked");
| 848 | return LocIt->second[ID.Index]; |
| 849 | } |
| 850 | }; |
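
// Usage sketch (hypothetical, for exposition only; `VL` is assumed to be some
// already-built VarLoc): inserting a VarLoc and mapping an index back to it
// round-trips through the map.
//
//   VarLocMap Map;
//   LocIndices IDs = Map.insert(VL);  // one LocIndex per relevant bucket
//   for (LocIndex ID : IDs)
//     assert(Map[ID] == VL);          // each index resolves to the same VarLoc
//   // IDs.back() is the entry in the universal bucket (Location == 0), which
//   // is what gets recorded when a VarLoc must appear exactly once in a set.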
| 851 | |
| 852 | using VarLocInMBB = |
| 853 | SmallDenseMap<const MachineBasicBlock *, std::unique_ptr<VarLocSet>>; |
| 854 | struct TransferDebugPair { |
| 855 | MachineInstr *TransferInst; ///< Instruction where this transfer occurs. |
| 856 | LocIndex LocationID; ///< Location number for the transfer dest. |
| 857 | }; |
| 858 | using TransferMap = SmallVector<TransferDebugPair, 4>; |
| 859 | // Types for recording Entry Var Locations emitted by a single MachineInstr, |
| 860 | // as well as recording MachineInstr which last defined a register. |
| 861 | using InstToEntryLocMap = std::multimap<const MachineInstr *, LocIndex>; |
| 862 | using RegDefToInstMap = DenseMap<Register, MachineInstr *>; |
| 863 | |
| 864 | // Types for recording sets of variable fragments that overlap. For a given |
| 865 | // local variable, we record all other fragments of that variable that could |
| 866 | // overlap it, to reduce search time. |
| 867 | using FragmentOfVar = |
| 868 | std::pair<const DILocalVariable *, DIExpression::FragmentInfo>; |
| 869 | using OverlapMap = |
| 870 | DenseMap<FragmentOfVar, SmallVector<DIExpression::FragmentInfo, 1>>; |
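
// For example (hypothetical fragments): for a 64-bit variable "x", the
// fragment covering bits [0, 32) and the fragment covering bits [16, 64)
// overlap, so the OverlapMap entry for either fragment of "x" lists the other;
// a DBG_VALUE for one of them must also terminate any open range for the other.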
| 871 | |
| 872 | // Helper while building OverlapMap, a map of all fragments seen for a given |
| 873 | // DILocalVariable. |
| 874 | using VarToFragments = |
| 875 | DenseMap<const DILocalVariable *, SmallSet<FragmentInfo, 4>>; |
| 876 | |
| 877 | /// Collects all VarLocs from \p CollectFrom. Each unique VarLoc is added |
| 878 | /// to \p Collected once, in order of insertion into \p VarLocIDs. |
| 879 | static void collectAllVarLocs(SmallVectorImpl<VarLoc> &Collected, |
| 880 | const VarLocSet &CollectFrom, |
| 881 | const VarLocMap &VarLocIDs); |
| 882 | |
| 883 | /// Get the registers which are used by VarLocs of kind RegisterKind tracked |
| 884 | /// by \p CollectFrom. |
| 885 | void getUsedRegs(const VarLocSet &CollectFrom, |
| 886 | SmallVectorImpl<Register> &UsedRegs) const; |
| 887 | |
| 888 | /// This holds the working set of currently open ranges. For fast |
| 889 | /// access, this is done both as a set of VarLocIDs, and a map of |
| 890 | /// DebugVariable to recent VarLocID. Note that a DBG_VALUE ends all |
| 891 | /// previous open ranges for the same variable. In addition, we keep |
| 892 | /// two different maps (Vars/EntryValuesBackupVars), so erase/insert |
| 893 | /// methods act differently depending on whether a VarLoc is primary |
| 894 | /// location or backup one. In the case the VarLoc is backup location |
| 895 | /// we will erase/insert from the EntryValuesBackupVars map, otherwise |
| 896 | /// we perform the operation on the Vars. |
| 897 | class OpenRangesSet { |
| 898 | VarLocSet::Allocator &Alloc; |
| 899 | VarLocSet VarLocs; |
| 900 | // Map the DebugVariable to recent primary location ID. |
| 901 | SmallDenseMap<DebugVariable, LocIndices, 8> Vars; |
| 902 | // Map the DebugVariable to recent backup location ID. |
| 903 | SmallDenseMap<DebugVariable, LocIndices, 8> EntryValuesBackupVars; |
| 904 | OverlapMap &OverlappingFragments; |
| 905 | |
| 906 | public: |
| 907 | OpenRangesSet(VarLocSet::Allocator &Alloc, OverlapMap &_OLapMap) |
| 908 | : Alloc(Alloc), VarLocs(Alloc), OverlappingFragments(_OLapMap) {} |
| 909 | |
| 910 | const VarLocSet &getVarLocs() const { return VarLocs; } |
| 911 | |
| 912 | // Fetches all VarLocs in \p VarLocIDs and inserts them into \p Collected. |
| 913 | // This method is needed to get every VarLoc once, as each VarLoc may have |
| 914 | // multiple indices in a VarLocMap (corresponding to each applicable |
| 915 | // location), but all VarLocs appear exactly once at the universal location |
| 916 | // index. |
| 917 | void getUniqueVarLocs(SmallVectorImpl<VarLoc> &Collected, |
| 918 | const VarLocMap &VarLocIDs) const { |
| 919 | collectAllVarLocs(Collected, VarLocs, VarLocIDs);
| 920 | } |
| 921 | |
| 922 | /// Terminate all open ranges for VL.Var by removing it from the set. |
| 923 | void erase(const VarLoc &VL); |
| 924 | |
| 925 | /// Terminate all open ranges listed as indices in \c KillSet with |
| 926 | /// \c Location by removing them from the set. |
| 927 | void erase(const VarLocsInRange &KillSet, const VarLocMap &VarLocIDs, |
| 928 | LocIndex::u32_location_t Location); |
| 929 | |
| 930 | /// Insert a new range into the set. |
| 931 | void insert(LocIndices VarLocIDs, const VarLoc &VL); |
| 932 | |
| 933 | /// Insert a set of ranges. |
| 934 | void insertFromLocSet(const VarLocSet &ToLoad, const VarLocMap &Map); |
| 935 | |
| 936 | std::optional<LocIndices> getEntryValueBackup(DebugVariable Var); |
| 937 | |
| 938 | /// Empty the set. |
| 939 | void clear() { |
| 940 | VarLocs.clear(); |
| 941 | Vars.clear(); |
| 942 | EntryValuesBackupVars.clear(); |
| 943 | } |
| 944 | |
| 945 | /// Return whether the set is empty or not. |
| 946 | bool empty() const { |
| 947 | assert(Vars.empty() == EntryValuesBackupVars.empty() && |
| 948 | Vars.empty() == VarLocs.empty() && |
| 949 | "open ranges are inconsistent" ); |
| 950 | return VarLocs.empty(); |
| 951 | } |
| 952 | |
| 953 | /// Get an empty range of VarLoc IDs. |
| 954 | auto getEmptyVarLocRange() const { |
| 955 | return iterator_range<VarLocSet::const_iterator>(getVarLocs().end(), |
| 956 | getVarLocs().end()); |
| 957 | } |
| 958 | |
| 959 | /// Get all set IDs for VarLocs with MLs of kind RegisterKind in \p Reg. |
| 960 | auto getRegisterVarLocs(Register Reg) const { |
| 961 | return LocIndex::indexRangeForLocation(getVarLocs(), Reg);
| 962 | } |
| 963 | |
| 964 | /// Get all set IDs for VarLocs with MLs of kind SpillLocKind. |
| 965 | auto getSpillVarLocs() const { |
| 966 | return LocIndex::indexRangeForLocation(getVarLocs(),
| 967 | LocIndex::kSpillLocation);
| 968 | } |
| 969 | |
| 970 | /// Get all set IDs for VarLocs of EVKind EntryValueBackupKind or |
| 971 | /// EntryValueCopyBackupKind. |
| 972 | auto getEntryValueBackupVarLocs() const { |
| 973 | return LocIndex::indexRangeForLocation(
| 974 | getVarLocs(), LocIndex::kEntryValueBackupLocation);
| 975 | } |
| 976 | |
| 977 | /// Get all set IDs for VarLocs with MLs of kind WasmLocKind. |
| 978 | auto getWasmVarLocs() const { |
| 979 | return LocIndex::indexRangeForLocation(getVarLocs(),
| 980 | LocIndex::kWasmLocation);
| 981 | } |
| 982 | }; |
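
// Rough usage sketch (illustrative, not the pass's actual transfer function):
// when a DBG_VALUE opens a new range, the VarLoc is registered in the
// VarLocMap and then inserted into the open set, so it is indexed both per
// location and in the universal bucket. `DbgValueMI` is a hypothetical
// DBG_VALUE instruction; `VarLocIDs` and `OpenRanges` stand for the pass's
// VarLocMap and OpenRangesSet.
//
//   VarLoc VL(DbgValueMI);
//   LocIndices IDs = VarLocIDs.insert(VL);
//   OpenRanges.insert(IDs, VL);
//
// A later clobber or re-specification calls one of the erase overloads above
// to close the range again.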
| 983 | |
| 984 | /// Collect all VarLoc IDs from \p CollectFrom for VarLocs with MLs of kind |
| 985 | /// RegisterKind which are located in any reg in \p Regs. The IDs for each |
| 986 | /// VarLoc correspond to entries in the universal location bucket, which every |
| 987 | /// VarLoc has exactly 1 entry for. Insert collected IDs into \p Collected. |
| 988 | static void collectIDsForRegs(VarLocsInRange &Collected, |
| 989 | const DefinedRegsSet &Regs, |
| 990 | const VarLocSet &CollectFrom, |
| 991 | const VarLocMap &VarLocIDs); |
| 992 | |
| 993 | VarLocSet &getVarLocsInMBB(const MachineBasicBlock *MBB, VarLocInMBB &Locs) { |
| 994 | std::unique_ptr<VarLocSet> &VLS = Locs[MBB]; |
| 995 | if (!VLS) |
| 996 | VLS = std::make_unique<VarLocSet>(Alloc);
| 997 | return *VLS; |
| 998 | } |
| 999 | |
| 1000 | const VarLocSet &getVarLocsInMBB(const MachineBasicBlock *MBB, |
| 1001 | const VarLocInMBB &Locs) const { |
| 1002 | auto It = Locs.find(MBB);
| 1003 | assert(It != Locs.end() && "MBB not in map");
| 1004 | return *It->second; |
| 1005 | } |
| 1006 | |
| 1007 | /// Tests whether this instruction is a spill to a stack location. |
| 1008 | bool isSpillInstruction(const MachineInstr &MI, MachineFunction *MF); |
| 1009 | |
| 1010 | /// Decide if @MI is a spill instruction and return true if it is. We use 2 |
| 1011 | /// criteria to make this decision: |
| 1012 | /// - Is this instruction a store to a spill slot? |
| 1013 | /// - Is there a register operand that is both used and killed? |
| 1014 | /// TODO: Store optimization can fold spills into other stores (including |
| 1015 | /// other spills). We do not handle this yet (more than one memory operand). |
| 1016 | bool isLocationSpill(const MachineInstr &MI, MachineFunction *MF, |
| 1017 | Register &Reg); |
| 1018 | |
| 1019 | /// Returns true if the given machine instruction is a debug value which we |
| 1020 | /// can emit entry values for. |
| 1021 | /// |
| 1022 | /// Currently, we generate debug entry values only for parameters that are |
| 1023 | /// unmodified throughout the function and located in a register. |
| 1024 | bool isEntryValueCandidate(const MachineInstr &MI, |
| 1025 | const DefinedRegsSet &Regs) const; |
| 1026 | |
| 1027 | /// If a given instruction is identified as a restore from a spill location,
| 1028 | /// return that spill location and set \p Reg to the restored register.
| 1029 | std::optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI, |
| 1030 | MachineFunction *MF, |
| 1031 | Register &Reg); |
| 1032 | /// Given a spill instruction, extract the register and offset used to |
| 1033 | /// address the spill location in a target independent way. |
| 1034 | VarLoc::SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI); |
| 1035 | void insertTransferDebugPair(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1036 | TransferMap &Transfers, VarLocMap &VarLocIDs, |
| 1037 | LocIndex OldVarID, TransferKind Kind, |
| 1038 | const VarLoc::MachineLoc &OldLoc, |
| 1039 | Register NewReg = Register()); |
| 1040 | |
| 1041 | void transferDebugValue(const MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1042 | VarLocMap &VarLocIDs, |
| 1043 | InstToEntryLocMap &EntryValTransfers, |
| 1044 | RegDefToInstMap &RegSetInstrs); |
| 1045 | void transferSpillOrRestoreInst(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1046 | VarLocMap &VarLocIDs, TransferMap &Transfers); |
| 1047 | void cleanupEntryValueTransfers(const MachineInstr *MI, |
| 1048 | OpenRangesSet &OpenRanges, |
| 1049 | VarLocMap &VarLocIDs, const VarLoc &EntryVL, |
| 1050 | InstToEntryLocMap &EntryValTransfers); |
| 1051 | void removeEntryValue(const MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1052 | VarLocMap &VarLocIDs, const VarLoc &EntryVL, |
| 1053 | InstToEntryLocMap &EntryValTransfers, |
| 1054 | RegDefToInstMap &RegSetInstrs); |
| 1055 | void emitEntryValues(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1056 | VarLocMap &VarLocIDs, |
| 1057 | InstToEntryLocMap &EntryValTransfers, |
| 1058 | VarLocsInRange &KillSet); |
| 1059 | void recordEntryValue(const MachineInstr &MI, |
| 1060 | const DefinedRegsSet &DefinedRegs, |
| 1061 | OpenRangesSet &OpenRanges, VarLocMap &VarLocIDs); |
| 1062 | void transferRegisterCopy(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1063 | VarLocMap &VarLocIDs, TransferMap &Transfers); |
| 1064 | void transferRegisterDef(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1065 | VarLocMap &VarLocIDs, |
| 1066 | InstToEntryLocMap &EntryValTransfers, |
| 1067 | RegDefToInstMap &RegSetInstrs); |
| 1068 | void transferWasmDef(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1069 | VarLocMap &VarLocIDs); |
| 1070 | bool transferTerminator(MachineBasicBlock *MBB, OpenRangesSet &OpenRanges, |
| 1071 | VarLocInMBB &OutLocs, const VarLocMap &VarLocIDs); |
| 1072 | |
| 1073 | void process(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 1074 | VarLocMap &VarLocIDs, TransferMap &Transfers, |
| 1075 | InstToEntryLocMap &EntryValTransfers, |
| 1076 | RegDefToInstMap &RegSetInstrs); |
| 1077 | |
| 1078 | void accumulateFragmentMap(MachineInstr &MI, VarToFragments &SeenFragments, |
| 1079 | OverlapMap &OLapMap); |
| 1080 | |
| 1081 | bool join(MachineBasicBlock &MBB, VarLocInMBB &OutLocs, VarLocInMBB &InLocs, |
| 1082 | const VarLocMap &VarLocIDs, |
| 1083 | SmallPtrSet<const MachineBasicBlock *, 16> &Visited, |
| 1084 | SmallPtrSetImpl<const MachineBasicBlock *> &ArtificialBlocks); |
| 1085 | |
| 1086 | /// Create DBG_VALUE insts for inlocs that have been propagated but |
| 1087 | /// had their instruction creation deferred. |
| 1088 | void flushPendingLocs(VarLocInMBB &PendingInLocs, VarLocMap &VarLocIDs); |
| 1089 | |
| 1090 | bool ExtendRanges(MachineFunction &MF, MachineDominatorTree *DomTree, |
| 1091 | bool ShouldEmitDebugEntryValues, unsigned InputBBLimit, |
| 1092 | unsigned InputDbgValLimit) override; |
| 1093 | |
| 1094 | public: |
| 1095 | /// Default construct and initialize the pass. |
| 1096 | VarLocBasedLDV(); |
| 1097 | |
| 1098 | ~VarLocBasedLDV() override; |
| 1099 | |
| 1100 | /// Print to ostream with a message. |
| 1101 | void printVarLocInMBB(const MachineFunction &MF, const VarLocInMBB &V, |
| 1102 | const VarLocMap &VarLocIDs, const char *msg, |
| 1103 | raw_ostream &Out) const; |
| 1104 | }; |
| 1105 | |
| 1106 | } // end anonymous namespace |
| 1107 | |
| 1108 | //===----------------------------------------------------------------------===// |
| 1109 | // Implementation |
| 1110 | //===----------------------------------------------------------------------===// |
| 1111 | |
| 1112 | VarLocBasedLDV::VarLocBasedLDV() = default; |
| 1113 | |
| 1114 | VarLocBasedLDV::~VarLocBasedLDV() = default; |
| 1115 | |
| 1116 | /// Erase a variable from the set of open ranges, and additionally erase any |
| 1117 | /// fragments that may overlap it. If the VarLoc is a backup location, erase |
| 1118 | /// the variable from the EntryValuesBackupVars set, indicating we should stop |
| 1119 | /// tracking its backup entry location. Otherwise, if the VarLoc is primary |
| 1120 | /// location, erase the variable from the Vars set. |
| 1121 | void VarLocBasedLDV::OpenRangesSet::erase(const VarLoc &VL) { |
| 1122 | // Erasure helper. |
| 1123 | auto DoErase = [&VL, this](DebugVariable VarToErase) { |
| 1124 | auto *EraseFrom = VL.isEntryBackupLoc() ? &EntryValuesBackupVars : &Vars; |
| 1125 | auto It = EraseFrom->find(VarToErase);
| 1126 | if (It != EraseFrom->end()) {
| 1127 | LocIndices IDs = It->second;
| 1128 | for (LocIndex ID : IDs)
| 1129 | VarLocs.reset(ID.getAsRawInteger());
| 1130 | EraseFrom->erase(It);
| 1131 | } |
| 1132 | }; |
| 1133 | |
| 1134 | DebugVariable Var = VL.Var; |
| 1135 | |
| 1136 | // Erase the variable/fragment that ends here. |
| 1137 | DoErase(Var); |
| 1138 | |
| 1139 | // Extract the fragment. Interpret an empty fragment as one that covers all |
| 1140 | // possible bits. |
| 1141 | FragmentInfo ThisFragment = Var.getFragmentOrDefault(); |
| 1142 | |
| 1143 | // There may be fragments that overlap the designated fragment. Look them up |
| 1144 | // in the pre-computed overlap map, and erase them too. |
| 1145 | auto MapIt = OverlappingFragments.find({Var.getVariable(), ThisFragment});
| 1146 | if (MapIt != OverlappingFragments.end()) {
| 1147 | for (auto Fragment : MapIt->second) {
| 1148 | VarLocBasedLDV::OptFragmentInfo FragmentHolder;
| 1149 | if (!DebugVariable::isDefaultFragment(Fragment))
| 1150 | FragmentHolder = VarLocBasedLDV::OptFragmentInfo(Fragment);
| 1151 | DoErase({Var.getVariable(), FragmentHolder, Var.getInlinedAt()});
| 1152 | } |
| 1153 | } |
| 1154 | } |
| 1155 | |
| 1156 | void VarLocBasedLDV::OpenRangesSet::erase(const VarLocsInRange &KillSet, |
| 1157 | const VarLocMap &VarLocIDs, |
| 1158 | LocIndex::u32_location_t Location) { |
| 1159 | VarLocSet RemoveSet(Alloc); |
| 1160 | for (LocIndex::u32_index_t ID : KillSet) { |
| 1161 | const VarLoc &VL = VarLocIDs[LocIndex(Location, ID)]; |
| 1162 | auto *EraseFrom = VL.isEntryBackupLoc() ? &EntryValuesBackupVars : &Vars; |
| 1163 | EraseFrom->erase(VL.Var);
| 1164 | LocIndices VLI = VarLocIDs.getAllIndices(VL);
| 1165 | for (LocIndex ID : VLI)
| 1166 | RemoveSet.set(ID.getAsRawInteger());
| 1167 | }
| 1168 | VarLocs.intersectWithComplement(RemoveSet);
| 1169 | } |
| 1170 | |
| 1171 | void VarLocBasedLDV::OpenRangesSet::insertFromLocSet(const VarLocSet &ToLoad, |
| 1172 | const VarLocMap &Map) { |
| 1173 | VarLocsInRange UniqueVarLocIDs; |
| 1174 | DefinedRegsSet Regs; |
| 1175 | Regs.insert(LocIndex::kUniversalLocation);
| 1176 | collectIDsForRegs(UniqueVarLocIDs, Regs, ToLoad, Map);
| 1177 | for (uint64_t ID : UniqueVarLocIDs) {
| 1178 | LocIndex Idx = LocIndex::fromRawInteger(ID);
| 1179 | const VarLoc &VarL = Map[Idx];
| 1180 | const LocIndices Indices = Map.getAllIndices(VarL);
| 1181 | insert(Indices, VarL);
| 1182 | } |
| 1183 | } |
| 1184 | |
| 1185 | void VarLocBasedLDV::OpenRangesSet::insert(LocIndices VarLocIDs, |
| 1186 | const VarLoc &VL) { |
| 1187 | auto *InsertInto = VL.isEntryBackupLoc() ? &EntryValuesBackupVars : &Vars; |
| 1188 | for (LocIndex ID : VarLocIDs) |
| 1189 | VarLocs.set(ID.getAsRawInteger()); |
| 1190 | InsertInto->insert(KV: {VL.Var, VarLocIDs}); |
| 1191 | } |
| 1192 | |
/// Return the location indices of the entry value backup location for the
/// variable, if one exists.
| 1195 | std::optional<LocIndices> |
| 1196 | VarLocBasedLDV::OpenRangesSet::getEntryValueBackup(DebugVariable Var) { |
| 1197 | auto It = EntryValuesBackupVars.find(Val: Var); |
| 1198 | if (It != EntryValuesBackupVars.end()) |
| 1199 | return It->second; |
| 1200 | |
| 1201 | return std::nullopt; |
| 1202 | } |
| 1203 | |
| 1204 | void VarLocBasedLDV::collectIDsForRegs(VarLocsInRange &Collected, |
| 1205 | const DefinedRegsSet &Regs, |
| 1206 | const VarLocSet &CollectFrom, |
| 1207 | const VarLocMap &VarLocIDs) { |
  assert(!Regs.empty() && "Nothing to collect");
  SmallVector<Register, 32> SortedRegs;
  append_range(SortedRegs, Regs);
  array_pod_sort(SortedRegs.begin(), SortedRegs.end());
  auto It = CollectFrom.find(LocIndex::rawIndexForReg(SortedRegs.front()));
| 1213 | auto End = CollectFrom.end(); |
| 1214 | for (Register Reg : SortedRegs) { |
| 1215 | // The half-open interval [FirstIndexForReg, FirstInvalidIndex) contains |
| 1216 | // all possible VarLoc IDs for VarLocs with MLs of kind RegisterKind which |
| 1217 | // live in Reg. |
| 1218 | uint64_t FirstIndexForReg = LocIndex::rawIndexForReg(Reg); |
| 1219 | uint64_t FirstInvalidIndex = LocIndex::rawIndexForReg(Reg: Reg + 1); |
| 1220 | It.advanceToLowerBound(Index: FirstIndexForReg); |
| 1221 | |
| 1222 | // Iterate through that half-open interval and collect all the set IDs. |
| 1223 | for (; It != End && *It < FirstInvalidIndex; ++It) { |
| 1224 | LocIndex ItIdx = LocIndex::fromRawInteger(ID: *It); |
| 1225 | const VarLoc &VL = VarLocIDs[ItIdx]; |
| 1226 | LocIndices LI = VarLocIDs.getAllIndices(VL); |
| 1227 | // For now, the back index is always the universal location index. |
| 1228 | assert(LI.back().Location == LocIndex::kUniversalLocation && |
| 1229 | "Unexpected order of LocIndices for VarLoc; was it inserted into " |
| 1230 | "the VarLocMap correctly?" ); |
| 1231 | Collected.insert(V: LI.back().Index); |
| 1232 | } |
| 1233 | |
| 1234 | if (It == End) |
| 1235 | return; |
| 1236 | } |
| 1237 | } |
| 1238 | |
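/// Collect into \p UsedRegs every register (physical and virtual) that
/// currently hosts at least one VarLoc in \p CollectFrom. Registers are
/// visited in index order, so the result is sorted and duplicate-free.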
| 1239 | void VarLocBasedLDV::getUsedRegs(const VarLocSet &CollectFrom, |
| 1240 | SmallVectorImpl<Register> &UsedRegs) const { |
| 1241 | // All register-based VarLocs are assigned indices greater than or equal to |
| 1242 | // FirstRegIndex. |
| 1243 | uint64_t FirstRegIndex = |
| 1244 | LocIndex::rawIndexForReg(Reg: LocIndex::kFirstRegLocation); |
| 1245 | uint64_t FirstInvalidIndex = |
| 1246 | LocIndex::rawIndexForReg(Reg: LocIndex::kFirstInvalidRegLocation); |
| 1247 | uint64_t FirstVirtualRegIndex = |
| 1248 | LocIndex::rawIndexForReg(Reg: LocIndex::kFirstVirtualRegLocation); |
| 1249 | auto doGetUsedRegs = [&](VarLocSet::const_iterator &It) { |
| 1250 | // We found a VarLoc ID for a VarLoc that lives in a register. Figure out |
| 1251 | // which register and add it to UsedRegs. |
| 1252 | uint32_t FoundReg = LocIndex::fromRawInteger(ID: *It).Location; |
| 1253 | assert((UsedRegs.empty() || FoundReg != UsedRegs.back()) && |
| 1254 | "Duplicate used reg" ); |
| 1255 | UsedRegs.push_back(Elt: FoundReg); |
| 1256 | |
| 1257 | // Skip to the next /set/ register. Note that this finds a lower bound, so |
| 1258 | // even if there aren't any VarLocs living in `FoundReg+1`, we're still |
| 1259 | // guaranteed to move on to the next register (or to end()). |
| 1260 | uint64_t NextRegIndex = LocIndex::rawIndexForReg(Reg: FoundReg + 1); |
| 1261 | It.advanceToLowerBound(Index: NextRegIndex); |
| 1262 | }; |
| 1263 | for (auto It = CollectFrom.find(Index: FirstRegIndex), |
| 1264 | End = CollectFrom.find(Index: FirstInvalidIndex); |
| 1265 | It != End;) { |
| 1266 | doGetUsedRegs(It); |
| 1267 | } |
| 1268 | for (auto It = CollectFrom.find(Index: FirstVirtualRegIndex), |
| 1269 | End = CollectFrom.end(); |
| 1270 | It != End;) { |
| 1271 | doGetUsedRegs(It); |
| 1272 | } |
| 1273 | } |
| 1274 | |
| 1275 | //===----------------------------------------------------------------------===// |
| 1276 | // Debug Range Extension Implementation |
| 1277 | //===----------------------------------------------------------------------===// |
| 1278 | |
| 1279 | #ifndef NDEBUG |
| 1280 | void VarLocBasedLDV::printVarLocInMBB(const MachineFunction &MF, |
| 1281 | const VarLocInMBB &V, |
| 1282 | const VarLocMap &VarLocIDs, |
| 1283 | const char *msg, |
| 1284 | raw_ostream &Out) const { |
| 1285 | Out << '\n' << msg << '\n'; |
| 1286 | for (const MachineBasicBlock &BB : MF) { |
| 1287 | if (!V.count(&BB)) |
| 1288 | continue; |
| 1289 | const VarLocSet &L = getVarLocsInMBB(&BB, V); |
| 1290 | if (L.empty()) |
| 1291 | continue; |
| 1292 | SmallVector<VarLoc, 32> VarLocs; |
| 1293 | collectAllVarLocs(VarLocs, L, VarLocIDs); |
| 1294 | Out << "MBB: " << BB.getNumber() << ":\n" ; |
| 1295 | for (const VarLoc &VL : VarLocs) { |
| 1296 | Out << " Var: " << VL.Var.getVariable()->getName(); |
| 1297 | Out << " MI: " ; |
| 1298 | VL.dump(TRI, TII, Out); |
| 1299 | } |
| 1300 | } |
| 1301 | Out << "\n" ; |
| 1302 | } |
| 1303 | #endif |
| 1304 | |
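/// Compute the spill location (base register and offset) addressed by the
/// single memory operand of the spill or restore instruction \p MI.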
| 1305 | VarLocBasedLDV::VarLoc::SpillLoc |
| 1306 | VarLocBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { |
| 1307 | assert(MI.hasOneMemOperand() && |
| 1308 | "Spill instruction does not have exactly one memory operand?" ); |
| 1309 | auto MMOI = MI.memoperands_begin(); |
| 1310 | const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); |
| 1311 | assert(PVal->kind() == PseudoSourceValue::FixedStack && |
| 1312 | "Inconsistent memory operand in spill instruction" ); |
  int FI = cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex();
  const MachineBasicBlock *MBB = MI.getParent();
  Register Reg;
  StackOffset Offset = TFI->getFrameIndexReference(*MBB->getParent(), FI, Reg);
  return {Reg, Offset};
| 1318 | } |
| 1319 | |
/// Clean up \p EntryValTransfers: remove the Transfer created by \p TRInst
/// that uses the to-be-deleted \p EntryVL.
| 1322 | void VarLocBasedLDV::cleanupEntryValueTransfers( |
| 1323 | const MachineInstr *TRInst, OpenRangesSet &OpenRanges, VarLocMap &VarLocIDs, |
| 1324 | const VarLoc &EntryVL, InstToEntryLocMap &EntryValTransfers) { |
| 1325 | if (EntryValTransfers.empty() || TRInst == nullptr) |
| 1326 | return; |
| 1327 | |
| 1328 | auto TransRange = EntryValTransfers.equal_range(x: TRInst); |
| 1329 | for (auto &TDPair : llvm::make_range(p: TransRange)) { |
| 1330 | const VarLoc &EmittedEV = VarLocIDs[TDPair.second]; |
| 1331 | if (std::tie(args: EntryVL.Var, args: EntryVL.Locs[0].Value.RegNo, args: EntryVL.Expr) == |
| 1332 | std::tie(args: EmittedEV.Var, args: EmittedEV.Locs[0].Value.RegNo, |
| 1333 | args: EmittedEV.Expr)) { |
| 1334 | OpenRanges.erase(VL: EmittedEV); |
| 1335 | EntryValTransfers.erase(x: TRInst); |
| 1336 | break; |
| 1337 | } |
| 1338 | } |
| 1339 | } |
| 1340 | |
/// Try to salvage the debug entry value if we encounter a new debug value
/// describing the same parameter, otherwise stop tracking the value. If the
/// entry value can no longer be used, clean up the Entry Value Transfers
/// emitted for it and erase it from the open ranges.
| 1345 | void VarLocBasedLDV::removeEntryValue(const MachineInstr &MI, |
| 1346 | OpenRangesSet &OpenRanges, |
| 1347 | VarLocMap &VarLocIDs, |
| 1348 | const VarLoc &EntryVL, |
| 1349 | InstToEntryLocMap &EntryValTransfers, |
| 1350 | RegDefToInstMap &RegSetInstrs) { |
| 1351 | // Skip the DBG_VALUE which is the debug entry value itself. |
| 1352 | if (&MI == &EntryVL.MI) |
| 1353 | return; |
| 1354 | |
  // If the parameter's location is not a register location, we cannot track
  // the entry value any more. There is no TransferInst defining a register,
  // so no Entry Value Transfers have been emitted yet.
| 1358 | if (!MI.getDebugOperand(Index: 0).isReg()) |
| 1359 | return; |
| 1360 | |
| 1361 | // Try to get non-debug instruction responsible for the DBG_VALUE. |
| 1362 | Register Reg = MI.getDebugOperand(Index: 0).getReg(); |
| 1363 | const MachineInstr *TransferInst = |
| 1364 | Reg.isValid() ? RegSetInstrs.lookup(Val: Reg) : nullptr; |
| 1365 | |
| 1366 | // Case of the parameter's DBG_VALUE at the start of entry MBB. |
| 1367 | if (!TransferInst && !LastNonDbgMI && MI.getParent()->isEntryBlock()) |
| 1368 | return; |
| 1369 | |
  // If the debug expression of the DBG_VALUE is not empty, we can assume the
  // parameter's value has changed, indicating that we should stop tracking
  // its entry value as well. Only an empty expression with a known defining
  // instruction can preserve the entry value across a copy.
| 1373 | if (MI.getDebugExpression()->getNumElements() == 0 && TransferInst) { |
| 1374 | // If the DBG_VALUE comes from a copy instruction that copies the entry |
| 1375 | // value, it means the parameter's value has not changed and we should be |
| 1376 | // able to use its entry value. |
| 1377 | // TODO: Try to keep tracking of an entry value if we encounter a propagated |
| 1378 | // DBG_VALUE describing the copy of the entry value. (Propagated entry value |
| 1379 | // does not indicate the parameter modification.) |
| 1380 | auto DestSrc = TII->isCopyLikeInstr(MI: *TransferInst); |
| 1381 | if (DestSrc) { |
| 1382 | const MachineOperand *SrcRegOp, *DestRegOp; |
| 1383 | SrcRegOp = DestSrc->Source; |
| 1384 | DestRegOp = DestSrc->Destination; |
| 1385 | if (Reg == DestRegOp->getReg()) { |
| 1386 | for (uint64_t ID : OpenRanges.getEntryValueBackupVarLocs()) { |
| 1387 | const VarLoc &VL = VarLocIDs[LocIndex::fromRawInteger(ID)]; |
| 1388 | if (VL.isEntryValueCopyBackupReg(Reg) && |
| 1389 | // Entry Values should not be variadic. |
| 1390 | VL.MI.getDebugOperand(Index: 0).getReg() == SrcRegOp->getReg()) |
| 1391 | return; |
| 1392 | } |
| 1393 | } |
| 1394 | } |
| 1395 | } |
| 1396 | |
| 1397 | LLVM_DEBUG(dbgs() << "Deleting a DBG entry value because of: " ; |
| 1398 | MI.print(dbgs(), /*IsStandalone*/ false, |
| 1399 | /*SkipOpers*/ false, /*SkipDebugLoc*/ false, |
| 1400 | /*AddNewLine*/ true, TII)); |
| 1401 | cleanupEntryValueTransfers(TRInst: TransferInst, OpenRanges, VarLocIDs, EntryVL, |
| 1402 | EntryValTransfers); |
| 1403 | OpenRanges.erase(VL: EntryVL); |
| 1404 | } |
| 1405 | |
| 1406 | /// End all previous ranges related to @MI and start a new range from @MI |
| 1407 | /// if it is a DBG_VALUE instr. |
| 1408 | void VarLocBasedLDV::transferDebugValue(const MachineInstr &MI, |
| 1409 | OpenRangesSet &OpenRanges, |
| 1410 | VarLocMap &VarLocIDs, |
| 1411 | InstToEntryLocMap &EntryValTransfers, |
| 1412 | RegDefToInstMap &RegSetInstrs) { |
| 1413 | if (!MI.isDebugValue()) |
| 1414 | return; |
| 1415 | const DILocalVariable *Var = MI.getDebugVariable(); |
| 1416 | const DIExpression *Expr = MI.getDebugExpression(); |
| 1417 | const DILocation *DebugLoc = MI.getDebugLoc(); |
| 1418 | const DILocation *InlinedAt = DebugLoc->getInlinedAt(); |
| 1419 | assert(Var->isValidLocationForIntrinsic(DebugLoc) && |
| 1420 | "Expected inlined-at fields to agree" ); |
| 1421 | |
| 1422 | DebugVariable V(Var, Expr, InlinedAt); |
| 1423 | |
| 1424 | // Check if this DBG_VALUE indicates a parameter's value changing. |
| 1425 | // If that is the case, we should stop tracking its entry value. |
| 1426 | auto EntryValBackupID = OpenRanges.getEntryValueBackup(Var: V); |
| 1427 | if (Var->isParameter() && EntryValBackupID) { |
| 1428 | const VarLoc &EntryVL = VarLocIDs[EntryValBackupID->back()]; |
| 1429 | removeEntryValue(MI, OpenRanges, VarLocIDs, EntryVL, EntryValTransfers, |
| 1430 | RegSetInstrs); |
| 1431 | } |
| 1432 | |
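  // A DBG_VALUE can be tracked directly only if every debug operand is a
  // non-zero register, an immediate, or a target index; otherwise the
  // location is treated as undefined below.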
| 1433 | if (all_of(Range: MI.debug_operands(), P: [](const MachineOperand &MO) { |
| 1434 | return (MO.isReg() && MO.getReg()) || MO.isImm() || MO.isFPImm() || |
| 1435 | MO.isCImm() || MO.isTargetIndex(); |
| 1436 | })) { |
| 1437 | // Use normal VarLoc constructor for registers and immediates. |
| 1438 | VarLoc VL(MI); |
| 1439 | // End all previous ranges of VL.Var. |
| 1440 | OpenRanges.erase(VL); |
| 1441 | |
| 1442 | LocIndices IDs = VarLocIDs.insert(VL); |
| 1443 | // Add the VarLoc to OpenRanges from this DBG_VALUE. |
| 1444 | OpenRanges.insert(VarLocIDs: IDs, VL); |
| 1445 | } else if (MI.memoperands().size() > 0) { |
| 1446 | llvm_unreachable("DBG_VALUE with mem operand encountered after regalloc?" ); |
| 1447 | } else { |
| 1448 | // This must be an undefined location. If it has an open range, erase it. |
| 1449 | assert(MI.isUndefDebugValue() && |
| 1450 | "Unexpected non-undef DBG_VALUE encountered" ); |
| 1451 | VarLoc VL(MI); |
| 1452 | OpenRanges.erase(VL); |
| 1453 | } |
| 1454 | } |
| 1455 | |
// This should be removed later; it doesn't fit the new design.
| 1457 | void VarLocBasedLDV::collectAllVarLocs(SmallVectorImpl<VarLoc> &Collected, |
| 1458 | const VarLocSet &CollectFrom, |
| 1459 | const VarLocMap &VarLocIDs) { |
  // The half-open interval [FirstIndex, FirstInvalidIndex) contains all
  // possible VarLoc IDs stored in the universal location bucket.
| 1463 | uint64_t FirstIndex = LocIndex::rawIndexForReg(Reg: LocIndex::kUniversalLocation); |
| 1464 | uint64_t FirstInvalidIndex = |
| 1465 | LocIndex::rawIndexForReg(Reg: LocIndex::kUniversalLocation + 1); |
| 1466 | // Iterate through that half-open interval and collect all the set IDs. |
| 1467 | for (auto It = CollectFrom.find(Index: FirstIndex), End = CollectFrom.end(); |
| 1468 | It != End && *It < FirstInvalidIndex; ++It) { |
| 1469 | LocIndex RegIdx = LocIndex::fromRawInteger(ID: *It); |
| 1470 | Collected.push_back(Elt: VarLocIDs[RegIdx]); |
| 1471 | } |
| 1472 | } |
| 1473 | |
| 1474 | /// Turn the entry value backup locations into primary locations. |
| 1475 | void VarLocBasedLDV::emitEntryValues(MachineInstr &MI, |
| 1476 | OpenRangesSet &OpenRanges, |
| 1477 | VarLocMap &VarLocIDs, |
| 1478 | InstToEntryLocMap &EntryValTransfers, |
| 1479 | VarLocsInRange &KillSet) { |
| 1480 | // Do not insert entry value locations after a terminator. |
| 1481 | if (MI.isTerminator()) |
| 1482 | return; |
| 1483 | |
| 1484 | for (uint32_t ID : KillSet) { |
| 1485 | // The KillSet IDs are indices for the universal location bucket. |
| 1486 | LocIndex Idx = LocIndex(LocIndex::kUniversalLocation, ID); |
| 1487 | const VarLoc &VL = VarLocIDs[Idx]; |
| 1488 | if (!VL.Var.getVariable()->isParameter()) |
| 1489 | continue; |
| 1490 | |
| 1491 | auto DebugVar = VL.Var; |
| 1492 | std::optional<LocIndices> EntryValBackupIDs = |
| 1493 | OpenRanges.getEntryValueBackup(Var: DebugVar); |
| 1494 | |
| 1495 | // If the parameter has the entry value backup, it means we should |
| 1496 | // be able to use its entry value. |
| 1497 | if (!EntryValBackupIDs) |
| 1498 | continue; |
| 1499 | |
| 1500 | const VarLoc &EntryVL = VarLocIDs[EntryValBackupIDs->back()]; |
| 1501 | VarLoc EntryLoc = VarLoc::CreateEntryLoc(MI: EntryVL.MI, EntryExpr: EntryVL.Expr, |
| 1502 | Reg: EntryVL.Locs[0].Value.RegNo); |
| 1503 | LocIndices EntryValueIDs = VarLocIDs.insert(VL: EntryLoc); |
| 1504 | assert(EntryValueIDs.size() == 1 && |
| 1505 | "EntryValue loc should not be variadic" ); |
| 1506 | EntryValTransfers.insert(x: {&MI, EntryValueIDs.back()}); |
| 1507 | OpenRanges.insert(VarLocIDs: EntryValueIDs, VL: EntryLoc); |
| 1508 | } |
| 1509 | } |
| 1510 | |
/// Create a new TransferDebugPair and insert it in \p Transfers. The VarLoc
/// with \p OldVarID should be deleted from \p OpenRanges and replaced with a
/// new VarLoc. If \p NewReg is different from the default zero value, the new
/// location will be a register location created by the copy-like instruction;
/// otherwise it is the variable's location on the stack.
| 1516 | void VarLocBasedLDV::insertTransferDebugPair( |
| 1517 | MachineInstr &MI, OpenRangesSet &OpenRanges, TransferMap &Transfers, |
| 1518 | VarLocMap &VarLocIDs, LocIndex OldVarID, TransferKind Kind, |
| 1519 | const VarLoc::MachineLoc &OldLoc, Register NewReg) { |
| 1520 | const VarLoc &OldVarLoc = VarLocIDs[OldVarID]; |
| 1521 | |
| 1522 | auto ProcessVarLoc = [&MI, &OpenRanges, &Transfers, &VarLocIDs](VarLoc &VL) { |
| 1523 | LocIndices LocIds = VarLocIDs.insert(VL); |
| 1524 | |
| 1525 | // Close this variable's previous location range. |
| 1526 | OpenRanges.erase(VL); |
| 1527 | |
| 1528 | // Record the new location as an open range, and a postponed transfer |
| 1529 | // inserting a DBG_VALUE for this location. |
| 1530 | OpenRanges.insert(VarLocIDs: LocIds, VL); |
| 1531 | assert(!MI.isTerminator() && "Cannot insert DBG_VALUE after terminator" ); |
    TransferDebugPair MIP = {&MI, LocIds.back()};
| 1533 | Transfers.push_back(Elt: MIP); |
| 1534 | }; |
| 1535 | |
| 1536 | // End all previous ranges of VL.Var. |
| 1537 | OpenRanges.erase(VL: VarLocIDs[OldVarID]); |
| 1538 | switch (Kind) { |
| 1539 | case TransferKind::TransferCopy: { |
| 1540 | assert(NewReg && |
| 1541 | "No register supplied when handling a copy of a debug value" ); |
| 1542 | // Create a DBG_VALUE instruction to describe the Var in its new |
| 1543 | // register location. |
| 1544 | VarLoc VL = VarLoc::CreateCopyLoc(OldVL: OldVarLoc, OldML: OldLoc, NewReg); |
| 1545 | ProcessVarLoc(VL); |
| 1546 | LLVM_DEBUG({ |
| 1547 | dbgs() << "Creating VarLoc for register copy:" ; |
| 1548 | VL.dump(TRI, TII); |
| 1549 | }); |
| 1550 | return; |
| 1551 | } |
| 1552 | case TransferKind::TransferSpill: { |
| 1553 | // Create a DBG_VALUE instruction to describe the Var in its spilled |
| 1554 | // location. |
| 1555 | VarLoc::SpillLoc SpillLocation = extractSpillBaseRegAndOffset(MI); |
| 1556 | VarLoc VL = VarLoc::CreateSpillLoc( |
| 1557 | OldVL: OldVarLoc, OldML: OldLoc, SpillBase: SpillLocation.SpillBase, SpillOffset: SpillLocation.SpillOffset); |
| 1558 | ProcessVarLoc(VL); |
| 1559 | LLVM_DEBUG({ |
| 1560 | dbgs() << "Creating VarLoc for spill:" ; |
| 1561 | VL.dump(TRI, TII); |
| 1562 | }); |
| 1563 | return; |
| 1564 | } |
| 1565 | case TransferKind::TransferRestore: { |
| 1566 | assert(NewReg && |
| 1567 | "No register supplied when handling a restore of a debug value" ); |
| 1568 | // DebugInstr refers to the pre-spill location, therefore we can reuse |
| 1569 | // its expression. |
| 1570 | VarLoc VL = VarLoc::CreateCopyLoc(OldVL: OldVarLoc, OldML: OldLoc, NewReg); |
| 1571 | ProcessVarLoc(VL); |
| 1572 | LLVM_DEBUG({ |
| 1573 | dbgs() << "Creating VarLoc for restore:" ; |
| 1574 | VL.dump(TRI, TII); |
| 1575 | }); |
| 1576 | return; |
| 1577 | } |
| 1578 | } |
| 1579 | llvm_unreachable("Invalid transfer kind" ); |
| 1580 | } |
| 1581 | |
| 1582 | /// A definition of a register may mark the end of a range. |
| 1583 | void VarLocBasedLDV::transferRegisterDef(MachineInstr &MI, |
| 1584 | OpenRangesSet &OpenRanges, |
| 1585 | VarLocMap &VarLocIDs, |
| 1586 | InstToEntryLocMap &EntryValTransfers, |
| 1587 | RegDefToInstMap &RegSetInstrs) { |
| 1588 | |
| 1589 | // Meta Instructions do not affect the debug liveness of any register they |
| 1590 | // define. |
| 1591 | if (MI.isMetaInstruction()) |
| 1592 | return; |
| 1593 | |
| 1594 | MachineFunction *MF = MI.getMF(); |
| 1595 | const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); |
| 1596 | Register SP = TLI->getStackPointerRegisterToSaveRestore(); |
| 1597 | |
| 1598 | // Find the regs killed by MI, and find regmasks of preserved regs. |
| 1599 | DefinedRegsSet DeadRegs; |
| 1600 | SmallVector<const uint32_t *, 4> RegMasks; |
| 1601 | for (const MachineOperand &MO : MI.operands()) { |
| 1602 | // Determine whether the operand is a register def. |
| 1603 | if (MO.isReg() && MO.isDef() && MO.getReg() && MO.getReg().isPhysical() && |
| 1604 | !(MI.isCall() && MO.getReg() == SP)) { |
| 1605 | // Remove ranges of all aliased registers. |
| 1606 | for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI) |
| 1607 | // FIXME: Can we break out of this loop early if no insertion occurs? |
| 1608 | DeadRegs.insert(V: (*RAI).id()); |
| 1609 | RegSetInstrs.erase(Val: MO.getReg()); |
| 1610 | RegSetInstrs.insert(KV: {MO.getReg(), &MI}); |
| 1611 | } else if (MO.isRegMask()) { |
| 1612 | RegMasks.push_back(Elt: MO.getRegMask()); |
| 1613 | } |
| 1614 | } |
| 1615 | |
| 1616 | // Erase VarLocs which reside in one of the dead registers. For performance |
| 1617 | // reasons, it's critical to not iterate over the full set of open VarLocs. |
| 1618 | // Iterate over the set of dying/used regs instead. |
| 1619 | if (!RegMasks.empty()) { |
| 1620 | SmallVector<Register, 32> UsedRegs; |
| 1621 | getUsedRegs(CollectFrom: OpenRanges.getVarLocs(), UsedRegs); |
| 1622 | for (Register Reg : UsedRegs) { |
| 1623 | // Remove ranges of all clobbered registers. Register masks don't usually |
| 1624 | // list SP as preserved. Assume that call instructions never clobber SP, |
| 1625 | // because some backends (e.g., AArch64) never list SP in the regmask. |
| 1626 | // While the debug info may be off for an instruction or two around |
| 1627 | // callee-cleanup calls, transferring the DEBUG_VALUE across the call is |
| 1628 | // still a better user experience. |
| 1629 | if (Reg == SP) |
| 1630 | continue; |
| 1631 | bool AnyRegMaskKillsReg = |
| 1632 | any_of(Range&: RegMasks, P: [Reg](const uint32_t *RegMask) { |
| 1633 | return MachineOperand::clobbersPhysReg(RegMask, PhysReg: Reg); |
| 1634 | }); |
      if (AnyRegMaskKillsReg) {
        DeadRegs.insert(Reg);
        RegSetInstrs.erase(Reg);
        RegSetInstrs.insert({Reg, &MI});
      }
| 1641 | } |
| 1642 | } |
| 1643 | |
| 1644 | if (DeadRegs.empty()) |
| 1645 | return; |
| 1646 | |
| 1647 | VarLocsInRange KillSet; |
  collectIDsForRegs(KillSet, DeadRegs, OpenRanges.getVarLocs(), VarLocIDs);
| 1649 | OpenRanges.erase(KillSet, VarLocIDs, Location: LocIndex::kUniversalLocation); |
| 1650 | |
| 1651 | if (ShouldEmitDebugEntryValues) |
| 1652 | emitEntryValues(MI, OpenRanges, VarLocIDs, EntryValTransfers, KillSet); |
| 1653 | } |
| 1654 | |
| 1655 | void VarLocBasedLDV::transferWasmDef(MachineInstr &MI, |
| 1656 | OpenRangesSet &OpenRanges, |
| 1657 | VarLocMap &VarLocIDs) { |
| 1658 | // If this is not a Wasm local.set or local.tee, which sets local values, |
| 1659 | // return. |
| 1660 | int Index; |
| 1661 | int64_t Offset; |
| 1662 | if (!TII->isExplicitTargetIndexDef(MI, Index, Offset)) |
| 1663 | return; |
| 1664 | |
| 1665 | // Find the target indices killed by MI, and delete those variable locations |
| 1666 | // from the open range. |
| 1667 | VarLocsInRange KillSet; |
  VarLoc::WasmLoc Loc{Index, Offset};
| 1669 | for (uint64_t ID : OpenRanges.getWasmVarLocs()) { |
| 1670 | LocIndex Idx = LocIndex::fromRawInteger(ID); |
| 1671 | const VarLoc &VL = VarLocIDs[Idx]; |
| 1672 | assert(VL.containsWasmLocs() && "Broken VarLocSet?" ); |
| 1673 | if (VL.usesWasmLoc(WasmLocation: Loc)) |
| 1674 | KillSet.insert(V: ID); |
| 1675 | } |
| 1676 | OpenRanges.erase(KillSet, VarLocIDs, Location: LocIndex::kWasmLocation); |
| 1677 | } |
| 1678 | |
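/// Return true if \p MI is a spill instruction: it has exactly one memory
/// operand and the target reports a (possibly folded) spill size for it.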
| 1679 | bool VarLocBasedLDV::isSpillInstruction(const MachineInstr &MI, |
| 1680 | MachineFunction *MF) { |
| 1681 | // TODO: Handle multiple stores folded into one. |
| 1682 | if (!MI.hasOneMemOperand()) |
| 1683 | return false; |
| 1684 | |
| 1685 | if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII)) |
| 1686 | return false; // This is not a spill instruction, since no valid size was |
| 1687 | // returned from either function. |
| 1688 | |
| 1689 | return true; |
| 1690 | } |
| 1691 | |
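/// Return true if \p MI spills a register whose value dies at the spill,
/// i.e. the spilled register is killed either by \p MI itself or by the
/// immediately following instruction. On success, \p Reg is set to the
/// spilled register.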
| 1692 | bool VarLocBasedLDV::isLocationSpill(const MachineInstr &MI, |
| 1693 | MachineFunction *MF, Register &Reg) { |
| 1694 | if (!isSpillInstruction(MI, MF)) |
| 1695 | return false; |
| 1696 | |
| 1697 | auto isKilledReg = [&](const MachineOperand MO, Register &Reg) { |
| 1698 | if (!MO.isReg() || !MO.isUse()) { |
| 1699 | Reg = 0; |
| 1700 | return false; |
| 1701 | } |
| 1702 | Reg = MO.getReg(); |
| 1703 | return MO.isKill(); |
| 1704 | }; |
| 1705 | |
| 1706 | for (const MachineOperand &MO : MI.operands()) { |
| 1707 | // In a spill instruction generated by the InlineSpiller the spilled |
| 1708 | // register has its kill flag set. |
| 1709 | if (isKilledReg(MO, Reg)) |
| 1710 | return true; |
| 1711 | if (Reg != 0) { |
| 1712 | // Check whether next instruction kills the spilled register. |
| 1713 | // FIXME: Current solution does not cover search for killed register in |
| 1714 | // bundles and instructions further down the chain. |
| 1715 | auto NextI = std::next(x: MI.getIterator()); |
      // Skip if the next position is the basic block's end iterator.
| 1717 | if (MI.getParent()->end() == NextI) |
| 1718 | continue; |
| 1719 | Register RegNext; |
| 1720 | for (const MachineOperand &MONext : NextI->operands()) { |
| 1721 | // Return true if we came across the register from the |
| 1722 | // previous spill instruction that is killed in NextI. |
| 1723 | if (isKilledReg(MONext, RegNext) && RegNext == Reg) |
| 1724 | return true; |
| 1725 | } |
| 1726 | } |
| 1727 | } |
| 1728 | // Return false if we didn't find spilled register. |
| 1729 | return false; |
| 1730 | } |
| 1731 | |
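/// If \p MI restores a register from a spill slot, return the spill location
/// and set \p Reg to the restored register; otherwise return std::nullopt.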
| 1732 | std::optional<VarLocBasedLDV::VarLoc::SpillLoc> |
| 1733 | VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI, |
| 1734 | MachineFunction *MF, Register &Reg) { |
| 1735 | if (!MI.hasOneMemOperand()) |
| 1736 | return std::nullopt; |
| 1737 | |
| 1738 | // FIXME: Handle folded restore instructions with more than one memory |
| 1739 | // operand. |
| 1740 | if (MI.getRestoreSize(TII)) { |
| 1741 | Reg = MI.getOperand(i: 0).getReg(); |
| 1742 | return extractSpillBaseRegAndOffset(MI); |
| 1743 | } |
| 1744 | return std::nullopt; |
| 1745 | } |
| 1746 | |
| 1747 | /// A spilled register may indicate that we have to end the current range of |
| 1748 | /// a variable and create a new one for the spill location. |
| 1749 | /// A restored register may indicate the reverse situation. |
| 1750 | /// We don't want to insert any instructions in process(), so we just create |
| 1751 | /// the DBG_VALUE without inserting it and keep track of it in \p Transfers. |
| 1752 | /// It will be inserted into the BB when we're done iterating over the |
| 1753 | /// instructions. |
| 1754 | void VarLocBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI, |
| 1755 | OpenRangesSet &OpenRanges, |
| 1756 | VarLocMap &VarLocIDs, |
| 1757 | TransferMap &Transfers) { |
| 1758 | MachineFunction *MF = MI.getMF(); |
| 1759 | TransferKind TKind; |
| 1760 | Register Reg; |
| 1761 | std::optional<VarLoc::SpillLoc> Loc; |
| 1762 | |
| 1763 | LLVM_DEBUG(dbgs() << "Examining instruction: " ; MI.dump();); |
| 1764 | |
| 1765 | // First, if there are any DBG_VALUEs pointing at a spill slot that is |
| 1766 | // written to, then close the variable location. The value in memory |
| 1767 | // will have changed. |
| 1768 | VarLocsInRange KillSet; |
| 1769 | if (isSpillInstruction(MI, MF)) { |
| 1770 | Loc = extractSpillBaseRegAndOffset(MI); |
| 1771 | for (uint64_t ID : OpenRanges.getSpillVarLocs()) { |
| 1772 | LocIndex Idx = LocIndex::fromRawInteger(ID); |
| 1773 | const VarLoc &VL = VarLocIDs[Idx]; |
| 1774 | assert(VL.containsSpillLocs() && "Broken VarLocSet?" ); |
| 1775 | if (VL.usesSpillLoc(SpillLocation: *Loc)) { |
| 1776 | // This location is overwritten by the current instruction -- terminate |
| 1777 | // the open range, and insert an explicit DBG_VALUE $noreg. |
| 1778 | // |
        // Doing this at a later stage would require re-interpreting all
        // DBG_VALUEs and DIExpressions to identify whether they point at
        // memory, and then analysing all memory writes to see if they
        // overwrite that memory, which is expensive.
        //
        // At this stage, we already know which DBG_VALUEs are for spills and
        // where they are located; it's best to handle overwrites now.
| 1786 | KillSet.insert(V: ID); |
| 1787 | unsigned SpillLocIdx = VL.getSpillLocIdx(SpillLocation: *Loc); |
| 1788 | VarLoc::MachineLoc OldLoc = VL.Locs[SpillLocIdx]; |
| 1789 | VarLoc UndefVL = VarLoc::CreateCopyLoc(OldVL: VL, OldML: OldLoc, NewReg: 0); |
| 1790 | LocIndices UndefLocIDs = VarLocIDs.insert(VL: UndefVL); |
        Transfers.push_back({&MI, UndefLocIDs.back()});
| 1792 | } |
| 1793 | } |
| 1794 | OpenRanges.erase(KillSet, VarLocIDs, Location: LocIndex::kSpillLocation); |
| 1795 | } |
| 1796 | |
| 1797 | // Try to recognise spill and restore instructions that may create a new |
| 1798 | // variable location. |
| 1799 | if (isLocationSpill(MI, MF, Reg)) { |
| 1800 | TKind = TransferKind::TransferSpill; |
| 1801 | LLVM_DEBUG(dbgs() << "Recognized as spill: " ; MI.dump();); |
| 1802 | LLVM_DEBUG(dbgs() << "Register: " << Reg.id() << " " << printReg(Reg, TRI) |
| 1803 | << "\n" ); |
| 1804 | } else { |
| 1805 | if (!(Loc = isRestoreInstruction(MI, MF, Reg))) |
| 1806 | return; |
| 1807 | TKind = TransferKind::TransferRestore; |
| 1808 | LLVM_DEBUG(dbgs() << "Recognized as restore: " ; MI.dump();); |
| 1809 | LLVM_DEBUG(dbgs() << "Register: " << Reg.id() << " " << printReg(Reg, TRI) |
| 1810 | << "\n" ); |
| 1811 | } |
| 1812 | // Check if the register or spill location is the location of a debug value. |
| 1813 | auto TransferCandidates = OpenRanges.getEmptyVarLocRange(); |
| 1814 | if (TKind == TransferKind::TransferSpill) |
| 1815 | TransferCandidates = OpenRanges.getRegisterVarLocs(Reg); |
| 1816 | else if (TKind == TransferKind::TransferRestore) |
| 1817 | TransferCandidates = OpenRanges.getSpillVarLocs(); |
| 1818 | for (uint64_t ID : TransferCandidates) { |
| 1819 | LocIndex Idx = LocIndex::fromRawInteger(ID); |
| 1820 | const VarLoc &VL = VarLocIDs[Idx]; |
| 1821 | unsigned LocIdx; |
| 1822 | if (TKind == TransferKind::TransferSpill) { |
| 1823 | assert(VL.usesReg(Reg) && "Broken VarLocSet?" ); |
| 1824 | LLVM_DEBUG(dbgs() << "Spilling Register " << printReg(Reg, TRI) << '(' |
| 1825 | << VL.Var.getVariable()->getName() << ")\n" ); |
| 1826 | LocIdx = VL.getRegIdx(Reg); |
| 1827 | } else { |
| 1828 | assert(TKind == TransferKind::TransferRestore && VL.containsSpillLocs() && |
| 1829 | "Broken VarLocSet?" ); |
| 1830 | if (!VL.usesSpillLoc(SpillLocation: *Loc)) |
| 1831 | // The spill location is not the location of a debug value. |
| 1832 | continue; |
| 1833 | LLVM_DEBUG(dbgs() << "Restoring Register " << printReg(Reg, TRI) << '(' |
| 1834 | << VL.Var.getVariable()->getName() << ")\n" ); |
| 1835 | LocIdx = VL.getSpillLocIdx(SpillLocation: *Loc); |
| 1836 | } |
| 1837 | VarLoc::MachineLoc MLoc = VL.Locs[LocIdx]; |
| 1838 | insertTransferDebugPair(MI, OpenRanges, Transfers, VarLocIDs, OldVarID: Idx, Kind: TKind, |
| 1839 | OldLoc: MLoc, NewReg: Reg); |
| 1840 | // FIXME: A comment should explain why it's correct to return early here, |
| 1841 | // if that is in fact correct. |
| 1842 | return; |
| 1843 | } |
| 1844 | } |
| 1845 | |
/// If \p MI is a register copy instruction that copies a previously tracked
/// value from one register to another register that is callee saved, create a
/// new DBG_VALUE instruction describing the copy's destination register.
| 1849 | void VarLocBasedLDV::transferRegisterCopy(MachineInstr &MI, |
| 1850 | OpenRangesSet &OpenRanges, |
| 1851 | VarLocMap &VarLocIDs, |
| 1852 | TransferMap &Transfers) { |
| 1853 | auto DestSrc = TII->isCopyLikeInstr(MI); |
| 1854 | if (!DestSrc) |
| 1855 | return; |
| 1856 | |
| 1857 | const MachineOperand *DestRegOp = DestSrc->Destination; |
| 1858 | const MachineOperand *SrcRegOp = DestSrc->Source; |
| 1859 | |
| 1860 | if (!DestRegOp->isDef()) |
| 1861 | return; |
| 1862 | |
| 1863 | auto isCalleeSavedReg = [&](Register Reg) { |
| 1864 | for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI) |
| 1865 | if (CalleeSavedRegs.test(Idx: (*RAI).id())) |
| 1866 | return true; |
| 1867 | return false; |
| 1868 | }; |
| 1869 | |
| 1870 | Register SrcReg = SrcRegOp->getReg(); |
| 1871 | Register DestReg = DestRegOp->getReg(); |
| 1872 | |
  // We only want to recognize copies whose destination register is callee
  // saved. If a register that could be clobbered by a call were used instead,
  // there is a good chance it would be clobbered soon. A callee-saved
  // register is likely to stay unclobbered longer, even if the previous
  // location is killed.
| 1878 | if (!isCalleeSavedReg(DestReg)) |
| 1879 | return; |
| 1880 | |
| 1881 | // Remember an entry value movement. If we encounter a new debug value of |
  // a parameter that only describes moving the value around, rather than
| 1883 | // modifying it, we are still able to use the entry value if needed. |
| 1884 | if (isRegOtherThanSPAndFP(Op: *DestRegOp, MI, TRI)) { |
| 1885 | for (uint64_t ID : OpenRanges.getEntryValueBackupVarLocs()) { |
| 1886 | LocIndex Idx = LocIndex::fromRawInteger(ID); |
| 1887 | const VarLoc &VL = VarLocIDs[Idx]; |
| 1888 | if (VL.isEntryValueBackupReg(Reg: SrcReg)) { |
| 1889 | LLVM_DEBUG(dbgs() << "Copy of the entry value: " ; MI.dump();); |
| 1890 | VarLoc EntryValLocCopyBackup = |
| 1891 | VarLoc::CreateEntryCopyBackupLoc(MI: VL.MI, EntryExpr: VL.Expr, NewReg: DestReg); |
| 1892 | // Stop tracking the original entry value. |
| 1893 | OpenRanges.erase(VL); |
| 1894 | |
| 1895 | // Start tracking the entry value copy. |
| 1896 | LocIndices EntryValCopyLocIDs = VarLocIDs.insert(VL: EntryValLocCopyBackup); |
| 1897 | OpenRanges.insert(VarLocIDs: EntryValCopyLocIDs, VL: EntryValLocCopyBackup); |
| 1898 | break; |
| 1899 | } |
| 1900 | } |
| 1901 | } |
| 1902 | |
| 1903 | if (!SrcRegOp->isKill()) |
| 1904 | return; |
| 1905 | |
| 1906 | for (uint64_t ID : OpenRanges.getRegisterVarLocs(Reg: SrcReg)) { |
| 1907 | LocIndex Idx = LocIndex::fromRawInteger(ID); |
| 1908 | assert(VarLocIDs[Idx].usesReg(SrcReg) && "Broken VarLocSet?" ); |
| 1909 | VarLoc::MachineLocValue Loc; |
| 1910 | Loc.RegNo = SrcReg; |
    VarLoc::MachineLoc MLoc{VarLoc::MachineLocKind::RegisterKind, Loc};
| 1912 | insertTransferDebugPair(MI, OpenRanges, Transfers, VarLocIDs, OldVarID: Idx, |
| 1913 | Kind: TransferKind::TransferCopy, OldLoc: MLoc, NewReg: DestReg); |
| 1914 | // FIXME: A comment should explain why it's correct to return early here, |
| 1915 | // if that is in fact correct. |
| 1916 | return; |
| 1917 | } |
| 1918 | } |
| 1919 | |
| 1920 | /// Terminate all open ranges at the end of the current basic block. |
| 1921 | bool VarLocBasedLDV::transferTerminator(MachineBasicBlock *CurMBB, |
| 1922 | OpenRangesSet &OpenRanges, |
| 1923 | VarLocInMBB &OutLocs, |
| 1924 | const VarLocMap &VarLocIDs) { |
| 1925 | bool Changed = false; |
| 1926 | LLVM_DEBUG({ |
| 1927 | VarVec VarLocs; |
| 1928 | OpenRanges.getUniqueVarLocs(VarLocs, VarLocIDs); |
| 1929 | for (VarLoc &VL : VarLocs) { |
| 1930 | // Copy OpenRanges to OutLocs, if not already present. |
| 1931 | dbgs() << "Add to OutLocs in MBB #" << CurMBB->getNumber() << ": " ; |
| 1932 | VL.dump(TRI, TII); |
| 1933 | } |
| 1934 | }); |
| 1935 | VarLocSet &VLS = getVarLocsInMBB(MBB: CurMBB, Locs&: OutLocs); |
| 1936 | Changed = VLS != OpenRanges.getVarLocs(); |
| 1937 | // New OutLocs set may be different due to spill, restore or register |
| 1938 | // copy instruction processing. |
| 1939 | if (Changed) |
| 1940 | VLS = OpenRanges.getVarLocs(); |
| 1941 | OpenRanges.clear(); |
| 1942 | return Changed; |
| 1943 | } |
| 1944 | |
| 1945 | /// Accumulate a mapping between each DILocalVariable fragment and other |
| 1946 | /// fragments of that DILocalVariable which overlap. This reduces work during |
| 1947 | /// the data-flow stage from "Find any overlapping fragments" to "Check if the |
| 1948 | /// known-to-overlap fragments are present". |
| 1949 | /// \param MI A previously unprocessed DEBUG_VALUE instruction to analyze for |
| 1950 | /// fragment usage. |
| 1951 | /// \param SeenFragments Map from DILocalVariable to all fragments of that |
| 1952 | /// Variable which are known to exist. |
| 1953 | /// \param OverlappingFragments The overlap map being constructed, from one |
| 1954 | /// Var/Fragment pair to a vector of fragments known to overlap. |
| 1955 | void VarLocBasedLDV::accumulateFragmentMap(MachineInstr &MI, |
| 1956 | VarToFragments &SeenFragments, |
| 1957 | OverlapMap &OverlappingFragments) { |
| 1958 | DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(), |
| 1959 | MI.getDebugLoc()->getInlinedAt()); |
| 1960 | FragmentInfo ThisFragment = MIVar.getFragmentOrDefault(); |
| 1961 | |
| 1962 | // If this is the first sighting of this variable, then we are guaranteed |
| 1963 | // there are currently no overlapping fragments either. Initialize the set |
| 1964 | // of seen fragments, record no overlaps for the current one, and return. |
| 1965 | auto [SeenIt, Inserted] = SeenFragments.try_emplace(Key: MIVar.getVariable()); |
| 1966 | if (Inserted) { |
| 1967 | SeenIt->second.insert(V: ThisFragment); |
| 1968 | |
| 1969 | OverlappingFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}}); |
| 1970 | return; |
| 1971 | } |
| 1972 | |
| 1973 | // If this particular Variable/Fragment pair already exists in the overlap |
| 1974 | // map, it has already been accounted for. |
| 1975 | auto IsInOLapMap = |
| 1976 | OverlappingFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}}); |
| 1977 | if (!IsInOLapMap.second) |
| 1978 | return; |
| 1979 | |
| 1980 | auto &ThisFragmentsOverlaps = IsInOLapMap.first->second; |
| 1981 | auto &AllSeenFragments = SeenIt->second; |
| 1982 | |
| 1983 | // Otherwise, examine all other seen fragments for this variable, with "this" |
| 1984 | // fragment being a previously unseen fragment. Record any pair of |
| 1985 | // overlapping fragments. |
| 1986 | for (const auto &ASeenFragment : AllSeenFragments) { |
| 1987 | // Does this previously seen fragment overlap? |
| 1988 | if (DIExpression::fragmentsOverlap(A: ThisFragment, B: ASeenFragment)) { |
| 1989 | // Yes: Mark the current fragment as being overlapped. |
| 1990 | ThisFragmentsOverlaps.push_back(Elt: ASeenFragment); |
| 1991 | // Mark the previously seen fragment as being overlapped by the current |
| 1992 | // one. |
| 1993 | auto ASeenFragmentsOverlaps = |
| 1994 | OverlappingFragments.find(Val: {MIVar.getVariable(), ASeenFragment}); |
| 1995 | assert(ASeenFragmentsOverlaps != OverlappingFragments.end() && |
| 1996 | "Previously seen var fragment has no vector of overlaps" ); |
| 1997 | ASeenFragmentsOverlaps->second.push_back(Elt: ThisFragment); |
| 1998 | } |
| 1999 | } |
| 2000 | |
| 2001 | AllSeenFragments.insert(V: ThisFragment); |
| 2002 | } |
| 2003 | |
/// Process a single machine instruction: update \p OpenRanges and record any
/// location transfers (spills, restores, and register copies) it implies.
| 2005 | void VarLocBasedLDV::process(MachineInstr &MI, OpenRangesSet &OpenRanges, |
| 2006 | VarLocMap &VarLocIDs, TransferMap &Transfers, |
| 2007 | InstToEntryLocMap &EntryValTransfers, |
| 2008 | RegDefToInstMap &RegSetInstrs) { |
| 2009 | if (!MI.isDebugInstr()) |
| 2010 | LastNonDbgMI = &MI; |
| 2011 | transferDebugValue(MI, OpenRanges, VarLocIDs, EntryValTransfers, |
| 2012 | RegSetInstrs); |
| 2013 | transferRegisterDef(MI, OpenRanges, VarLocIDs, EntryValTransfers, |
| 2014 | RegSetInstrs); |
| 2015 | transferWasmDef(MI, OpenRanges, VarLocIDs); |
| 2016 | transferRegisterCopy(MI, OpenRanges, VarLocIDs, Transfers); |
| 2017 | transferSpillOrRestoreInst(MI, OpenRanges, VarLocIDs, Transfers); |
| 2018 | } |
| 2019 | |
/// This routine joins the analysis results of all incoming edges of @MBB: a
/// variable location becomes live-in to @MBB (and later receives a DBG_VALUE
/// at the start of the block) only if the same source variable resides in the
/// same location in all predecessors of @MBB.
| 2023 | bool VarLocBasedLDV::join( |
| 2024 | MachineBasicBlock &MBB, VarLocInMBB &OutLocs, VarLocInMBB &InLocs, |
| 2025 | const VarLocMap &VarLocIDs, |
| 2026 | SmallPtrSet<const MachineBasicBlock *, 16> &Visited, |
| 2027 | SmallPtrSetImpl<const MachineBasicBlock *> &ArtificialBlocks) { |
| 2028 | LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n" ); |
| 2029 | |
| 2030 | VarLocSet InLocsT(Alloc); // Temporary incoming locations. |
| 2031 | |
| 2032 | // For all predecessors of this MBB, find the set of VarLocs that |
| 2033 | // can be joined. |
| 2034 | int NumVisited = 0; |
| 2035 | for (auto *p : MBB.predecessors()) { |
| 2036 | // Ignore backedges if we have not visited the predecessor yet. As the |
| 2037 | // predecessor hasn't yet had locations propagated into it, most locations |
| 2038 | // will not yet be valid, so treat them as all being uninitialized and |
| 2039 | // potentially valid. If a location guessed to be correct here is |
| 2040 | // invalidated later, we will remove it when we revisit this block. |
| 2041 | if (!Visited.count(Ptr: p)) { |
| 2042 | LLVM_DEBUG(dbgs() << " ignoring unvisited pred MBB: " << p->getNumber() |
| 2043 | << "\n" ); |
| 2044 | continue; |
| 2045 | } |
| 2046 | auto OL = OutLocs.find(Val: p); |
    // The join is undefined if any visited predecessor has no OutLocs yet;
    // bail out.
| 2048 | if (OL == OutLocs.end()) |
| 2049 | return false; |
| 2050 | |
| 2051 | // Just copy over the Out locs to incoming locs for the first visited |
| 2052 | // predecessor, and for all other predecessors join the Out locs. |
| 2053 | VarLocSet &OutLocVLS = *OL->second; |
| 2054 | if (!NumVisited) |
| 2055 | InLocsT = OutLocVLS; |
| 2056 | else |
| 2057 | InLocsT &= OutLocVLS; |
| 2058 | |
| 2059 | LLVM_DEBUG({ |
| 2060 | if (!InLocsT.empty()) { |
| 2061 | VarVec VarLocs; |
| 2062 | collectAllVarLocs(VarLocs, InLocsT, VarLocIDs); |
| 2063 | for (const VarLoc &VL : VarLocs) |
| 2064 | dbgs() << " gathered candidate incoming var: " |
| 2065 | << VL.Var.getVariable()->getName() << "\n" ; |
| 2066 | } |
| 2067 | }); |
| 2068 | |
| 2069 | NumVisited++; |
| 2070 | } |
| 2071 | |
| 2072 | // Filter out DBG_VALUES that are out of scope. |
| 2073 | VarLocSet KillSet(Alloc); |
| 2074 | bool IsArtificial = ArtificialBlocks.count(Ptr: &MBB); |
| 2075 | if (!IsArtificial) { |
| 2076 | for (uint64_t ID : InLocsT) { |
| 2077 | LocIndex Idx = LocIndex::fromRawInteger(ID); |
| 2078 | if (!VarLocIDs[Idx].dominates(LS, MBB)) { |
| 2079 | KillSet.set(ID); |
| 2080 | LLVM_DEBUG({ |
| 2081 | auto Name = VarLocIDs[Idx].Var.getVariable()->getName(); |
| 2082 | dbgs() << " killing " << Name << ", it doesn't dominate MBB\n" ; |
| 2083 | }); |
| 2084 | } |
| 2085 | } |
| 2086 | } |
| 2087 | InLocsT.intersectWithComplement(Other: KillSet); |
| 2088 | |
| 2089 | // As we are processing blocks in reverse post-order we |
| 2090 | // should have processed at least one predecessor, unless it |
| 2091 | // is the entry block which has no predecessor. |
| 2092 | assert((NumVisited || MBB.pred_empty()) && |
| 2093 | "Should have processed at least one predecessor" ); |
| 2094 | |
| 2095 | VarLocSet &ILS = getVarLocsInMBB(MBB: &MBB, Locs&: InLocs); |
| 2096 | bool Changed = false; |
| 2097 | if (ILS != InLocsT) { |
| 2098 | ILS = InLocsT; |
| 2099 | Changed = true; |
| 2100 | } |
| 2101 | |
| 2102 | return Changed; |
| 2103 | } |
| 2104 | |
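/// Emit DBG_VALUEs for the live-in locations recorded in \p PendingInLocs,
/// inserting them at the start of their basic block. Entry value backup
/// locations are not materialized here.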
| 2105 | void VarLocBasedLDV::flushPendingLocs(VarLocInMBB &PendingInLocs, |
| 2106 | VarLocMap &VarLocIDs) { |
| 2107 | // PendingInLocs records all locations propagated into blocks, which have |
| 2108 | // not had DBG_VALUE insts created. Go through and create those insts now. |
| 2109 | for (auto &Iter : PendingInLocs) { |
| 2110 | // Map is keyed on a constant pointer, unwrap it so we can insert insts. |
| 2111 | auto &MBB = const_cast<MachineBasicBlock &>(*Iter.first); |
| 2112 | VarLocSet &Pending = *Iter.second; |
| 2113 | |
| 2114 | SmallVector<VarLoc, 32> VarLocs; |
| 2115 | collectAllVarLocs(Collected&: VarLocs, CollectFrom: Pending, VarLocIDs); |
| 2116 | |
| 2117 | for (VarLoc DiffIt : VarLocs) { |
| 2118 | // The ID location is live-in to MBB -- work out what kind of machine |
| 2119 | // location it is and create a DBG_VALUE. |
| 2120 | if (DiffIt.isEntryBackupLoc()) |
| 2121 | continue; |
| 2122 | MachineInstr *MI = DiffIt.BuildDbgValue(MF&: *MBB.getParent()); |
| 2123 | MBB.insert(I: MBB.instr_begin(), M: MI); |
| 2124 | |
| 2125 | (void)MI; |
| 2126 | LLVM_DEBUG(dbgs() << "Inserted: " ; MI->dump();); |
| 2127 | } |
| 2128 | } |
| 2129 | } |
| 2130 | |
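/// Return true if the DBG_VALUE \p MI describes a non-inlined function
/// parameter that lives in a register (other than SP/FP) which has not been
/// defined in the entry block, making it a candidate for an entry value
/// backup location. \p DefinedRegs holds the registers defined so far while
/// scanning the entry block.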
| 2131 | bool VarLocBasedLDV::isEntryValueCandidate( |
| 2132 | const MachineInstr &MI, const DefinedRegsSet &DefinedRegs) const { |
| 2133 | assert(MI.isDebugValue() && "This must be DBG_VALUE." ); |
| 2134 | |
  // TODO: Add support for local variables that are expressed in terms of
  // parameters' entry values.
  // TODO: Add support for modified arguments that can be expressed by using
  // their entry values.
| 2139 | auto *DIVar = MI.getDebugVariable(); |
| 2140 | if (!DIVar->isParameter()) |
| 2141 | return false; |
| 2142 | |
| 2143 | // Do not consider parameters that belong to an inlined function. |
| 2144 | if (MI.getDebugLoc()->getInlinedAt()) |
| 2145 | return false; |
| 2146 | |
| 2147 | // Only consider parameters that are described using registers. Parameters |
| 2148 | // that are passed on the stack are not yet supported, so ignore debug |
| 2149 | // values that are described by the frame or stack pointer. |
| 2150 | if (!isRegOtherThanSPAndFP(Op: MI.getDebugOperand(Index: 0), MI, TRI)) |
| 2151 | return false; |
| 2152 | |
| 2153 | // If a parameter's value has been propagated from the caller, then the |
| 2154 | // parameter's DBG_VALUE may be described using a register defined by some |
| 2155 | // instruction in the entry block, in which case we shouldn't create an |
| 2156 | // entry value. |
| 2157 | if (DefinedRegs.count(V: MI.getDebugOperand(Index: 0).getReg())) |
| 2158 | return false; |
| 2159 | |
  // TODO: Add support for parameters that have pre-existing debug expressions
| 2161 | // (e.g. fragments). |
| 2162 | // A simple deref expression is equivalent to an indirect debug value. |
| 2163 | const DIExpression *Expr = MI.getDebugExpression(); |
| 2164 | if (Expr->getNumElements() > 0 && !Expr->isDeref()) |
| 2165 | return false; |
| 2166 | |
| 2167 | return true; |
| 2168 | } |
| 2169 | |
| 2170 | /// Collect all register defines (including aliases) for the given instruction. |
| 2171 | static void collectRegDefs(const MachineInstr &MI, DefinedRegsSet &Regs, |
| 2172 | const TargetRegisterInfo *TRI) { |
| 2173 | for (const MachineOperand &MO : MI.all_defs()) { |
| 2174 | if (MO.getReg() && MO.getReg().isPhysical()) { |
| 2175 | Regs.insert(V: MO.getReg()); |
| 2176 | for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI) |
| 2177 | Regs.insert(V: *AI); |
| 2178 | } |
| 2179 | } |
| 2180 | } |
| 2181 | |
/// This routine records the entry values of function parameters. The values
/// can be used as backup values. If we lose track of some unmodified
/// parameters, the backup values will be used as primary locations.
| 2185 | void VarLocBasedLDV::recordEntryValue(const MachineInstr &MI, |
| 2186 | const DefinedRegsSet &DefinedRegs, |
| 2187 | OpenRangesSet &OpenRanges, |
| 2188 | VarLocMap &VarLocIDs) { |
| 2189 | if (!ShouldEmitDebugEntryValues) |
| 2190 | return; |
| 2191 | |
| 2192 | DebugVariable V(MI.getDebugVariable(), MI.getDebugExpression(), |
| 2193 | MI.getDebugLoc()->getInlinedAt()); |
| 2194 | |
| 2195 | if (!isEntryValueCandidate(MI, DefinedRegs) || |
| 2196 | OpenRanges.getEntryValueBackup(Var: V)) |
| 2197 | return; |
| 2198 | |
| 2199 | LLVM_DEBUG(dbgs() << "Creating the backup entry location: " ; MI.dump();); |
| 2200 | |
  // Create the entry value and use it as a backup location. It remains valid
  // until the parameter is modified.
| 2203 | DIExpression *NewExpr = |
| 2204 | DIExpression::prepend(Expr: MI.getDebugExpression(), Flags: DIExpression::EntryValue); |
| 2205 | VarLoc EntryValLocAsBackup = VarLoc::CreateEntryBackupLoc(MI, EntryExpr: NewExpr); |
| 2206 | LocIndices EntryValLocIDs = VarLocIDs.insert(VL: EntryValLocAsBackup); |
| 2207 | OpenRanges.insert(VarLocIDs: EntryValLocIDs, VL: EntryValLocAsBackup); |
| 2208 | } |
| 2209 | |
| 2210 | /// Calculate the liveness information for the given machine function and |
| 2211 | /// extend ranges across basic blocks. |
| 2212 | bool VarLocBasedLDV::ExtendRanges(MachineFunction &MF, |
| 2213 | MachineDominatorTree *DomTree, |
| 2214 | bool ShouldEmitDebugEntryValues, |
| 2215 | unsigned InputBBLimit, |
| 2216 | unsigned InputDbgValLimit) { |
| 2217 | (void)DomTree; |
| 2218 | LLVM_DEBUG(dbgs() << "\nDebug Range Extension: " << MF.getName() << "\n" ); |
| 2219 | |
| 2220 | if (!MF.getFunction().getSubprogram()) |
| 2221 | // VarLocBaseLDV will already have removed all DBG_VALUEs. |
| 2222 | return false; |
| 2223 | |
| 2224 | // Skip functions from NoDebug compilation units. |
| 2225 | if (MF.getFunction().getSubprogram()->getUnit()->getEmissionKind() == |
| 2226 | DICompileUnit::NoDebug) |
| 2227 | return false; |
| 2228 | |
| 2229 | TRI = MF.getSubtarget().getRegisterInfo(); |
| 2230 | TII = MF.getSubtarget().getInstrInfo(); |
| 2231 | TFI = MF.getSubtarget().getFrameLowering(); |
  TFI->getCalleeSaves(MF, CalleeSavedRegs);
| 2233 | this->ShouldEmitDebugEntryValues = ShouldEmitDebugEntryValues; |
| 2234 | |
| 2235 | LS.scanFunction(MF); |
| 2236 | |
| 2237 | bool Changed = false; |
| 2238 | bool OLChanged = false; |
| 2239 | bool MBBJoined = false; |
| 2240 | |
| 2241 | VarLocMap VarLocIDs; // Map VarLoc<>unique ID for use in bitvectors. |
| 2242 | OverlapMap OverlapFragments; // Map of overlapping variable fragments. |
| 2243 | OpenRangesSet OpenRanges(Alloc, OverlapFragments); |
| 2244 | // Ranges that are open until end of bb. |
| 2245 | VarLocInMBB OutLocs; // Ranges that exist beyond bb. |
| 2246 | VarLocInMBB InLocs; // Ranges that are incoming after joining. |
| 2247 | TransferMap Transfers; // DBG_VALUEs associated with transfers (such as |
| 2248 | // spills, copies and restores). |
| 2249 | // Map responsible MI to attached Transfer emitted from Backup Entry Value. |
| 2250 | InstToEntryLocMap EntryValTransfers; |
| 2251 | // Map a Register to the last MI which clobbered it. |
| 2252 | RegDefToInstMap RegSetInstrs; |
| 2253 | |
| 2254 | VarToFragments SeenFragments; |
| 2255 | |
| 2256 | // Blocks which are artificial, i.e. blocks which exclusively contain |
| 2257 | // instructions without locations, or with line 0 locations. |
| 2258 | SmallPtrSet<const MachineBasicBlock *, 16> ArtificialBlocks; |
| 2259 | |
| 2260 | DenseMap<unsigned int, MachineBasicBlock *> OrderToBB; |
| 2261 | DenseMap<MachineBasicBlock *, unsigned int> BBToOrder; |
| 2262 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
| 2263 | std::greater<unsigned int>> |
| 2264 | Worklist; |
| 2265 | std::priority_queue<unsigned int, std::vector<unsigned int>, |
| 2266 | std::greater<unsigned int>> |
| 2267 | Pending; |
| 2268 | |
| 2269 | // Set of register defines that are seen when traversing the entry block |
| 2270 | // looking for debug entry value candidates. |
| 2271 | DefinedRegsSet DefinedRegs; |
| 2272 | |
| 2273 | // Only in the case of entry MBB collect DBG_VALUEs representing |
| 2274 | // function parameters in order to generate debug entry values for them. |
| 2275 | MachineBasicBlock &First_MBB = *(MF.begin()); |
| 2276 | for (auto &MI : First_MBB) { |
    collectRegDefs(MI, DefinedRegs, TRI);
| 2278 | if (MI.isDebugValue()) |
| 2279 | recordEntryValue(MI, DefinedRegs, OpenRanges, VarLocIDs); |
| 2280 | } |
| 2281 | |
| 2282 | // Initialize per-block structures and scan for fragment overlaps. |
| 2283 | for (auto &MBB : MF) |
| 2284 | for (auto &MI : MBB) |
| 2285 | if (MI.isDebugValue()) |
        accumulateFragmentMap(MI, SeenFragments, OverlapFragments);
| 2287 | |
| 2288 | auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool { |
| 2289 | if (const DebugLoc &DL = MI.getDebugLoc()) |
| 2290 | return DL.getLine() != 0; |
| 2291 | return false; |
| 2292 | }; |
| 2293 | for (auto &MBB : MF) |
| 2294 | if (none_of(Range: MBB.instrs(), P: hasNonArtificialLocation)) |
| 2295 | ArtificialBlocks.insert(Ptr: &MBB); |
| 2296 | |
| 2297 | LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs, |
| 2298 | "OutLocs after initialization" , dbgs())); |
| 2299 | |
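  // Number the blocks in reverse post-order and seed the worklist with every
  // block, so that each block is processed after most of its predecessors.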
| 2300 | ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); |
| 2301 | unsigned int RPONumber = 0; |
| 2302 | for (MachineBasicBlock *MBB : RPOT) { |
| 2303 | OrderToBB[RPONumber] = MBB; |
| 2304 | BBToOrder[MBB] = RPONumber; |
| 2305 | Worklist.push(x: RPONumber); |
| 2306 | ++RPONumber; |
| 2307 | } |
| 2308 | |
| 2309 | if (RPONumber > InputBBLimit) { |
| 2310 | unsigned NumInputDbgValues = 0; |
| 2311 | for (auto &MBB : MF) |
| 2312 | for (auto &MI : MBB) |
| 2313 | if (MI.isDebugValue()) |
| 2314 | ++NumInputDbgValues; |
| 2315 | if (NumInputDbgValues > InputDbgValLimit) { |
| 2316 | LLVM_DEBUG(dbgs() << "Disabling VarLocBasedLDV: " << MF.getName() |
| 2317 | << " has " << RPONumber << " basic blocks and " |
| 2318 | << NumInputDbgValues |
| 2319 | << " input DBG_VALUEs, exceeding limits.\n" ); |
| 2320 | return false; |
| 2321 | } |
| 2322 | } |
| 2323 | |
| 2324 | // This is a standard "union of predecessor outs" dataflow problem. |
| 2325 | // To solve it, we perform join() and process() using the two worklist method |
| 2326 | // until the ranges converge. |
| 2327 | // Ranges have converged when both worklists are empty. |
| 2328 | SmallPtrSet<const MachineBasicBlock *, 16> Visited; |
| 2329 | while (!Worklist.empty() || !Pending.empty()) { |
| 2330 | // We track what is on the pending worklist to avoid inserting the same |
| 2331 | // thing twice. We could avoid this with a custom priority queue, but this |
| 2332 | // is probably not worth it. |
| 2333 | SmallPtrSet<MachineBasicBlock *, 16> OnPending; |
| 2334 | LLVM_DEBUG(dbgs() << "Processing Worklist\n" ); |
| 2335 | while (!Worklist.empty()) { |
| 2336 | MachineBasicBlock *MBB = OrderToBB[Worklist.top()]; |
| 2337 | Worklist.pop(); |
      MBBJoined =
          join(*MBB, OutLocs, InLocs, VarLocIDs, Visited, ArtificialBlocks);
| 2340 | MBBJoined |= Visited.insert(Ptr: MBB).second; |
| 2341 | if (MBBJoined) { |
| 2342 | MBBJoined = false; |
| 2343 | Changed = true; |
| 2344 | // Now that we have started to extend ranges across BBs we need to |
| 2345 | // examine spill, copy and restore instructions to see whether they |
| 2346 | // operate with registers that correspond to user variables. |
| 2347 | // First load any pending inlocs. |
        OpenRanges.insertFromLocSet(getVarLocsInMBB(MBB, InLocs), VarLocIDs);
| 2349 | LastNonDbgMI = nullptr; |
| 2350 | RegSetInstrs.clear(); |
| 2351 | // Iterate through instructions within each packet to handle VLIW |
| 2352 | // bundles correctly; this keeps DBG_VALUE placement valid on |
| 2353 | // packet-based targets. |
| 2354 | for (auto I = MBB->instr_begin(), E = MBB->instr_end(); I != E;) { |
| 2355 | auto BStart = llvm::getBundleStart(I); |
| 2356 | auto BEnd = llvm::getBundleEnd(I); |
| 2357 | bool PacketHasTerminator = false; |
| 2358 | for (auto BI = BStart; BI != BEnd; ++BI) { |
| 2359 | if (BI->isTerminator()) { |
| 2360 | PacketHasTerminator = true; |
| 2361 | break; |
| 2362 | } |
| 2363 | } |
| 2364 | if (PacketHasTerminator) { |
| 2365 | // FIXME: This drops debug info for spills in terminator bundles; |
| 2366 | // DBG_VALUE instructions can't be inserted after the bundle. |
| 2367 | // It may be possible to insert the DBG_VALUE elsewhere. |
| 2368 | I = BEnd; |
| 2369 | continue; |
| 2370 | } |
| 2371 | auto FirstOp = (BStart->isBundle()) ? std::next(x: BStart) : BStart; |
| 2372 | for (auto BI = FirstOp; BI != BEnd; ++BI) { |
| 2373 | if (BI->isTerminator()) |
| 2374 | continue; |
| 2375 | process(MI&: *BI, OpenRanges, VarLocIDs, Transfers, EntryValTransfers, |
| 2376 | RegSetInstrs); |
| 2377 | } |
| 2378 | I = BEnd; |
| 2379 | } |
| 2380 | OLChanged |= transferTerminator(CurMBB: MBB, OpenRanges, OutLocs, VarLocIDs); |
| 2381 | |
| 2382 | LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs, |
| 2383 | "OutLocs after propagating" , dbgs())); |
| 2384 | LLVM_DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs, |
| 2385 | "InLocs after propagating" , dbgs())); |
| 2386 | |
| 2387 | if (OLChanged) { |
| 2388 | OLChanged = false; |
| 2389 | for (auto *s : MBB->successors()) |
| 2390 | if (OnPending.insert(Ptr: s).second) { |
| 2391 | Pending.push(x: BBToOrder[s]); |
| 2392 | } |
| 2393 | } |
| 2394 | } |
| 2395 | } |
| 2396 | Worklist.swap(pq&: Pending); |
    // At this point, Pending must be empty, since its contents were just
    // swapped into the drained Worklist.
| 2399 | assert(Pending.empty() && "Pending should be empty" ); |
| 2400 | } |
| 2401 | |
| 2402 | // Add any DBG_VALUE instructions created by location transfers. |
| 2403 | for (auto &TR : Transfers) { |
| 2404 | assert(!TR.TransferInst->isTerminator() && |
| 2405 | "Cannot insert DBG_VALUE after terminator" ); |
| 2406 | MachineBasicBlock *MBB = TR.TransferInst->getParent(); |
| 2407 | const VarLoc &VL = VarLocIDs[TR.LocationID]; |
| 2408 | MachineInstr *MI = VL.BuildDbgValue(MF); |
| 2409 | MBB->insertAfterBundle(I: TR.TransferInst->getIterator(), MI); |
| 2410 | } |
| 2411 | Transfers.clear(); |
| 2412 | |
| 2413 | // Add DBG_VALUEs created using Backup Entry Value location. |
| 2414 | for (auto &TR : EntryValTransfers) { |
| 2415 | MachineInstr *TRInst = const_cast<MachineInstr *>(TR.first); |
| 2416 | assert(!TRInst->isTerminator() && |
| 2417 | "Cannot insert DBG_VALUE after terminator" ); |
| 2418 | MachineBasicBlock *MBB = TRInst->getParent(); |
| 2419 | const VarLoc &VL = VarLocIDs[TR.second]; |
| 2420 | MachineInstr *MI = VL.BuildDbgValue(MF); |
| 2421 | MBB->insertAfterBundle(I: TRInst->getIterator(), MI); |
| 2422 | } |
| 2423 | EntryValTransfers.clear(); |
| 2424 | |
| 2425 | // Deferred inlocs will not have had any DBG_VALUE insts created; do |
| 2426 | // that now. |
| 2427 | flushPendingLocs(PendingInLocs&: InLocs, VarLocIDs); |
| 2428 | |
| 2429 | LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs, "Final OutLocs" , dbgs())); |
| 2430 | LLVM_DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs, "Final InLocs" , dbgs())); |
| 2431 | return Changed; |
| 2432 | } |
| 2433 | |
| 2434 | LDVImpl * |
| 2435 | llvm::makeVarLocBasedLiveDebugValues() |
| 2436 | { |
| 2437 | return new VarLocBasedLDV(); |
| 2438 | } |
| 2439 | |