1 | //===- DebugInfoMetadata.cpp - Implement debug info metadata --------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the debug info Metadata classes. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "llvm/IR/DebugInfoMetadata.h" |
14 | #include "LLVMContextImpl.h" |
15 | #include "MetadataImpl.h" |
16 | #include "llvm/ADT/SetVector.h" |
17 | #include "llvm/ADT/StringSwitch.h" |
18 | #include "llvm/BinaryFormat/Dwarf.h" |
19 | #include "llvm/IR/DebugProgramInstruction.h" |
20 | #include "llvm/IR/Function.h" |
21 | #include "llvm/IR/IntrinsicInst.h" |
22 | #include "llvm/IR/Type.h" |
23 | #include "llvm/IR/Value.h" |
24 | #include "llvm/Support/CommandLine.h" |
25 | #include "llvm/Support/Compiler.h" |
26 | |
27 | #include <numeric> |
28 | #include <optional> |
29 | |
30 | using namespace llvm; |
31 | |
32 | namespace llvm { |
// Use FS-AFDO discriminator.
cl::opt<bool> EnableFSDiscriminator(
    "enable-fs-discriminator", cl::Hidden,
    cl::desc("Enable adding flow sensitive discriminators"));

// When true, preserve the line and column number when merging locations by
// deterministically picking one of the input locations. This assists
// sample-based PGO.
LLVM_ABI cl::opt<bool> PickMergedSourceLocations(
    "pick-merged-source-locations", cl::init(false), cl::Hidden,
    cl::desc("Preserve line and column number when merging locations."));
43 | } // namespace llvm |
44 | |
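// For pointer-authentication qualifier types (DW_TAG_LLVM_ptrauth_type),
// SubclassData32 holds the ptrauth data rather than an alignment (see
// DIDerivedType::getPtrAuthData below), so report an alignment of 0 for them.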
45 | uint32_t DIType::getAlignInBits() const { |
46 | return (getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ? 0 : SubclassData32); |
47 | } |
48 | |
49 | const DIExpression::FragmentInfo DebugVariable::DefaultFragment = { |
50 | std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::min()}; |
51 | |
52 | DebugVariable::DebugVariable(const DbgVariableIntrinsic *DII) |
53 | : Variable(DII->getVariable()), |
54 | Fragment(DII->getExpression()->getFragmentInfo()), |
55 | InlinedAt(DII->getDebugLoc().getInlinedAt()) {} |
56 | |
57 | DebugVariable::DebugVariable(const DbgVariableRecord *DVR) |
58 | : Variable(DVR->getVariable()), |
59 | Fragment(DVR->getExpression()->getFragmentInfo()), |
60 | InlinedAt(DVR->getDebugLoc().getInlinedAt()) {} |
61 | |
62 | DebugVariableAggregate::DebugVariableAggregate(const DbgVariableIntrinsic *DVI) |
63 | : DebugVariable(DVI->getVariable(), std::nullopt, |
64 | DVI->getDebugLoc()->getInlinedAt()) {} |
65 | |
66 | DILocation::DILocation(LLVMContext &C, StorageType Storage, unsigned Line, |
67 | unsigned Column, uint64_t AtomGroup, uint8_t AtomRank, |
68 | ArrayRef<Metadata *> MDs, bool ImplicitCode) |
69 | : MDNode(C, DILocationKind, Storage, MDs) |
70 | #ifdef EXPERIMENTAL_KEY_INSTRUCTIONS |
71 | , |
72 | AtomGroup(AtomGroup), AtomRank(AtomRank) |
73 | #endif |
74 | { |
75 | #ifdef EXPERIMENTAL_KEY_INSTRUCTIONS |
76 | assert(AtomRank <= 7 && "AtomRank number should fit in 3 bits" ); |
77 | #endif |
78 | if (AtomGroup) |
79 | C.updateDILocationAtomGroupWaterline(G: AtomGroup + 1); |
80 | |
81 | assert((MDs.size() == 1 || MDs.size() == 2) && |
82 | "Expected a scope and optional inlined-at" ); |
83 | // Set line and column. |
84 | assert(Column < (1u << 16) && "Expected 16-bit column" ); |
85 | |
86 | SubclassData32 = Line; |
87 | SubclassData16 = Column; |
88 | |
89 | setImplicitCode(ImplicitCode); |
90 | } |
91 | |
92 | static void adjustColumn(unsigned &Column) { |
93 | // Set to unknown on overflow. We only have 16 bits to play with here. |
94 | if (Column >= (1u << 16)) |
95 | Column = 0; |
96 | } |
97 | |
98 | DILocation *DILocation::getImpl(LLVMContext &Context, unsigned Line, |
99 | unsigned Column, Metadata *Scope, |
100 | Metadata *InlinedAt, bool ImplicitCode, |
101 | uint64_t AtomGroup, uint8_t AtomRank, |
102 | StorageType Storage, bool ShouldCreate) { |
103 | // Fixup column. |
104 | adjustColumn(Column); |
105 | |
106 | if (Storage == Uniqued) { |
107 | if (auto *N = getUniqued(Store&: Context.pImpl->DILocations, |
108 | Key: DILocationInfo::KeyTy(Line, Column, Scope, |
109 | InlinedAt, ImplicitCode, |
110 | AtomGroup, AtomRank))) |
111 | return N; |
112 | if (!ShouldCreate) |
113 | return nullptr; |
114 | } else { |
115 | assert(ShouldCreate && "Expected non-uniqued nodes to always be created" ); |
116 | } |
117 | |
118 | SmallVector<Metadata *, 2> Ops; |
119 | Ops.push_back(Elt: Scope); |
120 | if (InlinedAt) |
121 | Ops.push_back(Elt: InlinedAt); |
122 | return storeImpl(N: new (Ops.size(), Storage) |
123 | DILocation(Context, Storage, Line, Column, AtomGroup, |
124 | AtomRank, Ops, ImplicitCode), |
125 | Storage, Store&: Context.pImpl->DILocations); |
126 | } |
127 | |
128 | DILocation *DILocation::getMergedLocations(ArrayRef<DILocation *> Locs) { |
129 | if (Locs.empty()) |
130 | return nullptr; |
131 | if (Locs.size() == 1) |
132 | return Locs[0]; |
133 | auto *Merged = Locs[0]; |
134 | for (DILocation *L : llvm::drop_begin(RangeOrContainer&: Locs)) { |
135 | Merged = getMergedLocation(LocA: Merged, LocB: L); |
136 | if (Merged == nullptr) |
137 | break; |
138 | } |
139 | return Merged; |
140 | } |
141 | |
142 | static DILexicalBlockBase *cloneAndReplaceParentScope(DILexicalBlockBase *LBB, |
143 | DIScope *NewParent) { |
144 | TempMDNode ClonedScope = LBB->clone(); |
145 | cast<DILexicalBlockBase>(Val&: *ClonedScope).replaceScope(Scope: NewParent); |
146 | return cast<DILexicalBlockBase>( |
147 | Val: MDNode::replaceWithUniqued(N: std::move(ClonedScope))); |
148 | } |
149 | |
150 | using LineColumn = std::pair<unsigned /* Line */, unsigned /* Column */>; |
151 | |
152 | /// Returns the location of DILocalScope, if present, or a default value. |
153 | static LineColumn getLocalScopeLocationOr(DIScope *S, LineColumn Default) { |
154 | assert(isa<DILocalScope>(S) && "Expected DILocalScope." ); |
155 | |
156 | if (isa<DILexicalBlockFile>(Val: S)) |
157 | return Default; |
158 | if (auto *LB = dyn_cast<DILexicalBlock>(Val: S)) |
159 | return {LB->getLine(), LB->getColumn()}; |
160 | if (auto *SP = dyn_cast<DISubprogram>(Val: S)) |
161 | return {SP->getLine(), 0u}; |
162 | |
163 | llvm_unreachable("Unhandled type of DILocalScope." ); |
164 | } |
165 | |
166 | // Returns the nearest matching scope inside a subprogram. |
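// MatcherT records every scope in L1's scope chain, up to and including the
// enclosing DISubprogram, via insert(); match() is then queried with each
// scope in L2's chain, and the first accepted scope is returned together with
// the line/column associated with that point in L2's chain. If nothing
// matches, a null scope and L2's own line/column are returned.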
167 | template <typename MatcherT> |
168 | static std::pair<DIScope *, LineColumn> |
169 | getNearestMatchingScope(const DILocation *L1, const DILocation *L2) { |
170 | MatcherT Matcher; |
171 | |
172 | DIScope *S1 = L1->getScope(); |
173 | DIScope *S2 = L2->getScope(); |
174 | |
175 | LineColumn Loc1(L1->getLine(), L1->getColumn()); |
176 | for (; S1; S1 = S1->getScope()) { |
177 | Loc1 = getLocalScopeLocationOr(S: S1, Default: Loc1); |
178 | Matcher.insert(S1, Loc1); |
179 | if (isa<DISubprogram>(Val: S1)) |
180 | break; |
181 | } |
182 | |
183 | LineColumn Loc2(L2->getLine(), L2->getColumn()); |
184 | for (; S2; S2 = S2->getScope()) { |
185 | Loc2 = getLocalScopeLocationOr(S: S2, Default: Loc2); |
186 | |
187 | if (DIScope *S = Matcher.match(S2, Loc2)) |
188 | return std::make_pair(x&: S, y&: Loc2); |
189 | |
190 | if (isa<DISubprogram>(Val: S2)) |
191 | break; |
192 | } |
193 | return std::make_pair(x: nullptr, y: LineColumn(L2->getLine(), L2->getColumn())); |
194 | } |
195 | |
196 | // Matches equal scopes. |
197 | struct EqualScopesMatcher { |
198 | SmallPtrSet<DIScope *, 8> Scopes; |
199 | |
200 | void insert(DIScope *S, LineColumn Loc) { Scopes.insert(Ptr: S); } |
201 | |
202 | DIScope *match(DIScope *S, LineColumn Loc) { |
203 | return Scopes.contains(Ptr: S) ? S : nullptr; |
204 | } |
205 | }; |
206 | |
207 | // Matches scopes with the same location. |
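// Two scopes match if they are attached to the same file and line/column,
// even when the scope nodes themselves are distinct (e.g. two different
// DILexicalBlocks describing the same source block).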
208 | struct ScopeLocationsMatcher { |
209 | SmallMapVector<std::pair<DIFile *, LineColumn>, SmallSetVector<DIScope *, 8>, |
210 | 8> |
211 | Scopes; |
212 | |
213 | void insert(DIScope *S, LineColumn Loc) { |
214 | Scopes[{S->getFile(), Loc}].insert(X: S); |
215 | } |
216 | |
217 | DIScope *match(DIScope *S, LineColumn Loc) { |
218 | auto ScopesAtLoc = Scopes.find(Key: {S->getFile(), Loc}); |
219 | // No scope found with the given location. |
220 | if (ScopesAtLoc == Scopes.end()) |
221 | return nullptr; |
222 | |
223 | // Prefer S over other scopes with the same location. |
224 | if (ScopesAtLoc->second.contains(key: S)) |
225 | return S; |
226 | |
227 | if (!ScopesAtLoc->second.empty()) |
228 | return *ScopesAtLoc->second.begin(); |
229 | |
230 | llvm_unreachable("Scopes must not have empty entries." ); |
231 | } |
232 | }; |
233 | |
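// Merge two debug locations, e.g. for an instruction formed by combining two
// instructions. Matching frames of the two inlined-at chains are merged
// pairwise, from the outermost common frame inwards. Within each pair the
// line is kept only if both lines agree and the column only if both line and
// column agree (otherwise they become 0), using the nearest common scope. If
// no part of the chains can be reconciled, a 0:0 location in LocA's scope
// with no inlined-at location is returned.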
234 | DILocation *DILocation::getMergedLocation(DILocation *LocA, DILocation *LocB) { |
235 | if (LocA == LocB) |
236 | return LocA; |
237 | |
  // For some use cases (SamplePGO), it is important to retain distinct source
  // locations. When this flag is set, we deterministically pick one of A and B
  // rather than computing a merged location using line 0, which is typically
  // not useful for PGO. If one of the locations is null, return whichever one
  // is valid.
243 | if (PickMergedSourceLocations) { |
244 | if (!LocA || !LocB) |
245 | return LocA ? LocA : LocB; |
246 | |
247 | auto A = std::make_tuple(args: LocA->getLine(), args: LocA->getColumn(), |
248 | args: LocA->getDiscriminator(), args: LocA->getFilename(), |
249 | args: LocA->getDirectory()); |
250 | auto B = std::make_tuple(args: LocB->getLine(), args: LocB->getColumn(), |
251 | args: LocB->getDiscriminator(), args: LocB->getFilename(), |
252 | args: LocB->getDirectory()); |
253 | return A < B ? LocA : LocB; |
254 | } |
255 | |
256 | if (!LocA || !LocB) |
257 | return nullptr; |
258 | |
259 | LLVMContext &C = LocA->getContext(); |
260 | |
261 | using LocVec = SmallVector<const DILocation *>; |
262 | LocVec ALocs; |
263 | LocVec BLocs; |
264 | SmallDenseMap<std::pair<const DISubprogram *, const DILocation *>, unsigned, |
265 | 4> |
266 | ALookup; |
267 | |
268 | // Walk through LocA and its inlined-at locations, populate them in ALocs and |
269 | // save the index for the subprogram and inlined-at pair, which we use to find |
270 | // a matching starting location in LocB's chain. |
271 | for (auto [L, I] = std::make_pair(x&: LocA, y: 0U); L; L = L->getInlinedAt(), I++) { |
272 | ALocs.push_back(Elt: L); |
273 | auto Res = ALookup.try_emplace( |
274 | Key: {L->getScope()->getSubprogram(), L->getInlinedAt()}, Args&: I); |
275 | assert(Res.second && "Multiple <SP, InlinedAt> pairs in a location chain?" ); |
276 | (void)Res; |
277 | } |
278 | |
279 | LocVec::reverse_iterator ARIt = ALocs.rend(); |
280 | LocVec::reverse_iterator BRIt = BLocs.rend(); |
281 | |
282 | // Populate BLocs and look for a matching starting location, the first |
283 | // location with the same subprogram and inlined-at location as in LocA's |
284 | // chain. Since the two locations have the same inlined-at location we do |
285 | // not need to look at those parts of the chains. |
286 | for (auto [L, I] = std::make_pair(x&: LocB, y: 0U); L; L = L->getInlinedAt(), I++) { |
287 | BLocs.push_back(Elt: L); |
288 | |
289 | if (ARIt != ALocs.rend()) |
290 | // We have already found a matching starting location. |
291 | continue; |
292 | |
293 | auto IT = ALookup.find(Val: {L->getScope()->getSubprogram(), L->getInlinedAt()}); |
294 | if (IT == ALookup.end()) |
295 | continue; |
296 | |
297 | // The + 1 is to account for the &*rev_it = &(it - 1) relationship. |
298 | ARIt = LocVec::reverse_iterator(ALocs.begin() + IT->second + 1); |
299 | BRIt = LocVec::reverse_iterator(BLocs.begin() + I + 1); |
300 | |
301 | // If we have found a matching starting location we do not need to add more |
302 | // locations to BLocs, since we will only look at location pairs preceding |
303 | // the matching starting location, and adding more elements to BLocs could |
304 | // invalidate the iterator that we initialized here. |
305 | break; |
306 | } |
307 | |
308 | // Merge the two locations if possible, using the supplied |
309 | // inlined-at location for the created location. |
310 | auto *LocAIA = LocA->getInlinedAt(); |
311 | auto *LocBIA = LocB->getInlinedAt(); |
312 | auto MergeLocPair = [&C, LocAIA, |
313 | LocBIA](const DILocation *L1, const DILocation *L2, |
314 | DILocation *InlinedAt) -> DILocation * { |
315 | if (L1 == L2) |
316 | return DILocation::get(Context&: C, Line: L1->getLine(), Column: L1->getColumn(), Scope: L1->getScope(), |
317 | InlinedAt, ImplicitCode: L1->isImplicitCode(), |
318 | AtomGroup: L1->getAtomGroup(), AtomRank: L1->getAtomRank()); |
319 | |
320 | // If the locations originate from different subprograms we can't produce |
321 | // a common location. |
322 | if (L1->getScope()->getSubprogram() != L2->getScope()->getSubprogram()) |
323 | return nullptr; |
324 | |
325 | // Find nearest common scope inside subprogram. |
326 | DIScope *Scope = getNearestMatchingScope<EqualScopesMatcher>(L1, L2).first; |
327 | assert(Scope && "No common scope in the same subprogram?" ); |
328 | |
329 | // Try using the nearest scope with common location if files are different. |
330 | if (Scope->getFile() != L1->getFile() || L1->getFile() != L2->getFile()) { |
331 | auto [CommonLocScope, CommonLoc] = |
332 | getNearestMatchingScope<ScopeLocationsMatcher>(L1, L2); |
333 | |
334 | // If CommonLocScope is a DILexicalBlockBase, clone it and locate |
335 | // a new scope inside the nearest common scope to preserve |
336 | // lexical blocks structure. |
337 | if (auto *LBB = dyn_cast<DILexicalBlockBase>(Val: CommonLocScope); |
338 | LBB && LBB != Scope) |
339 | CommonLocScope = cloneAndReplaceParentScope(LBB, NewParent: Scope); |
340 | |
341 | Scope = CommonLocScope; |
342 | |
343 | // If files are still different, assume that L1 and L2 were "included" |
344 | // from CommonLoc. Use it as merged location. |
345 | if (Scope->getFile() != L1->getFile() || L1->getFile() != L2->getFile()) |
346 | return DILocation::get(Context&: C, Line: CommonLoc.first, Column: CommonLoc.second, |
347 | Scope: CommonLocScope, InlinedAt); |
348 | } |
349 | |
350 | bool SameLine = L1->getLine() == L2->getLine(); |
351 | bool SameCol = L1->getColumn() == L2->getColumn(); |
352 | unsigned Line = SameLine ? L1->getLine() : 0; |
353 | unsigned Col = SameLine && SameCol ? L1->getColumn() : 0; |
354 | bool IsImplicitCode = L1->isImplicitCode() && L2->isImplicitCode(); |
355 | |
356 | // Discard source location atom if the line becomes 0. And there's nothing |
357 | // further to do if neither location has an atom number. |
358 | if (!SameLine || !(L1->getAtomGroup() || L2->getAtomGroup())) |
359 | return DILocation::get(Context&: C, Line, Column: Col, Scope, InlinedAt, ImplicitCode: IsImplicitCode, |
360 | /*AtomGroup*/ 0, /*AtomRank*/ 0); |
361 | |
362 | uint64_t Group = 0; |
363 | uint64_t Rank = 0; |
364 | // If we're preserving the same matching inlined-at field we can |
365 | // preserve the atom. |
366 | if (LocBIA == LocAIA && InlinedAt == LocBIA) { |
367 | // Deterministically keep the lowest non-zero ranking atom group |
368 | // number. |
369 | // FIXME: It would be nice if we could track that an instruction |
370 | // belongs to two source atoms. |
371 | bool UseL1Atom = [L1, L2]() { |
372 | if (L1->getAtomRank() == L2->getAtomRank()) { |
373 | // Arbitrarily choose the lowest non-zero group number. |
374 | if (!L1->getAtomGroup() || !L2->getAtomGroup()) |
375 | return !L2->getAtomGroup(); |
376 | return L1->getAtomGroup() < L2->getAtomGroup(); |
377 | } |
378 | // Choose the lowest non-zero rank. |
379 | if (!L1->getAtomRank() || !L2->getAtomRank()) |
380 | return !L2->getAtomRank(); |
381 | return L1->getAtomRank() < L2->getAtomRank(); |
382 | }(); |
383 | Group = UseL1Atom ? L1->getAtomGroup() : L2->getAtomGroup(); |
384 | Rank = UseL1Atom ? L1->getAtomRank() : L2->getAtomRank(); |
385 | } else { |
386 | // If either instruction is part of a source atom, reassign it a new |
387 | // atom group. This essentially regresses to non-key-instructions |
388 | // behaviour (now that it's the only instruction in its group it'll |
389 | // probably get is_stmt applied). |
390 | Group = C.incNextDILocationAtomGroup(); |
391 | Rank = 1; |
392 | } |
393 | return DILocation::get(Context&: C, Line, Column: Col, Scope, InlinedAt, ImplicitCode: IsImplicitCode, |
394 | AtomGroup: Group, AtomRank: Rank); |
395 | }; |
396 | |
397 | DILocation *Result = ARIt != ALocs.rend() ? (*ARIt)->getInlinedAt() : nullptr; |
398 | |
399 | // If we have found a common starting location, walk up the inlined-at chains |
400 | // and try to produce common locations. |
401 | for (; ARIt != ALocs.rend() && BRIt != BLocs.rend(); ++ARIt, ++BRIt) { |
402 | DILocation *Tmp = MergeLocPair(*ARIt, *BRIt, Result); |
403 | |
404 | if (!Tmp) |
405 | // We have walked up to a point in the chains where the two locations |
      // are irreconcilable. At this point Result contains the nearest common
407 | // location in the inlined-at chains of LocA and LocB, so we break here. |
408 | break; |
409 | |
410 | Result = Tmp; |
411 | } |
412 | |
413 | if (Result) |
414 | return Result; |
415 | |
  // We ended up with LocA and LocB as irreconcilable locations. Produce a
417 | // location at 0:0 with one of the locations' scope. The function has |
418 | // historically picked A's scope, and a nullptr inlined-at location, so that |
  // behavior is mimicked here, although it is not clear that this is always
  // the correct way to handle this.
421 | // Key Instructions: it's fine to drop atom group and rank here, as line 0 |
422 | // is a nonsensical is_stmt location. |
423 | return DILocation::get(Context&: C, Line: 0, Column: 0, Scope: LocA->getScope(), InlinedAt: nullptr, ImplicitCode: false, |
424 | /*AtomGroup*/ 0, /*AtomRank*/ 0); |
425 | } |
426 | |
427 | std::optional<unsigned> |
428 | DILocation::encodeDiscriminator(unsigned BD, unsigned DF, unsigned CI) { |
429 | std::array<unsigned, 3> Components = {BD, DF, CI}; |
430 | uint64_t RemainingWork = 0U; |
431 | // We use RemainingWork to figure out if we have no remaining components to |
432 | // encode. For example: if BD != 0 but DF == 0 && CI == 0, we don't need to |
433 | // encode anything for the latter 2. |
434 | // Since any of the input components is at most 32 bits, their sum will be |
435 | // less than 34 bits, and thus RemainingWork won't overflow. |
436 | RemainingWork = |
437 | std::accumulate(first: Components.begin(), last: Components.end(), init: RemainingWork); |
438 | |
439 | int I = 0; |
440 | unsigned Ret = 0; |
441 | unsigned NextBitInsertionIndex = 0; |
442 | while (RemainingWork > 0) { |
443 | unsigned C = Components[I++]; |
444 | RemainingWork -= C; |
445 | unsigned EC = encodeComponent(C); |
446 | Ret |= (EC << NextBitInsertionIndex); |
447 | NextBitInsertionIndex += encodingBits(C); |
448 | } |
449 | |
450 | // Encoding may be unsuccessful because of overflow. We determine success by |
451 | // checking equivalence of components before & after encoding. Alternatively, |
452 | // we could determine Success during encoding, but the current alternative is |
453 | // simpler. |
454 | unsigned TBD, TDF, TCI = 0; |
455 | decodeDiscriminator(D: Ret, BD&: TBD, DF&: TDF, CI&: TCI); |
456 | if (TBD == BD && TDF == DF && TCI == CI) |
457 | return Ret; |
458 | return std::nullopt; |
459 | } |
460 | |
461 | void DILocation::decodeDiscriminator(unsigned D, unsigned &BD, unsigned &DF, |
462 | unsigned &CI) { |
463 | BD = getUnsignedFromPrefixEncoding(U: D); |
464 | DF = getUnsignedFromPrefixEncoding(U: getNextComponentInDiscriminator(D)); |
465 | CI = getUnsignedFromPrefixEncoding( |
466 | U: getNextComponentInDiscriminator(D: getNextComponentInDiscriminator(D))); |
467 | } |
468 | dwarf::Tag DINode::getTag() const { return (dwarf::Tag)SubclassData16; } |
469 | |
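// Map a textual flag name to its DIFlags value; for example, "DIFlagPublic"
// maps to FlagPublic. Unrecognized names map to FlagZero.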
470 | DINode::DIFlags DINode::getFlag(StringRef Flag) { |
471 | return StringSwitch<DIFlags>(Flag) |
472 | #define HANDLE_DI_FLAG(ID, NAME) .Case("DIFlag" #NAME, Flag##NAME) |
473 | #include "llvm/IR/DebugInfoFlags.def" |
474 | .Default(Value: DINode::FlagZero); |
475 | } |
476 | |
477 | StringRef DINode::getFlagString(DIFlags Flag) { |
478 | switch (Flag) { |
479 | #define HANDLE_DI_FLAG(ID, NAME) \ |
480 | case Flag##NAME: \ |
481 | return "DIFlag" #NAME; |
482 | #include "llvm/IR/DebugInfoFlags.def" |
483 | } |
484 | return "" ; |
485 | } |
486 | |
487 | DINode::DIFlags DINode::splitFlags(DIFlags Flags, |
488 | SmallVectorImpl<DIFlags> &SplitFlags) { |
489 | // Flags that are packed together need to be specially handled, so |
490 | // that, for example, we emit "DIFlagPublic" and not |
491 | // "DIFlagPrivate | DIFlagProtected". |
492 | if (DIFlags A = Flags & FlagAccessibility) { |
493 | if (A == FlagPrivate) |
494 | SplitFlags.push_back(Elt: FlagPrivate); |
495 | else if (A == FlagProtected) |
496 | SplitFlags.push_back(Elt: FlagProtected); |
497 | else |
498 | SplitFlags.push_back(Elt: FlagPublic); |
499 | Flags &= ~A; |
500 | } |
501 | if (DIFlags R = Flags & FlagPtrToMemberRep) { |
502 | if (R == FlagSingleInheritance) |
503 | SplitFlags.push_back(Elt: FlagSingleInheritance); |
504 | else if (R == FlagMultipleInheritance) |
505 | SplitFlags.push_back(Elt: FlagMultipleInheritance); |
506 | else |
507 | SplitFlags.push_back(Elt: FlagVirtualInheritance); |
508 | Flags &= ~R; |
509 | } |
510 | if ((Flags & FlagIndirectVirtualBase) == FlagIndirectVirtualBase) { |
511 | Flags &= ~FlagIndirectVirtualBase; |
512 | SplitFlags.push_back(Elt: FlagIndirectVirtualBase); |
513 | } |
514 | |
515 | #define HANDLE_DI_FLAG(ID, NAME) \ |
516 | if (DIFlags Bit = Flags & Flag##NAME) { \ |
517 | SplitFlags.push_back(Bit); \ |
518 | Flags &= ~Bit; \ |
519 | } |
520 | #include "llvm/IR/DebugInfoFlags.def" |
521 | return Flags; |
522 | } |
523 | |
524 | DIScope *DIScope::getScope() const { |
525 | if (auto *T = dyn_cast<DIType>(Val: this)) |
526 | return T->getScope(); |
527 | |
528 | if (auto *SP = dyn_cast<DISubprogram>(Val: this)) |
529 | return SP->getScope(); |
530 | |
531 | if (auto *LB = dyn_cast<DILexicalBlockBase>(Val: this)) |
532 | return LB->getScope(); |
533 | |
534 | if (auto *NS = dyn_cast<DINamespace>(Val: this)) |
535 | return NS->getScope(); |
536 | |
537 | if (auto *CB = dyn_cast<DICommonBlock>(Val: this)) |
538 | return CB->getScope(); |
539 | |
540 | if (auto *M = dyn_cast<DIModule>(Val: this)) |
541 | return M->getScope(); |
542 | |
543 | assert((isa<DIFile>(this) || isa<DICompileUnit>(this)) && |
544 | "Unhandled type of scope." ); |
545 | return nullptr; |
546 | } |
547 | |
548 | StringRef DIScope::getName() const { |
549 | if (auto *T = dyn_cast<DIType>(Val: this)) |
550 | return T->getName(); |
551 | if (auto *SP = dyn_cast<DISubprogram>(Val: this)) |
552 | return SP->getName(); |
553 | if (auto *NS = dyn_cast<DINamespace>(Val: this)) |
554 | return NS->getName(); |
555 | if (auto *CB = dyn_cast<DICommonBlock>(Val: this)) |
556 | return CB->getName(); |
557 | if (auto *M = dyn_cast<DIModule>(Val: this)) |
558 | return M->getName(); |
559 | assert((isa<DILexicalBlockBase>(this) || isa<DIFile>(this) || |
560 | isa<DICompileUnit>(this)) && |
561 | "Unhandled type of scope." ); |
562 | return "" ; |
563 | } |
564 | |
565 | #ifndef NDEBUG |
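// An MDString operand is canonical if it is either absent (nullptr) or
// non-empty; an empty string must be represented by a null operand.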
566 | static bool isCanonical(const MDString *S) { |
567 | return !S || !S->getString().empty(); |
568 | } |
569 | #endif |
570 | |
571 | dwarf::Tag GenericDINode::getTag() const { return (dwarf::Tag)SubclassData16; } |
572 | GenericDINode *GenericDINode::getImpl(LLVMContext &Context, unsigned Tag, |
                                      MDString *Header,
574 | ArrayRef<Metadata *> DwarfOps, |
575 | StorageType Storage, bool ShouldCreate) { |
576 | unsigned Hash = 0; |
577 | if (Storage == Uniqued) { |
578 | GenericDINodeInfo::KeyTy Key(Tag, Header, DwarfOps); |
579 | if (auto *N = getUniqued(Store&: Context.pImpl->GenericDINodes, Key)) |
580 | return N; |
581 | if (!ShouldCreate) |
582 | return nullptr; |
583 | Hash = Key.getHash(); |
584 | } else { |
585 | assert(ShouldCreate && "Expected non-uniqued nodes to always be created" ); |
586 | } |
587 | |
588 | // Use a nullptr for empty headers. |
589 | assert(isCanonical(Header) && "Expected canonical MDString" ); |
590 | Metadata *PreOps[] = {Header}; |
591 | return storeImpl(N: new (DwarfOps.size() + 1, Storage) GenericDINode( |
592 | Context, Storage, Hash, Tag, PreOps, DwarfOps), |
593 | Storage, Store&: Context.pImpl->GenericDINodes); |
594 | } |
595 | |
596 | void GenericDINode::recalculateHash() { |
597 | setHash(GenericDINodeInfo::KeyTy::calculateHash(N: this)); |
598 | } |
599 | |
600 | #define UNWRAP_ARGS_IMPL(...) __VA_ARGS__ |
601 | #define UNWRAP_ARGS(ARGS) UNWRAP_ARGS_IMPL ARGS |
602 | #define DEFINE_GETIMPL_LOOKUP(CLASS, ARGS) \ |
603 | do { \ |
604 | if (Storage == Uniqued) { \ |
605 | if (auto *N = getUniqued(Context.pImpl->CLASS##s, \ |
606 | CLASS##Info::KeyTy(UNWRAP_ARGS(ARGS)))) \ |
607 | return N; \ |
608 | if (!ShouldCreate) \ |
609 | return nullptr; \ |
610 | } else { \ |
611 | assert(ShouldCreate && \ |
612 | "Expected non-uniqued nodes to always be created"); \ |
613 | } \ |
614 | } while (false) |
615 | #define DEFINE_GETIMPL_STORE(CLASS, ARGS, OPS) \ |
616 | return storeImpl(new (std::size(OPS), Storage) \ |
617 | CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \ |
618 | Storage, Context.pImpl->CLASS##s) |
619 | #define DEFINE_GETIMPL_STORE_NO_OPS(CLASS, ARGS) \ |
620 | return storeImpl(new (0u, Storage) \ |
621 | CLASS(Context, Storage, UNWRAP_ARGS(ARGS)), \ |
622 | Storage, Context.pImpl->CLASS##s) |
623 | #define DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(CLASS, OPS) \ |
624 | return storeImpl(new (std::size(OPS), Storage) CLASS(Context, Storage, OPS), \ |
625 | Storage, Context.pImpl->CLASS##s) |
626 | #define DEFINE_GETIMPL_STORE_N(CLASS, ARGS, OPS, NUM_OPS) \ |
627 | return storeImpl(new (NUM_OPS, Storage) \ |
628 | CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \ |
629 | Storage, Context.pImpl->CLASS##s) |
630 | |
631 | DISubrange::DISubrange(LLVMContext &C, StorageType Storage, |
632 | ArrayRef<Metadata *> Ops) |
633 | : DINode(C, DISubrangeKind, Storage, dwarf::DW_TAG_subrange_type, Ops) {} |
634 | DISubrange *DISubrange::getImpl(LLVMContext &Context, int64_t Count, int64_t Lo, |
635 | StorageType Storage, bool ShouldCreate) { |
636 | auto *CountNode = ConstantAsMetadata::get( |
637 | C: ConstantInt::getSigned(Ty: Type::getInt64Ty(C&: Context), V: Count)); |
638 | auto *LB = ConstantAsMetadata::get( |
639 | C: ConstantInt::getSigned(Ty: Type::getInt64Ty(C&: Context), V: Lo)); |
640 | return getImpl(Context, CountNode, LowerBound: LB, UpperBound: nullptr, Stride: nullptr, Storage, |
641 | ShouldCreate); |
642 | } |
643 | |
644 | DISubrange *DISubrange::getImpl(LLVMContext &Context, Metadata *CountNode, |
645 | int64_t Lo, StorageType Storage, |
646 | bool ShouldCreate) { |
647 | auto *LB = ConstantAsMetadata::get( |
648 | C: ConstantInt::getSigned(Ty: Type::getInt64Ty(C&: Context), V: Lo)); |
649 | return getImpl(Context, CountNode, LowerBound: LB, UpperBound: nullptr, Stride: nullptr, Storage, |
650 | ShouldCreate); |
651 | } |
652 | |
653 | DISubrange *DISubrange::getImpl(LLVMContext &Context, Metadata *CountNode, |
654 | Metadata *LB, Metadata *UB, Metadata *Stride, |
655 | StorageType Storage, bool ShouldCreate) { |
656 | DEFINE_GETIMPL_LOOKUP(DISubrange, (CountNode, LB, UB, Stride)); |
657 | Metadata *Ops[] = {CountNode, LB, UB, Stride}; |
658 | DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DISubrange, Ops); |
659 | } |
660 | |
661 | DISubrange::BoundType DISubrange::getCount() const { |
662 | Metadata *CB = getRawCountNode(); |
663 | if (!CB) |
664 | return BoundType(); |
665 | |
666 | assert((isa<ConstantAsMetadata>(CB) || isa<DIVariable>(CB) || |
667 | isa<DIExpression>(CB)) && |
668 | "Count must be signed constant or DIVariable or DIExpression" ); |
669 | |
670 | if (auto *MD = dyn_cast<ConstantAsMetadata>(Val: CB)) |
671 | return BoundType(cast<ConstantInt>(Val: MD->getValue())); |
672 | |
673 | if (auto *MD = dyn_cast<DIVariable>(Val: CB)) |
674 | return BoundType(MD); |
675 | |
676 | if (auto *MD = dyn_cast<DIExpression>(Val: CB)) |
677 | return BoundType(MD); |
678 | |
679 | return BoundType(); |
680 | } |
681 | |
682 | DISubrange::BoundType DISubrange::getLowerBound() const { |
683 | Metadata *LB = getRawLowerBound(); |
684 | if (!LB) |
685 | return BoundType(); |
686 | |
687 | assert((isa<ConstantAsMetadata>(LB) || isa<DIVariable>(LB) || |
688 | isa<DIExpression>(LB)) && |
689 | "LowerBound must be signed constant or DIVariable or DIExpression" ); |
690 | |
691 | if (auto *MD = dyn_cast<ConstantAsMetadata>(Val: LB)) |
692 | return BoundType(cast<ConstantInt>(Val: MD->getValue())); |
693 | |
694 | if (auto *MD = dyn_cast<DIVariable>(Val: LB)) |
695 | return BoundType(MD); |
696 | |
697 | if (auto *MD = dyn_cast<DIExpression>(Val: LB)) |
698 | return BoundType(MD); |
699 | |
700 | return BoundType(); |
701 | } |
702 | |
703 | DISubrange::BoundType DISubrange::getUpperBound() const { |
704 | Metadata *UB = getRawUpperBound(); |
705 | if (!UB) |
706 | return BoundType(); |
707 | |
708 | assert((isa<ConstantAsMetadata>(UB) || isa<DIVariable>(UB) || |
709 | isa<DIExpression>(UB)) && |
710 | "UpperBound must be signed constant or DIVariable or DIExpression" ); |
711 | |
712 | if (auto *MD = dyn_cast<ConstantAsMetadata>(Val: UB)) |
713 | return BoundType(cast<ConstantInt>(Val: MD->getValue())); |
714 | |
715 | if (auto *MD = dyn_cast<DIVariable>(Val: UB)) |
716 | return BoundType(MD); |
717 | |
718 | if (auto *MD = dyn_cast<DIExpression>(Val: UB)) |
719 | return BoundType(MD); |
720 | |
721 | return BoundType(); |
722 | } |
723 | |
724 | DISubrange::BoundType DISubrange::getStride() const { |
725 | Metadata *ST = getRawStride(); |
726 | if (!ST) |
727 | return BoundType(); |
728 | |
729 | assert((isa<ConstantAsMetadata>(ST) || isa<DIVariable>(ST) || |
730 | isa<DIExpression>(ST)) && |
731 | "Stride must be signed constant or DIVariable or DIExpression" ); |
732 | |
733 | if (auto *MD = dyn_cast<ConstantAsMetadata>(Val: ST)) |
734 | return BoundType(cast<ConstantInt>(Val: MD->getValue())); |
735 | |
736 | if (auto *MD = dyn_cast<DIVariable>(Val: ST)) |
737 | return BoundType(MD); |
738 | |
739 | if (auto *MD = dyn_cast<DIExpression>(Val: ST)) |
740 | return BoundType(MD); |
741 | |
742 | return BoundType(); |
743 | } |
744 | DIGenericSubrange::DIGenericSubrange(LLVMContext &C, StorageType Storage, |
745 | ArrayRef<Metadata *> Ops) |
746 | : DINode(C, DIGenericSubrangeKind, Storage, dwarf::DW_TAG_generic_subrange, |
747 | Ops) {} |
748 | |
749 | DIGenericSubrange *DIGenericSubrange::getImpl(LLVMContext &Context, |
750 | Metadata *CountNode, Metadata *LB, |
751 | Metadata *UB, Metadata *Stride, |
752 | StorageType Storage, |
753 | bool ShouldCreate) { |
754 | DEFINE_GETIMPL_LOOKUP(DIGenericSubrange, (CountNode, LB, UB, Stride)); |
755 | Metadata *Ops[] = {CountNode, LB, UB, Stride}; |
756 | DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DIGenericSubrange, Ops); |
757 | } |
758 | |
759 | DIGenericSubrange::BoundType DIGenericSubrange::getCount() const { |
760 | Metadata *CB = getRawCountNode(); |
761 | if (!CB) |
762 | return BoundType(); |
763 | |
764 | assert((isa<DIVariable>(CB) || isa<DIExpression>(CB)) && |
765 | "Count must be signed constant or DIVariable or DIExpression" ); |
766 | |
767 | if (auto *MD = dyn_cast<DIVariable>(Val: CB)) |
768 | return BoundType(MD); |
769 | |
770 | if (auto *MD = dyn_cast<DIExpression>(Val: CB)) |
771 | return BoundType(MD); |
772 | |
773 | return BoundType(); |
774 | } |
775 | |
776 | DIGenericSubrange::BoundType DIGenericSubrange::getLowerBound() const { |
777 | Metadata *LB = getRawLowerBound(); |
778 | if (!LB) |
779 | return BoundType(); |
780 | |
781 | assert((isa<DIVariable>(LB) || isa<DIExpression>(LB)) && |
782 | "LowerBound must be signed constant or DIVariable or DIExpression" ); |
783 | |
784 | if (auto *MD = dyn_cast<DIVariable>(Val: LB)) |
785 | return BoundType(MD); |
786 | |
787 | if (auto *MD = dyn_cast<DIExpression>(Val: LB)) |
788 | return BoundType(MD); |
789 | |
790 | return BoundType(); |
791 | } |
792 | |
793 | DIGenericSubrange::BoundType DIGenericSubrange::getUpperBound() const { |
794 | Metadata *UB = getRawUpperBound(); |
795 | if (!UB) |
796 | return BoundType(); |
797 | |
798 | assert((isa<DIVariable>(UB) || isa<DIExpression>(UB)) && |
799 | "UpperBound must be signed constant or DIVariable or DIExpression" ); |
800 | |
801 | if (auto *MD = dyn_cast<DIVariable>(Val: UB)) |
802 | return BoundType(MD); |
803 | |
804 | if (auto *MD = dyn_cast<DIExpression>(Val: UB)) |
805 | return BoundType(MD); |
806 | |
807 | return BoundType(); |
808 | } |
809 | |
810 | DIGenericSubrange::BoundType DIGenericSubrange::getStride() const { |
811 | Metadata *ST = getRawStride(); |
812 | if (!ST) |
813 | return BoundType(); |
814 | |
815 | assert((isa<DIVariable>(ST) || isa<DIExpression>(ST)) && |
816 | "Stride must be signed constant or DIVariable or DIExpression" ); |
817 | |
818 | if (auto *MD = dyn_cast<DIVariable>(Val: ST)) |
819 | return BoundType(MD); |
820 | |
821 | if (auto *MD = dyn_cast<DIExpression>(Val: ST)) |
822 | return BoundType(MD); |
823 | |
824 | return BoundType(); |
825 | } |
826 | |
827 | DISubrangeType::DISubrangeType(LLVMContext &C, StorageType Storage, |
828 | unsigned Line, uint32_t AlignInBits, |
829 | DIFlags Flags, ArrayRef<Metadata *> Ops) |
830 | : DIType(C, DISubrangeTypeKind, Storage, dwarf::DW_TAG_subrange_type, Line, |
831 | AlignInBits, 0, Flags, Ops) {} |
832 | |
833 | DISubrangeType *DISubrangeType::getImpl( |
834 | LLVMContext &Context, MDString *Name, Metadata *File, unsigned Line, |
835 | Metadata *Scope, Metadata *SizeInBits, uint32_t AlignInBits, DIFlags Flags, |
836 | Metadata *BaseType, Metadata *LowerBound, Metadata *UpperBound, |
837 | Metadata *Stride, Metadata *Bias, StorageType Storage, bool ShouldCreate) { |
838 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
839 | DEFINE_GETIMPL_LOOKUP(DISubrangeType, (Name, File, Line, Scope, SizeInBits, |
840 | AlignInBits, Flags, BaseType, |
841 | LowerBound, UpperBound, Stride, Bias)); |
842 | Metadata *Ops[] = {File, Scope, Name, SizeInBits, nullptr, |
843 | BaseType, LowerBound, UpperBound, Stride, Bias}; |
844 | DEFINE_GETIMPL_STORE(DISubrangeType, (Line, AlignInBits, Flags), Ops); |
845 | } |
846 | |
847 | DISubrangeType::BoundType |
848 | DISubrangeType::convertRawToBound(Metadata *IN) const { |
849 | if (!IN) |
850 | return BoundType(); |
851 | |
852 | assert(isa<ConstantAsMetadata>(IN) || isa<DIVariable>(IN) || |
853 | isa<DIExpression>(IN)); |
854 | |
855 | if (auto *MD = dyn_cast<ConstantAsMetadata>(Val: IN)) |
856 | return BoundType(cast<ConstantInt>(Val: MD->getValue())); |
857 | |
858 | if (auto *MD = dyn_cast<DIVariable>(Val: IN)) |
859 | return BoundType(MD); |
860 | |
861 | if (auto *MD = dyn_cast<DIExpression>(Val: IN)) |
862 | return BoundType(MD); |
863 | |
864 | return BoundType(); |
865 | } |
866 | |
867 | DIEnumerator::DIEnumerator(LLVMContext &C, StorageType Storage, |
868 | const APInt &Value, bool IsUnsigned, |
869 | ArrayRef<Metadata *> Ops) |
870 | : DINode(C, DIEnumeratorKind, Storage, dwarf::DW_TAG_enumerator, Ops), |
871 | Value(Value) { |
872 | SubclassData32 = IsUnsigned; |
873 | } |
874 | DIEnumerator *DIEnumerator::getImpl(LLVMContext &Context, const APInt &Value, |
875 | bool IsUnsigned, MDString *Name, |
876 | StorageType Storage, bool ShouldCreate) { |
877 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
878 | DEFINE_GETIMPL_LOOKUP(DIEnumerator, (Value, IsUnsigned, Name)); |
879 | Metadata *Ops[] = {Name}; |
880 | DEFINE_GETIMPL_STORE(DIEnumerator, (Value, IsUnsigned), Ops); |
881 | } |
882 | |
883 | DIBasicType *DIBasicType::getImpl(LLVMContext &Context, unsigned Tag, |
884 | MDString *Name, Metadata *SizeInBits, |
885 | uint32_t AlignInBits, unsigned Encoding, |
                                  uint32_t NumExtraInhabitants, DIFlags Flags,
887 | StorageType Storage, bool ShouldCreate) { |
888 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
889 | DEFINE_GETIMPL_LOOKUP(DIBasicType, (Tag, Name, SizeInBits, AlignInBits, |
890 | Encoding, NumExtraInhabitants, Flags)); |
891 | Metadata *Ops[] = {nullptr, nullptr, Name, SizeInBits, nullptr}; |
892 | DEFINE_GETIMPL_STORE(DIBasicType, |
893 | (Tag, AlignInBits, Encoding, NumExtraInhabitants, Flags), |
894 | Ops); |
895 | } |
896 | |
897 | std::optional<DIBasicType::Signedness> DIBasicType::getSignedness() const { |
898 | switch (getEncoding()) { |
899 | case dwarf::DW_ATE_signed: |
900 | case dwarf::DW_ATE_signed_char: |
901 | case dwarf::DW_ATE_signed_fixed: |
902 | return Signedness::Signed; |
903 | case dwarf::DW_ATE_unsigned: |
904 | case dwarf::DW_ATE_unsigned_char: |
905 | case dwarf::DW_ATE_unsigned_fixed: |
906 | return Signedness::Unsigned; |
907 | default: |
908 | return std::nullopt; |
909 | } |
910 | } |
911 | |
912 | DIFixedPointType * |
913 | DIFixedPointType::getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, |
914 | Metadata *SizeInBits, uint32_t AlignInBits, |
915 | unsigned Encoding, DIFlags Flags, unsigned Kind, |
916 | int Factor, APInt Numerator, APInt Denominator, |
917 | StorageType Storage, bool ShouldCreate) { |
918 | DEFINE_GETIMPL_LOOKUP(DIFixedPointType, |
919 | (Tag, Name, SizeInBits, AlignInBits, Encoding, Flags, |
920 | Kind, Factor, Numerator, Denominator)); |
921 | Metadata *Ops[] = {nullptr, nullptr, Name, SizeInBits, nullptr}; |
922 | DEFINE_GETIMPL_STORE( |
923 | DIFixedPointType, |
924 | (Tag, AlignInBits, Encoding, Flags, Kind, Factor, Numerator, Denominator), |
925 | Ops); |
926 | } |
927 | |
928 | bool DIFixedPointType::isSigned() const { |
929 | return getEncoding() == dwarf::DW_ATE_signed_fixed; |
930 | } |
931 | |
932 | std::optional<DIFixedPointType::FixedPointKind> |
933 | DIFixedPointType::getFixedPointKind(StringRef Str) { |
934 | return StringSwitch<std::optional<FixedPointKind>>(Str) |
935 | .Case(S: "Binary" , Value: FixedPointBinary) |
936 | .Case(S: "Decimal" , Value: FixedPointDecimal) |
937 | .Case(S: "Rational" , Value: FixedPointRational) |
938 | .Default(Value: std::nullopt); |
939 | } |
940 | |
941 | const char *DIFixedPointType::fixedPointKindString(FixedPointKind V) { |
942 | switch (V) { |
943 | case FixedPointBinary: |
944 | return "Binary" ; |
945 | case FixedPointDecimal: |
946 | return "Decimal" ; |
947 | case FixedPointRational: |
948 | return "Rational" ; |
949 | } |
950 | return nullptr; |
951 | } |
952 | |
953 | DIStringType *DIStringType::getImpl(LLVMContext &Context, unsigned Tag, |
954 | MDString *Name, Metadata *StringLength, |
955 | Metadata *StringLengthExp, |
956 | Metadata *StringLocationExp, |
957 | Metadata *SizeInBits, uint32_t AlignInBits, |
958 | unsigned Encoding, StorageType Storage, |
959 | bool ShouldCreate) { |
960 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
961 | DEFINE_GETIMPL_LOOKUP(DIStringType, |
962 | (Tag, Name, StringLength, StringLengthExp, |
963 | StringLocationExp, SizeInBits, AlignInBits, Encoding)); |
964 | Metadata *Ops[] = {nullptr, nullptr, Name, |
965 | SizeInBits, nullptr, StringLength, |
966 | StringLengthExp, StringLocationExp}; |
967 | DEFINE_GETIMPL_STORE(DIStringType, (Tag, AlignInBits, Encoding), Ops); |
968 | } |
969 | DIType *DIDerivedType::getClassType() const { |
970 | assert(getTag() == dwarf::DW_TAG_ptr_to_member_type); |
971 | return cast_or_null<DIType>(Val: getExtraData()); |
972 | } |
973 | uint32_t DIDerivedType::getVBPtrOffset() const { |
974 | assert(getTag() == dwarf::DW_TAG_inheritance); |
975 | if (auto *CM = cast_or_null<ConstantAsMetadata>(Val: getExtraData())) |
976 | if (auto *CI = dyn_cast_or_null<ConstantInt>(Val: CM->getValue())) |
977 | return static_cast<uint32_t>(CI->getZExtValue()); |
978 | return 0; |
979 | } |
980 | Constant *DIDerivedType::getStorageOffsetInBits() const { |
981 | assert(getTag() == dwarf::DW_TAG_member && isBitField()); |
982 | if (auto *C = cast_or_null<ConstantAsMetadata>(Val: getExtraData())) |
983 | return C->getValue(); |
984 | return nullptr; |
985 | } |
986 | |
987 | Constant *DIDerivedType::getConstant() const { |
988 | assert((getTag() == dwarf::DW_TAG_member || |
989 | getTag() == dwarf::DW_TAG_variable) && |
990 | isStaticMember()); |
991 | if (auto *C = cast_or_null<ConstantAsMetadata>(Val: getExtraData())) |
992 | return C->getValue(); |
993 | return nullptr; |
994 | } |
995 | Constant *DIDerivedType::getDiscriminantValue() const { |
996 | assert(getTag() == dwarf::DW_TAG_member && !isStaticMember()); |
997 | if (auto *C = cast_or_null<ConstantAsMetadata>(Val: getExtraData())) |
998 | return C->getValue(); |
999 | return nullptr; |
1000 | } |
1001 | |
1002 | DIDerivedType *DIDerivedType::getImpl( |
1003 | LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File, |
1004 | unsigned Line, Metadata *Scope, Metadata *BaseType, Metadata *SizeInBits, |
1005 | uint32_t AlignInBits, Metadata *OffsetInBits, |
1006 | std::optional<unsigned> DWARFAddressSpace, |
    std::optional<PtrAuthData> PtrAuthData, DIFlags Flags, Metadata *ExtraData,
1008 | Metadata *Annotations, StorageType Storage, bool ShouldCreate) { |
1009 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1010 | DEFINE_GETIMPL_LOOKUP(DIDerivedType, |
1011 | (Tag, Name, File, Line, Scope, BaseType, SizeInBits, |
1012 | AlignInBits, OffsetInBits, DWARFAddressSpace, |
1013 | PtrAuthData, Flags, ExtraData, Annotations)); |
1014 | Metadata *Ops[] = {File, Scope, Name, SizeInBits, |
1015 | OffsetInBits, BaseType, ExtraData, Annotations}; |
1016 | DEFINE_GETIMPL_STORE( |
1017 | DIDerivedType, |
1018 | (Tag, Line, AlignInBits, DWARFAddressSpace, PtrAuthData, Flags), Ops); |
1019 | } |
1020 | |
1021 | std::optional<DIDerivedType::PtrAuthData> |
1022 | DIDerivedType::getPtrAuthData() const { |
1023 | return getTag() == dwarf::DW_TAG_LLVM_ptrauth_type |
1024 | ? std::optional<PtrAuthData>(PtrAuthData(SubclassData32)) |
1025 | : std::nullopt; |
1026 | } |
1027 | |
1028 | DICompositeType *DICompositeType::getImpl( |
1029 | LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File, |
1030 | unsigned Line, Metadata *Scope, Metadata *BaseType, Metadata *SizeInBits, |
1031 | uint32_t AlignInBits, Metadata *OffsetInBits, DIFlags Flags, |
1032 | Metadata *Elements, unsigned RuntimeLang, std::optional<uint32_t> EnumKind, |
1033 | Metadata *VTableHolder, Metadata *TemplateParams, MDString *Identifier, |
1034 | Metadata *Discriminator, Metadata *DataLocation, Metadata *Associated, |
1035 | Metadata *Allocated, Metadata *Rank, Metadata *Annotations, |
    Metadata *Specification, uint32_t NumExtraInhabitants, Metadata *BitStride,
1037 | StorageType Storage, bool ShouldCreate) { |
1038 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1039 | |
1040 | // Keep this in sync with buildODRType. |
1041 | DEFINE_GETIMPL_LOOKUP( |
1042 | DICompositeType, |
1043 | (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits, |
1044 | OffsetInBits, Flags, Elements, RuntimeLang, VTableHolder, TemplateParams, |
1045 | Identifier, Discriminator, DataLocation, Associated, Allocated, Rank, |
1046 | Annotations, Specification, NumExtraInhabitants, BitStride)); |
1047 | Metadata *Ops[] = {File, Scope, Name, SizeInBits, |
1048 | OffsetInBits, BaseType, Elements, VTableHolder, |
1049 | TemplateParams, Identifier, Discriminator, DataLocation, |
1050 | Associated, Allocated, Rank, Annotations, |
1051 | Specification, BitStride}; |
1052 | DEFINE_GETIMPL_STORE(DICompositeType, |
1053 | (Tag, Line, RuntimeLang, AlignInBits, |
1054 | NumExtraInhabitants, EnumKind, Flags), |
1055 | Ops); |
1056 | } |
1057 | |
1058 | DICompositeType *DICompositeType::buildODRType( |
1059 | LLVMContext &Context, MDString &Identifier, unsigned Tag, MDString *Name, |
1060 | Metadata *File, unsigned Line, Metadata *Scope, Metadata *BaseType, |
1061 | Metadata *SizeInBits, uint32_t AlignInBits, Metadata *OffsetInBits, |
    Metadata *Specification, uint32_t NumExtraInhabitants, DIFlags Flags,
1063 | Metadata *Elements, unsigned RuntimeLang, std::optional<uint32_t> EnumKind, |
1064 | Metadata *VTableHolder, Metadata *TemplateParams, Metadata *Discriminator, |
1065 | Metadata *DataLocation, Metadata *Associated, Metadata *Allocated, |
1066 | Metadata *Rank, Metadata *Annotations, Metadata *BitStride) { |
1067 | assert(!Identifier.getString().empty() && "Expected valid identifier" ); |
1068 | if (!Context.isODRUniquingDebugTypes()) |
1069 | return nullptr; |
1070 | auto *&CT = (*Context.pImpl->DITypeMap)[&Identifier]; |
1071 | if (!CT) |
1072 | return CT = DICompositeType::getDistinct( |
1073 | Context, Tag, Name, File, Line, Scope, BaseType, SizeInBits, |
1074 | AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang, |
1075 | EnumKind, VTableHolder, TemplateParams, Identifier: &Identifier, |
1076 | Discriminator, DataLocation, Associated, Allocated, Rank, |
1077 | Annotations, Specification, NumExtraInhabitants, BitStride); |
1078 | if (CT->getTag() != Tag) |
1079 | return nullptr; |
1080 | |
1081 | // Only mutate CT if it's a forward declaration and the new operands aren't. |
1082 | assert(CT->getRawIdentifier() == &Identifier && "Wrong ODR identifier?" ); |
1083 | if (!CT->isForwardDecl() || (Flags & DINode::FlagFwdDecl)) |
1084 | return CT; |
1085 | |
1086 | // Mutate CT in place. Keep this in sync with getImpl. |
1087 | CT->mutate(Tag, Line, RuntimeLang, AlignInBits, NumExtraInhabitants, EnumKind, |
1088 | Flags); |
1089 | Metadata *Ops[] = {File, Scope, Name, SizeInBits, |
1090 | OffsetInBits, BaseType, Elements, VTableHolder, |
1091 | TemplateParams, &Identifier, Discriminator, DataLocation, |
1092 | Associated, Allocated, Rank, Annotations, |
1093 | Specification, BitStride}; |
1094 | assert((std::end(Ops) - std::begin(Ops)) == (int)CT->getNumOperands() && |
1095 | "Mismatched number of operands" ); |
1096 | for (unsigned I = 0, E = CT->getNumOperands(); I != E; ++I) |
1097 | if (Ops[I] != CT->getOperand(I)) |
1098 | CT->setOperand(I, New: Ops[I]); |
1099 | return CT; |
1100 | } |
1101 | |
1102 | DICompositeType *DICompositeType::getODRType( |
1103 | LLVMContext &Context, MDString &Identifier, unsigned Tag, MDString *Name, |
1104 | Metadata *File, unsigned Line, Metadata *Scope, Metadata *BaseType, |
1105 | Metadata *SizeInBits, uint32_t AlignInBits, Metadata *OffsetInBits, |
    Metadata *Specification, uint32_t NumExtraInhabitants, DIFlags Flags,
1107 | Metadata *Elements, unsigned RuntimeLang, std::optional<uint32_t> EnumKind, |
1108 | Metadata *VTableHolder, Metadata *TemplateParams, Metadata *Discriminator, |
1109 | Metadata *DataLocation, Metadata *Associated, Metadata *Allocated, |
1110 | Metadata *Rank, Metadata *Annotations, Metadata *BitStride) { |
1111 | assert(!Identifier.getString().empty() && "Expected valid identifier" ); |
1112 | if (!Context.isODRUniquingDebugTypes()) |
1113 | return nullptr; |
1114 | auto *&CT = (*Context.pImpl->DITypeMap)[&Identifier]; |
1115 | if (!CT) { |
1116 | CT = DICompositeType::getDistinct( |
1117 | Context, Tag, Name, File, Line, Scope, BaseType, SizeInBits, |
1118 | AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang, EnumKind, |
1119 | VTableHolder, TemplateParams, Identifier: &Identifier, Discriminator, DataLocation, |
1120 | Associated, Allocated, Rank, Annotations, Specification, |
1121 | NumExtraInhabitants, BitStride); |
1122 | } else { |
1123 | if (CT->getTag() != Tag) |
1124 | return nullptr; |
1125 | } |
1126 | return CT; |
1127 | } |
1128 | |
1129 | DICompositeType *DICompositeType::getODRTypeIfExists(LLVMContext &Context, |
1130 | MDString &Identifier) { |
1131 | assert(!Identifier.getString().empty() && "Expected valid identifier" ); |
1132 | if (!Context.isODRUniquingDebugTypes()) |
1133 | return nullptr; |
1134 | return Context.pImpl->DITypeMap->lookup(Val: &Identifier); |
1135 | } |
1136 | DISubroutineType::DISubroutineType(LLVMContext &C, StorageType Storage, |
1137 | DIFlags Flags, uint8_t CC, |
1138 | ArrayRef<Metadata *> Ops) |
1139 | : DIType(C, DISubroutineTypeKind, Storage, dwarf::DW_TAG_subroutine_type, 0, |
1140 | 0, 0, Flags, Ops), |
1141 | CC(CC) {} |
1142 | |
1143 | DISubroutineType *DISubroutineType::getImpl(LLVMContext &Context, DIFlags Flags, |
1144 | uint8_t CC, Metadata *TypeArray, |
1145 | StorageType Storage, |
1146 | bool ShouldCreate) { |
1147 | DEFINE_GETIMPL_LOOKUP(DISubroutineType, (Flags, CC, TypeArray)); |
1148 | Metadata *Ops[] = {nullptr, nullptr, nullptr, nullptr, nullptr, TypeArray}; |
1149 | DEFINE_GETIMPL_STORE(DISubroutineType, (Flags, CC), Ops); |
1150 | } |
1151 | |
1152 | DIFile::DIFile(LLVMContext &C, StorageType Storage, |
1153 | std::optional<ChecksumInfo<MDString *>> CS, MDString *Src, |
1154 | ArrayRef<Metadata *> Ops) |
1155 | : DIScope(C, DIFileKind, Storage, dwarf::DW_TAG_file_type, Ops), |
1156 | Checksum(CS), Source(Src) {} |
1157 | |
1158 | // FIXME: Implement this string-enum correspondence with a .def file and macros, |
1159 | // so that the association is explicit rather than implied. |
1160 | static const char *ChecksumKindName[DIFile::CSK_Last] = { |
1161 | "CSK_MD5" , |
1162 | "CSK_SHA1" , |
1163 | "CSK_SHA256" , |
1164 | }; |
1165 | |
1166 | StringRef DIFile::getChecksumKindAsString(ChecksumKind CSKind) { |
1167 | assert(CSKind <= DIFile::CSK_Last && "Invalid checksum kind" ); |
1168 | // The first space was originally the CSK_None variant, which is now |
1169 | // obsolete, but the space is still reserved in ChecksumKind, so we account |
1170 | // for it here. |
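  // For example, CSK_MD5, the lowest-valued kind, maps to ChecksumKindName[0].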
1171 | return ChecksumKindName[CSKind - 1]; |
1172 | } |
1173 | |
1174 | std::optional<DIFile::ChecksumKind> |
1175 | DIFile::getChecksumKind(StringRef CSKindStr) { |
1176 | return StringSwitch<std::optional<DIFile::ChecksumKind>>(CSKindStr) |
1177 | .Case(S: "CSK_MD5" , Value: DIFile::CSK_MD5) |
1178 | .Case(S: "CSK_SHA1" , Value: DIFile::CSK_SHA1) |
1179 | .Case(S: "CSK_SHA256" , Value: DIFile::CSK_SHA256) |
1180 | .Default(Value: std::nullopt); |
1181 | } |
1182 | |
1183 | DIFile *DIFile::getImpl(LLVMContext &Context, MDString *Filename, |
1184 | MDString *Directory, |
1185 | std::optional<DIFile::ChecksumInfo<MDString *>> CS, |
1186 | MDString *Source, StorageType Storage, |
1187 | bool ShouldCreate) { |
1188 | assert(isCanonical(Filename) && "Expected canonical MDString" ); |
1189 | assert(isCanonical(Directory) && "Expected canonical MDString" ); |
1190 | assert((!CS || isCanonical(CS->Value)) && "Expected canonical MDString" ); |
1191 | // We do *NOT* expect Source to be a canonical MDString because nullptr |
1192 | // means none, so we need something to represent the empty file. |
1193 | DEFINE_GETIMPL_LOOKUP(DIFile, (Filename, Directory, CS, Source)); |
1194 | Metadata *Ops[] = {Filename, Directory, CS ? CS->Value : nullptr, Source}; |
1195 | DEFINE_GETIMPL_STORE(DIFile, (CS, Source), Ops); |
1196 | } |
1197 | DICompileUnit::DICompileUnit(LLVMContext &C, StorageType Storage, |
1198 | unsigned SourceLanguage, bool IsOptimized, |
1199 | unsigned RuntimeVersion, unsigned EmissionKind, |
1200 | uint64_t DWOId, bool SplitDebugInlining, |
1201 | bool DebugInfoForProfiling, unsigned NameTableKind, |
1202 | bool RangesBaseAddress, ArrayRef<Metadata *> Ops) |
1203 | : DIScope(C, DICompileUnitKind, Storage, dwarf::DW_TAG_compile_unit, Ops), |
1204 | SourceLanguage(SourceLanguage), RuntimeVersion(RuntimeVersion), |
1205 | DWOId(DWOId), EmissionKind(EmissionKind), NameTableKind(NameTableKind), |
1206 | IsOptimized(IsOptimized), SplitDebugInlining(SplitDebugInlining), |
1207 | DebugInfoForProfiling(DebugInfoForProfiling), |
1208 | RangesBaseAddress(RangesBaseAddress) { |
1209 | assert(Storage != Uniqued); |
1210 | } |
1211 | |
1212 | DICompileUnit *DICompileUnit::getImpl( |
1213 | LLVMContext &Context, unsigned SourceLanguage, Metadata *File, |
1214 | MDString *Producer, bool IsOptimized, MDString *Flags, |
1215 | unsigned RuntimeVersion, MDString *SplitDebugFilename, |
1216 | unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes, |
1217 | Metadata *GlobalVariables, Metadata *ImportedEntities, Metadata *Macros, |
1218 | uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling, |
1219 | unsigned NameTableKind, bool RangesBaseAddress, MDString *SysRoot, |
1220 | MDString *SDK, StorageType Storage, bool ShouldCreate) { |
1221 | assert(Storage != Uniqued && "Cannot unique DICompileUnit" ); |
1222 | assert(isCanonical(Producer) && "Expected canonical MDString" ); |
1223 | assert(isCanonical(Flags) && "Expected canonical MDString" ); |
1224 | assert(isCanonical(SplitDebugFilename) && "Expected canonical MDString" ); |
1225 | |
1226 | Metadata *Ops[] = {File, |
1227 | Producer, |
1228 | Flags, |
1229 | SplitDebugFilename, |
1230 | EnumTypes, |
1231 | RetainedTypes, |
1232 | GlobalVariables, |
1233 | ImportedEntities, |
1234 | Macros, |
1235 | SysRoot, |
1236 | SDK}; |
1237 | return storeImpl(N: new (std::size(Ops), Storage) DICompileUnit( |
1238 | Context, Storage, SourceLanguage, IsOptimized, |
1239 | RuntimeVersion, EmissionKind, DWOId, SplitDebugInlining, |
1240 | DebugInfoForProfiling, NameTableKind, RangesBaseAddress, |
1241 | Ops), |
1242 | Storage); |
1243 | } |
1244 | |
1245 | std::optional<DICompileUnit::DebugEmissionKind> |
1246 | DICompileUnit::getEmissionKind(StringRef Str) { |
1247 | return StringSwitch<std::optional<DebugEmissionKind>>(Str) |
1248 | .Case(S: "NoDebug" , Value: NoDebug) |
1249 | .Case(S: "FullDebug" , Value: FullDebug) |
1250 | .Case(S: "LineTablesOnly" , Value: LineTablesOnly) |
1251 | .Case(S: "DebugDirectivesOnly" , Value: DebugDirectivesOnly) |
1252 | .Default(Value: std::nullopt); |
1253 | } |
1254 | |
1255 | std::optional<DICompileUnit::DebugNameTableKind> |
1256 | DICompileUnit::getNameTableKind(StringRef Str) { |
1257 | return StringSwitch<std::optional<DebugNameTableKind>>(Str) |
1258 | .Case(S: "Default" , Value: DebugNameTableKind::Default) |
1259 | .Case(S: "GNU" , Value: DebugNameTableKind::GNU) |
1260 | .Case(S: "Apple" , Value: DebugNameTableKind::Apple) |
1261 | .Case(S: "None" , Value: DebugNameTableKind::None) |
1262 | .Default(Value: std::nullopt); |
1263 | } |
1264 | |
1265 | const char *DICompileUnit::emissionKindString(DebugEmissionKind EK) { |
1266 | switch (EK) { |
1267 | case NoDebug: |
1268 | return "NoDebug" ; |
1269 | case FullDebug: |
1270 | return "FullDebug" ; |
1271 | case LineTablesOnly: |
1272 | return "LineTablesOnly" ; |
1273 | case DebugDirectivesOnly: |
1274 | return "DebugDirectivesOnly" ; |
1275 | } |
1276 | return nullptr; |
1277 | } |
1278 | |
1279 | const char *DICompileUnit::nameTableKindString(DebugNameTableKind NTK) { |
1280 | switch (NTK) { |
1281 | case DebugNameTableKind::Default: |
1282 | return nullptr; |
1283 | case DebugNameTableKind::GNU: |
1284 | return "GNU" ; |
1285 | case DebugNameTableKind::Apple: |
1286 | return "Apple" ; |
1287 | case DebugNameTableKind::None: |
1288 | return "None" ; |
1289 | } |
1290 | return nullptr; |
1291 | } |
1292 | DISubprogram::DISubprogram(LLVMContext &C, StorageType Storage, unsigned Line, |
1293 | unsigned ScopeLine, unsigned VirtualIndex, |
1294 | int ThisAdjustment, DIFlags Flags, DISPFlags SPFlags, |
1295 | bool UsesKeyInstructions, ArrayRef<Metadata *> Ops) |
1296 | : DILocalScope(C, DISubprogramKind, Storage, dwarf::DW_TAG_subprogram, Ops), |
1297 | Line(Line), ScopeLine(ScopeLine), VirtualIndex(VirtualIndex), |
1298 | ThisAdjustment(ThisAdjustment), Flags(Flags), SPFlags(SPFlags) { |
1299 | static_assert(dwarf::DW_VIRTUALITY_max < 4, "Virtuality out of range" ); |
1300 | SubclassData1 = UsesKeyInstructions; |
1301 | } |
1302 | DISubprogram::DISPFlags |
1303 | DISubprogram::toSPFlags(bool IsLocalToUnit, bool IsDefinition, bool IsOptimized, |
1304 | unsigned Virtuality, bool IsMainSubprogram) { |
1305 | // We're assuming virtuality is the low-order field. |
1306 | static_assert(int(SPFlagVirtual) == int(dwarf::DW_VIRTUALITY_virtual) && |
1307 | int(SPFlagPureVirtual) == |
1308 | int(dwarf::DW_VIRTUALITY_pure_virtual), |
1309 | "Virtuality constant mismatch" ); |
1310 | return static_cast<DISPFlags>( |
1311 | (Virtuality & SPFlagVirtuality) | |
1312 | (IsLocalToUnit ? SPFlagLocalToUnit : SPFlagZero) | |
1313 | (IsDefinition ? SPFlagDefinition : SPFlagZero) | |
1314 | (IsOptimized ? SPFlagOptimized : SPFlagZero) | |
1315 | (IsMainSubprogram ? SPFlagMainSubprogram : SPFlagZero)); |
1316 | } |
1317 | |
1318 | DISubprogram *DILocalScope::getSubprogram() const { |
1319 | if (auto *Block = dyn_cast<DILexicalBlockBase>(Val: this)) |
1320 | return Block->getScope()->getSubprogram(); |
1321 | return const_cast<DISubprogram *>(cast<DISubprogram>(Val: this)); |
1322 | } |
1323 | |
1324 | DILocalScope *DILocalScope::getNonLexicalBlockFileScope() const { |
1325 | if (auto *File = dyn_cast<DILexicalBlockFile>(Val: this)) |
1326 | return File->getScope()->getNonLexicalBlockFileScope(); |
1327 | return const_cast<DILocalScope *>(this); |
1328 | } |
1329 | |
1330 | DILocalScope *DILocalScope::cloneScopeForSubprogram( |
1331 | DILocalScope &RootScope, DISubprogram &NewSP, LLVMContext &Ctx, |
1332 | DenseMap<const MDNode *, MDNode *> &Cache) { |
1333 | SmallVector<DIScope *> ScopeChain; |
1334 | DIScope *CachedResult = nullptr; |
1335 | |
1336 | for (DIScope *Scope = &RootScope; !isa<DISubprogram>(Val: Scope); |
1337 | Scope = Scope->getScope()) { |
1338 | if (auto It = Cache.find(Val: Scope); It != Cache.end()) { |
1339 | CachedResult = cast<DIScope>(Val: It->second); |
1340 | break; |
1341 | } |
1342 | ScopeChain.push_back(Elt: Scope); |
1343 | } |
1344 | |
1345 | // Recreate the scope chain, bottom-up, starting at the new subprogram (or a |
1346 | // cached result). |
1347 | DIScope *UpdatedScope = CachedResult ? CachedResult : &NewSP; |
1348 | for (DIScope *ScopeToUpdate : reverse(C&: ScopeChain)) { |
1349 | UpdatedScope = cloneAndReplaceParentScope( |
1350 | LBB: cast<DILexicalBlockBase>(Val: ScopeToUpdate), NewParent: UpdatedScope); |
1351 | Cache[ScopeToUpdate] = UpdatedScope; |
1352 | } |
1353 | |
1354 | return cast<DILocalScope>(Val: UpdatedScope); |
1355 | } |
1356 | |
1357 | DISubprogram::DISPFlags DISubprogram::getFlag(StringRef Flag) { |
1358 | return StringSwitch<DISPFlags>(Flag) |
1359 | #define HANDLE_DISP_FLAG(ID, NAME) .Case("DISPFlag" #NAME, SPFlag##NAME) |
1360 | #include "llvm/IR/DebugInfoFlags.def" |
1361 | .Default(Value: SPFlagZero); |
1362 | } |
1363 | |
1364 | StringRef DISubprogram::getFlagString(DISPFlags Flag) { |
1365 | switch (Flag) { |
1366 | // Appease a warning. |
1367 | case SPFlagVirtuality: |
1368 | return "" ; |
1369 | #define HANDLE_DISP_FLAG(ID, NAME) \ |
1370 | case SPFlag##NAME: \ |
1371 | return "DISPFlag" #NAME; |
1372 | #include "llvm/IR/DebugInfoFlags.def" |
1373 | } |
1374 | return "" ; |
1375 | } |
1376 | |
1377 | DISubprogram::DISPFlags |
1378 | DISubprogram::splitFlags(DISPFlags Flags, |
1379 | SmallVectorImpl<DISPFlags> &SplitFlags) { |
1380 | // Multi-bit fields can require special handling. In our case, however, the |
1381 | // only multi-bit field is virtuality, and all its values happen to be |
1382 | // single-bit values, so the right behavior just falls out. |
1383 | #define HANDLE_DISP_FLAG(ID, NAME) \ |
1384 | if (DISPFlags Bit = Flags & SPFlag##NAME) { \ |
1385 | SplitFlags.push_back(Bit); \ |
1386 | Flags &= ~Bit; \ |
1387 | } |
1388 | #include "llvm/IR/DebugInfoFlags.def" |
1389 | return Flags; |
1390 | } |
1391 | |
1392 | DISubprogram *DISubprogram::getImpl( |
1393 | LLVMContext &Context, Metadata *Scope, MDString *Name, |
1394 | MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type, |
1395 | unsigned ScopeLine, Metadata *ContainingType, unsigned VirtualIndex, |
1396 | int ThisAdjustment, DIFlags Flags, DISPFlags SPFlags, Metadata *Unit, |
1397 | Metadata *TemplateParams, Metadata *Declaration, Metadata *RetainedNodes, |
1398 | Metadata *ThrownTypes, Metadata *Annotations, MDString *TargetFuncName, |
1399 | bool UsesKeyInstructions, StorageType Storage, bool ShouldCreate) { |
1400 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1401 | assert(isCanonical(LinkageName) && "Expected canonical MDString" ); |
1402 | assert(isCanonical(TargetFuncName) && "Expected canonical MDString" ); |
1403 | DEFINE_GETIMPL_LOOKUP(DISubprogram, |
1404 | (Scope, Name, LinkageName, File, Line, Type, ScopeLine, |
1405 | ContainingType, VirtualIndex, ThisAdjustment, Flags, |
1406 | SPFlags, Unit, TemplateParams, Declaration, |
1407 | RetainedNodes, ThrownTypes, Annotations, |
1408 | TargetFuncName, UsesKeyInstructions)); |
1409 | SmallVector<Metadata *, 13> Ops = { |
1410 | File, Scope, Name, LinkageName, |
1411 | Type, Unit, Declaration, RetainedNodes, |
1412 | ContainingType, TemplateParams, ThrownTypes, Annotations, |
1413 | TargetFuncName}; |
1414 | if (!TargetFuncName) { |
1415 | Ops.pop_back(); |
1416 | if (!Annotations) { |
1417 | Ops.pop_back(); |
1418 | if (!ThrownTypes) { |
1419 | Ops.pop_back(); |
1420 | if (!TemplateParams) { |
1421 | Ops.pop_back(); |
1422 | if (!ContainingType) |
1423 | Ops.pop_back(); |
1424 | } |
1425 | } |
1426 | } |
1427 | } |
1428 | DEFINE_GETIMPL_STORE_N(DISubprogram, |
1429 | (Line, ScopeLine, VirtualIndex, ThisAdjustment, Flags, |
1430 | SPFlags, UsesKeyInstructions), |
1431 | Ops, Ops.size()); |
1432 | } |
1433 | |
1434 | bool DISubprogram::describes(const Function *F) const { |
1435 | assert(F && "Invalid function" ); |
1436 | return F->getSubprogram() == this; |
1437 | } |
1438 | DILexicalBlockBase::DILexicalBlockBase(LLVMContext &C, unsigned ID, |
1439 | StorageType Storage, |
1440 | ArrayRef<Metadata *> Ops) |
1441 | : DILocalScope(C, ID, Storage, dwarf::DW_TAG_lexical_block, Ops) {} |
1442 | |
1443 | DILexicalBlock *DILexicalBlock::getImpl(LLVMContext &Context, Metadata *Scope, |
1444 | Metadata *File, unsigned Line, |
1445 | unsigned Column, StorageType Storage, |
1446 | bool ShouldCreate) { |
1447 | // Fixup column. |
1448 | adjustColumn(Column); |
1449 | |
1450 | assert(Scope && "Expected scope" ); |
1451 | DEFINE_GETIMPL_LOOKUP(DILexicalBlock, (Scope, File, Line, Column)); |
1452 | Metadata *Ops[] = {File, Scope}; |
1453 | DEFINE_GETIMPL_STORE(DILexicalBlock, (Line, Column), Ops); |
1454 | } |
1455 | |
1456 | DILexicalBlockFile *DILexicalBlockFile::getImpl(LLVMContext &Context, |
1457 | Metadata *Scope, Metadata *File, |
1458 | unsigned Discriminator, |
1459 | StorageType Storage, |
1460 | bool ShouldCreate) { |
1461 | assert(Scope && "Expected scope" ); |
1462 | DEFINE_GETIMPL_LOOKUP(DILexicalBlockFile, (Scope, File, Discriminator)); |
1463 | Metadata *Ops[] = {File, Scope}; |
1464 | DEFINE_GETIMPL_STORE(DILexicalBlockFile, (Discriminator), Ops); |
1465 | } |
1466 | |
1467 | DINamespace::DINamespace(LLVMContext &Context, StorageType Storage, |
1468 | bool ExportSymbols, ArrayRef<Metadata *> Ops) |
1469 | : DIScope(Context, DINamespaceKind, Storage, dwarf::DW_TAG_namespace, Ops) { |
1470 | SubclassData1 = ExportSymbols; |
1471 | } |
1472 | DINamespace *DINamespace::getImpl(LLVMContext &Context, Metadata *Scope, |
1473 | MDString *Name, bool ExportSymbols, |
1474 | StorageType Storage, bool ShouldCreate) { |
1475 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1476 | DEFINE_GETIMPL_LOOKUP(DINamespace, (Scope, Name, ExportSymbols)); |
1477 | // The nullptr is for DIScope's File operand. This should be refactored. |
1478 | Metadata *Ops[] = {nullptr, Scope, Name}; |
1479 | DEFINE_GETIMPL_STORE(DINamespace, (ExportSymbols), Ops); |
1480 | } |
1481 | |
1482 | DICommonBlock::DICommonBlock(LLVMContext &Context, StorageType Storage, |
1483 | unsigned LineNo, ArrayRef<Metadata *> Ops) |
1484 | : DIScope(Context, DICommonBlockKind, Storage, dwarf::DW_TAG_common_block, |
1485 | Ops) { |
1486 | SubclassData32 = LineNo; |
1487 | } |
1488 | DICommonBlock *DICommonBlock::getImpl(LLVMContext &Context, Metadata *Scope, |
1489 | Metadata *Decl, MDString *Name, |
1490 | Metadata *File, unsigned LineNo, |
1491 | StorageType Storage, bool ShouldCreate) { |
1492 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1493 | DEFINE_GETIMPL_LOOKUP(DICommonBlock, (Scope, Decl, Name, File, LineNo)); |
1495 | Metadata *Ops[] = {Scope, Decl, Name, File}; |
1496 | DEFINE_GETIMPL_STORE(DICommonBlock, (LineNo), Ops); |
1497 | } |
1498 | |
1499 | DIModule::DIModule(LLVMContext &Context, StorageType Storage, unsigned LineNo, |
1500 | bool IsDecl, ArrayRef<Metadata *> Ops) |
1501 | : DIScope(Context, DIModuleKind, Storage, dwarf::DW_TAG_module, Ops) { |
1502 | SubclassData1 = IsDecl; |
1503 | SubclassData32 = LineNo; |
1504 | } |
1505 | DIModule *DIModule::getImpl(LLVMContext &Context, Metadata *File, |
1506 | Metadata *Scope, MDString *Name, |
1507 | MDString *ConfigurationMacros, |
1508 | MDString *IncludePath, MDString *APINotesFile, |
1509 | unsigned LineNo, bool IsDecl, StorageType Storage, |
1510 | bool ShouldCreate) { |
1511 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1512 | DEFINE_GETIMPL_LOOKUP(DIModule, (File, Scope, Name, ConfigurationMacros, |
1513 | IncludePath, APINotesFile, LineNo, IsDecl)); |
1514 | Metadata *Ops[] = {File, Scope, Name, ConfigurationMacros, |
1515 | IncludePath, APINotesFile}; |
1516 | DEFINE_GETIMPL_STORE(DIModule, (LineNo, IsDecl), Ops); |
1517 | } |
1518 | DITemplateTypeParameter::DITemplateTypeParameter(LLVMContext &Context, |
1519 | StorageType Storage, |
1520 | bool IsDefault, |
1521 | ArrayRef<Metadata *> Ops) |
1522 | : DITemplateParameter(Context, DITemplateTypeParameterKind, Storage, |
1523 | dwarf::DW_TAG_template_type_parameter, IsDefault, |
1524 | Ops) {} |
1525 | |
1526 | DITemplateTypeParameter * |
1527 | DITemplateTypeParameter::getImpl(LLVMContext &Context, MDString *Name, |
1528 | Metadata *Type, bool isDefault, |
1529 | StorageType Storage, bool ShouldCreate) { |
1530 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1531 | DEFINE_GETIMPL_LOOKUP(DITemplateTypeParameter, (Name, Type, isDefault)); |
1532 | Metadata *Ops[] = {Name, Type}; |
1533 | DEFINE_GETIMPL_STORE(DITemplateTypeParameter, (isDefault), Ops); |
1534 | } |
1535 | |
1536 | DITemplateValueParameter *DITemplateValueParameter::getImpl( |
1537 | LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *Type, |
1538 | bool isDefault, Metadata *Value, StorageType Storage, bool ShouldCreate) { |
1539 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1540 | DEFINE_GETIMPL_LOOKUP(DITemplateValueParameter, |
1541 | (Tag, Name, Type, isDefault, Value)); |
1542 | Metadata *Ops[] = {Name, Type, Value}; |
1543 | DEFINE_GETIMPL_STORE(DITemplateValueParameter, (Tag, isDefault), Ops); |
1544 | } |
1545 | |
1546 | DIGlobalVariable * |
1547 | DIGlobalVariable::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name, |
1548 | MDString *LinkageName, Metadata *File, unsigned Line, |
1549 | Metadata *Type, bool IsLocalToUnit, bool IsDefinition, |
1550 | Metadata *StaticDataMemberDeclaration, |
1551 | Metadata *TemplateParams, uint32_t AlignInBits, |
1552 | Metadata *Annotations, StorageType Storage, |
1553 | bool ShouldCreate) { |
1554 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1555 | assert(isCanonical(LinkageName) && "Expected canonical MDString" ); |
1556 | DEFINE_GETIMPL_LOOKUP( |
1557 | DIGlobalVariable, |
1558 | (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition, |
1559 | StaticDataMemberDeclaration, TemplateParams, AlignInBits, Annotations)); |
1560 | Metadata *Ops[] = {Scope, |
1561 | Name, |
1562 | File, |
1563 | Type, |
1564 | Name, |
1565 | LinkageName, |
1566 | StaticDataMemberDeclaration, |
1567 | TemplateParams, |
1568 | Annotations}; |
1569 | DEFINE_GETIMPL_STORE(DIGlobalVariable, |
1570 | (Line, IsLocalToUnit, IsDefinition, AlignInBits), Ops); |
1571 | } |
1572 | |
1573 | DILocalVariable * |
1574 | DILocalVariable::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name, |
1575 | Metadata *File, unsigned Line, Metadata *Type, |
1576 | unsigned Arg, DIFlags Flags, uint32_t AlignInBits, |
1577 | Metadata *Annotations, StorageType Storage, |
1578 | bool ShouldCreate) { |
1579 | // 64K ought to be enough for any frontend. |
1580 | assert(Arg <= UINT16_MAX && "Expected argument number to fit in 16-bits" ); |
1581 | |
1582 | assert(Scope && "Expected scope" ); |
1583 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1584 | DEFINE_GETIMPL_LOOKUP(DILocalVariable, (Scope, Name, File, Line, Type, Arg, |
1585 | Flags, AlignInBits, Annotations)); |
1586 | Metadata *Ops[] = {Scope, Name, File, Type, Annotations}; |
1587 | DEFINE_GETIMPL_STORE(DILocalVariable, (Line, Arg, Flags, AlignInBits), Ops); |
1588 | } |
1589 | |
1590 | DIVariable::DIVariable(LLVMContext &C, unsigned ID, StorageType Storage, |
1591 | signed Line, ArrayRef<Metadata *> Ops, |
1592 | uint32_t AlignInBits) |
1593 | : DINode(C, ID, Storage, dwarf::DW_TAG_variable, Ops), Line(Line) { |
1594 | SubclassData32 = AlignInBits; |
1595 | } |
1596 | std::optional<uint64_t> DIVariable::getSizeInBits() const { |
1597 | // This is used by the Verifier so be mindful of broken types. |
1598 | const Metadata *RawType = getRawType(); |
1599 | while (RawType) { |
1600 | // Try to get the size directly. |
1601 | if (auto *T = dyn_cast<DIType>(Val: RawType)) |
1602 | if (uint64_t Size = T->getSizeInBits()) |
1603 | return Size; |
1604 | |
1605 | if (auto *DT = dyn_cast<DIDerivedType>(Val: RawType)) { |
1606 | // Look at the base type. |
1607 | RawType = DT->getRawBaseType(); |
1608 | continue; |
1609 | } |
1610 | |
1611 | // Missing type or size. |
1612 | break; |
1613 | } |
1614 | |
1615 | // Fail gracefully. |
1616 | return std::nullopt; |
1617 | } |
1618 | |
1619 | DILabel::DILabel(LLVMContext &C, StorageType Storage, unsigned Line, |
1620 | unsigned Column, bool IsArtificial, |
1621 | std::optional<unsigned> CoroSuspendIdx, |
1622 | ArrayRef<Metadata *> Ops) |
1623 | : DINode(C, DILabelKind, Storage, dwarf::DW_TAG_label, Ops) { |
1624 | this->SubclassData32 = Line; |
1625 | this->Column = Column; |
1626 | this->IsArtificial = IsArtificial; |
1627 | this->CoroSuspendIdx = CoroSuspendIdx; |
1628 | } |
1629 | DILabel *DILabel::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name, |
1630 | Metadata *File, unsigned Line, unsigned Column, |
1631 | bool IsArtificial, |
1632 | std::optional<unsigned> CoroSuspendIdx, |
1633 | StorageType Storage, bool ShouldCreate) { |
1634 | assert(Scope && "Expected scope" ); |
1635 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
1636 | DEFINE_GETIMPL_LOOKUP( |
1637 | DILabel, (Scope, Name, File, Line, Column, IsArtificial, CoroSuspendIdx)); |
1638 | Metadata *Ops[] = {Scope, Name, File}; |
1639 | DEFINE_GETIMPL_STORE(DILabel, (Line, Column, IsArtificial, CoroSuspendIdx), |
1640 | Ops); |
1641 | } |
1642 | |
1643 | DIExpression *DIExpression::getImpl(LLVMContext &Context, |
1644 | ArrayRef<uint64_t> Elements, |
1645 | StorageType Storage, bool ShouldCreate) { |
1646 | DEFINE_GETIMPL_LOOKUP(DIExpression, (Elements)); |
1647 | DEFINE_GETIMPL_STORE_NO_OPS(DIExpression, (Elements)); |
1648 | } |
1649 | bool DIExpression::isEntryValue() const { |
1650 | if (auto singleLocElts = getSingleLocationExpressionElements()) { |
1651 | return singleLocElts->size() > 0 && |
1652 | (*singleLocElts)[0] == dwarf::DW_OP_LLVM_entry_value; |
1653 | } |
1654 | return false; |
1655 | } |
1656 | bool DIExpression::startsWithDeref() const { |
1657 | if (auto singleLocElts = getSingleLocationExpressionElements()) |
1658 | return singleLocElts->size() > 0 && |
1659 | (*singleLocElts)[0] == dwarf::DW_OP_deref; |
1660 | return false; |
1661 | } |
1662 | bool DIExpression::isDeref() const { |
1663 | if (auto singleLocElts = getSingleLocationExpressionElements()) |
1664 | return singleLocElts->size() == 1 && |
1665 | (*singleLocElts)[0] == dwarf::DW_OP_deref; |
1666 | return false; |
1667 | } |
1668 | |
1669 | DIAssignID *DIAssignID::getImpl(LLVMContext &Context, StorageType Storage, |
1670 | bool ShouldCreate) { |
1671 | // Uniqued DIAssignID are not supported as the instance address *is* the ID. |
1672 | assert(Storage != StorageType::Uniqued && "uniqued DIAssignID unsupported" ); |
1673 | return storeImpl(N: new (0u, Storage) DIAssignID(Context, Storage), Storage); |
1674 | } |
1675 | |
1676 | unsigned DIExpression::ExprOperand::getSize() const { |
1677 | uint64_t Op = getOp(); |
1678 | |
1679 | if (Op >= dwarf::DW_OP_breg0 && Op <= dwarf::DW_OP_breg31) |
1680 | return 2; |
1681 | |
1682 | switch (Op) { |
1683 | case dwarf::DW_OP_LLVM_convert: |
1684 | case dwarf::DW_OP_LLVM_fragment: |
1685 | case dwarf::DW_OP_LLVM_extract_bits_sext: |
1686 | case dwarf::DW_OP_LLVM_extract_bits_zext: |
1687 | case dwarf::DW_OP_bregx: |
1688 | return 3; |
1689 | case dwarf::DW_OP_constu: |
1690 | case dwarf::DW_OP_consts: |
1691 | case dwarf::DW_OP_deref_size: |
1692 | case dwarf::DW_OP_plus_uconst: |
1693 | case dwarf::DW_OP_LLVM_tag_offset: |
1694 | case dwarf::DW_OP_LLVM_entry_value: |
1695 | case dwarf::DW_OP_LLVM_arg: |
1696 | case dwarf::DW_OP_regx: |
1697 | return 2; |
1698 | default: |
1699 | return 1; |
1700 | } |
1701 | } |
1702 | |
1703 | bool DIExpression::isValid() const { |
1704 | for (auto I = expr_op_begin(), E = expr_op_end(); I != E; ++I) { |
1705 | // Check that there's space for the operand. |
1706 | if (I->get() + I->getSize() > E->get()) |
1707 | return false; |
1708 | |
1709 | uint64_t Op = I->getOp(); |
1710 | if ((Op >= dwarf::DW_OP_reg0 && Op <= dwarf::DW_OP_reg31) || |
1711 | (Op >= dwarf::DW_OP_breg0 && Op <= dwarf::DW_OP_breg31)) |
1712 | return true; |
1713 | |
1714 | // Check that the operand is valid. |
1715 | switch (Op) { |
1716 | default: |
1717 | return false; |
1718 | case dwarf::DW_OP_LLVM_fragment: |
1719 | // A fragment operator must appear at the end. |
1720 | return I->get() + I->getSize() == E->get(); |
1721 | case dwarf::DW_OP_stack_value: { |
1722 | // Must be the last one or followed by a DW_OP_LLVM_fragment. |
1723 | if (I->get() + I->getSize() == E->get()) |
1724 | break; |
1725 | auto J = I; |
1726 | if ((++J)->getOp() != dwarf::DW_OP_LLVM_fragment) |
1727 | return false; |
1728 | break; |
1729 | } |
1730 | case dwarf::DW_OP_swap: { |
1731 | // Must be more than one implicit element on the stack. |
1732 | |
1733 | // FIXME: A better way to implement this would be to add a local variable |
1734 | // that keeps track of the stack depth and introduce something like a |
1735 | // DW_LLVM_OP_implicit_location as a placeholder for the location this |
1736 | // DIExpression is attached to, or else pass the number of implicit stack |
1737 | // elements into isValid. |
1738 | if (getNumElements() == 1) |
1739 | return false; |
1740 | break; |
1741 | } |
1742 | case dwarf::DW_OP_LLVM_entry_value: { |
1743 | // An entry value operator must appear at the beginning or immediately |
// following `DW_OP_LLVM_arg 0`, and the number of operations it covers can
1745 | // currently only be 1, because we support only entry values of a simple |
1746 | // register location. One reason for this is that we currently can't |
1747 | // calculate the size of the resulting DWARF block for other expressions. |
1748 | auto FirstOp = expr_op_begin(); |
1749 | if (FirstOp->getOp() == dwarf::DW_OP_LLVM_arg && FirstOp->getArg(I: 0) == 0) |
1750 | ++FirstOp; |
1751 | return I->get() == FirstOp->get() && I->getArg(I: 0) == 1; |
1752 | } |
1753 | case dwarf::DW_OP_LLVM_implicit_pointer: |
1754 | case dwarf::DW_OP_LLVM_convert: |
1755 | case dwarf::DW_OP_LLVM_arg: |
1756 | case dwarf::DW_OP_LLVM_tag_offset: |
1757 | case dwarf::DW_OP_LLVM_extract_bits_sext: |
1758 | case dwarf::DW_OP_LLVM_extract_bits_zext: |
1759 | case dwarf::DW_OP_constu: |
1760 | case dwarf::DW_OP_plus_uconst: |
1761 | case dwarf::DW_OP_plus: |
1762 | case dwarf::DW_OP_minus: |
1763 | case dwarf::DW_OP_mul: |
1764 | case dwarf::DW_OP_div: |
1765 | case dwarf::DW_OP_mod: |
1766 | case dwarf::DW_OP_or: |
1767 | case dwarf::DW_OP_and: |
1768 | case dwarf::DW_OP_xor: |
1769 | case dwarf::DW_OP_shl: |
1770 | case dwarf::DW_OP_shr: |
1771 | case dwarf::DW_OP_shra: |
1772 | case dwarf::DW_OP_deref: |
1773 | case dwarf::DW_OP_deref_size: |
1774 | case dwarf::DW_OP_xderef: |
1775 | case dwarf::DW_OP_lit0: |
1776 | case dwarf::DW_OP_not: |
1777 | case dwarf::DW_OP_dup: |
1778 | case dwarf::DW_OP_regx: |
1779 | case dwarf::DW_OP_bregx: |
1780 | case dwarf::DW_OP_push_object_address: |
1781 | case dwarf::DW_OP_over: |
1782 | case dwarf::DW_OP_consts: |
1783 | case dwarf::DW_OP_eq: |
1784 | case dwarf::DW_OP_ne: |
1785 | case dwarf::DW_OP_gt: |
1786 | case dwarf::DW_OP_ge: |
1787 | case dwarf::DW_OP_lt: |
1788 | case dwarf::DW_OP_le: |
1789 | break; |
1790 | } |
1791 | } |
1792 | return true; |
1793 | } |
1794 | |
1795 | bool DIExpression::isImplicit() const { |
1796 | if (!isValid()) |
1797 | return false; |
1798 | |
1799 | if (getNumElements() == 0) |
1800 | return false; |
1801 | |
1802 | for (const auto &It : expr_ops()) { |
1803 | switch (It.getOp()) { |
1804 | default: |
1805 | break; |
1806 | case dwarf::DW_OP_stack_value: |
1807 | return true; |
1808 | } |
1809 | } |
1810 | |
1811 | return false; |
1812 | } |
1813 | |
1814 | bool DIExpression::isComplex() const { |
1815 | if (!isValid()) |
1816 | return false; |
1817 | |
1818 | if (getNumElements() == 0) |
1819 | return false; |
1820 | |
1821 | // If there are any elements other than fragment or tag_offset, then some |
1822 | // kind of complex computation occurs. |
1823 | for (const auto &It : expr_ops()) { |
1824 | switch (It.getOp()) { |
1825 | case dwarf::DW_OP_LLVM_tag_offset: |
1826 | case dwarf::DW_OP_LLVM_fragment: |
1827 | case dwarf::DW_OP_LLVM_arg: |
1828 | continue; |
1829 | default: |
1830 | return true; |
1831 | } |
1832 | } |
1833 | |
1834 | return false; |
1835 | } |
1836 | |
1837 | bool DIExpression::isSingleLocationExpression() const { |
1838 | if (!isValid()) |
1839 | return false; |
1840 | |
1841 | if (getNumElements() == 0) |
1842 | return true; |
1843 | |
1844 | auto ExprOpBegin = expr_ops().begin(); |
1845 | auto ExprOpEnd = expr_ops().end(); |
1846 | if (ExprOpBegin->getOp() == dwarf::DW_OP_LLVM_arg) { |
1847 | if (ExprOpBegin->getArg(I: 0) != 0) |
1848 | return false; |
1849 | ++ExprOpBegin; |
1850 | } |
1851 | |
1852 | return !std::any_of(first: ExprOpBegin, last: ExprOpEnd, pred: [](auto Op) { |
1853 | return Op.getOp() == dwarf::DW_OP_LLVM_arg; |
1854 | }); |
1855 | } |
1856 | |
1857 | std::optional<ArrayRef<uint64_t>> |
1858 | DIExpression::getSingleLocationExpressionElements() const { |
1859 | // Check for `isValid` covered by `isSingleLocationExpression`. |
1860 | if (!isSingleLocationExpression()) |
1861 | return std::nullopt; |
1862 | |
1863 | // An empty expression is already non-variadic. |
1864 | if (!getNumElements()) |
1865 | return ArrayRef<uint64_t>(); |
1866 | |
1867 | // If Expr does not have a leading DW_OP_LLVM_arg then we don't need to do |
1868 | // anything. |
1869 | if (getElements()[0] == dwarf::DW_OP_LLVM_arg) |
1870 | return getElements().drop_front(N: 2); |
1871 | return getElements(); |
1872 | } |
1873 | |
1874 | const DIExpression * |
1875 | DIExpression::convertToUndefExpression(const DIExpression *Expr) { |
1876 | SmallVector<uint64_t, 3> UndefOps; |
1877 | if (auto FragmentInfo = Expr->getFragmentInfo()) { |
1878 | UndefOps.append(IL: {dwarf::DW_OP_LLVM_fragment, FragmentInfo->OffsetInBits, |
1879 | FragmentInfo->SizeInBits}); |
1880 | } |
1881 | return DIExpression::get(Context&: Expr->getContext(), Elements: UndefOps); |
1882 | } |
1883 | |
1884 | const DIExpression * |
1885 | DIExpression::convertToVariadicExpression(const DIExpression *Expr) { |
1886 | if (any_of(Range: Expr->expr_ops(), P: [](auto ExprOp) { |
1887 | return ExprOp.getOp() == dwarf::DW_OP_LLVM_arg; |
1888 | })) |
1889 | return Expr; |
1890 | SmallVector<uint64_t> NewOps; |
1891 | NewOps.reserve(N: Expr->getNumElements() + 2); |
1892 | NewOps.append(IL: {dwarf::DW_OP_LLVM_arg, 0}); |
1893 | NewOps.append(in_start: Expr->elements_begin(), in_end: Expr->elements_end()); |
1894 | return DIExpression::get(Context&: Expr->getContext(), Elements: NewOps); |
1895 | } |
1896 | |
1897 | std::optional<const DIExpression *> |
1898 | DIExpression::convertToNonVariadicExpression(const DIExpression *Expr) { |
1899 | if (!Expr) |
1900 | return std::nullopt; |
1901 | |
1902 | if (auto Elts = Expr->getSingleLocationExpressionElements()) |
1903 | return DIExpression::get(Context&: Expr->getContext(), Elements: *Elts); |
1904 | |
1905 | return std::nullopt; |
1906 | } |
1907 | |
1908 | void DIExpression::canonicalizeExpressionOps(SmallVectorImpl<uint64_t> &Ops, |
1909 | const DIExpression *Expr, |
1910 | bool IsIndirect) { |
1911 | // If Expr is not already variadic, insert the implied `DW_OP_LLVM_arg 0` |
1912 | // to the existing expression ops. |
1913 | if (none_of(Range: Expr->expr_ops(), P: [](auto ExprOp) { |
1914 | return ExprOp.getOp() == dwarf::DW_OP_LLVM_arg; |
1915 | })) |
1916 | Ops.append(IL: {dwarf::DW_OP_LLVM_arg, 0}); |
1917 | // If Expr is not indirect, we only need to insert the expression elements and |
1918 | // we're done. |
1919 | if (!IsIndirect) { |
1920 | Ops.append(in_start: Expr->elements_begin(), in_end: Expr->elements_end()); |
1921 | return; |
1922 | } |
1923 | // If Expr is indirect, insert the implied DW_OP_deref at the end of the |
1924 | // expression but before DW_OP_{stack_value, LLVM_fragment} if they are |
1925 | // present. |
1926 | for (auto Op : Expr->expr_ops()) { |
1927 | if (Op.getOp() == dwarf::DW_OP_stack_value || |
1928 | Op.getOp() == dwarf::DW_OP_LLVM_fragment) { |
1929 | Ops.push_back(Elt: dwarf::DW_OP_deref); |
1930 | IsIndirect = false; |
1931 | } |
1932 | Op.appendToVector(V&: Ops); |
1933 | } |
1934 | if (IsIndirect) |
1935 | Ops.push_back(Elt: dwarf::DW_OP_deref); |
1936 | } |
1937 | |
1938 | bool DIExpression::isEqualExpression(const DIExpression *FirstExpr, |
1939 | bool FirstIndirect, |
1940 | const DIExpression *SecondExpr, |
1941 | bool SecondIndirect) { |
1942 | SmallVector<uint64_t> FirstOps; |
1943 | DIExpression::canonicalizeExpressionOps(Ops&: FirstOps, Expr: FirstExpr, IsIndirect: FirstIndirect); |
1944 | SmallVector<uint64_t> SecondOps; |
1945 | DIExpression::canonicalizeExpressionOps(Ops&: SecondOps, Expr: SecondExpr, |
1946 | IsIndirect: SecondIndirect); |
1947 | return FirstOps == SecondOps; |
1948 | } |
1949 | |
1950 | std::optional<DIExpression::FragmentInfo> |
1951 | DIExpression::getFragmentInfo(expr_op_iterator Start, expr_op_iterator End) { |
1952 | for (auto I = Start; I != End; ++I) |
1953 | if (I->getOp() == dwarf::DW_OP_LLVM_fragment) { |
1954 | DIExpression::FragmentInfo Info = {I->getArg(I: 1), I->getArg(I: 0)}; |
1955 | return Info; |
1956 | } |
1957 | return std::nullopt; |
1958 | } |
1959 | |
1960 | std::optional<uint64_t> DIExpression::getActiveBits(DIVariable *Var) { |
1961 | std::optional<uint64_t> InitialActiveBits = Var->getSizeInBits(); |
1962 | std::optional<uint64_t> ActiveBits = InitialActiveBits; |
1963 | for (auto Op : expr_ops()) { |
1964 | switch (Op.getOp()) { |
1965 | default: |
1966 | // We assume the worst case for anything we don't currently handle and |
1967 | // revert to the initial active bits. |
1968 | ActiveBits = InitialActiveBits; |
1969 | break; |
1970 | case dwarf::DW_OP_LLVM_extract_bits_zext: |
1971 | case dwarf::DW_OP_LLVM_extract_bits_sext: { |
1972 | // We can't handle an extract whose sign doesn't match that of the |
1973 | // variable. |
1974 | std::optional<DIBasicType::Signedness> VarSign = Var->getSignedness(); |
1975 | bool VarSigned = (VarSign == DIBasicType::Signedness::Signed); |
1976 | bool OpSigned = (Op.getOp() == dwarf::DW_OP_LLVM_extract_bits_sext); |
1977 | if (!VarSign || VarSigned != OpSigned) { |
1978 | ActiveBits = InitialActiveBits; |
1979 | break; |
1980 | } |
1981 | [[fallthrough]]; |
1982 | } |
1983 | case dwarf::DW_OP_LLVM_fragment: |
1984 | // Extract or fragment narrows the active bits |
1985 | if (ActiveBits) |
1986 | ActiveBits = std::min(a: *ActiveBits, b: Op.getArg(I: 1)); |
1987 | else |
1988 | ActiveBits = Op.getArg(I: 1); |
1989 | break; |
1990 | } |
1991 | } |
1992 | return ActiveBits; |
1993 | } |
1994 | |
1995 | void DIExpression::appendOffset(SmallVectorImpl<uint64_t> &Ops, |
1996 | int64_t Offset) { |
1997 | if (Offset > 0) { |
1998 | Ops.push_back(Elt: dwarf::DW_OP_plus_uconst); |
1999 | Ops.push_back(Elt: Offset); |
2000 | } else if (Offset < 0) { |
2001 | Ops.push_back(Elt: dwarf::DW_OP_constu); |
2002 | // Avoid UB when encountering LLONG_MIN, because in 2's complement |
2003 | // abs(LLONG_MIN) is LLONG_MAX+1. |
2004 | uint64_t AbsMinusOne = -(Offset+1); |
2005 | Ops.push_back(Elt: AbsMinusOne + 1); |
2006 | Ops.push_back(Elt: dwarf::DW_OP_minus); |
2007 | } |
2008 | } |
2009 | |
bool DIExpression::extractIfOffset(int64_t &Offset) const {
2011 | auto SingleLocEltsOpt = getSingleLocationExpressionElements(); |
2012 | if (!SingleLocEltsOpt) |
2013 | return false; |
2014 | auto SingleLocElts = *SingleLocEltsOpt; |
2015 | |
2016 | if (SingleLocElts.size() == 0) { |
2017 | Offset = 0; |
2018 | return true; |
2019 | } |
2020 | |
2021 | if (SingleLocElts.size() == 2 && |
2022 | SingleLocElts[0] == dwarf::DW_OP_plus_uconst) { |
2023 | Offset = SingleLocElts[1]; |
2024 | return true; |
2025 | } |
2026 | |
2027 | if (SingleLocElts.size() == 3 && SingleLocElts[0] == dwarf::DW_OP_constu) { |
2028 | if (SingleLocElts[2] == dwarf::DW_OP_plus) { |
2029 | Offset = SingleLocElts[1]; |
2030 | return true; |
2031 | } |
2032 | if (SingleLocElts[2] == dwarf::DW_OP_minus) { |
2033 | Offset = -SingleLocElts[1]; |
2034 | return true; |
2035 | } |
2036 | } |
2037 | |
2038 | return false; |
2039 | } |
2040 | |
bool DIExpression::extractLeadingOffset(
2042 | int64_t &OffsetInBytes, SmallVectorImpl<uint64_t> &RemainingOps) const { |
2043 | OffsetInBytes = 0; |
2044 | RemainingOps.clear(); |
2045 | |
2046 | auto SingleLocEltsOpt = getSingleLocationExpressionElements(); |
2047 | if (!SingleLocEltsOpt) |
2048 | return false; |
2049 | |
2050 | auto ExprOpEnd = expr_op_iterator(SingleLocEltsOpt->end()); |
2051 | auto ExprOpIt = expr_op_iterator(SingleLocEltsOpt->begin()); |
2052 | while (ExprOpIt != ExprOpEnd) { |
2053 | uint64_t Op = ExprOpIt->getOp(); |
2054 | if (Op == dwarf::DW_OP_deref || Op == dwarf::DW_OP_deref_size || |
2055 | Op == dwarf::DW_OP_deref_type || Op == dwarf::DW_OP_LLVM_fragment || |
2056 | Op == dwarf::DW_OP_LLVM_extract_bits_zext || |
2057 | Op == dwarf::DW_OP_LLVM_extract_bits_sext) { |
2058 | break; |
2059 | } else if (Op == dwarf::DW_OP_plus_uconst) { |
2060 | OffsetInBytes += ExprOpIt->getArg(I: 0); |
2061 | } else if (Op == dwarf::DW_OP_constu) { |
2062 | uint64_t Value = ExprOpIt->getArg(I: 0); |
2063 | ++ExprOpIt; |
2064 | if (ExprOpIt->getOp() == dwarf::DW_OP_plus) |
2065 | OffsetInBytes += Value; |
2066 | else if (ExprOpIt->getOp() == dwarf::DW_OP_minus) |
2067 | OffsetInBytes -= Value; |
2068 | else |
2069 | return false; |
2070 | } else { |
2071 | // Not a const plus/minus operation or deref. |
2072 | return false; |
2073 | } |
2074 | ++ExprOpIt; |
2075 | } |
2076 | RemainingOps.append(in_start: ExprOpIt.getBase(), in_end: ExprOpEnd.getBase()); |
2077 | return true; |
2078 | } |
2079 | |
2080 | bool DIExpression::hasAllLocationOps(unsigned N) const { |
2081 | SmallDenseSet<uint64_t, 4> SeenOps; |
2082 | for (auto ExprOp : expr_ops()) |
2083 | if (ExprOp.getOp() == dwarf::DW_OP_LLVM_arg) |
2084 | SeenOps.insert(V: ExprOp.getArg(I: 0)); |
2085 | for (uint64_t Idx = 0; Idx < N; ++Idx) |
2086 | if (!SeenOps.contains(V: Idx)) |
2087 | return false; |
2088 | return true; |
2089 | } |
2090 | |
const DIExpression *DIExpression::extractAddressClass(const DIExpression *Expr,
2092 | unsigned &AddrClass) { |
2093 | // FIXME: This seems fragile. Nothing that verifies that these elements |
2094 | // actually map to ops and not operands. |
2095 | auto SingleLocEltsOpt = Expr->getSingleLocationExpressionElements(); |
2096 | if (!SingleLocEltsOpt) |
2097 | return nullptr; |
2098 | auto SingleLocElts = *SingleLocEltsOpt; |
2099 | |
2100 | const unsigned PatternSize = 4; |
2101 | if (SingleLocElts.size() >= PatternSize && |
2102 | SingleLocElts[PatternSize - 4] == dwarf::DW_OP_constu && |
2103 | SingleLocElts[PatternSize - 2] == dwarf::DW_OP_swap && |
2104 | SingleLocElts[PatternSize - 1] == dwarf::DW_OP_xderef) { |
2105 | AddrClass = SingleLocElts[PatternSize - 3]; |
2106 | |
2107 | if (SingleLocElts.size() == PatternSize) |
2108 | return nullptr; |
2109 | return DIExpression::get( |
2110 | Context&: Expr->getContext(), |
2111 | Elements: ArrayRef(&*SingleLocElts.begin(), SingleLocElts.size() - PatternSize)); |
2112 | } |
2113 | return Expr; |
2114 | } |
2115 | |
2116 | DIExpression *DIExpression::prepend(const DIExpression *Expr, uint8_t Flags, |
2117 | int64_t Offset) { |
2118 | SmallVector<uint64_t, 8> Ops; |
2119 | if (Flags & DIExpression::DerefBefore) |
2120 | Ops.push_back(Elt: dwarf::DW_OP_deref); |
2121 | |
2122 | appendOffset(Ops, Offset); |
2123 | if (Flags & DIExpression::DerefAfter) |
2124 | Ops.push_back(Elt: dwarf::DW_OP_deref); |
2125 | |
2126 | bool StackValue = Flags & DIExpression::StackValue; |
2127 | bool EntryValue = Flags & DIExpression::EntryValue; |
2128 | |
2129 | return prependOpcodes(Expr, Ops, StackValue, EntryValue); |
2130 | } |
2131 | |
2132 | DIExpression *DIExpression::appendOpsToArg(const DIExpression *Expr, |
2133 | ArrayRef<uint64_t> Ops, |
2134 | unsigned ArgNo, bool StackValue) { |
2135 | assert(Expr && "Can't add ops to this expression" ); |
2136 | |
2137 | // Handle non-variadic intrinsics by prepending the opcodes. |
2138 | if (!any_of(Range: Expr->expr_ops(), |
2139 | P: [](auto Op) { return Op.getOp() == dwarf::DW_OP_LLVM_arg; })) { |
2140 | assert(ArgNo == 0 && |
2141 | "Location Index must be 0 for a non-variadic expression." ); |
2142 | SmallVector<uint64_t, 8> NewOps(Ops); |
2143 | return DIExpression::prependOpcodes(Expr, Ops&: NewOps, StackValue); |
2144 | } |
2145 | |
2146 | SmallVector<uint64_t, 8> NewOps; |
2147 | for (auto Op : Expr->expr_ops()) { |
2148 | // A DW_OP_stack_value comes at the end, but before a DW_OP_LLVM_fragment. |
2149 | if (StackValue) { |
2150 | if (Op.getOp() == dwarf::DW_OP_stack_value) |
2151 | StackValue = false; |
2152 | else if (Op.getOp() == dwarf::DW_OP_LLVM_fragment) { |
2153 | NewOps.push_back(Elt: dwarf::DW_OP_stack_value); |
2154 | StackValue = false; |
2155 | } |
2156 | } |
2157 | Op.appendToVector(V&: NewOps); |
2158 | if (Op.getOp() == dwarf::DW_OP_LLVM_arg && Op.getArg(I: 0) == ArgNo) |
2159 | llvm::append_range(C&: NewOps, R&: Ops); |
2160 | } |
2161 | if (StackValue) |
2162 | NewOps.push_back(Elt: dwarf::DW_OP_stack_value); |
2163 | |
2164 | return DIExpression::get(Context&: Expr->getContext(), Elements: NewOps); |
2165 | } |
2166 | |
2167 | DIExpression *DIExpression::replaceArg(const DIExpression *Expr, |
2168 | uint64_t OldArg, uint64_t NewArg) { |
2169 | assert(Expr && "Can't replace args in this expression" ); |
2170 | |
2171 | SmallVector<uint64_t, 8> NewOps; |
2172 | |
2173 | for (auto Op : Expr->expr_ops()) { |
2174 | if (Op.getOp() != dwarf::DW_OP_LLVM_arg || Op.getArg(I: 0) < OldArg) { |
2175 | Op.appendToVector(V&: NewOps); |
2176 | continue; |
2177 | } |
2178 | NewOps.push_back(Elt: dwarf::DW_OP_LLVM_arg); |
2179 | uint64_t Arg = Op.getArg(I: 0) == OldArg ? NewArg : Op.getArg(I: 0); |
2180 | // OldArg has been deleted from the Op list, so decrement all indices |
2181 | // greater than it. |
2182 | if (Arg > OldArg) |
2183 | --Arg; |
2184 | NewOps.push_back(Elt: Arg); |
2185 | } |
2186 | return DIExpression::get(Context&: Expr->getContext(), Elements: NewOps); |
2187 | } |
2188 | |
2189 | DIExpression *DIExpression::prependOpcodes(const DIExpression *Expr, |
2190 | SmallVectorImpl<uint64_t> &Ops, |
2191 | bool StackValue, bool EntryValue) { |
2192 | assert(Expr && "Can't prepend ops to this expression" ); |
2193 | |
2194 | if (EntryValue) { |
2195 | Ops.push_back(Elt: dwarf::DW_OP_LLVM_entry_value); |
2196 | // Use a block size of 1 for the target register operand. The |
2197 | // DWARF backend currently cannot emit entry values with a block |
2198 | // size > 1. |
2199 | Ops.push_back(Elt: 1); |
2200 | } |
2201 | |
2202 | // If there are no ops to prepend, do not even add the DW_OP_stack_value. |
2203 | if (Ops.empty()) |
2204 | StackValue = false; |
2205 | for (auto Op : Expr->expr_ops()) { |
2206 | // A DW_OP_stack_value comes at the end, but before a DW_OP_LLVM_fragment. |
2207 | if (StackValue) { |
2208 | if (Op.getOp() == dwarf::DW_OP_stack_value) |
2209 | StackValue = false; |
2210 | else if (Op.getOp() == dwarf::DW_OP_LLVM_fragment) { |
2211 | Ops.push_back(Elt: dwarf::DW_OP_stack_value); |
2212 | StackValue = false; |
2213 | } |
2214 | } |
2215 | Op.appendToVector(V&: Ops); |
2216 | } |
2217 | if (StackValue) |
2218 | Ops.push_back(Elt: dwarf::DW_OP_stack_value); |
2219 | return DIExpression::get(Context&: Expr->getContext(), Elements: Ops); |
2220 | } |
2221 | |
2222 | DIExpression *DIExpression::append(const DIExpression *Expr, |
2223 | ArrayRef<uint64_t> Ops) { |
2224 | assert(Expr && !Ops.empty() && "Can't append ops to this expression" ); |
2225 | |
2226 | // Copy Expr's current op list. |
2227 | SmallVector<uint64_t, 16> NewOps; |
2228 | for (auto Op : Expr->expr_ops()) { |
2229 | // Append new opcodes before DW_OP_{stack_value, LLVM_fragment}. |
2230 | if (Op.getOp() == dwarf::DW_OP_stack_value || |
2231 | Op.getOp() == dwarf::DW_OP_LLVM_fragment) { |
2232 | NewOps.append(in_start: Ops.begin(), in_end: Ops.end()); |
2233 | |
2234 | // Ensure that the new opcodes are only appended once. |
2235 | Ops = {}; |
2236 | } |
2237 | Op.appendToVector(V&: NewOps); |
2238 | } |
2239 | NewOps.append(in_start: Ops.begin(), in_end: Ops.end()); |
2240 | auto *result = |
2241 | DIExpression::get(Context&: Expr->getContext(), Elements: NewOps)->foldConstantMath(); |
2242 | assert(result->isValid() && "concatenated expression is not valid" ); |
2243 | return result; |
2244 | } |
2245 | |
2246 | DIExpression *DIExpression::appendToStack(const DIExpression *Expr, |
2247 | ArrayRef<uint64_t> Ops) { |
2248 | assert(Expr && !Ops.empty() && "Can't append ops to this expression" ); |
2249 | assert(std::none_of(expr_op_iterator(Ops.begin()), |
2250 | expr_op_iterator(Ops.end()), |
2251 | [](auto Op) { |
2252 | return Op.getOp() == dwarf::DW_OP_stack_value || |
2253 | Op.getOp() == dwarf::DW_OP_LLVM_fragment; |
2254 | }) && |
2255 | "Can't append this op" ); |
2256 | |
2257 | // Append a DW_OP_deref after Expr's current op list if it's non-empty and |
2258 | // has no DW_OP_stack_value. |
2259 | // |
2260 | // Match .* DW_OP_stack_value (DW_OP_LLVM_fragment A B)?. |
2261 | std::optional<FragmentInfo> FI = Expr->getFragmentInfo(); |
2262 | unsigned DropUntilStackValue = FI ? 3 : 0; |
2263 | ArrayRef<uint64_t> ExprOpsBeforeFragment = |
2264 | Expr->getElements().drop_back(N: DropUntilStackValue); |
2265 | bool NeedsDeref = (Expr->getNumElements() > DropUntilStackValue) && |
2266 | (ExprOpsBeforeFragment.back() != dwarf::DW_OP_stack_value); |
2267 | bool NeedsStackValue = NeedsDeref || ExprOpsBeforeFragment.empty(); |
2268 | |
2269 | // Append a DW_OP_deref after Expr's current op list if needed, then append |
2270 | // the new ops, and finally ensure that a single DW_OP_stack_value is present. |
2271 | SmallVector<uint64_t, 16> NewOps; |
2272 | if (NeedsDeref) |
2273 | NewOps.push_back(Elt: dwarf::DW_OP_deref); |
2274 | NewOps.append(in_start: Ops.begin(), in_end: Ops.end()); |
2275 | if (NeedsStackValue) |
2276 | NewOps.push_back(Elt: dwarf::DW_OP_stack_value); |
2277 | return DIExpression::append(Expr, Ops: NewOps); |
2278 | } |
2279 | |
2280 | std::optional<DIExpression *> DIExpression::createFragmentExpression( |
2281 | const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits) { |
2282 | SmallVector<uint64_t, 8> Ops; |
2283 | // Track whether it's safe to split the value at the top of the DWARF stack, |
2284 | // assuming that it'll be used as an implicit location value. |
2285 | bool CanSplitValue = true; |
2286 | // Track whether we need to add a fragment expression to the end of Expr. |
2287 | bool EmitFragment = true; |
2288 | // Copy over the expression, but leave off any trailing DW_OP_LLVM_fragment. |
2289 | if (Expr) { |
2290 | for (auto Op : Expr->expr_ops()) { |
2291 | switch (Op.getOp()) { |
2292 | default: |
2293 | break; |
2294 | case dwarf::DW_OP_shr: |
2295 | case dwarf::DW_OP_shra: |
2296 | case dwarf::DW_OP_shl: |
2297 | case dwarf::DW_OP_plus: |
2298 | case dwarf::DW_OP_plus_uconst: |
2299 | case dwarf::DW_OP_minus: |
2300 | // We can't safely split arithmetic or shift operations into multiple |
2301 | // fragments because we can't express carry-over between fragments. |
2302 | // |
2303 | // FIXME: We *could* preserve the lowest fragment of a constant offset |
2304 | // operation if the offset fits into SizeInBits. |
2305 | CanSplitValue = false; |
2306 | break; |
2307 | case dwarf::DW_OP_deref: |
2308 | case dwarf::DW_OP_deref_size: |
2309 | case dwarf::DW_OP_deref_type: |
2310 | case dwarf::DW_OP_xderef: |
2311 | case dwarf::DW_OP_xderef_size: |
2312 | case dwarf::DW_OP_xderef_type: |
// Preceding arithmetic operations have been applied to compute an
2314 | // address. It's okay to split the value loaded from that address. |
2315 | CanSplitValue = true; |
2316 | break; |
2317 | case dwarf::DW_OP_stack_value: |
2318 | // Bail if this expression computes a value that cannot be split. |
2319 | if (!CanSplitValue) |
2320 | return std::nullopt; |
2321 | break; |
2322 | case dwarf::DW_OP_LLVM_fragment: { |
2323 | // If we've decided we don't need a fragment then give up if we see that |
2324 | // there's already a fragment expression. |
2325 | // FIXME: We could probably do better here |
2326 | if (!EmitFragment) |
2327 | return std::nullopt; |
2328 | // Make the new offset point into the existing fragment. |
2329 | uint64_t FragmentOffsetInBits = Op.getArg(I: 0); |
2330 | uint64_t FragmentSizeInBits = Op.getArg(I: 1); |
2331 | (void)FragmentSizeInBits; |
2332 | assert((OffsetInBits + SizeInBits <= FragmentSizeInBits) && |
2333 | "new fragment outside of original fragment" ); |
2334 | OffsetInBits += FragmentOffsetInBits; |
2335 | continue; |
2336 | } |
2337 | case dwarf::DW_OP_LLVM_extract_bits_zext: |
2338 | case dwarf::DW_OP_LLVM_extract_bits_sext: { |
2339 | // If we're extracting bits from inside of the fragment that we're |
2340 | // creating then we don't have a fragment after all, and just need to |
2341 | // adjust the offset that we're extracting from. |
uint64_t ExtractOffsetInBits = Op.getArg(I: 0);
uint64_t ExtractSizeInBits = Op.getArg(I: 1);
2344 | if (ExtractOffsetInBits >= OffsetInBits && |
2345 | ExtractOffsetInBits + ExtractSizeInBits <= |
2346 | OffsetInBits + SizeInBits) { |
2347 | Ops.push_back(Elt: Op.getOp()); |
2348 | Ops.push_back(Elt: ExtractOffsetInBits - OffsetInBits); |
2349 | Ops.push_back(Elt: ExtractSizeInBits); |
2350 | EmitFragment = false; |
2351 | continue; |
2352 | } |
2353 | // If the extracted bits aren't fully contained within the fragment then |
2354 | // give up. |
2355 | // FIXME: We could probably do better here |
2356 | return std::nullopt; |
2357 | } |
2358 | } |
2359 | Op.appendToVector(V&: Ops); |
2360 | } |
2361 | } |
  assert(Expr && "Unknown DIExpression");
  assert((!Expr->isImplicit() || CanSplitValue) && "Expr can't be split");
2364 | if (EmitFragment) { |
2365 | Ops.push_back(Elt: dwarf::DW_OP_LLVM_fragment); |
2366 | Ops.push_back(Elt: OffsetInBits); |
2367 | Ops.push_back(Elt: SizeInBits); |
2368 | } |
2369 | return DIExpression::get(Context&: Expr->getContext(), Elements: Ops); |
2370 | } |
2371 | |
2372 | /// See declaration for more info. |
2373 | bool DIExpression::calculateFragmentIntersect( |
2374 | const DataLayout &DL, const Value *SliceStart, uint64_t SliceOffsetInBits, |
2375 | uint64_t SliceSizeInBits, const Value *DbgPtr, int64_t DbgPtrOffsetInBits, |
int64_t DbgExtractOffsetInBits, DIExpression::FragmentInfo VarFrag,
2377 | std::optional<DIExpression::FragmentInfo> &Result, |
2378 | int64_t &OffsetFromLocationInBits) { |
2379 | |
2380 | if (VarFrag.SizeInBits == 0) |
2381 | return false; // Variable size is unknown. |
2382 | |
2383 | // Difference between mem slice start and the dbg location start. |
2384 | // 0 4 8 12 16 ... |
2385 | // | | |
2386 | // dbg location start |
2387 | // | |
2388 | // mem slice start |
2389 | // Here MemStartRelToDbgStartInBits is 8. Note this can be negative. |
2390 | int64_t MemStartRelToDbgStartInBits; |
2391 | { |
2392 | auto MemOffsetFromDbgInBytes = SliceStart->getPointerOffsetFrom(Other: DbgPtr, DL); |
2393 | if (!MemOffsetFromDbgInBytes) |
2394 | return false; // Can't calculate difference in addresses. |
2395 | // Difference between the pointers. |
2396 | MemStartRelToDbgStartInBits = *MemOffsetFromDbgInBytes * 8; |
2397 | // Add the difference of the offsets. |
2398 | MemStartRelToDbgStartInBits += |
2399 | SliceOffsetInBits - (DbgPtrOffsetInBits + DbgExtractOffsetInBits); |
2400 | } |
2401 | |
2402 | // Out-param. Invert offset to get offset from debug location. |
2403 | OffsetFromLocationInBits = -MemStartRelToDbgStartInBits; |
2404 | |
2405 | // Check if the variable fragment sits outside (before) this memory slice. |
2406 | int64_t MemEndRelToDbgStart = MemStartRelToDbgStartInBits + SliceSizeInBits; |
2407 | if (MemEndRelToDbgStart < 0) { |
2408 | Result = {0, 0}; // Out-param. |
2409 | return true; |
2410 | } |
2411 | |
2412 | // Work towards creating SliceOfVariable which is the bits of the variable |
2413 | // that the memory region covers. |
2414 | // 0 4 8 12 16 ... |
2415 | // | | |
2416 | // dbg location start with VarFrag offset=32 |
2417 | // | |
2418 | // mem slice start: SliceOfVariable offset=40 |
2419 | int64_t MemStartRelToVarInBits = |
2420 | MemStartRelToDbgStartInBits + VarFrag.OffsetInBits; |
2421 | int64_t MemEndRelToVarInBits = MemStartRelToVarInBits + SliceSizeInBits; |
2422 | // If the memory region starts before the debug location the fragment |
2423 | // offset would be negative, which we can't encode. Limit those to 0. This |
2424 | // is fine because those bits necessarily don't overlap with the existing |
2425 | // variable fragment. |
2426 | int64_t MemFragStart = std::max<int64_t>(a: 0, b: MemStartRelToVarInBits); |
2427 | int64_t MemFragSize = |
2428 | std::max<int64_t>(a: 0, b: MemEndRelToVarInBits - MemFragStart); |
2429 | DIExpression::FragmentInfo SliceOfVariable(MemFragSize, MemFragStart); |
2430 | |
2431 | // Intersect the memory region fragment with the variable location fragment. |
2432 | DIExpression::FragmentInfo TrimmedSliceOfVariable = |
2433 | DIExpression::FragmentInfo::intersect(A: SliceOfVariable, B: VarFrag); |
2434 | if (TrimmedSliceOfVariable == VarFrag) |
2435 | Result = std::nullopt; // Out-param. |
2436 | else |
2437 | Result = TrimmedSliceOfVariable; // Out-param. |
2438 | return true; |
2439 | } |
2440 | |
2441 | std::pair<DIExpression *, const ConstantInt *> |
2442 | DIExpression::constantFold(const ConstantInt *CI) { |
2443 | // Copy the APInt so we can modify it. |
2444 | APInt NewInt = CI->getValue(); |
2445 | SmallVector<uint64_t, 8> Ops; |
2446 | |
2447 | // Fold operators only at the beginning of the expression. |
2448 | bool First = true; |
2449 | bool Changed = false; |
2450 | for (auto Op : expr_ops()) { |
2451 | switch (Op.getOp()) { |
2452 | default: |
2453 | // We fold only the leading part of the expression; if we get to a part |
2454 | // that we're going to copy unchanged, and haven't done any folding, |
2455 | // then the entire expression is unchanged and we can return early. |
2456 | if (!Changed) |
2457 | return {this, CI}; |
2458 | First = false; |
2459 | break; |
2460 | case dwarf::DW_OP_LLVM_convert: |
2461 | if (!First) |
2462 | break; |
2463 | Changed = true; |
2464 | if (Op.getArg(I: 1) == dwarf::DW_ATE_signed) |
2465 | NewInt = NewInt.sextOrTrunc(width: Op.getArg(I: 0)); |
2466 | else { |
2467 | assert(Op.getArg(1) == dwarf::DW_ATE_unsigned && "Unexpected operand" ); |
2468 | NewInt = NewInt.zextOrTrunc(width: Op.getArg(I: 0)); |
2469 | } |
2470 | continue; |
2471 | } |
2472 | Op.appendToVector(V&: Ops); |
2473 | } |
2474 | if (!Changed) |
2475 | return {this, CI}; |
2476 | return {DIExpression::get(Context&: getContext(), Elements: Ops), |
2477 | ConstantInt::get(Context&: getContext(), V: NewInt)}; |
2478 | } |
2479 | |
2480 | uint64_t DIExpression::getNumLocationOperands() const { |
2481 | uint64_t Result = 0; |
2482 | for (auto ExprOp : expr_ops()) |
2483 | if (ExprOp.getOp() == dwarf::DW_OP_LLVM_arg) |
2484 | Result = std::max(a: Result, b: ExprOp.getArg(I: 0) + 1); |
2485 | assert(hasAllLocationOps(Result) && |
2486 | "Expression is missing one or more location operands." ); |
2487 | return Result; |
2488 | } |
2489 | |
2490 | std::optional<DIExpression::SignedOrUnsignedConstant> |
2491 | DIExpression::isConstant() const { |
2492 | |
2493 | // Recognize signed and unsigned constants. |
// A signed constant can be represented as DW_OP_consts C DW_OP_stack_value
2495 | // (DW_OP_LLVM_fragment of Len). |
2496 | // An unsigned constant can be represented as |
2497 | // DW_OP_constu C DW_OP_stack_value (DW_OP_LLVM_fragment of Len). |
2498 | |
2499 | if ((getNumElements() != 2 && getNumElements() != 3 && |
2500 | getNumElements() != 6) || |
2501 | (getElement(I: 0) != dwarf::DW_OP_consts && |
2502 | getElement(I: 0) != dwarf::DW_OP_constu)) |
2503 | return std::nullopt; |
2504 | |
2505 | if (getNumElements() == 2 && getElement(I: 0) == dwarf::DW_OP_consts) |
2506 | return SignedOrUnsignedConstant::SignedConstant; |
2507 | |
2508 | if ((getNumElements() == 3 && getElement(I: 2) != dwarf::DW_OP_stack_value) || |
2509 | (getNumElements() == 6 && (getElement(I: 2) != dwarf::DW_OP_stack_value || |
2510 | getElement(I: 3) != dwarf::DW_OP_LLVM_fragment))) |
2511 | return std::nullopt; |
2512 | return getElement(I: 0) == dwarf::DW_OP_constu |
2513 | ? SignedOrUnsignedConstant::UnsignedConstant |
2514 | : SignedOrUnsignedConstant::SignedConstant; |
2515 | } |
2516 | |
2517 | DIExpression::ExtOps DIExpression::getExtOps(unsigned FromSize, unsigned ToSize, |
2518 | bool Signed) { |
2519 | dwarf::TypeKind TK = Signed ? dwarf::DW_ATE_signed : dwarf::DW_ATE_unsigned; |
DIExpression::ExtOps Ops{{dwarf::DW_OP_LLVM_convert, FromSize, TK,
2521 | dwarf::DW_OP_LLVM_convert, ToSize, TK}}; |
2522 | return Ops; |
2523 | } |
2524 | |
2525 | DIExpression *DIExpression::appendExt(const DIExpression *Expr, |
2526 | unsigned FromSize, unsigned ToSize, |
2527 | bool Signed) { |
2528 | return appendToStack(Expr, Ops: getExtOps(FromSize, ToSize, Signed)); |
2529 | } |
2530 | |
2531 | DIGlobalVariableExpression * |
2532 | DIGlobalVariableExpression::getImpl(LLVMContext &Context, Metadata *Variable, |
2533 | Metadata *Expression, StorageType Storage, |
2534 | bool ShouldCreate) { |
2535 | DEFINE_GETIMPL_LOOKUP(DIGlobalVariableExpression, (Variable, Expression)); |
2536 | Metadata *Ops[] = {Variable, Expression}; |
2537 | DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DIGlobalVariableExpression, Ops); |
2538 | } |
2539 | DIObjCProperty::DIObjCProperty(LLVMContext &C, StorageType Storage, |
2540 | unsigned Line, unsigned Attributes, |
2541 | ArrayRef<Metadata *> Ops) |
2542 | : DINode(C, DIObjCPropertyKind, Storage, dwarf::DW_TAG_APPLE_property, Ops), |
2543 | Line(Line), Attributes(Attributes) {} |
2544 | |
2545 | DIObjCProperty *DIObjCProperty::getImpl( |
2546 | LLVMContext &Context, MDString *Name, Metadata *File, unsigned Line, |
2547 | MDString *GetterName, MDString *SetterName, unsigned Attributes, |
2548 | Metadata *Type, StorageType Storage, bool ShouldCreate) { |
2549 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
2550 | assert(isCanonical(GetterName) && "Expected canonical MDString" ); |
2551 | assert(isCanonical(SetterName) && "Expected canonical MDString" ); |
2552 | DEFINE_GETIMPL_LOOKUP(DIObjCProperty, (Name, File, Line, GetterName, |
2553 | SetterName, Attributes, Type)); |
2554 | Metadata *Ops[] = {Name, File, GetterName, SetterName, Type}; |
2555 | DEFINE_GETIMPL_STORE(DIObjCProperty, (Line, Attributes), Ops); |
2556 | } |
2557 | |
2558 | DIImportedEntity *DIImportedEntity::getImpl(LLVMContext &Context, unsigned Tag, |
2559 | Metadata *Scope, Metadata *Entity, |
2560 | Metadata *File, unsigned Line, |
2561 | MDString *Name, Metadata *Elements, |
2562 | StorageType Storage, |
2563 | bool ShouldCreate) { |
2564 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
2565 | DEFINE_GETIMPL_LOOKUP(DIImportedEntity, |
2566 | (Tag, Scope, Entity, File, Line, Name, Elements)); |
2567 | Metadata *Ops[] = {Scope, Entity, Name, File, Elements}; |
2568 | DEFINE_GETIMPL_STORE(DIImportedEntity, (Tag, Line), Ops); |
2569 | } |
2570 | |
2571 | DIMacro *DIMacro::getImpl(LLVMContext &Context, unsigned MIType, unsigned Line, |
2572 | MDString *Name, MDString *Value, StorageType Storage, |
2573 | bool ShouldCreate) { |
2574 | assert(isCanonical(Name) && "Expected canonical MDString" ); |
2575 | DEFINE_GETIMPL_LOOKUP(DIMacro, (MIType, Line, Name, Value)); |
2576 | Metadata *Ops[] = {Name, Value}; |
2577 | DEFINE_GETIMPL_STORE(DIMacro, (MIType, Line), Ops); |
2578 | } |
2579 | |
2580 | DIMacroFile *DIMacroFile::getImpl(LLVMContext &Context, unsigned MIType, |
2581 | unsigned Line, Metadata *File, |
2582 | Metadata *Elements, StorageType Storage, |
2583 | bool ShouldCreate) { |
2584 | DEFINE_GETIMPL_LOOKUP(DIMacroFile, (MIType, Line, File, Elements)); |
2585 | Metadata *Ops[] = {File, Elements}; |
2586 | DEFINE_GETIMPL_STORE(DIMacroFile, (MIType, Line), Ops); |
2587 | } |
2588 | |
2589 | DIArgList *DIArgList::get(LLVMContext &Context, |
2590 | ArrayRef<ValueAsMetadata *> Args) { |
2591 | auto ExistingIt = Context.pImpl->DIArgLists.find_as(Val: DIArgListKeyInfo(Args)); |
2592 | if (ExistingIt != Context.pImpl->DIArgLists.end()) |
2593 | return *ExistingIt; |
2594 | DIArgList *NewArgList = new DIArgList(Context, Args); |
2595 | Context.pImpl->DIArgLists.insert(V: NewArgList); |
2596 | return NewArgList; |
2597 | } |
2598 | |
2599 | void DIArgList::handleChangedOperand(void *Ref, Metadata *New) { |
2600 | ValueAsMetadata **OldVMPtr = static_cast<ValueAsMetadata **>(Ref); |
2601 | assert((!New || isa<ValueAsMetadata>(New)) && |
2602 | "DIArgList must be passed a ValueAsMetadata" ); |
2603 | untrack(); |
2604 | // We need to update the set storage once the Args are updated since they |
2605 | // form the key to the DIArgLists store. |
2606 | getContext().pImpl->DIArgLists.erase(V: this); |
2607 | ValueAsMetadata *NewVM = cast_or_null<ValueAsMetadata>(Val: New); |
2608 | for (ValueAsMetadata *&VM : Args) { |
2609 | if (&VM == OldVMPtr) { |
2610 | if (NewVM) |
2611 | VM = NewVM; |
2612 | else |
2613 | VM = ValueAsMetadata::get(V: PoisonValue::get(T: VM->getValue()->getType())); |
2614 | } |
2615 | } |
2616 | // We've changed the contents of this DIArgList, and the set storage may |
2617 | // already contain a DIArgList with our new set of args; if it does, then we |
2618 | // must RAUW this with the existing DIArgList, otherwise we simply insert this |
2619 | // back into the set storage. |
2620 | DIArgList *ExistingArgList = getUniqued(Store&: getContext().pImpl->DIArgLists, Key: this); |
2621 | if (ExistingArgList) { |
2622 | replaceAllUsesWith(MD: ExistingArgList); |
2623 | // Clear this here so we don't try to untrack in the destructor. |
2624 | Args.clear(); |
2625 | delete this; |
2626 | return; |
2627 | } |
2628 | getContext().pImpl->DIArgLists.insert(V: this); |
2629 | track(); |
2630 | } |
2631 | void DIArgList::track() { |
2632 | for (ValueAsMetadata *&VAM : Args) |
2633 | if (VAM) |
2634 | MetadataTracking::track(Ref: &VAM, MD&: *VAM, Owner&: *this); |
2635 | } |
2636 | void DIArgList::untrack() { |
2637 | for (ValueAsMetadata *&VAM : Args) |
2638 | if (VAM) |
2639 | MetadataTracking::untrack(Ref: &VAM, MD&: *VAM); |
2640 | } |
2641 | void DIArgList::dropAllReferences(bool Untrack) { |
2642 | if (Untrack) |
2643 | untrack(); |
2644 | Args.clear(); |
2645 | ReplaceableMetadataImpl::resolveAllUses(/* ResolveUsers */ false); |
2646 | } |
2647 | |