//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

void DwarfExpression::emitConstu(uint64_t Value) {
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
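    // DW_OP_lit0 followed by DW_OP_not pushes an all-ones value in just two
    // bytes, whereas DW_OP_constu would need a ten-byte ULEB128 operand to
    // encode UINT64_MAX.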
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}

void DwarfExpression::addReg(int64_t DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

void DwarfExpression::addBReg(int64_t DwarfReg, int64_t Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int64_t Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
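  // DW_OP_piece can only describe a whole number of bytes taken from the start
  // of a location; fall back to DW_OP_bit_piece whenever there is a bit offset
  // or the size is not a byte multiple.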
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
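  // Record how many bits of the variable have been described so far; this lets
  // addFragmentOffset() pad any gap before the next fragment.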
  this->OffsetInBits += SizeInBits;
}

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}

bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!MachineReg.isPhysical()) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    // Try getting dwarf register for virtual register anyway, e.g. for NVPTX.
    int64_t Reg = TRI.getDwarfRegNumForVirtReg(MachineReg, false);
    if (Reg > 0) {
      DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
      return true;
    }
    return false;
  }

  int64_t Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCPhysReg SR : TRI.superregs(MachineReg)) {
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCPhysReg SR : TRI.subregs(MachineReg)) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg < 0 || Offset + Size > RegSize)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && !CurSubReg.subsetOf(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}

void DwarfExpression::addStackValue() {
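  // DW_OP_stack_value was only introduced in DWARF 4, so the operator is
  // simply omitted for earlier versions.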
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addBooleanConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  if (Value == 0)
    emitOp(dwarf::DW_OP_lit0);
  else
    emitOp(dwarf::DW_OP_lit1);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
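    // A value that fits into a single 64-bit chunk needs no piece wrapping.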
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}

void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below is emitting the value starting at the least significant
    // byte, so we need to perform a byte-swap to get the byte order correct
    // in case of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}

bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit a call site
  // parameter expression and if that expression is just a register location,
  // emit it with addBReg and offset 0, because we should emit a DWARF
  // expression representing a value, rather than a location.
  if ((!isParameterValue() && !isMemoryLocation() && !HasComplexExpression) ||
      isEntryValue()) {
    auto FragmentInfo = ExprCursor.getFragmentInfo();
    unsigned RegSize = 0;
    for (auto &Reg : DwarfRegs) {
      RegSize += Reg.SubRegSize;
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      if (FragmentInfo)
        if (RegSize > FragmentInfo->SizeInBits)
          // If the register is larger than the current fragment stop
          // once the fragment is covered.
          break;
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue()) {
      finalizeEntryValue();

      if (!isIndirect() && !isParameterValue() && !HasComplexExpression &&
          DwarfVersion >= 4)
        emitOp(dwarf::DW_OP_stack_value);
    }

    DwarfRegs.clear();
    // If we need to mask out a subregister, do it now, unless the next
    // operation would emit an OpPiece anyway.
    auto NextOp = ExprCursor.peek();
    if (SubRegisterSizeInBits && NextOp &&
        (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
      maskSubRegister();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  // TODO: We should not give up here but the following code needs to be
  // changed to deal with multiple (sub)registers first.
  if (DwarfRegs.size() > 1) {
    LLVM_DEBUG(dbgs() << "TODO: giving up on debug information due to "
                         "multi-register usage.\n");
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus]  --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
  auto NextOp = ExprCursor.peek();
  if (SubRegisterSizeInBits && NextOp &&
      (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  return true;
}

void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  SavedLocationKind = LocationKind;
  LocationKind = Register;
  LocationFlags |= EntryValue;
  IsEmittingEntryValue = true;
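  // Stage the upcoming register location in a temporary buffer: the entry
  // value operation needs the size of its sub-expression as an operand, so the
  // block is measured and committed later in finalizeEntryValue().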
  enableTemporaryBuffer();
}

void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  LocationFlags &= ~EntryValue;
  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU, otherwise we
  // create a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor) {
  addExpression(std::move(ExprCursor),
                [](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
                  llvm_unreachable("unhandled opcode found in expression");
                });
}

bool DwarfExpression::addExpression(
    DIExpressionCursor &&ExprCursor,
    llvm::function_ref<bool(unsigned, DIExpressionCursor &)> InsertArg) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

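  // Remember the operand of the previous DW_OP_LLVM_convert, so that a pair of
  // conversions can be lowered to a legacy sign- or zero-extension when
  // DW_OP_convert is not available (see the DW_OP_LLVM_convert case below).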
  std::optional<DIExpression::ExprOperand> PrevConvertOp;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

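    // DW_OP_regN and DW_OP_bregN encode the register number in the opcode
    // itself and can be passed through directly (re-emitting the bregN offset
    // as its signed operand).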
    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_arg:
      if (!InsertArg(Op->getArg(0), ExprCursor)) {
        LocationKind = Unknown;
        return false;
      }
      break;
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that was already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return true;
    }
    case dwarf::DW_OP_LLVM_extract_bits_sext:
    case dwarf::DW_OP_LLVM_extract_bits_zext: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned BitOffset = Op->getArg(0);
      unsigned DerefSize = 0;
      // Operations are done in the DWARF "generic type" whose size
      // is the size of a pointer.
      unsigned PtrSizeInBytes = CU.getAsmPrinter()->MAI->getCodePointerSize();

      // If we have a memory location then dereference to get the value, though
      // we have to make sure we don't dereference any bytes past the end of
      // the object.
      if (isMemoryLocation()) {
        DerefSize = alignTo(BitOffset + SizeInBits, 8) / 8;
        if (DerefSize == PtrSizeInBytes) {
          emitOp(dwarf::DW_OP_deref);
        } else {
          emitOp(dwarf::DW_OP_deref_size);
          emitUnsigned(DerefSize);
        }
      }

      // If a dereference was emitted for an unsigned value, and
      // there's no bit offset, then a bit of optimization is
      // possible.
      if (OpNum == dwarf::DW_OP_LLVM_extract_bits_zext && BitOffset == 0) {
        if (8 * DerefSize == SizeInBits) {
          // The correct value is already on the stack.
        } else {
          // No need to shift, we can just mask off the desired bits.
          emitOp(dwarf::DW_OP_constu);
          emitUnsigned((1ULL << SizeInBits) - 1);
          emitOp(dwarf::DW_OP_and);
        }
      } else {
        // Extract the bits by a shift left (to shift out the bits after what
        // we want to extract) followed by shift right (to shift the bits to
        // position 0 and also sign/zero extend).
        unsigned LeftShift = PtrSizeInBytes * 8 - (SizeInBits + BitOffset);
        unsigned RightShift = LeftShift + BitOffset;
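        // For example, with a 64-bit generic type, SizeInBits = 8 and
        // BitOffset = 4 give LeftShift = 52 and RightShift = 56: the left
        // shift discards everything above the field, and the right shift
        // moves it down to bit 0 while sign- or zero-filling the upper bits.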
        if (LeftShift) {
          emitOp(dwarf::DW_OP_constu);
          emitUnsigned(LeftShift);
          emitOp(dwarf::DW_OP_shl);
        }
        if (RightShift) {
          emitOp(dwarf::DW_OP_constu);
          emitUnsigned(RightShift);
          emitOp(OpNum == dwarf::DW_OP_LLVM_extract_bits_sext
                     ? dwarf::DW_OP_shra
                     : dwarf::DW_OP_shr);
        }
      }

      // The value is now at the top of the stack, so set the location to
      // implicit so that we get a stack_value at the end.
      LocationKind = Implicit;
      break;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
    case dwarf::DW_OP_rot:
    case dwarf::DW_OP_eq:
    case dwarf::DW_OP_ne:
    case dwarf::DW_OP_gt:
    case dwarf::DW_OP_ge:
    case dwarf::DW_OP_lt:
    case dwarf::DW_OP_le:
    case dwarf::DW_OP_neg:
    case dwarf::DW_OP_abs:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location list, simply emit the index into the raw
        // byte stream as ULEB128; DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location, insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = std::nullopt;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();

  return true;
}

/// Add masking operations to stencil out a subregister.
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
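  // Shift the subregister's bits down to bit 0 first, then clear everything
  // above its width.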
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
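  // X >> (FromBits - 1) isolates the sign bit; multiplying it by ~0 yields
  // either all ones or all zeroes, which is then shifted above the low
  // FromBits bits and ORed back into the duplicated X.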
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // Heuristic to decide the most efficient encoding.
  // A ULEB can encode 7 1-bits per byte.
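  // The shift-based fallback in the else branch below costs roughly six bytes
  // (lit1, constu <FromBits>, shl, lit1, minus), so emit the mask directly as
  // long as its ULEB128 encoding stays within about five bytes.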
  if (FromBits / 7 < 1+1+1+1+1) {
    // (X & (1 << FromBits - 1))
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned((1ULL << FromBits) - 1);
  } else {
    // Note that the DWARF 4 stack consists of pointer-sized elements,
    // so technically it doesn't make sense to shift left more than 64
    // bits. We leave that for the consumer to decide though. LLDB for
    // example uses APInt for the stack elements and can still deal
    // with this.
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(FromBits);
    emitOp(dwarf::DW_OP_shl);
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_minus);
  }
  emitOp(dwarf::DW_OP_and);
}

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index == 4/*TI_LOCAL_INDIRECT*/ ? 0/*TI_LOCAL*/ : Index);
  emitUnsigned(Offset);
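  // An indirect local holds the address of the value rather than the value
  // itself, so it is described as a memory location; other target indices
  // push the value and therefore yield an implicit location.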
  if (Index == 4 /*TI_LOCAL_INDIRECT*/) {
    assert(LocationKind == Unknown);
    LocationKind = Memory;
  } else {
    assert(LocationKind == Implicit || LocationKind == Unknown);
    LocationKind = Implicit;
  }
}