1 | //===- UnwindInfoSection.cpp ----------------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "UnwindInfoSection.h" |
10 | #include "InputSection.h" |
11 | #include "Layout.h" |
12 | #include "OutputSection.h" |
13 | #include "OutputSegment.h" |
14 | #include "SymbolTable.h" |
15 | #include "Symbols.h" |
16 | #include "SyntheticSections.h" |
17 | #include "Target.h" |
18 | |
19 | #include "lld/Common/ErrorHandler.h" |
20 | #include "lld/Common/Memory.h" |
21 | #include "llvm/ADT/DenseMap.h" |
22 | #include "llvm/ADT/STLExtras.h" |
23 | #include "llvm/BinaryFormat/MachO.h" |
24 | #include "llvm/Support/Parallel.h" |
25 | |
26 | #include "mach-o/compact_unwind_encoding.h" |
27 | |
28 | #include <numeric> |
29 | |
30 | using namespace llvm; |
31 | using namespace llvm::MachO; |
32 | using namespace llvm::support::endian; |
33 | using namespace lld; |
34 | using namespace lld::macho; |
35 | |
// Limits imposed by the __unwind_info format: common-encoding-table indexes
// are 0..126, and the combined (common + per-page local) encoding index must
// fit in 8 bits.
#define COMMON_ENCODINGS_MAX 127
#define COMPACT_ENCODINGS_MAX 256

// Second-level pages are fixed at 4 KiB; entry capacity depends on whether a
// page uses the regular (8 bytes/entry) or compressed (4 bytes/entry) format.
#define SECOND_LEVEL_PAGE_BYTES 4096
#define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
#define REGULAR_SECOND_LEVEL_ENTRIES_MAX                                       \
  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
    sizeof(unwind_info_regular_second_level_page_header)) /                    \
   sizeof(unwind_info_regular_second_level_entry))
#define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX                                    \
  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
    sizeof(unwind_info_compressed_second_level_page_header)) /                 \
   sizeof(uint32_t))

// A compressed entry packs a 24-bit function offset with an 8-bit encoding
// index in a single 32-bit word.
#define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
#define COMPRESSED_ENTRY_FUNC_OFFSET_MASK                                      \
  UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)

// All architectures share the same DWARF section-offset field layout, so a
// single arch-independent constant (below) is safe.
static_assert(static_cast<uint32_t>(UNWIND_X86_64_DWARF_SECTION_OFFSET) ==
                  static_cast<uint32_t>(UNWIND_ARM64_DWARF_SECTION_OFFSET) &&
              static_cast<uint32_t>(UNWIND_X86_64_DWARF_SECTION_OFFSET) ==
                  static_cast<uint32_t>(UNWIND_X86_DWARF_SECTION_OFFSET));

constexpr uint64_t DWARF_SECTION_OFFSET = UNWIND_X86_64_DWARF_SECTION_OFFSET;
60 | |
61 | // Compact Unwind format is a Mach-O evolution of DWARF Unwind that |
62 | // optimizes space and exception-time lookup. Most DWARF unwind |
63 | // entries can be replaced with Compact Unwind entries, but the ones |
64 | // that cannot are retained in DWARF form. |
65 | // |
// This comment addresses the macro-level organization of the pre-link
// and post-link compact unwind tables. For the micro-level organization
// pertaining to the bitfield layout of the 32-bit compact unwind
// entries, see libunwind/include/mach-o/compact_unwind_encoding.h
70 | // |
71 | // Important clarifying factoids: |
72 | // |
73 | // * __LD,__compact_unwind is the compact unwind format for compiler |
74 | // output and linker input. It is never a final output. It could be |
75 | // an intermediate output with the `-r` option which retains relocs. |
76 | // |
77 | // * __TEXT,__unwind_info is the compact unwind format for final |
78 | // linker output. It is never an input. |
79 | // |
80 | // * __TEXT,__eh_frame is the DWARF format for both linker input and output. |
81 | // |
82 | // * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd |
83 | // level) by ascending address, and the pages are referenced by an |
84 | // index (1st level) in the section header. |
85 | // |
86 | // * Following the headers in __TEXT,__unwind_info, the bulk of the |
87 | // section contains a vector of compact unwind entries |
88 | // `{functionOffset, encoding}` sorted by ascending `functionOffset`. |
89 | // Adjacent entries with the same encoding can be folded to great |
90 | // advantage, achieving a 3-order-of-magnitude reduction in the |
91 | // number of entries. |
92 | // |
93 | // Refer to the definition of unwind_info_section_header in |
94 | // compact_unwind_encoding.h for an overview of the format we are encoding |
95 | // here. |
96 | |
97 | // TODO(gkm): how do we align the 2nd-level pages? |
98 | |
// The various fields in the on-disk representation of each compact unwind
// entry. `Ptr` fields are pointer-sized, so the field offsets depend on the
// target's word size.
#define FOR_EACH_CU_FIELD(DO)                                                  \
  DO(Ptr, functionAddress)                                                     \
  DO(uint32_t, functionLength)                                                 \
  DO(compact_unwind_encoding_t, encoding)                                      \
  DO(Ptr, personality)                                                         \
  DO(Ptr, lsda)

// Generates the CompactUnwindLayout class whose <field>Offset members (used
// below as cuLayout.*Offset) give each field's byte offset for a given word
// size.
CREATE_LAYOUT_CLASS(CompactUnwind, FOR_EACH_CU_FIELD);

#undef FOR_EACH_CU_FIELD
111 | |
// LLD's internal representation of a compact unwind entry.
struct CompactUnwindEntry {
  // Output virtual address of the function (set from Defined::getVA()).
  uint64_t functionAddress;
  // Length of the function's address range, in bytes.
  uint32_t functionLength;
  // 32-bit compact unwind encoding; see compact_unwind_encoding.h.
  compact_unwind_encoding_t encoding;
  // Personality routine, or nullptr (also nulled for DWARF-backed entries,
  // whose personality lives in the FDE instead).
  Symbol *personality;
  // Language-specific data area, or nullptr if the entry has none.
  InputSection *lsda;
};
120 | |
// Maps a compact unwind encoding to its index in an encoding table.
using EncodingMap = DenseMap<compact_unwind_encoding_t, size_t>;

// Bookkeeping for one 4 KiB second-level page of __TEXT,__unwind_info.
struct SecondLevelPage {
  // UNWIND_SECOND_LEVEL_REGULAR or UNWIND_SECOND_LEVEL_COMPRESSED.
  uint32_t kind;
  // Index into cuIndices of the first entry placed on this page.
  size_t entryIndex;
  // Number of entries on this page.
  size_t entryCount;
  // NOTE(review): byteCount appears to be unused in this file -- confirm
  // against callers before relying on (or removing) it.
  size_t byteCount;
  // Encodings private to this (compressed) page, in table order, plus a map
  // from encoding to its index in the combined common+local index space.
  std::vector<compact_unwind_encoding_t> localEncodings;
  EncodingMap localEncodingIndexes;
};
131 | |
// UnwindInfoSectionImpl allows us to avoid cluttering our header file with a
// lengthy definition of UnwindInfoSection.
class UnwindInfoSectionImpl final : public UnwindInfoSection {
public:
  UnwindInfoSectionImpl() : cuLayout(target->wordSize) {}
  uint64_t getSize() const override { return unwindInfoSize; }
  // Canonicalizes personalities and registers needed GOT entries.
  void prepare() override;
  // Relocates, sorts, folds, and paginates the entries; computes the size.
  void finalize() override;
  // Serializes the finalized tables into the output buffer.
  void writeTo(uint8_t *buf) const override;

private:
  void prepareRelocations(ConcatInputSection *);
  void relocateCompactUnwind(std::vector<CompactUnwindEntry> &);
  void encodePersonalities();
  Symbol *canonicalizePersonality(Symbol *);

  // Total byte size of the output section; computed by finalize().
  uint64_t unwindInfoSize = 0;
  // symbols, flattened into a vector once insertion is done (see finalize()).
  SmallVector<decltype(symbols)::value_type, 0> symbolsVec;
  // Field offsets of the on-disk compact unwind entry for target->wordSize.
  CompactUnwindLayout cuLayout;
  // (encoding, frequency) pairs, sorted by descending frequency.
  std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
  EncodingMap commonEncodingIndexes;
  // The entries here will be in the same order as their originating symbols
  // in symbolsVec.
  std::vector<CompactUnwindEntry> cuEntries;
  // Indices into the cuEntries vector.
  std::vector<size_t> cuIndices;
  // At most 3 unique personalities can be encoded (2-bit field).
  std::vector<Symbol *> personalities;
  // Canonical personality symbol for each (section, offset) address.
  SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
      personalityTable;
  // Indices into cuEntries for CUEs with a non-null LSDA.
  std::vector<size_t> entriesWithLsda;
  // Map of cuEntries index to an index within the LSDA array.
  DenseMap<size_t, uint32_t> lsdaIndex;
  std::vector<SecondLevelPage> secondLevelPages;
  // Byte offset of the first second-level page within the section.
  uint64_t level2PagesOffset = 0;
  // The highest-address function plus its size. The unwinder needs this to
  // determine the address range that is covered by unwind info.
  uint64_t cueEndBoundary = 0;
};
171 | |
// Registers this synthetic section as __TEXT,__unwind_info.
UnwindInfoSection::UnwindInfoSection()
    : SyntheticSection(segment_names::text, section_names::unwindInfo) {
  align = 4;
}
176 | |
177 | // Record function symbols that may need entries emitted in __unwind_info, which |
178 | // stores unwind data for address ranges. |
179 | // |
180 | // Note that if several adjacent functions have the same unwind encoding and |
181 | // personality function and no LSDA, they share one unwind entry. For this to |
182 | // work, functions without unwind info need explicit "no unwind info" unwind |
183 | // entries -- else the unwinder would think they have the unwind info of the |
184 | // closest function with unwind info right before in the image. Thus, we add |
185 | // function symbols for each unique address regardless of whether they have |
186 | // associated unwind info. |
187 | void UnwindInfoSection::addSymbol(const Defined *d) { |
188 | if (d->unwindEntry()) |
189 | allEntriesAreOmitted = false; |
190 | // We don't yet know the final output address of this symbol, but we know that |
191 | // they are uniquely determined by a combination of the isec and value, so |
192 | // we use that as the key here. |
193 | auto p = symbols.insert(KV: {{d->isec(), d->value}, d}); |
194 | // If we have multiple symbols at the same address, only one of them can have |
195 | // an associated unwind entry. |
196 | if (!p.second && d->unwindEntry()) { |
197 | assert(p.first->second == d || !p.first->second->unwindEntry()); |
198 | p.first->second = d; |
199 | } |
200 | } |
201 | |
// Canonicalize personality pointers and set up GOT entries before address
// assignment, so the 2-bit personality index stays within its 3-entry budget.
void UnwindInfoSectionImpl::prepare() {
  // This iteration needs to be deterministic, since prepareRelocations may add
  // entries to the GOT. Hence the use of a MapVector for
  // UnwindInfoSection::symbols.
  for (const Defined *d : make_second_range(symbols))
    if (d->unwindEntry()) {
      if (d->unwindEntry()->getName() == section_names::compactUnwind) {
        prepareRelocations(d->unwindEntry());
      } else {
        // We don't have to add entries to the GOT here because FDEs have
        // explicit GOT relocations, so Writer::scanRelocations() will add those
        // GOT entries. However, we still need to canonicalize the personality
        // pointers (like prepareRelocations() does for CU entries) in order
        // to avoid overflowing the 3-personality limit.
        FDE &fde = cast<ObjFile>(d->getFile())->fdes[d->unwindEntry()];
        fde.personality = canonicalizePersonality(fde.personality);
      }
    }
}
221 | |
// Compact unwind relocations have different semantics, so we handle them in a
// separate code path from regular relocations. First, we do not wish to add
// rebase opcodes for __LD,__compact_unwind, because that section doesn't
// actually end up in the final binary. Second, personality pointers always
// reside in the GOT and must be treated specially.
void UnwindInfoSectionImpl::prepareRelocations(ConcatInputSection *isec) {
  assert(!isec->shouldOmitFromOutput() &&
         "__compact_unwind section should not be omitted");

  // FIXME: Make this skip relocations for CompactUnwindEntries that
  // point to dead-stripped functions. That might save some amount of
  // work. But since there are usually just few personality functions
  // that are referenced from many places, at least some of them likely
  // live, it wouldn't reduce number of got entries.
  for (size_t i = 0; i < isec->relocs.size(); ++i) {
    Reloc &r = isec->relocs[i];
    assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
    // Since compact unwind sections aren't part of the inputSections vector,
    // they don't get canonicalized by scanRelocations(), so we have to do the
    // canonicalization here.
    if (auto *referentIsec = r.referent.dyn_cast<InputSection *>())
      r.referent = referentIsec->canonical();

    // Functions and LSDA entries always reside in the same object file as the
    // compact unwind entries that reference them, and thus appear as section
    // relocs. There is no need to prepare them. We only prepare relocs for
    // personality functions.
    if (r.offset != cuLayout.personalityOffset)
      continue;

    if (auto *s = r.referent.dyn_cast<Symbol *>()) {
      // Personality functions are nearly always system-defined (e.g.,
      // ___gxx_personality_v0 for C++) and relocated as dylib symbols. When an
      // application provides its own personality function, it might be
      // referenced by an extern Defined symbol reloc, or a local section reloc.
      if (auto *defined = dyn_cast<Defined>(s)) {
        // XXX(vyng) This is a special case for handling duplicate personality
        // symbols. Note that LD64's behavior is a bit different and it is
        // inconsistent with how symbol resolution usually work
        //
        // So we've decided not to follow it. Instead, simply pick the symbol
        // with the same name from the symbol table to replace the local one.
        //
        // (See discussions/alternatives already considered on D107533)
        if (!defined->isExternal())
          if (Symbol *sym = symtab->find(defined->getName()))
            if (!sym->isLazy())
              r.referent = s = sym;
      }
      if (auto *undefined = dyn_cast<Undefined>(s)) {
        treatUndefinedSymbol(*undefined, isec, r.offset);
        // treatUndefinedSymbol() can replace s with a DylibSymbol; re-check.
        if (isa<Undefined>(s))
          continue;
      }

      // Similar to canonicalizePersonality(), but we also register a GOT entry.
      if (auto *defined = dyn_cast<Defined>(s)) {
        // Check if we have created a synthetic symbol at the same address.
        Symbol *&personality =
            personalityTable[{defined->isec(), defined->value}];
        if (personality == nullptr) {
          personality = defined;
          in.got->addEntry(defined);
        } else if (personality != defined) {
          r.referent = personality;
        }
        continue;
      }

      assert(isa<DylibSymbol>(s));
      in.got->addEntry(s);
      continue;
    }

    if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
      assert(!isCoalescedWeak(referentIsec));
      // Personality functions can be referenced via section relocations
      // if they live in the same object file. Create placeholder synthetic
      // symbols for them in the GOT. If the corresponding symbol is already
      // in the GOT, use that to avoid creating a duplicate entry. All GOT
      // entries needed by non-unwind sections will have already been added
      // by this point.
      Symbol *&s = personalityTable[{referentIsec, r.addend}];
      if (s == nullptr) {
        Defined *const *gotEntry =
            llvm::find_if(referentIsec->symbols, [&](Defined const *d) {
              return d->value == static_cast<uint64_t>(r.addend) &&
                     d->isInGot();
            });
        if (gotEntry != referentIsec->symbols.end()) {
          s = *gotEntry;
        } else {
          // This runs after dead stripping, so the noDeadStrip argument does
          // not matter.
          s = make<Defined>("<internal>", /*file=*/nullptr, referentIsec,
                            r.addend, /*size=*/0, /*isWeakDef=*/false,
                            /*isExternal=*/false, /*isPrivateExtern=*/false,
                            /*includeInSymtab=*/true,
                            /*isReferencedDynamically=*/false,
                            /*noDeadStrip=*/false);
          s->used = true;
          in.got->addEntry(s);
        }
      }
      // Repoint the reloc at the canonical symbol; the addend has been folded
      // into the symbol's value, so clear it.
      r.referent = s;
      r.addend = 0;
    }
  }
}
332 | |
333 | Symbol *UnwindInfoSectionImpl::canonicalizePersonality(Symbol *personality) { |
334 | if (auto *defined = dyn_cast_or_null<Defined>(Val: personality)) { |
335 | // Check if we have created a synthetic symbol at the same address. |
336 | Symbol *&synth = personalityTable[{defined->isec(), defined->value}]; |
337 | if (synth == nullptr) |
338 | synth = defined; |
339 | else if (synth != defined) |
340 | return synth; |
341 | } |
342 | return personality; |
343 | } |
344 | |
// We need to apply the relocations to the pre-link compact unwind section
// before converting it to post-link form. There should only be absolute
// relocations here: since we are not emitting the pre-link CU section, there
// is no source address to make a relative location meaningful.
void UnwindInfoSectionImpl::relocateCompactUnwind(
    std::vector<CompactUnwindEntry> &cuEntries) {
  // Entries are independent of each other, so fill them in parallel; entry i
  // corresponds to symbolsVec[i].
  parallelFor(0, symbolsVec.size(), [&](size_t i) {
    CompactUnwindEntry &cu = cuEntries[i];
    const Defined *d = symbolsVec[i].second;
    cu.functionAddress = d->getVA();
    if (!d->unwindEntry())
      return;

    // If we have DWARF unwind info, create a slimmed-down CU entry that points
    // to it.
    if (d->unwindEntry()->getName() == section_names::ehFrame) {
      // The unwinder will look for the DWARF entry starting at the hint,
      // assuming the hint points to a valid CFI record start. If it
      // fails to find the record, it proceeds in a linear search through the
      // contiguous CFI records from the hint until the end of the section.
      // Ideally, in the case where the offset is too large to be encoded, we
      // would instead encode the largest possible offset to a valid CFI record,
      // but since we don't keep track of that, just encode zero -- the start of
      // the section is always the start of a CFI record.
      uint64_t dwarfOffsetHint =
          d->unwindEntry()->outSecOff <= DWARF_SECTION_OFFSET
              ? d->unwindEntry()->outSecOff
              : 0;
      cu.encoding = target->modeDwarfEncoding | dwarfOffsetHint;
      const FDE &fde = cast<ObjFile>(d->getFile())->fdes[d->unwindEntry()];
      cu.functionLength = fde.funcLength;
      // Omit the DWARF personality from compact-unwind entry so that we
      // don't need to encode it.
      cu.personality = nullptr;
      cu.lsda = fde.lsda;
      return;
    }

    assert(d->unwindEntry()->getName() == section_names::compactUnwind);

    // cuLayout's offsets are relative to the start of the on-disk entry,
    // whose first (pointer-sized) field is functionAddress; the entry's
    // `data` evidently starts past that field, so bias the pointer back by
    // one word to make the offsets line up.
    auto buf =
        reinterpret_cast<const uint8_t *>(d->unwindEntry()->data.data()) -
        target->wordSize;
    cu.functionLength =
        support::endian::read32le(buf + cuLayout.functionLengthOffset);
    cu.encoding = support::endian::read32le(buf + cuLayout.encodingOffset);
    // personality and lsda are carried as relocations rather than raw bytes.
    for (const Reloc &r : d->unwindEntry()->relocs) {
      if (r.offset == cuLayout.personalityOffset)
        cu.personality = r.referent.get<Symbol *>();
      else if (r.offset == cuLayout.lsdaOffset)
        cu.lsda = r.getReferentInputSection();
    }
  });
}
399 | |
400 | // There should only be a handful of unique personality pointers, so we can |
401 | // encode them as 2-bit indices into a small array. |
402 | void UnwindInfoSectionImpl::encodePersonalities() { |
403 | for (size_t idx : cuIndices) { |
404 | CompactUnwindEntry &cu = cuEntries[idx]; |
405 | if (cu.personality == nullptr) |
406 | continue; |
407 | // Linear search is fast enough for a small array. |
408 | auto it = find(Range&: personalities, Val: cu.personality); |
409 | uint32_t personalityIndex; // 1-based index |
410 | if (it != personalities.end()) { |
411 | personalityIndex = std::distance(first: personalities.begin(), last: it) + 1; |
412 | } else { |
413 | personalities.push_back(x: cu.personality); |
414 | personalityIndex = personalities.size(); |
415 | } |
416 | cu.encoding |= |
417 | personalityIndex << llvm::countr_zero( |
418 | Val: static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK)); |
419 | } |
420 | if (personalities.size() > 3) |
421 | error(msg: "too many personalities (" + Twine(personalities.size()) + |
422 | ") for compact unwind to encode" ); |
423 | } |
424 | |
425 | static bool canFoldEncoding(compact_unwind_encoding_t encoding) { |
426 | // From compact_unwind_encoding.h: |
427 | // UNWIND_X86_64_MODE_STACK_IND: |
428 | // A "frameless" (RBP not used as frame pointer) function large constant |
429 | // stack size. This case is like the previous, except the stack size is too |
430 | // large to encode in the compact unwind encoding. Instead it requires that |
431 | // the function contains "subq $nnnnnnnn,RSP" in its prolog. The compact |
432 | // encoding contains the offset to the nnnnnnnn value in the function in |
433 | // UNWIND_X86_64_FRAMELESS_STACK_SIZE. |
434 | // Since this means the unwinder has to look at the `subq` in the function |
435 | // of the unwind info's unwind address, two functions that have identical |
436 | // unwind info can't be folded if it's using this encoding since both |
437 | // entries need unique addresses. |
438 | static_assert(static_cast<uint32_t>(UNWIND_X86_64_MODE_STACK_IND) == |
439 | static_cast<uint32_t>(UNWIND_X86_MODE_STACK_IND)); |
440 | if ((target->cpuType == CPU_TYPE_X86_64 || target->cpuType == CPU_TYPE_X86) && |
441 | (encoding & UNWIND_MODE_MASK) == UNWIND_X86_64_MODE_STACK_IND) { |
442 | // FIXME: Consider passing in the two function addresses and getting |
443 | // their two stack sizes off the `subq` and only returning false if they're |
444 | // actually different. |
445 | return false; |
446 | } |
447 | return true; |
448 | } |
449 | |
// Scan the __LD,__compact_unwind entries and compute the space needs of
// __TEXT,__unwind_info and __TEXT,__eh_frame.
void UnwindInfoSectionImpl::finalize() {
  if (symbols.empty())
    return;

  // At this point, the address space for __TEXT,__text has been
  // assigned, so we can relocate the __LD,__compact_unwind entries
  // into a temporary buffer. Relocation is necessary in order to sort
  // the CU entries by function address. Sorting is necessary so that
  // we can fold adjacent CU entries with identical encoding+personality
  // and without any LSDA. Folding is necessary because it reduces the
  // number of CU entries by as much as 3 orders of magnitude!
  cuEntries.resize(symbols.size());
  // The "map" part of the symbols MapVector was only needed for deduplication
  // in addSymbol(). Now that we are done adding, move the contents to a plain
  // std::vector for indexed access.
  symbolsVec = symbols.takeVector();
  relocateCompactUnwind(cuEntries);

  // Rather than sort & fold the 32-byte entries directly, we create a
  // vector of indices to entries and sort & fold that instead.
  cuIndices.resize(cuEntries.size());
  std::iota(cuIndices.begin(), cuIndices.end(), 0);
  llvm::sort(cuIndices, [&](size_t a, size_t b) {
    return cuEntries[a].functionAddress < cuEntries[b].functionAddress;
  });

  // Record the ending boundary before we fold the entries.
  cueEndBoundary = cuEntries[cuIndices.back()].functionAddress +
                   cuEntries[cuIndices.back()].functionLength;

  // Fold adjacent entries with matching encoding+personality and without LSDA
  // We use three iterators on the same cuIndices to fold in-situ:
  // (1) `foldBegin` is the first of a potential sequence of matching entries
  // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
  //     The semi-open interval [ foldBegin .. foldEnd ) contains a range
  //     entries that can be folded into a single entry and written to ...
  // (3) `foldWrite`
  auto foldWrite = cuIndices.begin();
  for (auto foldBegin = cuIndices.begin(); foldBegin < cuIndices.end();) {
    auto foldEnd = foldBegin;
    // Common LSDA encodings (e.g. for C++ and Objective-C) contain offsets
    // from a base address. The base address is normally not contained directly
    // in the LSDA, and in that case, the personality function treats the
    // starting address of the function (which is computed by the unwinder) as
    // the base address and interprets the LSDA accordingly. The unwinder
    // computes the starting address of a function as the address associated
    // with its CU entry. For this reason, we cannot fold adjacent entries if
    // they have an LSDA, because folding would make the unwinder compute the
    // wrong starting address for the functions with the folded entries, which
    // in turn would cause the personality function to misinterpret the LSDA
    // for those functions. In the very rare case where the base address is
    // encoded directly in the LSDA, two functions at different addresses would
    // necessarily have different LSDAs, so their CU entries would not have
    // been folded anyway.
    while (++foldEnd < cuIndices.end() &&
           cuEntries[*foldBegin].encoding == cuEntries[*foldEnd].encoding &&
           !cuEntries[*foldBegin].lsda && !cuEntries[*foldEnd].lsda &&
           // If we've gotten to this point, we don't have an LSDA, which
           // should also imply that we don't have a personality function,
           // since in all likelihood a personality function needs the LSDA to
           // do anything useful. It can be technically valid to have a
           // personality function and no LSDA though (e.g. the C++ personality
           // __gxx_personality_v0 is just a no-op without LSDA), so we still
           // check for personality function equivalence to handle that case.
           cuEntries[*foldBegin].personality ==
               cuEntries[*foldEnd].personality &&
           canFoldEncoding(cuEntries[*foldEnd].encoding))
      ;
    *foldWrite++ = *foldBegin;
    foldBegin = foldEnd;
  }
  cuIndices.erase(foldWrite, cuIndices.end());

  encodePersonalities();

  // Count frequencies of the folded encodings
  EncodingMap encodingFrequencies;
  for (size_t idx : cuIndices)
    encodingFrequencies[cuEntries[idx].encoding]++;

  // Make a vector of encodings, sorted by descending frequency
  for (const auto &frequency : encodingFrequencies)
    commonEncodings.emplace_back(frequency);
  llvm::sort(commonEncodings,
             [](const std::pair<compact_unwind_encoding_t, size_t> &a,
                const std::pair<compact_unwind_encoding_t, size_t> &b) {
               if (a.second == b.second)
                 // When frequencies match, secondarily sort on encoding
                 // to maintain parity with validate-unwind-info.py
                 return a.first > b.first;
               return a.second > b.second;
             });

  // Truncate the vector to 127 elements.
  // Common encoding indexes are limited to 0..126, while encoding
  // indexes 127..255 are local to each second-level page
  if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
    commonEncodings.resize(COMMON_ENCODINGS_MAX);

  // Create a map from encoding to common-encoding-table index
  for (size_t i = 0; i < commonEncodings.size(); i++)
    commonEncodingIndexes[commonEncodings[i].first] = i;

  // Split folded encodings into pages, where each page is limited by ...
  // (a) 4 KiB capacity
  // (b) 24-bit difference between first & final function address
  // (c) 8-bit compact-encoding-table index,
  //     for which 0..126 references the global common-encodings table,
  //     and 127..255 references a local per-second-level-page table.
  // First we try the compact format and determine how many entries fit.
  // If more entries fit in the regular format, we use that.
  for (size_t i = 0; i < cuIndices.size();) {
    size_t idx = cuIndices[i];
    secondLevelPages.emplace_back();
    SecondLevelPage &page = secondLevelPages.back();
    page.entryIndex = i;
    // (b): compressed entries store a 24-bit offset from the page's first
    // function address.
    uint64_t functionAddressMax =
        cuEntries[idx].functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
    // Local encoding indices start after the common-encoding indices.
    size_t n = commonEncodings.size();
    size_t wordsRemaining =
        SECOND_LEVEL_PAGE_WORDS -
        sizeof(unwind_info_compressed_second_level_page_header) /
            sizeof(uint32_t);
    while (wordsRemaining >= 1 && i < cuIndices.size()) {
      idx = cuIndices[i];
      const CompactUnwindEntry *cuPtr = &cuEntries[idx];
      if (cuPtr->functionAddress >= functionAddressMax)
        break;
      if (commonEncodingIndexes.count(cuPtr->encoding) ||
          page.localEncodingIndexes.count(cuPtr->encoding)) {
        // Known encoding: the entry costs one word.
        i++;
        wordsRemaining--;
      } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
        // New local encoding: one word for the entry, one for the encoding.
        page.localEncodings.emplace_back(cuPtr->encoding);
        page.localEncodingIndexes[cuPtr->encoding] = n++;
        i++;
        wordsRemaining -= 2;
      } else {
        break;
      }
    }
    page.entryCount = i - page.entryIndex;

    // If this is not the final page, see if it's possible to fit more entries
    // by using the regular format. This can happen when there are many unique
    // encodings, and we saturated the local encoding table early.
    if (i < cuIndices.size() &&
        page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
      page.kind = UNWIND_SECOND_LEVEL_REGULAR;
      page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
                                 cuIndices.size() - page.entryIndex);
      i = page.entryIndex + page.entryCount;
    } else {
      page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
    }
  }

  // Assign each entry its position in the LSDA index array; entries without
  // an LSDA just record the next array slot (used for range lookups).
  for (size_t idx : cuIndices) {
    lsdaIndex[idx] = entriesWithLsda.size();
    if (cuEntries[idx].lsda)
      entriesWithLsda.push_back(idx);
  }

  // compute size of __TEXT,__unwind_info section
  level2PagesOffset = sizeof(unwind_info_section_header) +
                      commonEncodings.size() * sizeof(uint32_t) +
                      personalities.size() * sizeof(uint32_t) +
                      // The extra second-level-page entry is for the sentinel
                      (secondLevelPages.size() + 1) *
                          sizeof(unwind_info_section_header_index_entry) +
                      entriesWithLsda.size() *
                          sizeof(unwind_info_section_header_lsda_index_entry);
  unwindInfoSize =
      level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
}
627 | |
// All inputs are relocated and output addresses are known, so write!

void UnwindInfoSectionImpl::writeTo(uint8_t *buf) const {
  assert(!cuIndices.empty() && "call only if there is unwind info");

  // section header
  auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
  uip->version = 1;
  uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
  uip->commonEncodingsArrayCount = commonEncodings.size();
  uip->personalityArraySectionOffset =
      uip->commonEncodingsArraySectionOffset +
      (uip->commonEncodingsArrayCount * sizeof(uint32_t));
  uip->personalityArrayCount = personalities.size();
  uip->indexSectionOffset = uip->personalityArraySectionOffset +
                            (uip->personalityArrayCount * sizeof(uint32_t));
  // +1 for the trailing sentinel index entry written below.
  uip->indexCount = secondLevelPages.size() + 1;

  // Common encodings
  auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
  for (const auto &encoding : commonEncodings)
    *i32p++ = encoding.first;

  // Personalities (stored as image-relative GOT offsets)
  for (const Symbol *personality : personalities)
    *i32p++ = personality->getGotVA() - in.header->addr;

  // FIXME: LD64 checks and warns about gaps or overlaps in cuEntries address
  // ranges. We should do the same too.

  // Level-1 index
  uint32_t lsdaOffset =
      uip->indexSectionOffset +
      uip->indexCount * sizeof(unwind_info_section_header_index_entry);
  uint64_t l2PagesOffset = level2PagesOffset;
  auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
  for (const SecondLevelPage &page : secondLevelPages) {
    size_t idx = cuIndices[page.entryIndex];
    iep->functionOffset = cuEntries[idx].functionAddress - in.header->addr;
    iep->secondLevelPagesSectionOffset = l2PagesOffset;
    iep->lsdaIndexArraySectionOffset =
        lsdaOffset + lsdaIndex.lookup(idx) *
                         sizeof(unwind_info_section_header_lsda_index_entry);
    iep++;
    l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
  }
  // Level-1 sentinel: marks the end of the covered address range.
  // XXX(vyng): Note that LD64 adds +1 here.
  // Unsure whether it's a bug or it's their workaround for something else.
  // See comments from https://reviews.llvm.org/D138320.
  iep->functionOffset = cueEndBoundary - in.header->addr;
  iep->secondLevelPagesSectionOffset = 0;
  iep->lsdaIndexArraySectionOffset =
      lsdaOffset + entriesWithLsda.size() *
                       sizeof(unwind_info_section_header_lsda_index_entry);
  iep++;

  // LSDAs
  auto *lep =
      reinterpret_cast<unwind_info_section_header_lsda_index_entry *>(iep);
  for (size_t idx : entriesWithLsda) {
    const CompactUnwindEntry &cu = cuEntries[idx];
    lep->lsdaOffset = cu.lsda->getVA(/*off=*/0) - in.header->addr;
    lep->functionOffset = cu.functionAddress - in.header->addr;
    lep++;
  }

  // Level-2 pages
  auto *pp = reinterpret_cast<uint32_t *>(lep);
  for (const SecondLevelPage &page : secondLevelPages) {
    if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
      // Compressed entries store 24-bit offsets relative to the page's first
      // function address.
      uintptr_t functionAddressBase =
          cuEntries[cuIndices[page.entryIndex]].functionAddress;
      auto *p2p =
          reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
              pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_compressed_second_level_page_header);
      p2p->entryCount = page.entryCount;
      p2p->encodingsPageOffset =
          p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
      p2p->encodingsCount = page.localEncodings.size();
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry &cue =
            cuEntries[cuIndices[page.entryIndex + i]];
        // Prefer the common-encoding index; fall back to the page-local one
        // (finalize() guarantees one of the two exists).
        auto it = commonEncodingIndexes.find(cue.encoding);
        if (it == commonEncodingIndexes.end())
          it = page.localEncodingIndexes.find(cue.encoding);
        *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
                (cue.functionAddress - functionAddressBase);
      }
      if (!page.localEncodings.empty())
        memcpy(ep, page.localEncodings.data(),
               page.localEncodings.size() * sizeof(uint32_t));
    } else {
      auto *p2p =
          reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_regular_second_level_page_header);
      p2p->entryCount = page.entryCount;
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry &cue =
            cuEntries[cuIndices[page.entryIndex + i]];
        *ep++ = cue.functionAddress;
        *ep++ = cue.encoding;
      }
    }
    pp += SECOND_LEVEL_PAGE_WORDS;
  }
}
742 | |
// Factory function: lets other translation units create the section without
// seeing the file-local UnwindInfoSectionImpl definition.
UnwindInfoSection *macho::makeUnwindInfoSection() {
  return make<UnwindInfoSectionImpl>();
}
746 | |