//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
#include "llvm/Support/Compression.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
#include <zlib.h>
#endif
#if LLVM_ENABLE_ZSTD
#include <zstd.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

uint8_t *Out::bufferStart;
PhdrEntry *Out::tlsPhdr;
OutputSection *Out::elfHeader;
OutputSection *Out::programHeaders;
OutputSection *Out::preinitArray;
OutputSection *Out::initArray;
OutputSection *Out::finiArray;

SmallVector<OutputSection *, 0> elf::outputSections;

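// Note: SHF_ARM_PURECODE marks execute-only code. Such sections must not be
// readable, which is why PF_R is deliberately omitted for them on EM_ARM
// below.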
uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = addralign;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
    : SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
                  /*Info*/ 0, /*Link*/ 0) {}

// We allow sections of the types listed below to be merged into a single
// SHT_PROGBITS section. This is typically done by linker scripts. Merging
// nobits and progbits forces disk space to be allocated for the nobits
// sections. The other types don't require any special treatment on top of
// progbits, so there doesn't seem to be any harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
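// Illustrative (hypothetical) script fragment: a rule such as
//   .data : { *(.data) *(.bss.keep) }
// places SHT_NOBITS input into a SHT_PROGBITS output section; the zero bytes
// then occupy file space as described above.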
static bool canMergeToProgbits(unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
}

// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not be
// used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold InputSection.
// 2. Add the InputSection to the InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (!hasInputSections && !typeIsSet) {
      type = isec->type;
    } else if (isStaticRelSecType(type) && isStaticRelSecType(isec->type) &&
               (type == SHT_CREL) != (isec->type == SHT_CREL)) {
      // Combine mixed SHT_REL[A] and SHT_CREL into SHT_CREL and rename the
      // section accordingly. Test the ".rela" prefix before ".rel" since the
      // latter is a prefix of the former.
      type = SHT_CREL;
      if (name.consume_front(".rela"))
        name = saver().save(".crel" + name);
      else if (name.consume_front(".rel"))
        name = saver().save(".crel" + name);
    } else {
      if (typeIsSet || !canMergeToProgbits(type) ||
          !canMergeToProgbits(isec->type)) {
        // Changing the type to SHT_NOBITS via (NOLOAD) expresses the intention
        // that the contents at that address are provided by some other means.
        // Some projects (e.g.
        // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on this
        // behavior. Other type mismatches are an error.
        if (type != SHT_NOBITS) {
          errorOrWarn("section type mismatch for " + isec->name + "\n>>> " +
                      toString(isec) + ": " +
                      getELFSectionTypeName(config->emachine, isec->type) +
                      "\n>>> output section " + name + ": " +
                      getELFSectionTypeName(config->emachine, type));
        }
      }
      if (!typeIsSet)
        type = SHT_PROGBITS;
    }
  }
  if (!hasInputSections) {
    // If isec is the first section to be added to this output section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      error("incompatible section flags for " + name + "\n>>> " +
            toString(isec) + ": 0x" + utohexstr(isec->flags) +
            "\n>>> output section " + name + ": 0x" + utohexstr(flags));
  }

  isec->parent = this;
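  // Flag merging below: SHF_ARM_PURECODE survives only if every input section
  // carries it (AND); all other flags accumulate across inputs (OR).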
  uint64_t andMask =
      config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

static MergeSyntheticSection *createMergeSynthetic(StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t addralign) {
  if ((flags & SHF_STRINGS) && config->optimize >= 2)
    return make<MergeTailSection>(name, type, flags, addralign);
  return make<MergeNoTailSection>(name, type, flags, addralign);
}
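
// Behavioral note (a sketch of the idea, not code from this file): tail
// merging lets a string that is a suffix of another share its storage, e.g.
// "bc\0" can live inside "abc\0". MergeNoTailSection only deduplicates
// identical pieces, which is cheaper and is used at lower optimization levels.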

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections(LinkerScript *script) {
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn =
            createMergeSynthetic(s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
        // The merge synthetic section inherits the potential spill locations of
        // its first contained section.
        auto it = script->potentialSpillLists.find(ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(syn, it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}

static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.emplace_back(order(s), s);
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize() {
  if (config->oFormatBinary)
    return 0;
  return Out::elfHeader->size + Out::programHeaders->size;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}

static void nopInstrFill(uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}
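
// Note on the invariant above: nopFiller[k - 1] is expected to be a k-byte NOP
// sequence (for example, the x86-64 target provides NOPs of 1 to 8 bytes), so
// any remainder shorter than the largest sequence is covered by exactly one
// table entry, which the assert checks.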

// Fill [Buf, Buf + Size) with Filler.
// This is used for linker script "=fillexp" command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}
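
// For example, fill(buf, 10, {0xaa, 0xbb, 0xcc, 0xdd}) writes the 4-byte
// pattern twice and then its first two bytes, so a gap whose size is not a
// multiple of four ends with a truncated copy of the pattern.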

#if LLVM_ENABLE_ZLIB
static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
                                            int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    errorOrWarn("--compress-sections: deflateInit2 returned " + Twine(res));
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

// Compress certain non-SHF_ALLOC sections:
//
// * (if --compress-debug-sections is specified) non-empty .debug_* sections
// * (if --compress-sections is specified) matched sections
template <class ELFT> void OutputSection::maybeCompress() {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  DebugCompressionType ctype = DebugCompressionType::None;
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && config->compressDebugSections &&
      name.starts_with(".debug_"))
    ctype = *config->compressDebugSections;
  for (auto &[glob, t, l] : config->compressSections)
    if (glob.match(name))
      std::tie(ctype, level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    errorOrWarn("--compress-sections: section '" + name +
                "' with the SHF_ALLOC flag cannot be compressed");
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(buf.get(), tg);
  }
  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been widely accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;

  // Split input into 1-MiB shards.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
      ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0};
      ZSTD_outBuffer zob = {nullptr, 0, 0};
      size_t size;
      do {
        // Allocate a buffer of half of the input size, and grow it by 1.5x if
        // insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              zob.size ? zob.size * 3 / 2 : std::max<size_t>(zib.size / 4, 64));
          zob = {out.data(), out.size(), zob.pos};
        }
        size = ZSTD_compressStream2(cctx, &zob, &zib, ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0);
      out.truncate(zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(shardsIn[i], level,
                                  i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
    });

    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1; // Initial Adler-32 value
    compressedSize += 2;   // zlib header (CMF, FLG)
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif

  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}

static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(buf, data);
  else if (size == 4)
    write32(buf, data);
  else if (size == 8)
    write64(buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

template <class ELFT>
void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;
  if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
    buf += encodeULEB128(crelHeader, buf);
    memcpy(buf, crelBody.data(), crelBody.size());
    return;
  }

  // If the section is compressed due to
  // --compress-debug-sections/--compress-sections, the content is already
  // known.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_type = compressed.type;
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = addralign;
    buf += sizeof(*chdr);

    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    if (compressed.type == ELFCOMPRESS_ZLIB) {
      buf[0] = 0x78;  // CMF
      buf[1] = 0x01;  // FLG: best speed
      offsets[0] = 2; // zlib header
      write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    }
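    // Resulting zlib stream layout: the two header bytes above, the
    // concatenated raw deflate shards (all but the last flushed to a byte
    // boundary), and a trailing big-endian Adler-32 of the uncompressed data.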

    // Compute shard offsets.
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
    parallelFor(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });
    return;
  }

  // Write leading padding.
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller();
  bool nonZeroFiller = read32(filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);

  auto fn = [=](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(buf + isec->outSecOff);

      // When in Arm BE8 mode, the linker has to convert the big-endian
      // instructions to little-endian, leaving the data big-endian.
      if (config->emachine == EM_ARM && !config->isLE && config->armBe8 &&
          (flags & SHF_EXECINSTR))
        convertArmInstructionstoBE8(isec, buf + isec->outSecOff);

      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(target->nopInstrs);
          nopInstrFill(start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };

  // If there is any BYTE()-family command (rare), write the section content
  // first, then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(buf + data->offset, data->expression().getValue(), data->size);
    }
  if (written || !numSections)
    return;

  // There is no data command. Write content asynchronously to overlap the
  // write time with other output sections. Note, if a linker script specifies
  // overlapping output sections (needs --noinhibit-exec or --no-check-sections
  // to suppress the error), the output may be non-deterministic.
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.spawn([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}

static void finalizeShtGroup(OutputSection *os, InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info then contains the index of the entry in the symbol table section
  // that provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = in.symTab->getSymbolIndex(*symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}

template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(raw_svector_ostream &os, Elf_Crel<sizeof(uint) == 8> &out,
              uint offset, const Symbol &sym, uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    auto *d = dyn_cast<Defined>(&sym);
    if (d) {
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

  // Similar to llvm::ELF::encodeCrel.
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    os << char(b | 0x80);
    encodeULEB128(deltaOffset >> 4, os);
  }
  if (b & 1) {
    encodeSLEB128(static_cast<int32_t>(symidx - out.r_symidx), os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(static_cast<int32_t>(type - out.r_type), os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}
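
// Worked example of the encoding above: a relocation 8 bytes past the previous
// one, with a new symbol index but unchanged type and addend, emits the single
// header byte 8 * 8 + 1 == 0x41 followed by one SLEB128 symbol-index delta. A
// delta offset of at least 0x10 sets bit 7 and stores deltaOffset >> 4 as a
// ULEB128 continuation.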

template <class ELFT>
static size_t relToCrel(raw_svector_ostream &os, Elf_Crel<ELFT::Is64Bits> &out,
                        InputSection *relSec, InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(relSec->file);
  if (relSec->type == SHT_REL) {
    // REL conversion is complex and not yet supported.
    errorOrWarn(toString(relSec) + ": REL cannot be converted to CREL");
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels) {
    encodeOneCrel<typename ELFT::uint>(
        os, out, sec->getVA(rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(config->isMips64EL), getAddend<ELFT>(rel));
  }
  return rels.size();
}

// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
// Input CREL sections are decoded while REL[A] sections need to be converted.
template <bool is64> void OutputSection::finalizeNonAllocCrel() {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries) {
        encodeOneCrel<uint>(os, out, uint(sec->getVA(r.r_offset)),
                            file.getSymbol(r.r_symidx), r.r_type, r.r_addend);
      }
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += config->isLE ? relToCrel<ELF64LE>(os, out, relSec, sec)
                                 : relToCrel<ELF64BE>(os, out, relSec, sec);
    } else {
      totalCount += config->isLE ? relToCrel<ELF32LE>(os, out, relSec, sec)
                                 : relToCrel<ELF32BE>(os, out, relSec, sec);
    }
  }

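  // The CREL header ULEB128 packs the relocation count into its high bits; the
  // low three bits are flags, and the constant 4 below should correspond to
  // llvm::ELF::CREL_HDR_ADDEND, indicating that explicit addends follow.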
  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(crelHeader) + crelBody.size();
}

void OutputSection::finalize() {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field: we
    // need to translate the InputSection sh_link to the OutputSection sh_link;
    // all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(this, first);
    return;
  }

  if (!config->copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first', so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the empty
  // synthetic .rela.plt and 'first' can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (config->is64)
      finalizeNonAllocCrel<true>();
    else
      finalizeNonAllocCrel<false>();
  }
}

// Returns true if s is one of the many forms the compiler driver may use for
// crtbegin files.
//
// GCC uses any of crtbegin[<empty>|S|T].o.
// Clang uses GCC's naming plus clang_rt.crtbegin[-<arch>|<empty>].o.
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}
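
// For example: isCrt("crtbegin.o", "crtbegin") and isCrt("crtbeginS.o",
// "crtbegin") are true (at most one trailing character is allowed), while
// isCrt("crtbeginTS.o", "crtbegin") is false; a clang_rt name such as
// "clang_rt.crtbegin-x86_64.o" matches regardless of the suffix length.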

// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65436), .ctors.00200 (65336). In GNU ld's
// internal linker scripts, the sorting is done by string comparison, which
// achieves the same goal provided the optional priority values have the same
// length.
//
// In an ideal world, we wouldn't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer.) However, there
// are too many real-world use cases of .ctors, so we had no choice but to
// support them with these rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, returns N
// (65535-N if the name is .ctors.N or .dtors.N). Otherwise, returns 65536,
// which is one greater than the lowest priority.
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
    v = 65535 - v;
  return v;
}
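
// For example: getPriority(".init_array.00103") == 103;
// getPriority(".ctors.00103") == 65535 - 103 == 65432; and
// getPriority(".ctors") == 65536, since "ctors" after the last '.' is not a
// number.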

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}

ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      ret = isd->sections;
    } else {
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}

std::array<uint8_t, 4> OutputSection::getFiller() {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return target->trapInstr;
  return {0, 0, 0, 0};
}

void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
  assert(config->writeAddends && config->checkDynamicRelocs);
  assert(isStaticRelSecType(type));
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, config->emachine) &&
          (rel.inputSec == in.ppc64LongBranchTarget.get() ||
           rel.inputSec == in.igotPlt.get()))
        continue;
      const uint8_t *relocTarget =
          bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        internalLinkerError(
            getErrorLocation(relocTarget),
            "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
                " instead of 0x" + utohexstr(addend) +
                " for dynamic relocation " + toString(rel.type) +
                " at offset 0x" + utohexstr(rel.getOffset()) +
                (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>();
template void OutputSection::maybeCompress<ELF32BE>();
template void OutputSection::maybeCompress<ELF64LE>();
template void OutputSection::maybeCompress<ELF64BE>();