1//===- OutputSections.cpp -------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "OutputSections.h"
10#include "Config.h"
11#include "InputFiles.h"
12#include "LinkerScript.h"
13#include "Symbols.h"
14#include "SyntheticSections.h"
15#include "Target.h"
16#include "lld/Common/Arrays.h"
17#include "lld/Common/Memory.h"
18#include "llvm/BinaryFormat/Dwarf.h"
19#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB, LLVM_ENABLE_ZSTD
20#include "llvm/Support/Compression.h"
21#include "llvm/Support/LEB128.h"
22#include "llvm/Support/Parallel.h"
23#include "llvm/Support/Path.h"
24#include "llvm/Support/TimeProfiler.h"
25#undef in
26#if LLVM_ENABLE_ZLIB
27// Avoid introducing max as a macro from Windows headers.
28#define NOMINMAX
29#include <zlib.h>
30#endif
31#if LLVM_ENABLE_ZSTD
32#include <zstd.h>
33#endif
34
35using namespace llvm;
36using namespace llvm::dwarf;
37using namespace llvm::object;
38using namespace llvm::support::endian;
39using namespace llvm::ELF;
40using namespace lld;
41using namespace lld::elf;
42
43uint32_t OutputSection::getPhdrFlags() const {
44 uint32_t ret = 0;
45 bool purecode =
46 (ctx.arg.emachine == EM_ARM && (flags & SHF_ARM_PURECODE)) ||
47 (ctx.arg.emachine == EM_AARCH64 && (flags & SHF_AARCH64_PURECODE));
48 if (!purecode)
49 ret |= PF_R;
50 if (flags & SHF_WRITE)
51 ret |= PF_W;
52 if (flags & SHF_EXECINSTR)
53 ret |= PF_X;
54 return ret;
55}
56
57template <class ELFT>
58void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
59 shdr->sh_entsize = entsize;
60 shdr->sh_addralign = addralign;
61 shdr->sh_type = type;
62 shdr->sh_offset = offset;
63 shdr->sh_flags = flags;
64 shdr->sh_info = info;
65 shdr->sh_link = link;
66 shdr->sh_addr = addr;
67 shdr->sh_size = size;
68 shdr->sh_name = shName;
69}
70
// Construct an empty output section owned by the link context. link/info start
// at 0 and addralign at 1 so the first committed input section can raise them;
// entsize is 0 until input sections establish a common entry size.
OutputSection::OutputSection(Ctx &ctx, StringRef name, uint32_t type,
                             uint64_t flags)
    : SectionBase(Output, ctx.internalFile, name, type, flags, /*link=*/0,
                  /*info=*/0, /*addralign=*/1, /*entsize=*/0),
      ctx(ctx) {}
76
77uint64_t OutputSection::getLMA() const {
78 return ptLoad ? addr + ptLoad->lmaOffset : addr;
79}
80
// We allow sections of the types listed below to be merged into a
82// single progbits section. This is typically done by linker
83// scripts. Merging nobits and progbits will force disk space
84// to be allocated for nobits sections. Other ones don't require
85// any special treatment on top of progbits, so there doesn't
86// seem to be a harm in merging them.
87//
88// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
89// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
90static bool canMergeToProgbits(Ctx &ctx, unsigned type) {
91 return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
92 type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
93 type == SHT_NOTE ||
94 (type == SHT_X86_64_UNWIND && ctx.arg.emachine == EM_X86_64);
95}
96
97// Record that isec will be placed in the OutputSection. isec does not become
98// permanent until finalizeInputSections() is called. The function should not be
99// used after finalizeInputSections() is called. If you need to add an
100// InputSection post finalizeInputSections(), then you must do the following:
101//
102// 1. Find or create an InputSectionDescription to hold InputSection.
103// 2. Add the InputSection to the InputSectionDescription::sections.
104// 3. Call commitSection(isec).
105void OutputSection::recordSection(InputSectionBase *isec) {
106 partition = isec->partition;
107 isec->parent = this;
108 if (commands.empty() || !isa<InputSectionDescription>(Val: commands.back()))
109 commands.push_back(Elt: make<InputSectionDescription>(args: ""));
110 auto *isd = cast<InputSectionDescription>(Val: commands.back());
111 isd->sectionBases.push_back(Elt: isec);
112}
113
114// Update fields (type, flags, alignment, etc) according to the InputSection
115// isec. Also check whether the InputSection flags and type are consistent with
116// other InputSections.
117void OutputSection::commitSection(InputSection *isec) {
118 if (LLVM_UNLIKELY(type != isec->type)) {
119 if (!hasInputSections && !typeIsSet) {
120 type = isec->type;
121 } else if (isStaticRelSecType(type) && isStaticRelSecType(type: isec->type) &&
122 (type == SHT_CREL) != (isec->type == SHT_CREL)) {
123 // Combine mixed SHT_REL[A] and SHT_CREL to SHT_CREL.
124 type = SHT_CREL;
125 if (type == SHT_REL) {
126 if (name.consume_front(Prefix: ".rel"))
127 name = ctx.saver.save(S: ".crel" + name);
128 } else if (name.consume_front(Prefix: ".rela")) {
129 name = ctx.saver.save(S: ".crel" + name);
130 }
131 } else {
132 if (typeIsSet || !canMergeToProgbits(ctx, type) ||
133 !canMergeToProgbits(ctx, type: isec->type)) {
134 // The (NOLOAD) changes the section type to SHT_NOBITS, the intention is
135 // that the contents at that address is provided by some other means.
136 // Some projects (e.g.
137 // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on the
138 // behavior. Other types get an error.
139 if (type != SHT_NOBITS) {
140 Err(ctx) << "section type mismatch for " << isec->name << "\n>>> "
141 << isec << ": "
142 << getELFSectionTypeName(Machine: ctx.arg.emachine, Type: isec->type)
143 << "\n>>> output section " << name << ": "
144 << getELFSectionTypeName(Machine: ctx.arg.emachine, Type: type);
145 }
146 }
147 if (!typeIsSet)
148 type = SHT_PROGBITS;
149 }
150 }
151 if (!hasInputSections) {
152 // If IS is the first section to be added to this section,
153 // initialize type, entsize and flags from isec.
154 hasInputSections = true;
155 entsize = isec->entsize;
156 flags = isec->flags;
157 } else {
158 // Otherwise, check if new type or flags are compatible with existing ones.
159 if ((flags ^ isec->flags) & SHF_TLS)
160 ErrAlways(ctx) << "incompatible section flags for " << name << "\n>>> "
161 << isec << ": 0x" << utohexstr(X: isec->flags, LowerCase: true)
162 << "\n>>> output section " << name << ": 0x"
163 << utohexstr(X: flags, LowerCase: true);
164 }
165
166 isec->parent = this;
167 uint64_t andMask = 0;
168 if (ctx.arg.emachine == EM_ARM)
169 andMask |= (uint64_t)SHF_ARM_PURECODE;
170 if (ctx.arg.emachine == EM_AARCH64)
171 andMask |= (uint64_t)SHF_AARCH64_PURECODE;
172 uint64_t orMask = ~andMask;
173 uint64_t andFlags = (flags & isec->flags) & andMask;
174 uint64_t orFlags = (flags | isec->flags) & orMask;
175 flags = andFlags | orFlags;
176 if (nonAlloc)
177 flags &= ~(uint64_t)SHF_ALLOC;
178
179 addralign = std::max(a: addralign, b: isec->addralign);
180
181 // If this section contains a table of fixed-size entries, sh_entsize
182 // holds the element size. If it contains elements of different size we
183 // set sh_entsize to 0.
184 if (entsize != isec->entsize)
185 entsize = 0;
186}
187
188static MergeSyntheticSection *createMergeSynthetic(Ctx &ctx, StringRef name,
189 uint32_t type,
190 uint64_t flags,
191 uint32_t addralign) {
192 if ((flags & SHF_STRINGS) && ctx.arg.optimize >= 2)
193 return make<MergeTailSection>(args&: ctx, args&: name, args&: type, args&: flags, args&: addralign);
194 return make<MergeNoTailSection>(args&: ctx, args&: name, args&: type, args&: flags, args&: addralign);
195}
196
197// This function scans over the InputSectionBase list sectionBases to create
198// InputSectionDescription::sections.
199//
200// It removes MergeInputSections from the input section array and adds
201// new synthetic sections at the location of the first input section
202// that it replaces. It then finalizes each synthetic section in order
203// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections() {
  auto *script = ctx.script;
  // One synthetic section per distinct (flags, entsize[, addralign]) class of
  // mergeable input sections placed in this output section.
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
    if (!isd)
      continue;
    isd->sections.reserve(N: isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(Val: s);
      if (!ms) {
        // Non-mergeable sections are carried over unchanged.
        isd->sections.push_back(Elt: cast<InputSection>(Val: s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      // Find an existing synthetic merge section compatible with ms.
      auto i = llvm::find_if(Range&: mergeSections, P: [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS section with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        // No compatible synthetic section yet: create one at the position of
        // the first input section it replaces.
        MergeSyntheticSection *syn = createMergeSynthetic(
            ctx, name: s->name, type: ms->type, flags: ms->flags, addralign: ms->addralign);
        mergeSections.push_back(x: syn);
        i = std::prev(x: mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(Elt: syn);
        // The merge synthetic section inherits the potential spill locations of
        // its first contained section.
        auto it = script->potentialSpillLists.find(Val: ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(Key: syn, Args&: it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(isec: s);
  }
  for (auto *ms : mergeSections) {
    // Merging may have increased the alignment of a spillable section. Update
    // the alignment of potential spill sections and their containing output
    // sections.
    if (auto it = script->potentialSpillLists.find(Val: ms);
        it != script->potentialSpillLists.end()) {
      for (PotentialSpillSection *s = it->second.head; s; s = s->next) {
        s->addralign = std::max(a: s->addralign, b: ms->addralign);
        s->parent->addralign = std::max(a: s->parent->addralign, b: s->addralign);
      }
    }

    // Assign output offsets to the merged pieces.
    ms->finalizeContents();
  }
}
277
278static void sortByOrder(MutableArrayRef<InputSection *> in,
279 llvm::function_ref<int(InputSectionBase *s)> order) {
280 std::vector<std::pair<int, InputSection *>> v;
281 for (InputSection *s : in)
282 v.emplace_back(args: order(s), args&: s);
283 llvm::stable_sort(Range&: v, C: less_first());
284
285 for (size_t i = 0; i < v.size(); ++i)
286 in[i] = v[i].second;
287}
288
289uint64_t elf::getHeaderSize(Ctx &ctx) {
290 if (ctx.arg.oFormatBinary)
291 return 0;
292 return ctx.out.elfHeader->size + ctx.out.programHeaders->size;
293}
294
295void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
296 assert(isLive());
297 for (SectionCommand *b : commands)
298 if (auto *isd = dyn_cast<InputSectionDescription>(Val: b))
299 sortByOrder(in: isd->sections, order);
300}
301
302static void nopInstrFill(Ctx &ctx, uint8_t *buf, size_t size) {
303 if (size == 0)
304 return;
305 unsigned i = 0;
306 std::vector<std::vector<uint8_t>> nopFiller = *ctx.target->nopInstrs;
307 unsigned num = size / nopFiller.back().size();
308 for (unsigned c = 0; c < num; ++c) {
309 memcpy(dest: buf + i, src: nopFiller.back().data(), n: nopFiller.back().size());
310 i += nopFiller.back().size();
311 }
312 unsigned remaining = size - i;
313 if (!remaining)
314 return;
315 assert(nopFiller[remaining - 1].size() == remaining);
316 memcpy(dest: buf + i, src: nopFiller[remaining - 1].data(), n: remaining);
317}
318
319// Fill [Buf, Buf + Size) with Filler.
320// This is used for linker script "=fillexp" command.
// Fill [buf, buf + size) with the 4-byte filler pattern, repeating it and
// truncating the final copy when size is not a multiple of 4.
// This is used for linker script "=fillexp" command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t pos = 0;
  while (pos + 4 < size) {
    memcpy(buf + pos, filler.data(), 4);
    pos += 4;
  }
  memcpy(buf + pos, filler.data(), size - pos);
}
328
329#if LLVM_ENABLE_ZLIB
// Compress `in` as raw DEFLATE (no zlib header/trailer) at the given level.
// `flush` is Z_SYNC_FLUSH for all shards but the last (so shard outputs end on
// a byte boundary and can be concatenated) and Z_FINISH for the last one.
// Returns an empty vector after reporting an error if zlib fails to
// initialize.
static SmallVector<uint8_t, 0> deflateShard(Ctx &ctx, ArrayRef<uint8_t> in,
                                            int level, int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    Err(ctx) << "--compress-sections: deflateInit2 returned " << res;
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(N: std::max<size_t>(a: in.size() / 2, b: 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(N: out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(strm: &s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  // deflate stops requesting output space only once all input is consumed.
  assert(s.avail_in == 0);

  out.truncate(N: pos);
  deflateEnd(strm: &s);
  return out;
}
362#endif
363
364// Compress certain non-SHF_ALLOC sections:
365//
366// * (if --compress-debug-sections is specified) non-empty .debug_* sections
367// * (if --compress-sections is specified) matched sections
template <class ELFT> void OutputSection::maybeCompress(Ctx &ctx) {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  DebugCompressionType ctype = DebugCompressionType::None;
  // The compressed section starts with an Elf_Chdr header.
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && ctx.arg.compressDebugSections &&
      name.starts_with(Prefix: ".debug_"))
    ctype = *ctx.arg.compressDebugSections;
  // --compress-sections globs override --compress-debug-sections; when several
  // globs match, the last one wins.
  for (auto &[glob, t, l] : ctx.arg.compressSections)
    if (glob.match(S: name))
      std::tie(args&: ctype, args&: level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    Err(ctx) << "--compress-sections: section '" << name
             << "' with the SHF_ALLOC flag cannot be compressed";
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(num: size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(ctx, buf.get(), tg);
  }
  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been wildly accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;

  // Split input into 1-MiB shards so they can be compressed in parallel.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(arr: ArrayRef<uint8_t>(buf.get(), size), chunkSize: shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(num: numShards);

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, param: ZSTD_c_compressionLevel, value: level);
      ZSTD_inBuffer zib = {.src: shardsIn[i].data(), .size: shardsIn[i].size(), .pos: 0};
      ZSTD_outBuffer zob = {.dst: nullptr, .size: 0, .pos: 0};
      size_t size;
      do {
        // Allocate a buffer of half of the input size, and grow it by 1.5x if
        // insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              N: zob.size ? zob.size * 3 / 2 : std::max<size_t>(a: zib.size / 4, b: 64));
          zob = {.dst: out.data(), .size: out.size(), .pos: zob.pos};
        }
        size = ZSTD_compressStream2(cctx, output: &zob, input: &zib, endOp: ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0); // 0 means the frame is fully flushed.
      out.truncate(N: zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(num: numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(ctx, in: shardsIn[i], level,
                                  flush: i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(adler: 1, buf: shardsIn[i].data(), len: shardsIn[i].size());
    });

    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1; // Initial Adler-32 value
    compressedSize += 2; // Elf_Chdr and zlib header
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif

  // Keep the section uncompressed if compression does not shrink it.
  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}
480
481static void writeInt(Ctx &ctx, uint8_t *buf, uint64_t data, uint64_t size) {
482 if (size == 1)
483 *buf = data;
484 else if (size == 2)
485 write16(ctx, p: buf, v: data);
486 else if (size == 4)
487 write32(ctx, p: buf, v: data);
488 else if (size == 8)
489 write64(ctx, p: buf, v: data);
490 else
491 llvm_unreachable("unsupported Size argument");
492}
493
494template <class ELFT>
495void OutputSection::writeTo(Ctx &ctx, uint8_t *buf, parallel::TaskGroup &tg) {
496 llvm::TimeTraceScope timeScope("Write sections", name);
497 if (type == SHT_NOBITS)
498 return;
499 if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
500 buf += encodeULEB128(Value: crelHeader, p: buf);
501 memcpy(dest: buf, src: crelBody.data(), n: crelBody.size());
502 return;
503 }
504
505 // If the section is compressed due to
506 // --compress-debug-section/--compress-sections, the content is already known.
507 if (compressed.shards) {
508 auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
509 chdr->ch_type = compressed.type;
510 chdr->ch_size = compressed.uncompressedSize;
511 chdr->ch_addralign = addralign;
512 buf += sizeof(*chdr);
513
514 auto offsets = std::make_unique<size_t[]>(num: compressed.numShards);
515 if (compressed.type == ELFCOMPRESS_ZLIB) {
516 buf[0] = 0x78; // CMF
517 buf[1] = 0x01; // FLG: best speed
518 offsets[0] = 2; // zlib header
519 write32be(P: buf + (size - sizeof(*chdr) - 4), V: compressed.checksum);
520 }
521
522 // Compute shard offsets.
523 for (size_t i = 1; i != compressed.numShards; ++i)
524 offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
525 parallelFor(0, compressed.numShards, [&](size_t i) {
526 memcpy(dest: buf + offsets[i], src: compressed.shards[i].data(),
527 n: compressed.shards[i].size());
528 });
529 return;
530 }
531
532 // Write leading padding.
533 ArrayRef<InputSection *> sections = getInputSections(os: *this, storage);
534 std::array<uint8_t, 4> filler = getFiller(ctx);
535 bool nonZeroFiller = read32(ctx, p: filler.data()) != 0;
536 if (nonZeroFiller)
537 fill(buf, size: sections.empty() ? size : sections[0]->outSecOff, filler);
538
539 if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
540 buf += encodeULEB128(Value: crelHeader, p: buf);
541 memcpy(dest: buf, src: crelBody.data(), n: crelBody.size());
542 return;
543 }
544
545 auto fn = [=, &ctx](size_t begin, size_t end) {
546 size_t numSections = sections.size();
547 for (size_t i = begin; i != end; ++i) {
548 InputSection *isec = sections[i];
549 if (auto *s = dyn_cast<SyntheticSection>(Val: isec))
550 s->writeTo(buf: buf + isec->outSecOff);
551 else
552 isec->writeTo<ELFT>(ctx, buf + isec->outSecOff);
553
554 // When in Arm BE8 mode, the linker has to convert the big-endian
555 // instructions to little-endian, leaving the data big-endian.
556 if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8 &&
557 (flags & SHF_EXECINSTR))
558 convertArmInstructionstoBE8(ctx, sec: isec, buf: buf + isec->outSecOff);
559
560 // Fill gaps between sections.
561 if (nonZeroFiller) {
562 uint8_t *start = buf + isec->outSecOff + isec->getSize();
563 uint8_t *end;
564 if (i + 1 == numSections)
565 end = buf + size;
566 else
567 end = buf + sections[i + 1]->outSecOff;
568 if (isec->nopFiller) {
569 assert(ctx.target->nopInstrs);
570 nopInstrFill(ctx, buf: start, size: end - start);
571 } else
572 fill(buf: start, size: end - start, filler);
573 }
574 }
575 };
576
577 // If there is any BYTE()-family command (rare), write the section content
578 // first then process BYTE to overwrite the filler content. The write is
579 // serial due to the limitation of llvm/Support/Parallel.h.
580 bool written = false;
581 size_t numSections = sections.size();
582 for (SectionCommand *cmd : commands)
583 if (auto *data = dyn_cast<ByteCommand>(Val: cmd)) {
584 if (!std::exchange(obj&: written, new_val: true))
585 fn(0, numSections);
586 writeInt(ctx, buf: buf + data->offset, data: data->expression().getValue(),
587 size: data->size);
588 }
589 if (written || !numSections)
590 return;
591
592 // There is no data command. Write content asynchronously to overlap the write
593 // time with other output sections. Note, if a linker script specifies
594 // overlapping output sections (needs --noinhibit-exec or --no-check-sections
595 // to supress the error), the output may be non-deterministic.
596 const size_t taskSizeLimit = 4 << 20;
597 for (size_t begin = 0, i = 0, taskSize = 0;;) {
598 taskSize += sections[i]->getSize();
599 bool done = ++i == numSections;
600 if (done || taskSize >= taskSizeLimit) {
601 tg.spawn(f: [=] { fn(begin, i); });
602 if (done)
603 break;
604 begin = i;
605 taskSize = 0;
606 }
607 }
608}
609
// Finalize the sh_link, sh_info and size of a SHT_GROUP output section.
// `section` is the first input section of `os` and may be null (see caller).
static void finalizeShtGroup(Ctx &ctx, OutputSection *os,
                             InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = ctx.in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info then contain index of an entry in symbol table section which
  // provides signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = ctx.in.symTab->getSymbolIndex(sym: *symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  // slice(1) skips the leading GRP_COMDAT flag word; each remaining word is a
  // member section index in the input file.
  for (auto &idx : section->getDataAs<std::array<char, 4>>().slice(N: 1))
    if (OutputSection *osec = sections[read32(ctx, p: &idx)]->getOutputSection())
      seen.insert(V: osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}
633
// Append one relocation to the CREL stream `os`, delta-encoded against the
// previous record held in `out` (which is updated in place). The leading byte
// packs the low 4 bits of the offset delta with presence bits for the
// symbol-index, type and addend deltas that follow as [S]LEB128.
template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(Ctx &ctx, raw_svector_ostream &os,
              Elf_Crel<sizeof(uint) == 8> &out, uint offset, const Symbol &sym,
              uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = ctx.in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    // Section symbols: fold the section symbol's output VA into the addend.
    auto *d = dyn_cast<Defined>(Val: &sym);
    if (d) {
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(ctx, addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

  // Similar to llvm::ELF::encodeCrel.
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    // Offset delta does not fit in 4 bits: set the continuation bit and emit
    // the remaining bits as ULEB128.
    os << char(b | 0x80);
    encodeULEB128(Value: deltaOffset >> 4, OS&: os);
  }
  if (b & 1) {
    encodeSLEB128(Value: static_cast<int32_t>(symidx - out.r_symidx), OS&: os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(Value: static_cast<int32_t>(type - out.r_type), OS&: os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}
677
// Convert the RELA records of `relSec` (relocating `sec`) to CREL, appending
// to `os`/`out`. Returns the number of relocations converted (0 for SHT_REL,
// which is reported as an error and not converted).
template <class ELFT>
static size_t relToCrel(Ctx &ctx, raw_svector_ostream &os,
                        Elf_Crel<ELFT::Is64Bits> &out, InputSection *relSec,
                        InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(Val: relSec->file);
  if (relSec->type == SHT_REL) {
    // REL conversion is complex and unsupported yet.
    Err(ctx) << relSec << ": REL cannot be converted to CREL";
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels) {
    // Re-base r_offset to the relocated section's output VA.
    encodeOneCrel<typename ELFT::uint>(
        ctx, os, out, sec->getVA(offset: rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(ctx.arg.isMips64EL), getAddend<ELFT>(rel));
  }
  return rels.size();
}
696
697// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
698// Input CREL sections are decoded while REL[A] need to be converted.
template <bool is64> void OutputSection::finalizeNonAllocCrel(Ctx &ctx) {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  // `out` carries the running previous-record state for delta encoding across
  // all concatenated input relocation sections.
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(Val: commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(Val: relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      // Input is already CREL: decode and re-encode so offsets/symbol indices
      // refer to the output.
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries) {
        encodeOneCrel<uint>(ctx, os, out, uint(sec->getVA(offset: r.r_offset)),
                            file.getSymbol(symbolIndex: r.r_symidx), r.r_type, r.r_addend);
      }
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF64LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF64BE>(ctx, os, out, relSec, sec);
    } else {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF32LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF32BE>(ctx, os, out, relSec, sec);
    }
  }

  // CREL header word: count*8 + 4 (the addend-present flag bit).
  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(Value: crelHeader) + crelBody.size();
}
734
// Set sh_link/sh_info for special section types (SHF_LINK_ORDER, SHT_GROUP,
// copied relocation sections) and finalize non-alloc CREL content.
void OutputSection::finalize(Ctx &ctx) {
  InputSection *first = getFirstInputSection(os: this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field. We
    // need to translate the InputSection sh_link to the OutputSection sh_link,
    // all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(Val: first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(ctx, os: this, section: first);
    return;
  }

  // The rest only applies to REL[A]/CREL sections kept by --emit-relocs/-r.
  if (!ctx.arg.copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the empty
  // synthetic .rela.plt and first can be null.
  if (!first || isa<SyntheticSection>(Val: first))
    return;

  link = ctx.in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (ctx.arg.is64)
      finalizeNonAllocCrel<true>(ctx);
    else
      finalizeNonAllocCrel<false>(ctx);
  }
}
779
780// Returns true if S is in one of the many forms the compiler driver may pass
781// crtbegin files.
782//
783// Gcc uses any of crtbegin[<empty>|S|T].o.
784// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
785
786static bool isCrt(StringRef s, StringRef beginEnd) {
787 s = sys::path::filename(path: s);
788 if (!s.consume_back(Suffix: ".o"))
789 return false;
790 if (s.consume_front(Prefix: "clang_rt."))
791 return s.consume_front(Prefix: beginEnd);
792 return s.consume_front(Prefix: beginEnd) && s.size() <= 1;
793}
794
795// .ctors and .dtors are sorted by this order:
796//
797// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
798// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
799// 3. The section has an optional priority value in the form of ".ctors.N" or
800// ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
801// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
802//
803// For 2 and 3, the sections are sorted by priority from high to low, e.g.
804// .ctors (65536), .ctors.00100 (65436), .ctors.00200 (65336). In GNU ld's
805// internal linker scripts, the sorting is by string comparison which can
806// achieve the same goal given the optional priority values are of the same
807// length.
808//
809// In an ideal world, we don't need this function because .init_array and
810// .ctors are duplicate features (and .init_array is newer.) However, there
811// are too many real-world use cases of .ctors, so we had no choice to
812// support that with this rather ad-hoc semantics.
813static bool compCtors(const InputSection *a, const InputSection *b) {
814 bool beginA = isCrt(s: a->file->getName(), beginEnd: "crtbegin");
815 bool beginB = isCrt(s: b->file->getName(), beginEnd: "crtbegin");
816 if (beginA != beginB)
817 return beginA;
818 bool endA = isCrt(s: a->file->getName(), beginEnd: "crtend");
819 bool endB = isCrt(s: b->file->getName(), beginEnd: "crtend");
820 if (endA != endB)
821 return endB;
822 return getPriority(s: a->name) > getPriority(s: b->name);
823}
824
825// Sorts input sections by the special rules for .ctors and .dtors.
826// Unfortunately, the rules are different from the one for .{init,fini}_array.
827// Read the comment above.
828void OutputSection::sortCtorsDtors() {
829 assert(commands.size() == 1);
830 auto *isd = cast<InputSectionDescription>(Val: commands[0]);
831 llvm::stable_sort(Range&: isd->sections, C: compCtors);
832}
833
834// If an input string is in the form of "foo.N" where N is a number, return N
835// (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
836// greater than the lowest priority.
837int elf::getPriority(StringRef s) {
838 size_t pos = s.rfind(C: '.');
839 if (pos == StringRef::npos)
840 return 65536;
841 int v = 65536;
842 if (to_integer(S: s.substr(Start: pos + 1), Num&: v, Base: 10) &&
843 (pos == 6 && (s.starts_with(Prefix: ".ctors") || s.starts_with(Prefix: ".dtors"))))
844 v = 65535 - v;
845 return v;
846}
847
848InputSection *elf::getFirstInputSection(const OutputSection *os) {
849 for (SectionCommand *cmd : os->commands)
850 if (auto *isd = dyn_cast<InputSectionDescription>(Val: cmd))
851 if (!isd->sections.empty())
852 return isd->sections[0];
853 return nullptr;
854}
855
856ArrayRef<InputSection *>
857elf::getInputSections(const OutputSection &os,
858 SmallVector<InputSection *, 0> &storage) {
859 ArrayRef<InputSection *> ret;
860 storage.clear();
861 for (SectionCommand *cmd : os.commands) {
862 auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
863 if (!isd)
864 continue;
865 if (ret.empty()) {
866 ret = isd->sections;
867 } else {
868 if (storage.empty())
869 storage.assign(in_start: ret.begin(), in_end: ret.end());
870 storage.insert(I: storage.end(), From: isd->sections.begin(), To: isd->sections.end());
871 }
872 }
873 return storage.empty() ? ret : ArrayRef(storage);
874}
875
876// Sorts input sections by section name suffixes, so that .foo.N comes
877// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
878// We want to keep the original order if the priorities are the same
879// because the compiler keeps the original initialization order in a
880// translation unit and we need to respect that.
881// For more detail, read the section of the GCC's manual about init_priority.
882void OutputSection::sortInitFini() {
883 // Sort sections by priority.
884 sort(order: [](InputSectionBase *s) { return getPriority(s: s->name); });
885}
886
887std::array<uint8_t, 4> OutputSection::getFiller(Ctx &ctx) {
888 if (filler)
889 return *filler;
890 if (!(flags & SHF_EXECINSTR))
891 return {0, 0, 0, 0};
892 if (ctx.arg.relocatable && ctx.arg.emachine == EM_RISCV) {
893 // See RISCV::maybeSynthesizeAlign: Synthesized NOP bytes and ALIGN
894 // relocations might be needed between two input sections. Use a NOP for the
895 // filler.
896 if (ctx.arg.eflags & EF_RISCV_RVC)
897 return {1, 0, 1, 0};
898 return {0x13, 0, 0, 0};
899 }
900 if (ctx.arg.relocatable && ctx.arg.emachine == EM_LOONGARCH)
901 return {0, 0, 0x40, 0x03};
902 return ctx.target->trapInstr;
903}
904
905void OutputSection::checkDynRelAddends(Ctx &ctx) {
906 assert(ctx.arg.writeAddends && ctx.arg.checkDynamicRelocs);
907 assert(isStaticRelSecType(type));
908 SmallVector<InputSection *, 0> storage;
909 ArrayRef<InputSection *> sections = getInputSections(os: *this, storage);
910 parallelFor(Begin: 0, End: sections.size(), Fn: [&](size_t i) {
911 // When linking with -r or --emit-relocs we might also call this function
912 // for input .rel[a].<sec> sections which we simply pass through to the
913 // output. We skip over those and only look at the synthetic relocation
914 // sections created during linking.
915 if (!SyntheticSection::classof(sec: sections[i]) ||
916 !is_contained(Set: {ELF::SHT_REL, ELF::SHT_RELA, ELF::SHT_RELR},
917 Element: sections[i]->type))
918 return;
919 const auto *sec = cast<RelocationBaseSection>(Val: sections[i]);
920 if (!sec)
921 return;
922 for (const DynamicReloc &rel : sec->relocs) {
923 int64_t addend = rel.addend;
924 const OutputSection *relOsec = rel.inputSec->getOutputSection();
925 assert(relOsec != nullptr && "missing output section for relocation");
926 // Some targets have NOBITS synthetic sections with dynamic relocations
927 // with non-zero addends. Skip such sections.
928 if (is_contained(Set: {EM_PPC, EM_PPC64}, Element: ctx.arg.emachine) &&
929 (rel.inputSec == ctx.in.ppc64LongBranchTarget.get() ||
930 rel.inputSec == ctx.in.igotPlt.get()))
931 continue;
932 const uint8_t *relocTarget = ctx.bufferStart + relOsec->offset +
933 rel.inputSec->getOffset(offset: rel.offsetInSec);
934 // For SHT_NOBITS the written addend is always zero.
935 int64_t writtenAddend =
936 relOsec->type == SHT_NOBITS
937 ? 0
938 : ctx.target->getImplicitAddend(buf: relocTarget, type: rel.type);
939 if (addend != writtenAddend)
940 InternalErr(ctx, buf: relocTarget)
941 << "wrote incorrect addend value 0x" << utohexstr(X: writtenAddend)
942 << " instead of 0x" << utohexstr(X: addend)
943 << " for dynamic relocation " << rel.type << " at offset 0x"
944 << utohexstr(X: rel.getOffset())
945 << (rel.sym ? " against symbol " + rel.sym->getName() : "");
946 }
947 });
948}
949
// Explicit template instantiations for every supported ELF flavor
// (32/64-bit x little/big-endian), so the template definitions can stay in
// this .cpp file while other translation units link against them.
template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>(Ctx &);
template void OutputSection::maybeCompress<ELF32BE>(Ctx &);
template void OutputSection::maybeCompress<ELF64LE>(Ctx &);
template void OutputSection::maybeCompress<ELF64BE>(Ctx &);
968