1//===- Writer.cpp ---------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "Writer.h"
10#include "AArch64ErrataFix.h"
11#include "ARMErrataFix.h"
12#include "BPSectionOrderer.h"
13#include "CallGraphSort.h"
14#include "Config.h"
15#include "InputFiles.h"
16#include "LinkerScript.h"
17#include "MapFile.h"
18#include "OutputSections.h"
19#include "Relocations.h"
20#include "SymbolTable.h"
21#include "Symbols.h"
22#include "SyntheticSections.h"
23#include "Target.h"
24#include "lld/Common/Arrays.h"
25#include "lld/Common/CommonLinkerContext.h"
26#include "lld/Common/Filesystem.h"
27#include "lld/Common/Strings.h"
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/StringMap.h"
30#include "llvm/Support/BLAKE3.h"
31#include "llvm/Support/Parallel.h"
32#include "llvm/Support/RandomNumberGenerator.h"
33#include "llvm/Support/TimeProfiler.h"
34#include "llvm/Support/xxhash.h"
35#include <climits>
36
37#define DEBUG_TYPE "lld"
38
39using namespace llvm;
40using namespace llvm::ELF;
41using namespace llvm::object;
42using namespace llvm::support;
43using namespace llvm::support::endian;
44using namespace lld;
45using namespace lld::elf;
46
47namespace {
48// The writer writes a SymbolTable result to a file.
49template <class ELFT> class Writer {
50public:
51 LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
52
53 Writer(Ctx &ctx) : ctx(ctx), buffer(ctx.e.outputBuffer), tc(ctx) {}
54
55 void run();
56
57private:
58 void addSectionSymbols();
59 void sortSections();
60 void resolveShfLinkOrder();
61 void finalizeAddressDependentContent();
62 void optimizeBasicBlockJumps();
63 void sortInputSections();
64 void sortOrphanSections();
65 void finalizeSections();
66 void checkExecuteOnly();
67 void checkExecuteOnlyReport();
68 void setReservedSymbolSections();
69
70 SmallVector<std::unique_ptr<PhdrEntry>, 0> createPhdrs(Partition &part);
71 void addPhdrForSection(Partition &part, unsigned shType, unsigned pType,
72 unsigned pFlags);
73 void assignFileOffsets();
74 void assignFileOffsetsBinary();
75 void setPhdrs(Partition &part);
76 void checkSections();
77 void fixSectionAlignments();
78 void openFile();
79 void writeTrapInstr();
80 void writeHeader();
81 void writeSections();
82 void writeSectionsBinary();
83 void writeBuildId();
84
85 Ctx &ctx;
86 std::unique_ptr<FileOutputBuffer> &buffer;
87 // ThunkCreator holds Thunks that are used at writeTo time.
88 ThunkCreator tc;
89
90 void addRelIpltSymbols();
91 void addStartEndSymbols();
92 void addStartStopSymbols(OutputSection &osec);
93
94 uint64_t fileSize;
95 uint64_t sectionHeaderOff;
96};
97} // anonymous namespace
98
// Public entry point: constructs a Writer for the given link context and
// runs the whole output-writing pipeline.
template <class ELFT> void elf::writeResult(Ctx &ctx) {
  Writer<ELFT>(ctx).run();
}
102
103static void
104removeEmptyPTLoad(Ctx &ctx, SmallVector<std::unique_ptr<PhdrEntry>, 0> &phdrs) {
105 auto it = std::stable_partition(first: phdrs.begin(), last: phdrs.end(), pred: [&](auto &p) {
106 if (p->p_type != PT_LOAD)
107 return true;
108 if (!p->firstSec)
109 return false;
110 uint64_t size = p->lastSec->addr + p->lastSec->size - p->firstSec->addr;
111 return size != 0;
112 });
113
114 // Clear OutputSection::ptLoad for sections contained in removed
115 // segments.
116 DenseSet<PhdrEntry *> removed;
117 for (auto it2 = it; it2 != phdrs.end(); ++it2)
118 removed.insert(V: it2->get());
119 for (OutputSection *sec : ctx.outputSections)
120 if (removed.contains(V: sec->ptLoad))
121 sec->ptLoad = nullptr;
122 phdrs.erase(CS: it, CE: phdrs.end());
123}
124
125void elf::copySectionsIntoPartitions(Ctx &ctx) {
126 SmallVector<InputSectionBase *, 0> newSections;
127 const size_t ehSize = ctx.ehInputSections.size();
128 for (unsigned part = 2; part != ctx.partitions.size() + 1; ++part) {
129 for (InputSectionBase *s : ctx.inputSections) {
130 if (!(s->flags & SHF_ALLOC) || !s->isLive() || s->type != SHT_NOTE)
131 continue;
132 auto *copy = make<InputSection>(args&: cast<InputSection>(Val&: *s));
133 copy->partition = part;
134 newSections.push_back(Elt: copy);
135 }
136 for (size_t i = 0; i != ehSize; ++i) {
137 assert(ctx.ehInputSections[i]->isLive());
138 auto *copy = make<EhInputSection>(args&: *ctx.ehInputSections[i]);
139 copy->partition = part;
140 ctx.ehInputSections.push_back(Elt: copy);
141 }
142 }
143
144 ctx.inputSections.insert(I: ctx.inputSections.end(), From: newSections.begin(),
145 To: newSections.end());
146}
147
148static Defined *addOptionalRegular(Ctx &ctx, StringRef name, SectionBase *sec,
149 uint64_t val, uint8_t stOther = STV_HIDDEN) {
150 Symbol *s = ctx.symtab->find(name);
151 if (!s || s->isDefined() || s->isCommon())
152 return nullptr;
153
154 ctx.synthesizedSymbols.push_back(Elt: s);
155 s->resolve(ctx, other: Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
156 stOther, STT_NOTYPE, val,
157 /*size=*/0, sec});
158 s->isUsedInRegularObj = true;
159 return cast<Defined>(Val: s);
160}
161
162// The linker is expected to define some symbols depending on
163// the linking result. This function defines such symbols.
164void elf::addReservedSymbols(Ctx &ctx) {
165 if (ctx.arg.emachine == EM_MIPS) {
166 auto addAbsolute = [&](StringRef name) {
167 Symbol *sym =
168 ctx.symtab->addSymbol(newSym: Defined{ctx, ctx.internalFile, name, STB_GLOBAL,
169 STV_HIDDEN, STT_NOTYPE, 0, 0, nullptr});
170 sym->isUsedInRegularObj = true;
171 return cast<Defined>(Val: sym);
172 };
173 // Define _gp for MIPS. st_value of _gp symbol will be updated by Writer
174 // so that it points to an absolute address which by default is relative
175 // to GOT. Default offset is 0x7ff0.
176 // See "Global Data Symbols" in Chapter 6 in the following document:
177 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
178 ctx.sym.mipsGp = addAbsolute("_gp");
179
180 // On MIPS O32 ABI, _gp_disp is a magic symbol designates offset between
181 // start of function and 'gp' pointer into GOT.
182 if (ctx.symtab->find(name: "_gp_disp"))
183 ctx.sym.mipsGpDisp = addAbsolute("_gp_disp");
184
185 // The __gnu_local_gp is a magic symbol equal to the current value of 'gp'
186 // pointer. This symbol is used in the code generated by .cpload pseudo-op
187 // in case of using -mno-shared option.
188 // https://sourceware.org/ml/binutils/2004-12/msg00094.html
189 if (ctx.symtab->find(name: "__gnu_local_gp"))
190 ctx.sym.mipsLocalGp = addAbsolute("__gnu_local_gp");
191 } else if (ctx.arg.emachine == EM_PPC) {
192 // glibc *crt1.o has a undefined reference to _SDA_BASE_. Since we don't
193 // support Small Data Area, define it arbitrarily as 0.
194 addOptionalRegular(ctx, name: "_SDA_BASE_", sec: nullptr, val: 0, stOther: STV_HIDDEN);
195 } else if (ctx.arg.emachine == EM_PPC64) {
196 addPPC64SaveRestore(ctx);
197 }
198
199 // The Power Architecture 64-bit v2 ABI defines a TableOfContents (TOC) which
200 // combines the typical ELF GOT with the small data sections. It commonly
201 // includes .got .toc .sdata .sbss. The .TOC. symbol replaces both
202 // _GLOBAL_OFFSET_TABLE_ and _SDA_BASE_ from the 32-bit ABI. It is used to
203 // represent the TOC base which is offset by 0x8000 bytes from the start of
204 // the .got section.
205 // We do not allow _GLOBAL_OFFSET_TABLE_ to be defined by input objects as the
206 // correctness of some relocations depends on its value.
207 StringRef gotSymName =
208 (ctx.arg.emachine == EM_PPC64) ? ".TOC." : "_GLOBAL_OFFSET_TABLE_";
209
210 if (Symbol *s = ctx.symtab->find(name: gotSymName)) {
211 if (s->isDefined()) {
212 ErrAlways(ctx) << s->file << " cannot redefine linker defined symbol '"
213 << gotSymName << "'";
214 return;
215 }
216
217 uint64_t gotOff = 0;
218 if (ctx.arg.emachine == EM_PPC64)
219 gotOff = 0x8000;
220
221 s->resolve(ctx, other: Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
222 STV_HIDDEN, STT_NOTYPE, gotOff, /*size=*/0,
223 ctx.out.elfHeader.get()});
224 ctx.sym.globalOffsetTable = cast<Defined>(Val: s);
225 }
226
227 // __ehdr_start is the location of ELF file headers. Note that we define
228 // this symbol unconditionally even when using a linker script, which
229 // differs from the behavior implemented by GNU linker which only define
230 // this symbol if ELF headers are in the memory mapped segment.
231 addOptionalRegular(ctx, name: "__ehdr_start", sec: ctx.out.elfHeader.get(), val: 0,
232 stOther: STV_HIDDEN);
233
234 // __executable_start is not documented, but the expectation of at
235 // least the Android libc is that it points to the ELF header.
236 addOptionalRegular(ctx, name: "__executable_start", sec: ctx.out.elfHeader.get(), val: 0,
237 stOther: STV_HIDDEN);
238
239 // __dso_handle symbol is passed to cxa_finalize as a marker to identify
240 // each DSO. The address of the symbol doesn't matter as long as they are
241 // different in different DSOs, so we chose the start address of the DSO.
242 addOptionalRegular(ctx, name: "__dso_handle", sec: ctx.out.elfHeader.get(), val: 0,
243 stOther: STV_HIDDEN);
244
245 // If linker script do layout we do not need to create any standard symbols.
246 if (ctx.script->hasSectionsCommand)
247 return;
248
249 auto add = [&](StringRef s, int64_t pos) {
250 return addOptionalRegular(ctx, name: s, sec: ctx.out.elfHeader.get(), val: pos,
251 stOther: STV_DEFAULT);
252 };
253
254 ctx.sym.bss = add("__bss_start", 0);
255 ctx.sym.end1 = add("end", -1);
256 ctx.sym.end2 = add("_end", -1);
257 ctx.sym.etext1 = add("etext", -1);
258 ctx.sym.etext2 = add("_etext", -1);
259 ctx.sym.edata1 = add("edata", -1);
260 ctx.sym.edata2 = add("_edata", -1);
261}
262
263static void demoteDefined(Defined &sym, DenseMap<SectionBase *, size_t> &map) {
264 if (map.empty())
265 for (auto [i, sec] : llvm::enumerate(First: sym.file->getSections()))
266 map.try_emplace(Key: sec, Args&: i);
267 // Change WEAK to GLOBAL so that if a scanned relocation references sym,
268 // maybeReportUndefined will report an error.
269 uint8_t binding = sym.isWeak() ? uint8_t(STB_GLOBAL) : sym.binding;
270 Undefined(sym.file, sym.getName(), binding, sym.stOther, sym.type,
271 /*discardedSecIdx=*/map.lookup(Val: sym.section))
272 .overwrite(sym);
273 // Eliminate from the symbol table, otherwise we would leave an undefined
274 // symbol if the symbol is unreferenced in the absence of GC.
275 sym.isUsedInRegularObj = false;
276}
277
278// If all references to a DSO happen to be weak, the DSO is not added to
279// DT_NEEDED. If that happens, replace ShardSymbol with Undefined to avoid
280// dangling references to an unneeded DSO. Use a weak binding to avoid
281// --no-allow-shlib-undefined diagnostics. Similarly, demote lazy symbols.
282//
283// In addition, demote symbols defined in discarded sections, so that
284// references to /DISCARD/ discarded symbols will lead to errors.
285static void demoteSymbolsAndComputeIsPreemptible(Ctx &ctx) {
286 llvm::TimeTraceScope timeScope("Demote symbols");
287 DenseMap<InputFile *, DenseMap<SectionBase *, size_t>> sectionIndexMap;
288 for (Symbol *sym : ctx.symtab->getSymbols()) {
289 if (auto *d = dyn_cast<Defined>(Val: sym)) {
290 if (d->section && !d->section->isLive())
291 demoteDefined(sym&: *d, map&: sectionIndexMap[d->file]);
292 } else {
293 auto *s = dyn_cast<SharedSymbol>(Val: sym);
294 if (sym->isLazy() || (s && !cast<SharedFile>(Val: s->file)->isNeeded)) {
295 uint8_t binding = sym->isLazy() ? sym->binding : uint8_t(STB_WEAK);
296 Undefined(ctx.internalFile, sym->getName(), binding, sym->stOther,
297 sym->type)
298 .overwrite(sym&: *sym);
299 sym->versionId = VER_NDX_GLOBAL;
300 }
301 }
302
303 sym->isPreemptible = (sym->isUndefined() || sym->isExported) &&
304 computeIsPreemptible(ctx, sym: *sym);
305 }
306}
307
308static OutputSection *findSection(Ctx &ctx, StringRef name,
309 unsigned partition = 1) {
310 for (SectionCommand *cmd : ctx.script->sectionCommands)
311 if (auto *osd = dyn_cast<OutputDesc>(Val: cmd))
312 if (osd->osec.name == name && osd->osec.partition == partition)
313 return &osd->osec;
314 return nullptr;
315}
316
317// The main function of the writer.
318template <class ELFT> void Writer<ELFT>::run() {
319 // Now that we have a complete set of output sections. This function
320 // completes section contents. For example, we need to add strings
321 // to the string table, and add entries to .got and .plt.
322 // finalizeSections does that.
323 finalizeSections();
324 checkExecuteOnly();
325 checkExecuteOnlyReport();
326
327 // If --compressed-debug-sections is specified, compress .debug_* sections.
328 // Do it right now because it changes the size of output sections.
329 for (OutputSection *sec : ctx.outputSections)
330 sec->maybeCompress<ELFT>(ctx);
331
332 if (ctx.script->hasSectionsCommand)
333 ctx.script->allocateHeaders(phdrs&: ctx.mainPart->phdrs);
334
335 // Remove empty PT_LOAD to avoid causing the dynamic linker to try to mmap a
336 // 0 sized region. This has to be done late since only after assignAddresses
337 // we know the size of the sections.
338 for (Partition &part : ctx.partitions)
339 removeEmptyPTLoad(ctx, phdrs&: part.phdrs);
340
341 if (!ctx.arg.oFormatBinary)
342 assignFileOffsets();
343 else
344 assignFileOffsetsBinary();
345
346 for (Partition &part : ctx.partitions)
347 setPhdrs(part);
348
349 // Handle --print-map(-M)/--Map and --cref. Dump them before checkSections()
350 // because the files may be useful in case checkSections() or openFile()
351 // fails, for example, due to an erroneous file size.
352 writeMapAndCref(ctx);
353
354 // Handle --print-memory-usage option.
355 if (ctx.arg.printMemoryUsage)
356 ctx.script->printMemoryUsage(os&: ctx.e.outs());
357
358 if (ctx.arg.checkSections)
359 checkSections();
360
361 // It does not make sense try to open the file if we have error already.
362 if (errCount(ctx))
363 return;
364
365 {
366 llvm::TimeTraceScope timeScope("Write output file");
367 // Write the result down to a file.
368 openFile();
369 if (errCount(ctx))
370 return;
371
372 if (!ctx.arg.oFormatBinary) {
373 if (ctx.arg.zSeparate != SeparateSegmentKind::None)
374 writeTrapInstr();
375 writeHeader();
376 writeSections();
377 } else {
378 writeSectionsBinary();
379 }
380
381 // Backfill .note.gnu.build-id section content. This is done at last
382 // because the content is usually a hash value of the entire output file.
383 writeBuildId();
384 if (errCount(ctx))
385 return;
386
387 if (!ctx.e.disableOutput) {
388 if (auto e = buffer->commit())
389 Err(ctx) << "failed to write output '" << buffer->getPath()
390 << "': " << std::move(e);
391 }
392
393 if (!ctx.arg.cmseOutputLib.empty())
394 writeARMCmseImportLib<ELFT>(ctx);
395 }
396}
397
398template <class ELFT, class RelTy>
399static void markUsedLocalSymbolsImpl(ObjFile<ELFT> *file,
400 llvm::ArrayRef<RelTy> rels) {
401 for (const RelTy &rel : rels) {
402 Symbol &sym = file->getRelocTargetSym(rel);
403 if (sym.isLocal())
404 sym.setFlags(USED);
405 }
406}
407
408// The function ensures that the USED flag of local symbols reflects the fact
409// that the symbol is used in a relocation from a live section.
410template <class ELFT> static void markUsedLocalSymbols(Ctx &ctx) {
411 // With --gc-sections, the field is already filled.
412 // See MarkLive<ELFT>::resolveReloc().
413 if (ctx.arg.gcSections)
414 return;
415 for (ELFFileBase *file : ctx.objectFiles) {
416 ObjFile<ELFT> *f = cast<ObjFile<ELFT>>(file);
417 for (InputSectionBase *s : f->getSections()) {
418 InputSection *isec = dyn_cast_or_null<InputSection>(Val: s);
419 if (!isec)
420 continue;
421 if (isec->type == SHT_REL) {
422 markUsedLocalSymbolsImpl(f, isec->getDataAs<typename ELFT::Rel>());
423 } else if (isec->type == SHT_RELA) {
424 markUsedLocalSymbolsImpl(f, isec->getDataAs<typename ELFT::Rela>());
425 } else if (isec->type == SHT_CREL) {
426 // The is64=true variant also works with ELF32 since only the r_symidx
427 // member is used.
428 for (Elf_Crel_Impl<true> r : RelocsCrel<true>(isec->content_)) {
429 Symbol &sym = file->getSymbol(symbolIndex: r.r_symidx);
430 if (sym.isLocal())
431 sym.setFlags(USED);
432 }
433 }
434 }
435 }
436}
437
438static bool shouldKeepInSymtab(Ctx &ctx, const Defined &sym) {
439 if (sym.isSection())
440 return false;
441
442 // If --emit-reloc or -r is given, preserve symbols referenced by relocations
443 // from live sections.
444 if (sym.hasFlag(bit: USED) && ctx.arg.copyRelocs)
445 return true;
446
447 // Exclude local symbols pointing to .ARM.exidx sections.
448 // They are probably mapping symbols "$d", which are optional for these
449 // sections. After merging the .ARM.exidx sections, some of these symbols
450 // may become dangling. The easiest way to avoid the issue is not to add
451 // them to the symbol table from the beginning.
452 if (ctx.arg.emachine == EM_ARM && sym.section &&
453 sym.section->type == SHT_ARM_EXIDX)
454 return false;
455
456 if (ctx.arg.discard == DiscardPolicy::None)
457 return true;
458 if (ctx.arg.discard == DiscardPolicy::All)
459 return false;
460
461 // In ELF assembly .L symbols are normally discarded by the assembler.
462 // If the assembler fails to do so, the linker discards them if
463 // * --discard-locals is used.
464 // * The symbol is in a SHF_MERGE section, which is normally the reason for
465 // the assembler keeping the .L symbol.
466 if (sym.getName().starts_with(Prefix: ".L") &&
467 (ctx.arg.discard == DiscardPolicy::Locals ||
468 (sym.section && (sym.section->flags & SHF_MERGE))))
469 return false;
470 return true;
471}
472
473bool elf::includeInSymtab(Ctx &ctx, const Symbol &b) {
474 if (auto *d = dyn_cast<Defined>(Val: &b)) {
475 // Always include absolute symbols.
476 SectionBase *sec = d->section;
477 if (!sec)
478 return true;
479 assert(sec->isLive());
480
481 if (auto *s = dyn_cast<MergeInputSection>(Val: sec))
482 return s->getSectionPiece(offset: d->value).live;
483 return true;
484 }
485 return b.hasFlag(bit: USED) || !ctx.arg.gcSections;
486}
487
488// Scan local symbols to:
489//
490// - demote symbols defined relative to /DISCARD/ discarded input sections so
491// that relocations referencing them will lead to errors.
492// - copy eligible symbols to .symTab
493static void demoteAndCopyLocalSymbols(Ctx &ctx) {
494 llvm::TimeTraceScope timeScope("Add local symbols");
495 auto symsVec =
496 std::make_unique<SmallVector<Symbol *, 0>[]>(num: ctx.objectFiles.size());
497 parallelFor(Begin: 0, End: ctx.objectFiles.size(), Fn: [&](size_t i) {
498 DenseMap<SectionBase *, size_t> sectionIndexMap;
499 for (Symbol *b : ctx.objectFiles[i]->getLocalSymbols()) {
500 assert(b->isLocal() && "should have been caught in initializeSymbols()");
501 auto *dr = dyn_cast<Defined>(Val: b);
502 if (!dr)
503 continue;
504
505 if (dr->section && !dr->section->isLive())
506 demoteDefined(sym&: *dr, map&: sectionIndexMap);
507 else if (ctx.in.symTab && includeInSymtab(ctx, b: *b) &&
508 shouldKeepInSymtab(ctx, sym: *dr))
509 symsVec[i].push_back(Elt: b);
510 }
511 });
512 for (auto &syms : ArrayRef(symsVec.get(), ctx.objectFiles.size()))
513 for (Symbol *sym : syms)
514 ctx.in.symTab->addSymbol(sym);
515}
516
517// Create a section symbol for each output section so that we can represent
518// relocations that point to the section. If we know that no relocation is
519// referring to a section (that happens if the section is a synthetic one), we
520// don't create a section symbol for that section.
521template <class ELFT> void Writer<ELFT>::addSectionSymbols() {
522 for (SectionCommand *cmd : ctx.script->sectionCommands) {
523 auto *osd = dyn_cast<OutputDesc>(Val: cmd);
524 if (!osd)
525 continue;
526 OutputSection &osec = osd->osec;
527 InputSectionBase *isec = nullptr;
528 // Iterate over all input sections and add a STT_SECTION symbol if any input
529 // section may be a relocation target.
530 for (SectionCommand *cmd : osec.commands) {
531 auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
532 if (!isd)
533 continue;
534 for (InputSectionBase *s : isd->sections) {
535 // Relocations are not using REL[A] section symbols.
536 if (isStaticRelSecType(type: s->type))
537 continue;
538
539 // Unlike other synthetic sections, mergeable output sections contain
540 // data copied from input sections, and there may be a relocation
541 // pointing to its contents if -r or --emit-reloc is given.
542 if (isa<SyntheticSection>(Val: s) && !(s->flags & SHF_MERGE))
543 continue;
544
545 isec = s;
546 break;
547 }
548 }
549 if (!isec)
550 continue;
551
552 // Set the symbol to be relative to the output section so that its st_value
553 // equals the output section address. Note, there may be a gap between the
554 // start of the output section and isec.
555 ctx.in.symTab->addSymbol(sym: makeDefined(args&: ctx, args&: isec->file, args: "", args: STB_LOCAL,
556 /*stOther=*/args: 0, args: STT_SECTION,
557 /*value=*/args: 0, /*size=*/args: 0, args: &osec));
558 }
559}
560
561// Returns true if this is a variant of .data.rel.ro.
562static bool isRelRoDataSection(Ctx &ctx, StringRef secName) {
563 if (!secName.consume_front(Prefix: ".data.rel.ro"))
564 return false;
565 if (secName.empty())
566 return true;
567 // If -z keep-data-section-prefix is specified, additionally allow
568 // '.data.rel.ro.hot' and '.data.rel.ro.unlikely'.
569 if (ctx.arg.zKeepDataSectionPrefix)
570 return secName == ".hot" || secName == ".unlikely";
571 return false;
572}
573
574// Today's loaders have a feature to make segments read-only after
575// processing dynamic relocations to enhance security. PT_GNU_RELRO
576// is defined for that.
577//
578// This function returns true if a section needs to be put into a
579// PT_GNU_RELRO segment.
580static bool isRelroSection(Ctx &ctx, const OutputSection *sec) {
581 if (!ctx.arg.zRelro)
582 return false;
583 if (sec->relro)
584 return true;
585
586 uint64_t flags = sec->flags;
587
588 // Non-allocatable or non-writable sections don't need RELRO because
589 // they are not writable or not even mapped to memory in the first place.
590 // RELRO is for sections that are essentially read-only but need to
591 // be writable only at process startup to allow dynamic linker to
592 // apply relocations.
593 if (!(flags & SHF_ALLOC) || !(flags & SHF_WRITE))
594 return false;
595
596 // Once initialized, TLS data segments are used as data templates
597 // for a thread-local storage. For each new thread, runtime
598 // allocates memory for a TLS and copy templates there. No thread
599 // are supposed to use templates directly. Thus, it can be in RELRO.
600 if (flags & SHF_TLS)
601 return true;
602
603 // .init_array, .preinit_array and .fini_array contain pointers to
604 // functions that are executed on process startup or exit. These
605 // pointers are set by the static linker, and they are not expected
606 // to change at runtime. But if you are an attacker, you could do
607 // interesting things by manipulating pointers in .fini_array, for
608 // example. So they are put into RELRO.
609 uint32_t type = sec->type;
610 if (type == SHT_INIT_ARRAY || type == SHT_FINI_ARRAY ||
611 type == SHT_PREINIT_ARRAY)
612 return true;
613
614 // .got contains pointers to external symbols. They are resolved by
615 // the dynamic linker when a module is loaded into memory, and after
616 // that they are not expected to change. So, it can be in RELRO.
617 if (ctx.in.got && sec == ctx.in.got->getParent())
618 return true;
619
620 // .toc is a GOT-ish section for PowerPC64. Their contents are accessed
621 // through r2 register, which is reserved for that purpose. Since r2 is used
622 // for accessing .got as well, .got and .toc need to be close enough in the
623 // virtual address space. Usually, .toc comes just after .got. Since we place
624 // .got into RELRO, .toc needs to be placed into RELRO too.
625 if (sec->name == ".toc")
626 return true;
627
628 // .got.plt contains pointers to external function symbols. They are
629 // by default resolved lazily, so we usually cannot put it into RELRO.
630 // However, if "-z now" is given, the lazy symbol resolution is
631 // disabled, which enables us to put it into RELRO.
632 if (sec == ctx.in.gotPlt->getParent())
633 return ctx.arg.zNow;
634
635 if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent())
636 return true;
637
638 // .dynamic section contains data for the dynamic linker, and
639 // there's no need to write to it at runtime, so it's better to put
640 // it into RELRO.
641 if (sec->name == ".dynamic")
642 return true;
643
644 // Sections with some special names are put into RELRO. This is a
645 // bit unfortunate because section names shouldn't be significant in
646 // ELF in spirit. But in reality many linker features depend on
647 // magic section names.
648 StringRef s = sec->name;
649
650 bool abiAgnostic = isRelRoDataSection(ctx, secName: s) || s == ".bss.rel.ro" ||
651 s == ".ctors" || s == ".dtors" || s == ".jcr" ||
652 s == ".eh_frame" || s == ".fini_array" ||
653 s == ".init_array" || s == ".preinit_array";
654
655 bool abiSpecific =
656 ctx.arg.osabi == ELFOSABI_OPENBSD && s == ".openbsd.randomdata";
657
658 return abiAgnostic || abiSpecific;
659}
660
// We compute a rank for each section. The rank indicates where the
// section should be placed in the file. Instead of using simple
// numbers (0,1,2...), we use a series of flags. One for each decision
// point when placing the section.
// Using flags has two key properties:
// * It is easy to check if a given branch was taken.
// * It is easy to see how similar two ranks are (see getRankProximity).
enum RankFlags {
  RF_NOT_ADDR_SET = 1 << 27,
  RF_NOT_ALLOC = 1 << 26,
  RF_PARTITION = 1 << 18, // Partition number (8 bits)
  RF_LARGE_EXEC_WRITE = 1 << 16,
  RF_LARGE_ALT = 1 << 15,
  RF_WRITE = 1 << 14,
  RF_EXEC_WRITE = 1 << 13,
  RF_EXEC = 1 << 12,
  RF_RODATA = 1 << 11,
  RF_LARGE_EXEC = 1 << 10,
  RF_LARGE = 1 << 9,
  RF_NOT_RELRO = 1 << 8,
  RF_NOT_TLS = 1 << 7,
  RF_BSS = 1 << 6,
};
684
685unsigned elf::getSectionRank(Ctx &ctx, OutputSection &osec) {
686 unsigned rank = osec.partition * RF_PARTITION;
687
688 // We want to put section specified by -T option first, so we
689 // can start assigning VA starting from them later.
690 if (ctx.arg.sectionStartMap.contains(Key: osec.name))
691 return rank;
692 rank |= RF_NOT_ADDR_SET;
693
694 // Allocatable sections go first to reduce the total PT_LOAD size and
695 // so debug info doesn't change addresses in actual code.
696 if (!(osec.flags & SHF_ALLOC))
697 return rank | RF_NOT_ALLOC;
698
699 // Sort sections based on their access permission in the following
700 // order: R, RX, RXW, RW(RELRO), RW(non-RELRO).
701 //
702 // Read-only sections come first such that they go in the PT_LOAD covering the
703 // program headers at the start of the file.
704 //
705 // The layout for writable sections is PT_LOAD(PT_GNU_RELRO(.data.rel.ro
706 // .bss.rel.ro) | .data .bss), where | marks where page alignment happens.
707 // An alternative ordering is PT_LOAD(.data | PT_GNU_RELRO( .data.rel.ro
708 // .bss.rel.ro) | .bss), but it may waste more bytes due to 2 alignment
709 // places.
710 bool isExec = osec.flags & SHF_EXECINSTR;
711 bool isWrite = osec.flags & SHF_WRITE;
712 bool isLarge = osec.flags & SHF_X86_64_LARGE && ctx.arg.emachine == EM_X86_64;
713
714 if (!isWrite && !isExec) {
715 // Among PROGBITS sections, place .lrodata further from .text.
716 // For -z lrodata-after-bss, place .lrodata after .lbss like GNU ld. This
717 // layout has one extra PT_LOAD, but alleviates relocation overflow
718 // pressure for absolute relocations referencing small data from -fno-pic
719 // relocatable files.
720 if (isLarge)
721 rank |= ctx.arg.zLrodataAfterBss ? RF_LARGE_ALT : 0;
722 else
723 rank |= ctx.arg.zLrodataAfterBss ? 0 : RF_LARGE;
724
725 if (osec.type == SHT_LLVM_PART_EHDR)
726 ;
727 else if (osec.type == SHT_LLVM_PART_PHDR)
728 rank |= 1;
729 else if (osec.name == ".interp")
730 rank |= 2;
731 // Put .note sections at the beginning so that they are likely to be
732 // included in a truncate core file. In particular, .note.gnu.build-id, if
733 // available, can identify the object file.
734 else if (osec.type == SHT_NOTE)
735 rank |= 3;
736 // Make PROGBITS sections (e.g .rodata .eh_frame) closer to .text to
737 // alleviate relocation overflow pressure. Large special sections such as
738 // .dynstr and .dynsym can be away from .text.
739 else if (osec.type != SHT_PROGBITS)
740 rank |= 4;
741 else
742 rank |= RF_RODATA;
743 } else if (isExec) {
744 // Place readonly .ltext before .lrodata and writable .ltext after .lbss to
745 // keep writable and readonly segments separate.
746 if (isLarge) {
747 rank |= isWrite ? RF_LARGE_EXEC_WRITE : RF_LARGE_EXEC;
748 } else {
749 rank |= isWrite ? RF_EXEC_WRITE : RF_EXEC;
750 }
751 } else {
752 rank |= RF_WRITE;
753 // The TLS initialization block needs to be a single contiguous block. Place
754 // TLS sections directly before the other RELRO sections.
755 if (!(osec.flags & SHF_TLS))
756 rank |= RF_NOT_TLS;
757 if (isRelroSection(ctx, sec: &osec))
758 osec.relro = true;
759 else
760 rank |= RF_NOT_RELRO;
761 // Place .ldata and .lbss after .bss. Making .bss closer to .text
762 // alleviates relocation overflow pressure.
763 // For -z lrodata-after-bss, place .lbss/.lrodata/.ldata after .bss.
764 // .bss/.lbss being adjacent reuses the NOBITS size optimization.
765 if (isLarge) {
766 rank |= ctx.arg.zLrodataAfterBss
767 ? (osec.type == SHT_NOBITS ? 1 : RF_LARGE_ALT)
768 : RF_LARGE;
769 }
770 }
771
772 // Within TLS sections, or within other RelRo sections, or within non-RelRo
773 // sections, place non-NOBITS sections first.
774 if (osec.type == SHT_NOBITS)
775 rank |= RF_BSS;
776
777 // Some architectures have additional ordering restrictions for sections
778 // within the same PT_LOAD.
779 if (ctx.arg.emachine == EM_PPC64) {
780 // PPC64 has a number of special SHT_PROGBITS+SHF_ALLOC+SHF_WRITE sections
781 // that we would like to make sure appear is a specific order to maximize
782 // their coverage by a single signed 16-bit offset from the TOC base
783 // pointer.
784 StringRef name = osec.name;
785 if (name == ".got")
786 rank |= 1;
787 else if (name == ".toc")
788 rank |= 2;
789 }
790
791 if (ctx.arg.emachine == EM_MIPS) {
792 if (osec.name != ".got")
793 rank |= 1;
794 // All sections with SHF_MIPS_GPREL flag should be grouped together
795 // because data in these sections is addressable with a gp relative address.
796 if (osec.flags & SHF_MIPS_GPREL)
797 rank |= 2;
798 }
799
800 if (ctx.arg.emachine == EM_RISCV) {
801 // .sdata and .sbss are placed closer to make GP relaxation more profitable
802 // and match GNU ld.
803 StringRef name = osec.name;
804 if (name == ".sdata" || (osec.type == SHT_NOBITS && name != ".sbss"))
805 rank |= 1;
806 }
807
808 return rank;
809}
810
811static bool compareSections(Ctx &ctx, const SectionCommand *aCmd,
812 const SectionCommand *bCmd) {
813 const OutputSection *a = &cast<OutputDesc>(Val: aCmd)->osec;
814 const OutputSection *b = &cast<OutputDesc>(Val: bCmd)->osec;
815
816 if (a->sortRank != b->sortRank)
817 return a->sortRank < b->sortRank;
818
819 if (!(a->sortRank & RF_NOT_ADDR_SET))
820 return ctx.arg.sectionStartMap.lookup(Key: a->name) <
821 ctx.arg.sectionStartMap.lookup(Key: b->name);
822 return false;
823}
824
825void PhdrEntry::add(OutputSection *sec) {
826 lastSec = sec;
827 if (!firstSec)
828 firstSec = sec;
829 p_align = std::max(a: p_align, b: sec->addralign);
830 if (p_type == PT_LOAD)
831 sec->ptLoad = this;
832}
833
834// A statically linked position-dependent executable should only contain
835// IRELATIVE relocations and no other dynamic relocations. Encapsulation symbols
836// __rel[a]_iplt_{start,end} will be defined for .rel[a].dyn, to be
837// processed by the libc runtime. Other executables or DSOs use dynamic tags
838// instead.
839template <class ELFT> void Writer<ELFT>::addRelIpltSymbols() {
840 if (ctx.arg.isPic)
841 return;
842
843 // __rela_iplt_{start,end} are initially defined relative to dummy section 0.
844 // We'll override ctx.out.elfHeader with relaDyn later when we are sure that
845 // .rela.dyn will be present in the output.
846 std::string name = ctx.arg.isRela ? "__rela_iplt_start" : "__rel_iplt_start";
847 ctx.sym.relaIpltStart =
848 addOptionalRegular(ctx, name, sec: ctx.out.elfHeader.get(), val: 0, stOther: STV_HIDDEN);
849 name.replace(pos: name.size() - 5, n1: 5, s: "end");
850 ctx.sym.relaIpltEnd =
851 addOptionalRegular(ctx, name, sec: ctx.out.elfHeader.get(), val: 0, stOther: STV_HIDDEN);
852}
853
// This function generates assignments for predefined symbols (e.g. _end or
// _etext) and inserts them into the commands sequence to be processed at the
// appropriate time. This ensures that the value is going to be correct by the
// time any references to these symbols are processed and is equivalent to
// defining these symbols explicitly in the linker script.
template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
  if (ctx.sym.globalOffsetTable) {
    // The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually
    // to the start of the .got or .got.plt section.
    InputSection *sec = ctx.in.gotPlt.get();
    if (!ctx.target->gotBaseSymInGotPlt)
      sec = ctx.in.mipsGot ? cast<InputSection>(Val: ctx.in.mipsGot.get())
                           : cast<InputSection>(Val: ctx.in.got.get());
    ctx.sym.globalOffsetTable->section = sec;
  }

  // .rela_iplt_{start,end} mark the start and the end of the section containing
  // IRELATIVE relocations.
  if (ctx.sym.relaIpltStart) {
    auto &dyn = getIRelativeSection(ctx);
    if (dyn.isNeeded()) {
      ctx.sym.relaIpltStart->section = &dyn;
      ctx.sym.relaIpltEnd->section = &dyn;
      // The end symbol's value is the section size, i.e. it points just past
      // the last IRELATIVE relocation.
      ctx.sym.relaIpltEnd->value = dyn.getSize();
    }
  }

  // Find the last PT_LOAD segment (anchors _edata/_end) and the last section
  // of the last read-only PT_LOAD that holds no "large" section (anchors
  // _etext). On x86-64, SHF_X86_64_LARGE sections are excluded from the
  // _etext/_edata computation.
  PhdrEntry *last = nullptr;
  OutputSection *lastRO = nullptr;
  auto isLarge = [&ctx = ctx](OutputSection *osec) {
    return ctx.arg.emachine == EM_X86_64 && osec->flags & SHF_X86_64_LARGE;
  };
  for (Partition &part : ctx.partitions) {
    for (auto &p : part.phdrs) {
      if (p->p_type != PT_LOAD)
        continue;
      last = p.get();
      if (!(p->p_flags & PF_W) && p->lastSec && !isLarge(p->lastSec))
        lastRO = p->lastSec;
    }
  }

  if (lastRO) {
    // _etext is the first location after the last read-only loadable segment
    // that does not contain large sections.
    if (ctx.sym.etext1)
      ctx.sym.etext1->section = lastRO;
    if (ctx.sym.etext2)
      ctx.sym.etext2->section = lastRO;
  }

  if (last) {
    // _edata points to the end of the last non-large mapped initialized
    // section.
    OutputSection *edata = nullptr;
    for (OutputSection *os : ctx.outputSections) {
      if (os->type != SHT_NOBITS && !isLarge(os))
        edata = os;
      // Stop at the end of the last PT_LOAD; later sections are not mapped.
      if (os == last->lastSec)
        break;
    }

    if (ctx.sym.edata1)
      ctx.sym.edata1->section = edata;
    if (ctx.sym.edata2)
      ctx.sym.edata2->section = edata;

    // _end is the first location after the uninitialized data region.
    if (ctx.sym.end1)
      ctx.sym.end1->section = last->lastSec;
    if (ctx.sym.end2)
      ctx.sym.end2->section = last->lastSec;
  }

  if (ctx.sym.bss) {
    // On RISC-V, set __bss_start to the start of .sbss if present.
    OutputSection *sbss =
        ctx.arg.emachine == EM_RISCV ? findSection(ctx, name: ".sbss") : nullptr;
    ctx.sym.bss->section = sbss ? sbss : findSection(ctx, name: ".bss");
  }

  // Setup MIPS _gp_disp/__gnu_local_gp symbols which should
  // be equal to the _gp symbol's value.
  if (ctx.sym.mipsGp) {
    // Find GP-relative section with the lowest address
    // and use this address to calculate default _gp value.
    for (OutputSection *os : ctx.outputSections) {
      if (os->flags & SHF_MIPS_GPREL) {
        ctx.sym.mipsGp->section = os;
        // 0x7ff0 is the conventional GP offset into the GP-relative area.
        ctx.sym.mipsGp->value = 0x7ff0;
        break;
      }
    }
  }
}
949
950// We want to find how similar two ranks are.
951// The more branches in getSectionRank that match, the more similar they are.
952// Since each branch corresponds to a bit flag, we can just use
953// countLeadingZeros.
954static int getRankProximity(OutputSection *a, SectionCommand *b) {
955 auto *osd = dyn_cast<OutputDesc>(Val: b);
956 return (osd && osd->osec.hasInputSections)
957 ? llvm::countl_zero(Val: a->sortRank ^ osd->osec.sortRank)
958 : -1;
959}
960
961// When placing orphan sections, we want to place them after symbol assignments
962// so that an orphan after
963// begin_foo = .;
964// foo : { *(foo) }
965// end_foo = .;
966// doesn't break the intended meaning of the begin/end symbols.
967// We don't want to go over sections since findOrphanPos is the
968// one in charge of deciding the order of the sections.
969// We don't want to go over changes to '.', since doing so in
970// rx_sec : { *(rx_sec) }
971// . = ALIGN(0x1000);
972// /* The RW PT_LOAD starts here*/
973// rw_sec : { *(rw_sec) }
974// would mean that the RW PT_LOAD would become unaligned.
975static bool shouldSkip(SectionCommand *cmd) {
976 if (auto *assign = dyn_cast<SymbolAssignment>(Val: cmd))
977 return assign->name != ".";
978 return false;
979}
980
// We want to place orphan sections so that they share as much
// characteristics with their neighbors as possible. For example, if
// both are rw, or both are tls.
//
// Returns the position within [b, e] before which the orphan output section
// (whose OutputDesc lives at e) should be inserted; returning e appends it.
static SmallVectorImpl<SectionCommand *>::iterator
findOrphanPos(Ctx &ctx, SmallVectorImpl<SectionCommand *>::iterator b,
              SmallVectorImpl<SectionCommand *>::iterator e) {
  // Place non-alloc orphan sections at the end. This matches how we assign file
  // offsets to non-alloc sections.
  OutputSection *sec = &cast<OutputDesc>(Val: *e)->osec;
  if (!(sec->flags & SHF_ALLOC))
    return e;

  // As a special case, place .relro_padding before the SymbolAssignment using
  // DATA_SEGMENT_RELRO_END, if present.
  if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent()) {
    auto i = std::find_if(first: b, last: e, pred: [=](SectionCommand *a) {
      if (auto *assign = dyn_cast<SymbolAssignment>(Val: a))
        return assign->dataSegmentRelroEnd;
      return false;
    });
    if (i != e)
      return i;
  }

  // Find the most similar output section as the anchor. Rank Proximity is a
  // value in the range [-1, 32] where [0, 32] indicates potential anchors (0:
  // least similar; 32: identical). -1 means not an anchor.
  //
  // In the event of proximity ties, we select the first or last section
  // depending on whether the orphan's rank is smaller.
  int maxP = 0;
  auto i = e;
  for (auto j = b; j != e; ++j) {
    int p = getRankProximity(a: sec, b: *j);
    if (p > maxP ||
        (p == maxP && cast<OutputDesc>(Val: *j)->osec.sortRank <= sec->sortRank)) {
      maxP = p;
      i = j;
    }
  }
  if (i == e)
    return e;

  auto isOutputSecWithInputSections = [](SectionCommand *cmd) {
    auto *osd = dyn_cast<OutputDesc>(Val: cmd);
    return osd && osd->osec.hasInputSections;
  };

  // Then, scan backward or forward through the script for a suitable insertion
  // point. If i's rank is larger, the orphan section can be placed before i.
  //
  // However, don't do this if custom program headers are defined. Otherwise,
  // adding the orphan to a previous segment can change its flags, for example,
  // making a read-only segment writable. If memory regions are defined, an
  // orphan section should continue the same region as the found section to
  // better resemble the behavior of GNU ld.
  bool mustAfter =
      ctx.script->hasPhdrsCommands() || !ctx.script->memoryRegions.empty();
  if (cast<OutputDesc>(Val: *i)->osec.sortRank <= sec->sortRank || mustAfter) {
    // Forward scan: advance past the whole run of anchors with the same
    // proximity so the orphan lands after all equally-similar sections.
    for (auto j = ++i; j != e; ++j) {
      if (!isOutputSecWithInputSections(*j))
        continue;
      if (getRankProximity(a: sec, b: *j) != maxP)
        break;
      i = j + 1;
    }
  } else {
    // Backward scan: back up to just after the previous real output section.
    for (; i != b; --i)
      if (isOutputSecWithInputSections(i[-1]))
        break;
  }

  // As a special case, if the orphan section is the last section, put
  // it at the very end, past any other commands.
  // This matches bfd's behavior and is convenient when the linker script fully
  // specifies the start of the file, but doesn't care about the end (the non
  // alloc sections for example).
  if (std::none_of(first: i, last: e, pred: isOutputSecWithInputSections))
    return e;

  // Skip symbol assignments (other than to '.') so begin/end marker symbols
  // keep bracketing only their intended section; see shouldSkip above.
  while (i != e && shouldSkip(cmd: *i))
    ++i;
  return i;
}
1065
// Adds random priorities to sections not already in the map.
// Implements --shuffle-sections=<glob>=<seed>: for each pattern, the matched
// sections are permuted among their own slots (UINT32_MAX reverses, seed 0
// uses a random seed), then every section still missing from `order` gets a
// fresh non-negative priority following the resulting order.
static void maybeShuffle(Ctx &ctx,
                         DenseMap<const InputSectionBase *, int> &order) {
  if (ctx.arg.shuffleSections.empty())
    return;

  // Work on a copy of the input section list; `matched` is reused per pattern.
  SmallVector<InputSectionBase *, 0> matched, sections = ctx.inputSections;
  matched.reserve(N: sections.size());
  for (const auto &patAndSeed : ctx.arg.shuffleSections) {
    matched.clear();
    for (InputSectionBase *sec : sections)
      if (patAndSeed.first.match(S: sec->name))
        matched.push_back(Elt: sec);
    const uint32_t seed = patAndSeed.second;
    if (seed == UINT32_MAX) {
      // If --shuffle-sections <section-glob>=-1, reverse the section order. The
      // section order is stable even if the number of sections changes. This is
      // useful to catch issues like static initialization order fiasco
      // reliably.
      std::reverse(first: matched.begin(), last: matched.end());
    } else {
      std::mt19937 g(seed ? seed : std::random_device()());
      llvm::shuffle(first: matched.begin(), last: matched.end(), g);
    }
    // Scatter the permuted sections back into the exact slots the pattern
    // matched, leaving non-matching sections untouched.
    size_t i = 0;
    for (InputSectionBase *&sec : sections)
      if (patAndSeed.first.match(S: sec->name))
        sec = matched[i++];
  }

  // Existing priorities are < 0, so use priorities >= 0 for the missing
  // sections.
  int prio = 0;
  for (InputSectionBase *sec : sections) {
    if (order.try_emplace(Key: sec, Args&: prio).second)
      ++prio;
  }
}
1104
// Return section order within an InputSectionDescription.
// If both --symbol-ordering-file and call graph profile are present, the order
// file takes precedence, but the call graph profile is still used for symbols
// that don't appear in the order file.
static DenseMap<const InputSectionBase *, int> buildSectionOrder(Ctx &ctx) {
  DenseMap<const InputSectionBase *, int> sectionOrder;
  // If any Balanced Partitioning option is set, it provides the base order;
  // otherwise fall back to the call graph profile if one was supplied.
  if (ctx.arg.bpStartupFunctionSort || ctx.arg.bpFunctionOrderForCompression ||
      ctx.arg.bpDataOrderForCompression ||
      !ctx.arg.bpCompressionSortSpecs.empty()) {
    TimeTraceScope timeScope("Balanced Partitioning Section Orderer");
    sectionOrder = runBalancedPartitioning(
        ctx, profilePath: ctx.arg.bpStartupFunctionSort ? ctx.arg.irpgoProfilePath : "",
        compressionSortSpecs: ctx.arg.bpCompressionSortSpecs, forFunctionCompression: ctx.arg.bpFunctionOrderForCompression,
        forDataCompression: ctx.arg.bpDataOrderForCompression,
        compressionSortStartupFunctions: ctx.arg.bpCompressionSortStartupFunctions,
        verbose: ctx.arg.bpVerboseSectionOrderer);
  } else if (!ctx.arg.callGraphProfile.empty()) {
    sectionOrder = computeCallGraphProfileOrder(ctx);
  }

  if (ctx.arg.symbolOrderingFile.empty())
    return sectionOrder;

  struct SymbolOrderEntry {
    // Negative priority assigned from the ordering file; earlier entries get
    // smaller (stronger) values.
    int priority;
    // Set once a symbol with this name is seen; drives the "no such symbol"
    // warning below.
    bool present;
  };

  // Build a map from symbols to their priorities. Symbols that didn't
  // appear in the symbol ordering file have the lowest priority 0.
  // All explicitly mentioned symbols have negative (higher) priorities.
  DenseMap<CachedHashStringRef, SymbolOrderEntry> symbolOrder;
  int priority = -sectionOrder.size() - ctx.arg.symbolOrderingFile.size();
  for (StringRef s : ctx.arg.symbolOrderingFile)
    symbolOrder.insert(KV: {CachedHashStringRef(s), {.priority: priority++, .present: false}});

  // Build a map from sections to their priorities.
  auto addSym = [&](Symbol &sym) {
    auto it = symbolOrder.find(Val: CachedHashStringRef(sym.getName()));
    if (it == symbolOrder.end())
      return;
    SymbolOrderEntry &ent = it->second;
    ent.present = true;

    maybeWarnUnorderableSymbol(ctx, sym: &sym);

    if (auto *d = dyn_cast<Defined>(Val: &sym)) {
      if (auto *sec = dyn_cast_or_null<InputSectionBase>(Val: d->section)) {
        // A section may define several ordered symbols; keep the strongest
        // (most negative) priority any of them assigns to it.
        int &priority = sectionOrder[cast<InputSectionBase>(Val: sec)];
        priority = std::min(a: priority, b: ent.priority);
      }
    }
  };

  // We want both global and local symbols. We get the global ones from the
  // symbol table and iterate the object files for the local ones.
  for (Symbol *sym : ctx.symtab->getSymbols())
    addSym(*sym);

  for (ELFFileBase *file : ctx.objectFiles)
    for (Symbol *sym : file->getLocalSymbols())
      addSym(*sym);

  if (ctx.arg.warnSymbolOrdering)
    for (auto orderEntry : symbolOrder)
      if (!orderEntry.second.present)
        Warn(ctx) << "symbol ordering file: no such symbol: "
                  << orderEntry.first.val();

  return sectionOrder;
}
1176
// Sorts the sections in ISD according to the provided section order.
// `order` maps sections to priorities (smaller sorts earlier); sections not
// in the map keep their original relative order and surround the ordered
// group (see the insertion-point discussion below).
static void
sortISDBySectionOrder(Ctx &ctx, InputSectionDescription *isd,
                      const DenseMap<const InputSectionBase *, int> &order,
                      bool executableOutputSection) {
  SmallVector<InputSection *, 0> unorderedSections;
  SmallVector<std::pair<InputSection *, int>, 0> orderedSections;
  uint64_t unorderedSize = 0;
  uint64_t totalSize = 0;

  // Partition sections into those with an explicit priority and the rest.
  for (InputSection *isec : isd->sections) {
    if (executableOutputSection)
      totalSize += isec->getSize();
    auto i = order.find(Val: isec);
    if (i == order.end()) {
      unorderedSections.push_back(Elt: isec);
      unorderedSize += isec->getSize();
      continue;
    }
    orderedSections.push_back(Elt: {isec, i->second});
  }
  llvm::sort(C&: orderedSections, Comp: llvm::less_second());

  // Find an insertion point for the ordered section list in the unordered
  // section list. On targets with limited-range branches, this is the mid-point
  // of the unordered section list. This decreases the likelihood that a range
  // extension thunk will be needed to enter or exit the ordered region. If the
  // ordered section list is a list of hot functions, we can generally expect
  // the ordered functions to be called more often than the unordered functions,
  // making it more likely that any particular call will be within range, and
  // therefore reducing the number of thunks required.
  //
  // For example, imagine that you have 8MB of hot code and 32MB of cold code.
  // If the layout is:
  //
  // 8MB hot
  // 32MB cold
  //
  // only the first 8-16MB of the cold code (depending on which hot function it
  // is actually calling) can call the hot code without a range extension thunk.
  // However, if we use this layout:
  //
  // 16MB cold
  // 8MB hot
  // 16MB cold
  //
  // both the last 8-16MB of the first block of cold code and the first 8-16MB
  // of the second block of cold code can call the hot code without a thunk. So
  // we effectively double the amount of code that could potentially call into
  // the hot code without a thunk.
  //
  // The above is not necessary if total size of input sections in this "isd"
  // is small. Note that we assume all input sections are executable if the
  // output section is executable (which is not always true but supposed to
  // cover most cases).
  size_t insPt = 0;
  if (executableOutputSection && !orderedSections.empty() &&
      ctx.target->getThunkSectionSpacing() &&
      totalSize >= ctx.target->getThunkSectionSpacing()) {
    uint64_t unorderedPos = 0;
    for (; insPt != unorderedSections.size(); ++insPt) {
      unorderedPos += unorderedSections[insPt]->getSize();
      if (unorderedPos > unorderedSize / 2)
        break;
    }
  }

  // Rebuild the list: first half of the unordered sections, then the ordered
  // group, then the remaining unordered sections.
  isd->sections.clear();
  for (InputSection *isec : ArrayRef(unorderedSections).slice(N: 0, M: insPt))
    isd->sections.push_back(Elt: isec);
  for (std::pair<InputSection *, int> p : orderedSections)
    isd->sections.push_back(Elt: p.first);
  for (InputSection *isec : ArrayRef(unorderedSections).slice(N: insPt))
    isd->sections.push_back(Elt: isec);
}
1252
// Apply the per-output-section sorting rules: the priority order from
// --symbol-ordering-file/--shuffle-sections, and (default layout only) the
// built-in .init_array/.ctors/.toc rules.
static void sortSection(Ctx &ctx, OutputSection &osec,
                        const DenseMap<const InputSectionBase *, int> &order) {
  StringRef name = osec.name;

  // Never sort these.
  if (name == ".init" || name == ".fini")
    return;

  // Sort input sections by priority using the list provided by
  // --symbol-ordering-file or --shuffle-sections=. This is a least significant
  // digit radix sort. The sections may be sorted stably again by a more
  // significant key.
  if (!order.empty())
    for (SectionCommand *b : osec.commands)
      if (auto *isd = dyn_cast<InputSectionDescription>(Val: b))
        sortISDBySectionOrder(ctx, isd, order, executableOutputSection: osec.flags & SHF_EXECINSTR);

  // With a SECTIONS command the script fixes the remaining order; the
  // built-in rules below apply only to the default layout.
  if (ctx.script->hasSectionsCommand)
    return;

  if (name == ".init_array" || name == ".fini_array") {
    osec.sortInitFini();
  } else if (name == ".ctors" || name == ".dtors") {
    osec.sortCtorsDtors();
  } else if (ctx.arg.emachine == EM_PPC64 && name == ".toc") {
    // .toc is allocated just after .got and is accessed using GOT-relative
    // relocations. Object files compiled with small code model have an
    // addressable range of [.got, .got + 0xFFFC] for GOT-relative relocations.
    // To reduce the risk of relocation overflow, .toc contents are sorted so
    // that sections having smaller relocation offsets are at beginning of .toc
    assert(osec.commands.size() == 1);
    auto *isd = cast<InputSectionDescription>(Val: osec.commands[0]);
    llvm::stable_sort(Range&: isd->sections,
                      C: [](const InputSection *a, const InputSection *b) -> bool {
                        return a->file->ppc64SmallCodeModelTocRelocs &&
                               !b->file->ppc64SmallCodeModelTocRelocs;
                      });
  }
}
1292
1293// Sort sections within each InputSectionDescription.
1294template <class ELFT> void Writer<ELFT>::sortInputSections() {
1295 // Assign negative priorities.
1296 DenseMap<const InputSectionBase *, int> order = buildSectionOrder(ctx);
1297 // Assign non-negative priorities due to --shuffle-sections.
1298 maybeShuffle(ctx, order);
1299 for (SectionCommand *cmd : ctx.script->sectionCommands)
1300 if (auto *osd = dyn_cast<OutputDesc>(Val: cmd))
1301 sortSection(ctx, osec&: osd->osec, order);
1302}
1303
// Establish the final order of output sections: sort input sections, assign
// sort ranks, order output descriptions (or place orphans when a SECTIONS
// command is present), then let the script adjust the result.
template <class ELFT> void Writer<ELFT>::sortSections() {
  llvm::TimeTraceScope timeScope("Sort sections");

  // Don't sort if using -r. It is not necessary and we want to preserve the
  // relative order for SHF_LINK_ORDER sections.
  if (ctx.arg.relocatable) {
    ctx.script->adjustOutputSections();
    return;
  }

  sortInputSections();

  // Compute a rank for every output section; compareSections and orphan
  // placement both key off osec.sortRank.
  for (SectionCommand *cmd : ctx.script->sectionCommands)
    if (auto *osd = dyn_cast<OutputDesc>(Val: cmd))
      osd->osec.sortRank = getSectionRank(ctx, osec&: osd->osec);
  if (!ctx.script->hasSectionsCommand) {
    // OutputDescs are mostly contiguous, but may be interleaved with
    // SymbolAssignments in the presence of INSERT commands.
    auto mid = std::stable_partition(
        ctx.script->sectionCommands.begin(), ctx.script->sectionCommands.end(),
        [](SectionCommand *cmd) { return isa<OutputDesc>(Val: cmd); });
    std::stable_sort(
        ctx.script->sectionCommands.begin(), mid,
        [&ctx = ctx](auto *l, auto *r) { return compareSections(ctx, l, r); });
  }

  // Process INSERT commands and update output section attributes. From this
  // point onwards the order of script->sectionCommands is fixed.
  ctx.script->processInsertCommands();
  ctx.script->adjustOutputSections();

  // With a SECTIONS command, sections not named by the script (orphans) must
  // be merged into the script-defined order.
  if (ctx.script->hasSectionsCommand)
    sortOrphanSections();

  ctx.script->adjustSectionsAfterSorting();
}
1340
template <class ELFT> void Writer<ELFT>::sortOrphanSections() {
  // Orphan sections are sections present in the input files which are
  // not explicitly placed into the output file by the linker script.
  //
  // The sections in the linker script are already in the correct
  // order. We have to figuere out where to insert the orphan
  // sections.
  //
  // The order of the sections in the script is arbitrary and may not agree with
  // compareSections. This means that we cannot easily define a strict weak
  // ordering. To see why, consider a comparison of a section in the script and
  // one not in the script. We have a two simple options:
  // * Make them equivalent (a is not less than b, and b is not less than a).
  //   The problem is then that equivalence has to be transitive and we can
  //   have sections a, b and c with only b in a script and a less than c
  //   which breaks this property.
  // * Use compareSectionsNonScript. Given that the script order doesn't have
  //   to match, we can end up with sections a, b, c, d where b and c are in the
  //   script and c is compareSectionsNonScript less than b. In which case d
  //   can be equivalent to c, a to b and d < a. As a concrete example:
  //   .a (rx) # not in script
  //   .b (rx) # in script
  //   .c (ro) # in script
  //   .d (ro) # not in script
  //
  // The way we define an order then is:
  // * Sort only the orphan sections. They are in the end right now.
  // * Move each orphan section to its preferred position. We try
  //   to put each section in the last position where it can share
  //   a PT_LOAD.
  //
  // There is some ambiguity as to where exactly a new entry should be
  // inserted, because Commands contains not only output section
  // commands but also other types of commands such as symbol assignment
  // expressions. There's no correct answer here due to the lack of the
  // formal specification of the linker script. We use heuristics to
  // determine whether a new output command should be added before or
  // after another commands. For the details, look at shouldSkip
  // function.

  // nonScriptI is the first command whose output section has no section
  // index assigned yet (UINT32_MAX), i.e. the first orphan.
  auto i = ctx.script->sectionCommands.begin();
  auto e = ctx.script->sectionCommands.end();
  auto nonScriptI = std::find_if(i, e, [](SectionCommand *cmd) {
    if (auto *osd = dyn_cast<OutputDesc>(Val: cmd))
      return osd->osec.sectionIndex == UINT32_MAX;
    return false;
  });

  // Sort the orphan sections.
  std::stable_sort(nonScriptI, e, [&ctx = ctx](auto *l, auto *r) {
    return compareSections(ctx, l, r);
  });

  // As a horrible special case, skip the first . assignment if it is before any
  // section. We do this because it is common to set a load address by starting
  // the script with ". = 0xabcd" and the expectation is that every section is
  // after that.
  auto firstSectionOrDotAssignment =
      std::find_if(i, e, [](SectionCommand *cmd) { return !shouldSkip(cmd); });
  if (firstSectionOrDotAssignment != e &&
      isa<SymbolAssignment>(**firstSectionOrDotAssignment))
    ++firstSectionOrDotAssignment;
  i = firstSectionOrDotAssignment;

  // Move each (run of) orphan(s) to its chosen position.
  while (nonScriptI != e) {
    auto pos = findOrphanPos(ctx, i, nonScriptI);
    OutputSection *orphan = &cast<OutputDesc>(*nonScriptI)->osec;

    // As an optimization, find all sections with the same sort rank
    // and insert them with one rotate.
    unsigned rank = orphan->sortRank;
    auto end = std::find_if(nonScriptI + 1, e, [=](SectionCommand *cmd) {
      return cast<OutputDesc>(Val: cmd)->osec.sortRank != rank;
    });
    std::rotate(pos, nonScriptI, end);
    nonScriptI = end;
  }
}
1419
1420static bool compareByFilePosition(InputSection *a, InputSection *b) {
1421 InputSection *la = a->flags & SHF_LINK_ORDER ? a->getLinkOrderDep() : nullptr;
1422 InputSection *lb = b->flags & SHF_LINK_ORDER ? b->getLinkOrderDep() : nullptr;
1423 // SHF_LINK_ORDER sections with non-zero sh_link are ordered before
1424 // non-SHF_LINK_ORDER sections and SHF_LINK_ORDER sections with zero sh_link.
1425 if (!la || !lb)
1426 return la && !lb;
1427 OutputSection *aOut = la->getParent();
1428 OutputSection *bOut = lb->getParent();
1429
1430 if (aOut == bOut)
1431 return la->outSecOff < lb->outSecOff;
1432 if (aOut->addr == bOut->addr)
1433 return aOut->sectionIndex < bOut->sectionIndex;
1434 return aOut->addr < bOut->addr;
1435}
1436
// Reorder SHF_LINK_ORDER input sections so they follow the file position of
// the sections their sh_link points to (see compareByFilePosition).
template <class ELFT> void Writer<ELFT>::resolveShfLinkOrder() {
  llvm::TimeTraceScope timeScope("Resolve SHF_LINK_ORDER");
  for (OutputSection *sec : ctx.outputSections) {
    if (!(sec->flags & SHF_LINK_ORDER))
      continue;

    // The ARM.exidx section use SHF_LINK_ORDER, but we have consolidated
    // this processing inside the ARMExidxsyntheticsection::finalizeContents().
    if (!ctx.arg.relocatable && ctx.arg.emachine == EM_ARM &&
        sec->type == SHT_ARM_EXIDX)
      continue;

    // Link order may be distributed across several InputSectionDescriptions.
    // Sorting is performed separately.
    SmallVector<InputSection **, 0> scriptSections;
    SmallVector<InputSection *, 0> sections;
    for (SectionCommand *cmd : sec->commands) {
      auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
      if (!isd)
        continue;
      bool hasLinkOrder = false;
      scriptSections.clear();
      sections.clear();
      // Remember the address of each slot so the sorted sections can be
      // written back into the same positions afterwards.
      for (InputSection *&isec : isd->sections) {
        if (isec->flags & SHF_LINK_ORDER) {
          InputSection *link = isec->getLinkOrderDep();
          if (link && !link->getParent())
            ErrAlways(ctx) << isec << ": sh_link points to discarded section "
                           << link;
          hasLinkOrder = true;
        }
        scriptSections.push_back(Elt: &isec);
        sections.push_back(Elt: isec);
      }
      // Skip sorting when errors were reported (e.g. discarded sh_link
      // targets above) to avoid sorting with dangling dependencies.
      if (hasLinkOrder && errCount(ctx) == 0) {
        llvm::stable_sort(Range&: sections, C: compareByFilePosition);
        for (int i = 0, n = sections.size(); i != n; ++i)
          *scriptSections[i] = sections[i];
      }
    }
  }
}
1479
1480static void finalizeSynthetic(Ctx &ctx, SyntheticSection *sec) {
1481 if (sec && sec->isNeeded() && sec->getParent()) {
1482 llvm::TimeTraceScope timeScope("Finalize synthetic sections", sec->name);
1483 sec->finalizeContents();
1484 }
1485}
1486
1487static bool canInsertPadding(OutputSection *sec) {
1488 StringRef s = sec->name;
1489 return s == ".bss" || s == ".data" || s == ".data.rel.ro" || s == ".lbss" ||
1490 s == ".ldata" || s == ".lrodata" || s == ".ltext" || s == ".rodata" ||
1491 s.starts_with(Prefix: ".text");
1492}
1493
// Insert pseudo-randomly sized PaddingSections between input sections,
// seeded by --randomize-section-padding. Within paddable sections (see
// canInsertPadding), a pad bounded by the max page size is inserted at the
// first description of each new PT_LOAD segment, and each input section is
// preceded by an alignment-sized pad with probability 1/16.
static void randomizeSectionPadding(Ctx &ctx) {
  std::mt19937 g(*ctx.arg.randomizeSectionPadding);
  PhdrEntry *curPtLoad = nullptr;
  for (OutputSection *os : ctx.outputSections) {
    if (!canInsertPadding(sec: os))
      continue;
    for (SectionCommand *bc : os->commands) {
      if (auto *isd = dyn_cast<InputSectionDescription>(Val: bc)) {
        SmallVector<InputSection *, 0> tmp;
        // First paddable description of a new PT_LOAD: pad by up to one
        // maximum page.
        if (os->ptLoad != curPtLoad) {
          tmp.push_back(
              Elt: make<PaddingSection>(args&: ctx, args: g() % ctx.arg.maxPageSize, args&: os));
          curPtLoad = os->ptLoad;
        }
        for (InputSection *isec : isd->sections) {
          // Probability of inserting padding is 1 in 16.
          if (g() % 16 == 0)
            tmp.push_back(Elt: make<PaddingSection>(args&: ctx, args&: isec->addralign, args&: os));
          tmp.push_back(Elt: isec);
        }
        isd->sections = std::move(tmp);
      }
    }
  }
}
1519
1520// We need to generate and finalize the content that depends on the address of
1521// InputSections. As the generation of the content may also alter InputSection
1522// addresses we must converge to a fixed point. We do that here. See the comment
1523// in Writer<ELFT>::finalizeSections().
1524template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
1525 llvm::TimeTraceScope timeScope("Finalize address dependent content");
1526 AArch64Err843419Patcher a64p(ctx);
1527 ARMErr657417Patcher a32p(ctx);
1528 ctx.script->assignAddresses();
1529
1530 // .ARM.exidx and SHF_LINK_ORDER do not require precise addresses, but they
1531 // do require the relative addresses of OutputSections because linker scripts
1532 // can assign Virtual Addresses to OutputSections that are not monotonically
1533 // increasing. Anything here must be repeatable, since spilling may change
1534 // section order.
1535 const auto finalizeOrderDependentContent = [this] {
1536 for (Partition &part : ctx.partitions)
1537 finalizeSynthetic(ctx, sec: part.armExidx.get());
1538 resolveShfLinkOrder();
1539 };
1540 finalizeOrderDependentContent();
1541
1542 if (ctx.arg.randomizeSectionPadding)
1543 randomizeSectionPadding(ctx);
1544
1545 // Iterate until a fixed point is reached, skipping relocatable links since
1546 // the final addresses are unavailable.
1547 uint32_t pass = 0, assignPasses = 0;
1548 while (!ctx.arg.relocatable) {
1549 bool changed = ctx.target->needsThunks
1550 ? tc.createThunks(pass, outputSections: ctx.outputSections)
1551 : ctx.target->relaxOnce(pass);
1552 bool spilled = ctx.script->spillSections();
1553 changed |= spilled;
1554 ++pass;
1555
1556 // With Thunk Size much smaller than branch range we expect to
1557 // converge quickly; if we get to 30 something has gone wrong.
1558 if (changed && pass >= 30) {
1559 Err(ctx) << "address assignment did not converge";
1560 break;
1561 }
1562
1563 if (ctx.arg.fixCortexA53Errata843419) {
1564 if (changed)
1565 ctx.script->assignAddresses();
1566 changed |= a64p.createFixes();
1567 }
1568 if (ctx.arg.fixCortexA8) {
1569 if (changed)
1570 ctx.script->assignAddresses();
1571 changed |= a32p.createFixes();
1572 }
1573
1574 finalizeSynthetic(ctx, sec: ctx.in.got.get());
1575 if (ctx.in.mipsGot)
1576 ctx.in.mipsGot->updateAllocSize(ctx);
1577
1578 for (Partition &part : ctx.partitions) {
1579 // The R_AARCH64_AUTH_RELATIVE has a smaller addend field as bits [63:32]
1580 // encode the signing schema. We've put relocations in .relr.auth.dyn
1581 // during RelocationScanner::processAux, but the target VA for some of
1582 // them might be wider than 32 bits. We can only know the final VA at this
1583 // point, so move relocations with large values from .relr.auth.dyn to
1584 // .rela.dyn. See also AArch64::relocate.
1585 if (part.relrAuthDyn) {
1586 auto it = llvm::remove_if(
1587 part.relrAuthDyn->relocs, [this, &part](const RelativeReloc &elem) {
1588 Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
1589 if (isInt<32>(x: reloc.sym->getVA(ctx, addend: reloc.addend)))
1590 return false;
1591 reloc.expr = R_NONE;
1592 part.relaDyn->addReloc(reloc: {R_AARCH64_AUTH_RELATIVE, elem.inputSec,
1593 reloc.offset, false, *reloc.sym,
1594 reloc.addend, R_ABS});
1595 return true;
1596 });
1597 changed |= (it != part.relrAuthDyn->relocs.end());
1598 part.relrAuthDyn->relocs.erase(it, part.relrAuthDyn->relocs.end());
1599 }
1600 if (part.relaDyn)
1601 changed |= part.relaDyn->updateAllocSize(ctx);
1602 if (part.relrDyn)
1603 changed |= part.relrDyn->updateAllocSize(ctx);
1604 if (part.relrAuthDyn)
1605 changed |= part.relrAuthDyn->updateAllocSize(ctx);
1606 if (part.memtagGlobalDescriptors)
1607 changed |= part.memtagGlobalDescriptors->updateAllocSize(ctx);
1608 if (part.ehFrameHdr && part.ehFrameHdr->isNeeded())
1609 changed |= part.ehFrameHdr->updateAllocSize(ctx);
1610 }
1611
1612 std::pair<const OutputSection *, const Defined *> changes =
1613 ctx.script->assignAddresses();
1614 if (!changed) {
1615 // Some symbols may be dependent on section addresses. When we break the
1616 // loop, the symbol values are finalized because a previous
1617 // assignAddresses() finalized section addresses.
1618 if (!changes.first && !changes.second)
1619 break;
1620 if (++assignPasses == 5) {
1621 if (changes.first)
1622 Err(ctx) << "address (0x" << Twine::utohexstr(Val: changes.first->addr)
1623 << ") of section '" << changes.first->name
1624 << "' does not converge";
1625 if (changes.second)
1626 Err(ctx) << "assignment to symbol " << changes.second
1627 << " does not converge";
1628 break;
1629 }
1630 } else if (spilled) {
1631 // Spilling can change relative section order.
1632 finalizeOrderDependentContent();
1633 }
1634 // If updateAllocSize reported errors (e.g. "unknown FDE size encoding" for
1635 // part.ehFrameHdr), break to avoid duplicate diagnostics from the loop.
1636 if (errCount(ctx))
1637 break;
1638 }
1639 if (!ctx.arg.relocatable)
1640 ctx.target->finalizeRelax(passes: pass);
1641
1642 if (ctx.arg.relocatable)
1643 for (OutputSection *sec : ctx.outputSections)
1644 sec->addr = 0;
1645
1646 uint64_t imageBase = ctx.script->hasSectionsCommand || ctx.arg.relocatable
1647 ? 0
1648 : ctx.target->getImageBase();
1649 for (SectionCommand *cmd : ctx.script->sectionCommands) {
1650 auto *osd = dyn_cast<OutputDesc>(Val: cmd);
1651 if (!osd)
1652 continue;
1653 OutputSection *osec = &osd->osec;
1654 // Error if the address is below the image base when SECTIONS is absent
1655 // (e.g. when -Ttext is specified and smaller than the default target image
1656 // base for no-pie).
1657 if (osec->addr < imageBase && (osec->flags & SHF_ALLOC)) {
1658 Err(ctx) << "section '" << osec->name << "' address (0x"
1659 << Twine::utohexstr(Val: osec->addr)
1660 << ") is smaller than image base (0x"
1661 << Twine::utohexstr(Val: imageBase) << "); specify --image-base";
1662 }
1663
1664 // If addrExpr is set, the address may not be a multiple of the alignment.
1665 // Warn because this is error-prone.
1666 if (osec->addr % osec->addralign != 0)
1667 Warn(ctx) << "address (0x" << Twine::utohexstr(Val: osec->addr)
1668 << ") of section " << osec->name
1669 << " is not a multiple of alignment (" << osec->addralign
1670 << ")";
1671 }
1672
1673 // Sizes are no longer allowed to grow, so all allowable spills have been
1674 // taken. Remove any leftover potential spills.
1675 ctx.script->erasePotentialSpillSections();
1676}
1677
1678// If Input Sections have been shrunk (basic block sections) then
1679// update symbol values and sizes associated with these sections. With basic
1680// block sections, input sections can shrink when the jump instructions at
1681// the end of the section are relaxed.
1682static void fixSymbolsAfterShrinking(Ctx &ctx) {
1683 for (InputFile *File : ctx.objectFiles) {
1684 parallelForEach(R: File->getSymbols(), Fn: [&](Symbol *Sym) {
1685 auto *def = dyn_cast<Defined>(Val: Sym);
1686 if (!def)
1687 return;
1688
1689 const SectionBase *sec = def->section;
1690 if (!sec)
1691 return;
1692
1693 const InputSectionBase *inputSec = dyn_cast<InputSectionBase>(Val: sec);
1694 if (!inputSec || !inputSec->bytesDropped)
1695 return;
1696
1697 const size_t OldSize = inputSec->content().size();
1698 const size_t NewSize = OldSize - inputSec->bytesDropped;
1699
1700 if (def->value > NewSize && def->value <= OldSize) {
1701 LLVM_DEBUG(llvm::dbgs()
1702 << "Moving symbol " << Sym->getName() << " from "
1703 << def->value << " to "
1704 << def->value - inputSec->bytesDropped << " bytes\n");
1705 def->value -= inputSec->bytesDropped;
1706 return;
1707 }
1708
1709 if (def->value + def->size > NewSize && def->value <= OldSize &&
1710 def->value + def->size <= OldSize) {
1711 LLVM_DEBUG(llvm::dbgs()
1712 << "Shrinking symbol " << Sym->getName() << " from "
1713 << def->size << " to " << def->size - inputSec->bytesDropped
1714 << " bytes\n");
1715 def->size -= inputSec->bytesDropped;
1716 }
1717 });
1718 }
1719}
1720
1721// If basic block sections exist, there are opportunities to delete fall thru
1722// jumps and shrink jump instructions after basic block reordering. This
1723// relaxation pass does that. It is only enabled when --optimize-bb-jumps
1724// option is used.
1725template <class ELFT> void Writer<ELFT>::optimizeBasicBlockJumps() {
1726 assert(ctx.arg.optimizeBBJumps);
1727 SmallVector<InputSection *, 0> storage;
1728
1729 ctx.script->assignAddresses();
1730 // For every output section that has executable input sections, this
1731 // does the following:
1732 // 1. Deletes all direct jump instructions in input sections that
1733 // jump to the following section as it is not required.
1734 // 2. If there are two consecutive jump instructions, it checks
1735 // if they can be flipped and one can be deleted.
1736 for (OutputSection *osec : ctx.outputSections) {
1737 if (!(osec->flags & SHF_EXECINSTR))
1738 continue;
1739 ArrayRef<InputSection *> sections = getInputSections(os: *osec, storage);
1740 size_t numDeleted = 0;
1741 // Delete all fall through jump instructions. Also, check if two
1742 // consecutive jump instructions can be flipped so that a fall
1743 // through jmp instruction can be deleted.
1744 for (size_t i = 0, e = sections.size(); i != e; ++i) {
1745 InputSection *next = i + 1 < sections.size() ? sections[i + 1] : nullptr;
1746 InputSection &sec = *sections[i];
1747 numDeleted += ctx.target->deleteFallThruJmpInsn(is&: sec, nextIS: next);
1748 }
1749 if (numDeleted > 0) {
1750 ctx.script->assignAddresses();
1751 LLVM_DEBUG(llvm::dbgs()
1752 << "Removing " << numDeleted << " fall through jumps\n");
1753 }
1754 }
1755
1756 fixSymbolsAfterShrinking(ctx);
1757
1758 for (OutputSection *osec : ctx.outputSections)
1759 for (InputSection *is : getInputSections(os: *osec, storage))
1760 is->trim();
1761}
1762
1763// In order to allow users to manipulate linker-synthesized sections,
1764// we had to add synthetic sections to the input section list early,
1765// even before we make decisions whether they are needed. This allows
1766// users to write scripts like this: ".mygot : { .got }".
1767//
1768// Doing it has an unintended side effects. If it turns out that we
1769// don't need a .got (for example) at all because there's no
1770// relocation that needs a .got, we don't want to emit .got.
1771//
1772// To deal with the above problem, this function is called after
1773// scanRelocations is called to remove synthetic sections that turn
1774// out to be empty.
1775static void removeUnusedSyntheticSections(Ctx &ctx) {
1776 // All input synthetic sections that can be empty are placed after
1777 // all regular ones. Reverse iterate to find the first synthetic section
1778 // after a non-synthetic one which will be our starting point.
1779 auto start =
1780 llvm::find_if(Range: llvm::reverse(C&: ctx.inputSections), P: [](InputSectionBase *s) {
1781 return !isa<SyntheticSection>(Val: s);
1782 }).base();
1783
1784 // Remove unused synthetic sections from ctx.inputSections;
1785 DenseSet<InputSectionBase *> unused;
1786 auto end =
1787 std::remove_if(first: start, last: ctx.inputSections.end(), pred: [&](InputSectionBase *s) {
1788 auto *sec = cast<SyntheticSection>(Val: s);
1789 if (sec->getParent() && sec->isNeeded())
1790 return false;
1791 // .relr.auth.dyn relocations may be moved to .rela.dyn in
1792 // finalizeAddressDependentContent, making .rela.dyn no longer empty.
1793 // Conservatively keep .rela.dyn. .relr.auth.dyn can be made empty, but
1794 // we would fail to remove it here.
1795 if (ctx.arg.emachine == EM_AARCH64 && ctx.arg.relrPackDynRelocs &&
1796 sec == ctx.mainPart->relaDyn.get())
1797 return false;
1798 unused.insert(V: sec);
1799 return true;
1800 });
1801 ctx.inputSections.erase(CS: end, CE: ctx.inputSections.end());
1802
1803 // Remove unused synthetic sections from the corresponding input section
1804 // description and orphanSections.
1805 for (auto *sec : unused)
1806 if (OutputSection *osec = cast<SyntheticSection>(Val: sec)->getParent())
1807 for (SectionCommand *cmd : osec->commands)
1808 if (auto *isd = dyn_cast<InputSectionDescription>(Val: cmd))
1809 llvm::erase_if(C&: isd->sections, P: [&](InputSection *isec) {
1810 return unused.contains(V: isec);
1811 });
1812 llvm::erase_if(C&: ctx.script->orphanSections, P: [&](const InputSectionBase *sec) {
1813 return unused.contains(V: sec);
1814 });
1815}
1816
// Create output section objects and add them to OutputSections. This is the
// main finalization driver: it defines linker-synthesized symbols, scans
// relocations, creates program headers, and finalizes synthetic sections.
// The ordering of the steps below is load-bearing.
template <class ELFT> void Writer<ELFT>::finalizeSections() {
  if (!ctx.arg.relocatable) {
    ctx.out.preinitArray = findSection(ctx, ".preinit_array");
    ctx.out.initArray = findSection(ctx, ".init_array");
    ctx.out.finiArray = findSection(ctx, ".fini_array");

    // The linker needs to define SECNAME_start, SECNAME_end and SECNAME_stop
    // symbols for sections, so that the runtime can get the start and end
    // addresses of each section by section name. Add such symbols.
    addStartEndSymbols();
    for (SectionCommand *cmd : ctx.script->sectionCommands)
      if (auto *osd = dyn_cast<OutputDesc>(cmd))
        addStartStopSymbols(osd->osec);

    // Add _DYNAMIC symbol. Unlike GNU gold, our _DYNAMIC symbol has no type.
    // It should be okay as no one seems to care about the type.
    // Even the author of gold doesn't remember why gold behaves that way.
    // https://sourceware.org/ml/binutils/2002-03/msg00360.html
    if (ctx.mainPart->dynamic->parent) {
      Symbol *s = ctx.symtab->addSymbol(Defined{
          ctx, ctx.internalFile, "_DYNAMIC", STB_WEAK, STV_HIDDEN, STT_NOTYPE,
          /*value=*/0, /*size=*/0, ctx.mainPart->dynamic.get()});
      s->isUsedInRegularObj = true;
    }

    // Define __rel[a]_iplt_{start,end} symbols if needed.
    addRelIpltSymbols();

    // RISC-V's gp can address +/- 2 KiB, set it to .sdata + 0x800. This symbol
    // should only be defined in an executable. If .sdata does not exist, its
    // value/section does not matter but it has to be relative, so set its
    // st_shndx arbitrarily to 1 (ctx.out.elfHeader).
    if (ctx.arg.emachine == EM_RISCV) {
      if (!ctx.arg.shared) {
        OutputSection *sec = findSection(ctx, ".sdata");
        addOptionalRegular(ctx, "__global_pointer$",
                           sec ? sec : ctx.out.elfHeader.get(), 0x800,
                           STV_DEFAULT);
        // Set riscvGlobalPointer to be used by the optional global pointer
        // relaxation.
        if (ctx.arg.relaxGP) {
          Symbol *s = ctx.symtab->find("__global_pointer$");
          if (s && s->isDefined())
            ctx.sym.riscvGlobalPointer = cast<Defined>(s);
        }
      }
    }

    if (ctx.arg.emachine == EM_386 || ctx.arg.emachine == EM_X86_64) {
      // On targets that support TLSDESC, _TLS_MODULE_BASE_ is defined in such a
      // way that:
      //
      // 1) Without relaxation: it produces a dynamic TLSDESC relocation that
      // computes 0.
      // 2) With LD->LE relaxation: _TLS_MODULE_BASE_@tpoff = 0 (lowest address
      // in the TLS block).
      //
      // 2) is special cased in @tpoff computation. To satisfy 1), we define it
      // as an absolute symbol of zero. This is different from GNU linkers which
      // define _TLS_MODULE_BASE_ relative to the first TLS section.
      Symbol *s = ctx.symtab->find("_TLS_MODULE_BASE_");
      if (s && s->isUndefined()) {
        s->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
                                STV_HIDDEN, STT_TLS, /*value=*/0, 0,
                                /*section=*/nullptr});
        ctx.sym.tlsModuleBase = cast<Defined>(s);
      }
    }

    // This is responsible for splitting up the .eh_frame section into
    // pieces. The relocation scan uses those pieces, so this has to be
    // earlier.
    {
      llvm::TimeTraceScope timeScope("Finalize .eh_frame");
      for (Partition &part : ctx.partitions)
        finalizeSynthetic(ctx, part.ehFrame.get());
    }
  }

  // If the previous code block defines any non-hidden symbols (e.g.
  // __global_pointer$), they may be exported.
  if (ctx.arg.exportDynamic)
    for (Symbol *sym : ctx.synthesizedSymbols)
      if (sym->computeBinding(ctx) != STB_LOCAL)
        sym->isExported = true;

  demoteSymbolsAndComputeIsPreemptible(ctx);

  if (ctx.arg.copyRelocs && ctx.arg.discard != DiscardPolicy::None)
    markUsedLocalSymbols<ELFT>(ctx);
  demoteAndCopyLocalSymbols(ctx);

  if (ctx.arg.copyRelocs)
    addSectionSymbols();

  // Change values of linker-script-defined symbols from placeholders (assigned
  // by declareSymbols) to actual definitions.
  ctx.script->processSymbolAssignments();

  if (!ctx.arg.relocatable) {
    llvm::TimeTraceScope timeScope("Scan relocations");
    // Scan relocations. This must be done after every symbol is declared so
    // that we can correctly decide if a dynamic relocation is needed. This is
    // called after processSymbolAssignments() because it needs to know whether
    // a linker-script-defined symbol is absolute.
    scanRelocations<ELFT>(ctx);
    reportUndefinedSymbols(ctx);
    postScanRelocations(ctx);

    if (ctx.in.plt && ctx.in.plt->isNeeded())
      ctx.in.plt->addSymbols();
    if (ctx.in.iplt && ctx.in.iplt->isNeeded())
      ctx.in.iplt->addSymbols();

    if (ctx.arg.unresolvedSymbolsInShlib != UnresolvedPolicy::Ignore) {
      // --noinhibit-exec degrades the error to a warning.
      auto diag =
          ctx.arg.unresolvedSymbolsInShlib == UnresolvedPolicy::ReportError &&
                  !ctx.arg.noinhibitExec
              ? DiagLevel::Err
              : DiagLevel::Warn;
      // Error on undefined symbols in a shared object, if all of its DT_NEEDED
      // entries are seen. These cases would otherwise lead to runtime errors
      // reported by the dynamic linker.
      //
      // ld.bfd traces all DT_NEEDED to emulate the logic of the dynamic linker
      // to catch more cases. That is too much for us. Our approach resembles
      // the one used in ld.gold, achieves a good balance to be useful but not
      // too smart.
      //
      // If a DSO reference is resolved by a SharedSymbol, but the SharedSymbol
      // is overridden by a hidden visibility Defined (which is later discarded
      // due to GC), don't report the diagnostic. However, this may indicate an
      // unintended SharedSymbol.
      for (SharedFile *file : ctx.sharedFiles) {
        bool allNeededIsKnown =
            llvm::all_of(file->dtNeeded, [&](StringRef needed) {
              return ctx.symtab->soNames.contains(CachedHashStringRef(needed));
            });
        if (!allNeededIsKnown)
          continue;
        for (Symbol *sym : file->requiredSymbols) {
          // dsoDefined: another DSO already defines it; not our problem.
          if (sym->dsoDefined)
            continue;
          if (sym->isUndefined() && !sym->isWeak()) {
            ELFSyncStream(ctx, diag)
                << "undefined reference: " << sym << "\n>>> referenced by "
                << file << " (disallowed by --no-allow-shlib-undefined)";
          } else if (sym->isDefined() &&
                     sym->computeBinding(ctx) == STB_LOCAL) {
            ELFSyncStream(ctx, diag)
                << "non-exported symbol '" << sym << "' in '" << sym->file
                << "' is referenced by DSO '" << file << "'";
          }
        }
      }
    }
  }

  {
    llvm::TimeTraceScope timeScope("Add symbols to symtabs");
    // Now that we have defined all possible global symbols including linker-
    // synthesized ones. Visit all symbols to give the finishing touches.
    for (Symbol *sym : ctx.symtab->getSymbols()) {
      if (!sym->isUsedInRegularObj || !includeInSymtab(ctx, *sym))
        continue;
      if (!ctx.arg.relocatable)
        sym->binding = sym->computeBinding(ctx);
      if (ctx.in.symTab)
        ctx.in.symTab->addSymbol(sym);

      // computeBinding might localize a symbol that was considered exported
      // but then synthesized as hidden (e.g. _DYNAMIC).
      if ((sym->isExported || sym->isPreemptible) && !sym->isLocal()) {
        ctx.partitions[sym->partition - 1].dynSymTab->addSymbol(sym);
        if (auto *file = dyn_cast<SharedFile>(sym->file))
          if (file->isNeeded && !sym->isUndefined())
            addVerneed(ctx, *sym);
      }
    }

    // We also need to scan the dynamic relocation tables of the other
    // partitions and add any referenced symbols to the partition's dynsym.
    for (Partition &part :
         MutableArrayRef<Partition>(ctx.partitions).slice(1)) {
      DenseSet<Symbol *> syms;
      for (const SymbolTableEntry &e : part.dynSymTab->getSymbols())
        syms.insert(e.sym);
      for (DynamicReloc &reloc : part.relaDyn->relocs)
        if (reloc.sym && reloc.needsDynSymIndex() &&
            syms.insert(reloc.sym).second)
          part.dynSymTab->addSymbol(reloc.sym);
    }
  }

  if (ctx.in.mipsGot)
    ctx.in.mipsGot->build();

  removeUnusedSyntheticSections(ctx);
  ctx.script->diagnoseOrphanHandling();
  ctx.script->diagnoseMissingSGSectionAddress();

  sortSections();

  // Create a list of OutputSections, assign sectionIndex, and populate
  // ctx.in.shStrTab. If -z nosectionheader is specified, drop non-ALLOC
  // sections.
  for (SectionCommand *cmd : ctx.script->sectionCommands)
    if (auto *osd = dyn_cast<OutputDesc>(cmd)) {
      OutputSection *osec = &osd->osec;
      if (!ctx.in.shStrTab && !(osec->flags & SHF_ALLOC))
        continue;
      ctx.outputSections.push_back(osec);
      osec->sectionIndex = ctx.outputSections.size();
      if (ctx.in.shStrTab)
        osec->shName = ctx.in.shStrTab->addString(osec->name);
    }

  // Prefer command line supplied address over other constraints.
  for (OutputSection *sec : ctx.outputSections) {
    auto i = ctx.arg.sectionStartMap.find(sec->name);
    if (i != ctx.arg.sectionStartMap.end())
      sec->addrExpr = [=] { return i->second; };
  }

  // This is a bit of a hack. A value of 0 means undef, so we set it
  // to 1 to make __ehdr_start defined. The section number is not
  // particularly relevant.
  ctx.out.elfHeader->sectionIndex = 1;
  ctx.out.elfHeader->size = sizeof(typename ELFT::Ehdr);

  // Binary and relocatable output does not have PHDRS.
  // The headers have to be created before finalize as that can influence the
  // image base and the dynamic section on mips includes the image base.
  if (!ctx.arg.relocatable && !ctx.arg.oFormatBinary) {
    for (Partition &part : ctx.partitions) {
      part.phdrs = ctx.script->hasPhdrsCommands() ? ctx.script->createPhdrs()
                                                  : createPhdrs(part);
      if (ctx.arg.emachine == EM_ARM) {
        // PT_ARM_EXIDX is the ARM EHABI equivalent of PT_GNU_EH_FRAME
        addPhdrForSection(part, SHT_ARM_EXIDX, PT_ARM_EXIDX, PF_R);
      }
      if (ctx.arg.emachine == EM_MIPS) {
        // Add separate segments for MIPS-specific sections.
        addPhdrForSection(part, SHT_MIPS_REGINFO, PT_MIPS_REGINFO, PF_R);
        addPhdrForSection(part, SHT_MIPS_OPTIONS, PT_MIPS_OPTIONS, PF_R);
        addPhdrForSection(part, SHT_MIPS_ABIFLAGS, PT_MIPS_ABIFLAGS, PF_R);
      }
      if (ctx.arg.emachine == EM_RISCV)
        addPhdrForSection(part, SHT_RISCV_ATTRIBUTES, PT_RISCV_ATTRIBUTES,
                          PF_R);
    }
    ctx.out.programHeaders->size =
        sizeof(Elf_Phdr) * ctx.mainPart->phdrs.size();

    // Find the TLS segment. This happens before the section layout loop so that
    // Android relocation packing can look up TLS symbol addresses. We only need
    // to care about the main partition here because all TLS symbols were moved
    // to the main partition (see MarkLive.cpp).
    for (auto &p : ctx.mainPart->phdrs)
      if (p->p_type == PT_TLS)
        ctx.tlsPhdr = p.get();
  }

  // Some symbols are defined in term of program headers. Now that we
  // have the headers, we can find out which sections they point to.
  setReservedSymbolSections();

  if (ctx.script->noCrossRefs.size()) {
    llvm::TimeTraceScope timeScope("Check NOCROSSREFS");
    checkNoCrossRefs<ELFT>(ctx);
  }

  {
    llvm::TimeTraceScope timeScope("Finalize synthetic sections");

    finalizeSynthetic(ctx, ctx.in.bss.get());
    finalizeSynthetic(ctx, ctx.in.bssRelRo.get());
    finalizeSynthetic(ctx, ctx.in.symTabShndx.get());
    finalizeSynthetic(ctx, ctx.in.shStrTab.get());
    finalizeSynthetic(ctx, ctx.in.strTab.get());
    finalizeSynthetic(ctx, ctx.in.got.get());
    finalizeSynthetic(ctx, ctx.in.mipsGot.get());
    finalizeSynthetic(ctx, ctx.in.igotPlt.get());
    finalizeSynthetic(ctx, ctx.in.gotPlt.get());
    finalizeSynthetic(ctx, ctx.in.relaPlt.get());
    finalizeSynthetic(ctx, ctx.in.plt.get());
    finalizeSynthetic(ctx, ctx.in.iplt.get());
    finalizeSynthetic(ctx, ctx.in.ppc32Got2.get());
    finalizeSynthetic(ctx, ctx.in.partIndex.get());

    // Dynamic section must be the last one in this list and dynamic
    // symbol table section (dynSymTab) must be the first one.
    for (Partition &part : ctx.partitions) {
      finalizeSynthetic(ctx, part.relaDyn.get());
      finalizeSynthetic(ctx, part.relrDyn.get());
      finalizeSynthetic(ctx, part.relrAuthDyn.get());

      finalizeSynthetic(ctx, part.dynSymTab.get());
      finalizeSynthetic(ctx, part.gnuHashTab.get());
      finalizeSynthetic(ctx, part.hashTab.get());
      finalizeSynthetic(ctx, part.verDef.get());
      finalizeSynthetic(ctx, part.ehFrameHdr.get());
      finalizeSynthetic(ctx, part.verSym.get());
      finalizeSynthetic(ctx, part.verNeed.get());
      finalizeSynthetic(ctx, part.dynamic.get());
    }
  }

  if (!ctx.script->hasSectionsCommand && !ctx.arg.relocatable)
    fixSectionAlignments();

  // This is used to:
  // 1) Create "thunks":
  //    Jump instructions in many ISAs have small displacements, and therefore
  //    they cannot jump to arbitrary addresses in memory. For example, RISC-V
  //    JAL instruction can target only +-1 MiB from PC. It is a linker's
  //    responsibility to create and insert small pieces of code between
  //    sections to extend the ranges if jump targets are out of range. Such
  //    code pieces are called "thunks".
  //
  //    We add thunks at this stage. We couldn't do this before this point
  //    because this is the earliest point where we know sizes of sections and
  //    their layouts (that are needed to determine if jump targets are in
  //    range).
  //
  // 2) Update the sections. We need to generate content that depends on the
  //    address of InputSections. For example, MIPS GOT section content or
  //    android packed relocations sections content.
  //
  // 3) Assign the final values for the linker script symbols. Linker scripts
  //    sometimes use forward symbol declarations. We want to set the correct
  //    values. They also might change after adding the thunks.
  finalizeAddressDependentContent();

  // All information needed for OutputSection part of Map file is available.
  if (errCount(ctx))
    return;

  {
    llvm::TimeTraceScope timeScope("Finalize synthetic sections");
    // finalizeAddressDependentContent may have added local symbols to the
    // static symbol table.
    finalizeSynthetic(ctx, ctx.in.symTab.get());
    finalizeSynthetic(ctx, ctx.in.debugNames.get());
    finalizeSynthetic(ctx, ctx.in.ppc64LongBranchTarget.get());
    finalizeSynthetic(ctx, ctx.in.armCmseSGSection.get());
  }

  // Relaxation to delete inter-basic block jumps created by basic block
  // sections. Run after ctx.in.symTab is finalized as optimizeBasicBlockJumps
  // can relax jump instructions based on symbol offset.
  if (ctx.arg.optimizeBBJumps)
    optimizeBasicBlockJumps();

  // Fill other section headers. The dynamic table is finalized
  // at the end because some tags like RELSZ depend on result
  // of finalizing other sections.
  for (OutputSection *sec : ctx.outputSections)
    sec->finalize(ctx);

  ctx.script->checkFinalScriptConditions();

  if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8) {
    // Big-endian BE8 images keep instructions little-endian; mapping symbols
    // record where code/data transitions happen.
    addArmInputSectionMappingSymbols(ctx);
    sortArmMappingSymbols(ctx);
  }
}
2185
2186// Ensure data sections are not mixed with executable sections when
2187// --execute-only is used. --execute-only make pages executable but not
2188// readable.
2189template <class ELFT> void Writer<ELFT>::checkExecuteOnly() {
2190 if (!ctx.arg.executeOnly)
2191 return;
2192
2193 SmallVector<InputSection *, 0> storage;
2194 for (OutputSection *osec : ctx.outputSections)
2195 if (osec->flags & SHF_EXECINSTR)
2196 for (InputSection *isec : getInputSections(os: *osec, storage))
2197 if (!(isec->flags & SHF_EXECINSTR))
2198 ErrAlways(ctx) << "cannot place " << isec << " into " << osec->name
2199 << ": --execute-only does not support intermingling "
2200 "data and code";
2201}
2202
2203// Check which input sections of RX output sections don't have the
2204// SHF_AARCH64_PURECODE or SHF_ARM_PURECODE flag set.
2205template <class ELFT> void Writer<ELFT>::checkExecuteOnlyReport() {
2206 if (ctx.arg.zExecuteOnlyReport == ReportPolicy::None)
2207 return;
2208
2209 auto reportUnless = [&](bool cond) -> ELFSyncStream {
2210 if (cond)
2211 return {ctx, DiagLevel::None};
2212 return {ctx, toDiagLevel(policy: ctx.arg.zExecuteOnlyReport)};
2213 };
2214
2215 uint64_t purecodeFlag =
2216 ctx.arg.emachine == EM_AARCH64 ? SHF_AARCH64_PURECODE : SHF_ARM_PURECODE;
2217 StringRef purecodeFlagName = ctx.arg.emachine == EM_AARCH64
2218 ? "SHF_AARCH64_PURECODE"
2219 : "SHF_ARM_PURECODE";
2220 SmallVector<InputSection *, 0> storage;
2221 for (OutputSection *osec : ctx.outputSections) {
2222 if (osec->getPhdrFlags() != (PF_R | PF_X))
2223 continue;
2224 for (InputSection *sec : getInputSections(os: *osec, storage)) {
2225 if (isa<SyntheticSection>(Val: sec))
2226 continue;
2227 reportUnless(sec->flags & purecodeFlag)
2228 << "-z execute-only-report: " << sec << " does not have "
2229 << purecodeFlagName << " flag set";
2230 }
2231 }
2232}
2233
2234// The linker is expected to define SECNAME_start and SECNAME_end
2235// symbols for a few sections. This function defines them.
2236template <class ELFT> void Writer<ELFT>::addStartEndSymbols() {
2237 // If the associated output section does not exist, there is ambiguity as to
2238 // how we define _start and _end symbols for an init/fini section. Users
2239 // expect no "undefined symbol" linker errors and loaders expect equal
2240 // st_value but do not particularly care whether the symbols are defined or
2241 // not. We retain the output section so that the section indexes will be
2242 // correct.
2243 auto define = [=](StringRef start, StringRef end, OutputSection *os) {
2244 if (os) {
2245 Defined *startSym = addOptionalRegular(ctx, name: start, sec: os, val: 0);
2246 Defined *stopSym = addOptionalRegular(ctx, name: end, sec: os, val: -1);
2247 if (startSym || stopSym)
2248 os->usedInExpression = true;
2249 } else {
2250 addOptionalRegular(ctx, name: start, sec: ctx.out.elfHeader.get(), val: 0);
2251 addOptionalRegular(ctx, name: end, sec: ctx.out.elfHeader.get(), val: 0);
2252 }
2253 };
2254
2255 define("__preinit_array_start", "__preinit_array_end", ctx.out.preinitArray);
2256 define("__init_array_start", "__init_array_end", ctx.out.initArray);
2257 define("__fini_array_start", "__fini_array_end", ctx.out.finiArray);
2258
2259 // As a special case, don't unnecessarily retain .ARM.exidx, which would
2260 // create an empty PT_ARM_EXIDX.
2261 if (OutputSection *sec = findSection(ctx, name: ".ARM.exidx"))
2262 define("__exidx_start", "__exidx_end", sec);
2263}
2264
2265// If a section name is valid as a C identifier (which is rare because of
2266// the leading '.'), linkers are expected to define __start_<secname> and
2267// __stop_<secname> symbols. They are at beginning and end of the section,
2268// respectively. This is not requested by the ELF standard, but GNU ld and
2269// gold provide the feature, and used by many programs.
2270template <class ELFT>
2271void Writer<ELFT>::addStartStopSymbols(OutputSection &osec) {
2272 StringRef s = osec.name;
2273 if (!isValidCIdentifier(s))
2274 return;
2275 StringSaver &ss = ctx.saver;
2276 Defined *startSym = addOptionalRegular(ctx, name: ss.save(S: "__start_" + s), sec: &osec, val: 0,
2277 stOther: ctx.arg.zStartStopVisibility);
2278 Defined *stopSym = addOptionalRegular(ctx, name: ss.save(S: "__stop_" + s), sec: &osec, val: -1,
2279 stOther: ctx.arg.zStartStopVisibility);
2280 if (startSym || stopSym)
2281 osec.usedInExpression = true;
2282}
2283
2284static bool needsPtLoad(OutputSection *sec) {
2285 if (!(sec->flags & SHF_ALLOC))
2286 return false;
2287
2288 // Don't allocate VA space for TLS NOBITS sections. The PT_TLS PHDR is
2289 // responsible for allocating space for them, not the PT_LOAD that
2290 // contains the TLS initialization image.
2291 if ((sec->flags & SHF_TLS) && sec->type == SHT_NOBITS)
2292 return false;
2293 return true;
2294}
2295
2296// Adjust phdr flags according to certain options.
2297static uint64_t computeFlags(Ctx &ctx, uint64_t flags) {
2298 if (ctx.arg.omagic)
2299 return PF_R | PF_W | PF_X;
2300 if (ctx.arg.executeOnly && (flags & PF_X))
2301 return flags & ~PF_R;
2302 return flags;
2303}
2304
2305// Decide which program headers to create and which sections to include in each
2306// one.
2307template <class ELFT>
2308SmallVector<std::unique_ptr<PhdrEntry>, 0>
2309Writer<ELFT>::createPhdrs(Partition &part) {
2310 SmallVector<std::unique_ptr<PhdrEntry>, 0> ret;
2311 auto addHdr = [&, &ctx = ctx](unsigned type, unsigned flags) -> PhdrEntry * {
2312 ret.push_back(Elt: std::make_unique<PhdrEntry>(args&: ctx, args&: type, args&: flags));
2313 return ret.back().get();
2314 };
2315
2316 unsigned partNo = part.getNumber(ctx);
2317 bool isMain = partNo == 1;
2318
2319 // Add the first PT_LOAD segment for regular output sections.
2320 uint64_t flags = computeFlags(ctx, flags: PF_R);
2321 PhdrEntry *load = nullptr;
2322
2323 // nmagic or omagic output does not have PT_PHDR, PT_INTERP, or the readonly
2324 // PT_LOAD.
2325 if (!ctx.arg.nmagic && !ctx.arg.omagic) {
2326 // The first phdr entry is PT_PHDR which describes the program header
2327 // itself.
2328 if (isMain)
2329 addHdr(PT_PHDR, PF_R)->add(ctx.out.programHeaders.get());
2330 else
2331 addHdr(PT_PHDR, PF_R)->add(part.programHeaders->getParent());
2332
2333 // PT_INTERP must be the second entry if exists.
2334 if (OutputSection *cmd = findSection(ctx, name: ".interp", partition: partNo))
2335 addHdr(PT_INTERP, cmd->getPhdrFlags())->add(cmd);
2336
2337 // Add the headers. We will remove them if they don't fit.
2338 // In the other partitions the headers are ordinary sections, so they don't
2339 // need to be added here.
2340 if (isMain) {
2341 load = addHdr(PT_LOAD, flags);
2342 load->add(sec: ctx.out.elfHeader.get());
2343 load->add(sec: ctx.out.programHeaders.get());
2344 }
2345 }
2346
2347 // PT_GNU_RELRO includes all sections that should be marked as
2348 // read-only by dynamic linker after processing relocations.
2349 // Current dynamic loaders only support one PT_GNU_RELRO PHDR, give
2350 // an error message if more than one PT_GNU_RELRO PHDR is required.
2351 auto relRo = std::make_unique<PhdrEntry>(args&: ctx, args: PT_GNU_RELRO, args: PF_R);
2352 bool inRelroPhdr = false;
2353 OutputSection *relroEnd = nullptr;
2354 for (OutputSection *sec : ctx.outputSections) {
2355 if (sec->partition != partNo || !needsPtLoad(sec))
2356 continue;
2357 if (isRelroSection(ctx, sec)) {
2358 inRelroPhdr = true;
2359 if (!relroEnd)
2360 relRo->add(sec);
2361 else
2362 ErrAlways(ctx) << "section: " << sec->name
2363 << " is not contiguous with other relro" << " sections";
2364 } else if (inRelroPhdr) {
2365 inRelroPhdr = false;
2366 relroEnd = sec;
2367 }
2368 }
2369 relRo->p_align = 1;
2370
2371 for (OutputSection *sec : ctx.outputSections) {
2372 if (!needsPtLoad(sec))
2373 continue;
2374
2375 // Normally, sections in partitions other than the current partition are
2376 // ignored. But partition number 255 is a special case: it contains the
2377 // partition end marker (.part.end). It needs to be added to the main
2378 // partition so that a segment is created for it in the main partition,
2379 // which will cause the dynamic loader to reserve space for the other
2380 // partitions.
2381 if (sec->partition != partNo) {
2382 if (isMain && sec->partition == 255)
2383 addHdr(PT_LOAD, computeFlags(ctx, flags: sec->getPhdrFlags()))->add(sec);
2384 continue;
2385 }
2386
2387 // Segments are contiguous memory regions that has the same attributes
2388 // (e.g. executable or writable). There is one phdr for each segment.
2389 // Therefore, we need to create a new phdr when the next section has
2390 // incompatible flags or is loaded at a discontiguous address or memory
2391 // region using AT or AT> linker script command, respectively.
2392 //
2393 // As an exception, we don't create a separate load segment for the ELF
2394 // headers, even if the first "real" output has an AT or AT> attribute.
2395 //
2396 // In addition, NOBITS sections should only be placed at the end of a LOAD
2397 // segment (since it's represented as p_filesz < p_memsz). If we have a
2398 // not-NOBITS section after a NOBITS, we create a new LOAD for the latter
2399 // even if flags match, so as not to require actually writing the
2400 // supposed-to-be-NOBITS section to the output file. (However, we cannot do
2401 // so when hasSectionsCommand, since we cannot introduce the extra alignment
2402 // needed to create a new LOAD)
2403 uint64_t newFlags = computeFlags(ctx, flags: sec->getPhdrFlags());
2404 uint64_t incompatible = flags ^ newFlags;
2405 if (!(newFlags & PF_W)) {
2406 // When --no-rosegment is specified, RO and RX sections are compatible.
2407 if (ctx.arg.singleRoRx)
2408 incompatible &= ~PF_X;
2409 // When --no-xosegment is specified (the default), XO and RX sections are
2410 // compatible.
2411 if (ctx.arg.singleXoRx)
2412 incompatible &= ~PF_R;
2413 }
2414 if (incompatible)
2415 load = nullptr;
2416
2417 bool sameLMARegion =
2418 load && !sec->lmaExpr && sec->lmaRegion == load->firstSec->lmaRegion;
2419 if (load && sec != relroEnd &&
2420 sec->memRegion == load->firstSec->memRegion &&
2421 (sameLMARegion || load->lastSec == ctx.out.programHeaders.get()) &&
2422 (ctx.script->hasSectionsCommand || sec->type == SHT_NOBITS ||
2423 load->lastSec->type != SHT_NOBITS)) {
2424 load->p_flags |= newFlags;
2425 } else {
2426 load = addHdr(PT_LOAD, newFlags);
2427 flags = newFlags;
2428 }
2429
2430 load->add(sec);
2431 }
2432
2433 // Add a TLS segment if any.
2434 auto tlsHdr = std::make_unique<PhdrEntry>(args&: ctx, args: PT_TLS, args: PF_R);
2435 for (OutputSection *sec : ctx.outputSections)
2436 if (sec->partition == partNo && sec->flags & SHF_TLS)
2437 tlsHdr->add(sec);
2438 if (tlsHdr->firstSec)
2439 ret.push_back(Elt: std::move(tlsHdr));
2440
2441 // Add an entry for .dynamic.
2442 if (OutputSection *sec = part.dynamic->getParent())
2443 addHdr(PT_DYNAMIC, sec->getPhdrFlags())->add(sec);
2444
2445 if (relRo->firstSec)
2446 ret.push_back(Elt: std::move(relRo));
2447
2448 // PT_GNU_EH_FRAME is a special section pointing on .eh_frame_hdr.
2449 if (part.ehFrameHdr && part.ehFrameHdr->isNeeded())
2450 addHdr(PT_GNU_EH_FRAME, part.ehFrameHdr->getParent()->getPhdrFlags())
2451 ->add(part.ehFrameHdr->getParent());
2452
2453 if (ctx.arg.osabi == ELFOSABI_OPENBSD) {
2454 // PT_OPENBSD_MUTABLE makes the dynamic linker fill the segment with
2455 // zero data, like bss, but it can be treated differently.
2456 if (OutputSection *cmd = findSection(ctx, name: ".openbsd.mutable", partition: partNo))
2457 addHdr(PT_OPENBSD_MUTABLE, cmd->getPhdrFlags())->add(cmd);
2458
2459 // PT_OPENBSD_RANDOMIZE makes the dynamic linker fill the segment
2460 // with random data.
2461 if (OutputSection *cmd = findSection(ctx, name: ".openbsd.randomdata", partition: partNo))
2462 addHdr(PT_OPENBSD_RANDOMIZE, cmd->getPhdrFlags())->add(cmd);
2463
2464 // PT_OPENBSD_SYSCALLS makes the kernel and dynamic linker register
2465 // system call sites.
2466 if (OutputSection *cmd = findSection(ctx, name: ".openbsd.syscalls", partition: partNo))
2467 addHdr(PT_OPENBSD_SYSCALLS, cmd->getPhdrFlags())->add(cmd);
2468 }
2469
2470 if (ctx.arg.zGnustack != GnuStackKind::None) {
2471 // PT_GNU_STACK is a special section to tell the loader to make the
2472 // pages for the stack non-executable. If you really want an executable
2473 // stack, you can pass -z execstack, but that's not recommended for
2474 // security reasons.
2475 unsigned perm = PF_R | PF_W;
2476 if (ctx.arg.zGnustack == GnuStackKind::Exec)
2477 perm |= PF_X;
2478 addHdr(PT_GNU_STACK, perm)->p_memsz = ctx.arg.zStackSize;
2479 }
2480
2481 // PT_OPENBSD_NOBTCFI is an OpenBSD-specific header to mark that the
2482 // executable is expected to violate branch-target CFI checks.
2483 if (ctx.arg.zNoBtCfi)
2484 addHdr(PT_OPENBSD_NOBTCFI, PF_X);
2485
2486 // PT_OPENBSD_WXNEEDED is a OpenBSD-specific header to mark the executable
2487 // is expected to perform W^X violations, such as calling mprotect(2) or
2488 // mmap(2) with PROT_WRITE | PROT_EXEC, which is prohibited by default on
2489 // OpenBSD.
2490 if (ctx.arg.zWxneeded)
2491 addHdr(PT_OPENBSD_WXNEEDED, PF_X);
2492
2493 if (OutputSection *cmd = findSection(ctx, name: ".note.gnu.property", partition: partNo))
2494 addHdr(PT_GNU_PROPERTY, PF_R)->add(cmd);
2495
2496 // Create one PT_NOTE per a group of contiguous SHT_NOTE sections with the
2497 // same alignment.
2498 PhdrEntry *note = nullptr;
2499 for (OutputSection *sec : ctx.outputSections) {
2500 if (sec->partition != partNo)
2501 continue;
2502 if (sec->type == SHT_NOTE && (sec->flags & SHF_ALLOC)) {
2503 if (!note || sec->lmaExpr || note->lastSec->addralign != sec->addralign)
2504 note = addHdr(PT_NOTE, PF_R);
2505 note->add(sec);
2506 } else {
2507 note = nullptr;
2508 }
2509 }
2510 return ret;
2511}
2512
2513template <class ELFT>
2514void Writer<ELFT>::addPhdrForSection(Partition &part, unsigned shType,
2515 unsigned pType, unsigned pFlags) {
2516 unsigned partNo = part.getNumber(ctx);
2517 auto i = llvm::find_if(ctx.outputSections, [=](OutputSection *cmd) {
2518 return cmd->partition == partNo && cmd->type == shType;
2519 });
2520 if (i == ctx.outputSections.end())
2521 return;
2522
2523 auto entry = std::make_unique<PhdrEntry>(args&: ctx, args&: pType, args&: pFlags);
2524 entry->add(sec: *i);
2525 part.phdrs.push_back(Elt: std::move(entry));
2526}
2527
// Place the first section of each PT_LOAD to a different page (of maxPageSize).
// This is achieved by assigning an alignment expression to addrExpr of each
// such section.
template <class ELFT> void Writer<ELFT>::fixSectionAlignments() {
  // The previously processed PT_LOAD of the current partition. It is captured
  // by reference in pageAlign and reset to nullptr at the top of the
  // per-partition loop below, before any use.
  const PhdrEntry *prev;
  auto pageAlign = [&, &ctx = this->ctx](const PhdrEntry *p) {
    OutputSection *cmd = p->firstSec;
    if (!cmd)
      return;
    // Pin the section's alignment to its current addralign value.
    cmd->alignExpr = [align = cmd->addralign]() { return align; };
    if (!cmd->addrExpr) {
      // Prefer advancing to align(dot, maxPageSize) + dot%maxPageSize to avoid
      // padding in the file contents.
      //
      // When -z separate-code is used we must not have any overlap in pages
      // between an executable segment and a non-executable segment. We align to
      // the next maximum page size boundary on transitions between executable
      // and non-executable segments.
      //
      // SHT_LLVM_PART_EHDR marks the start of a partition. The partition
      // sections will be extracted to a separate file. Align to the next
      // maximum page size boundary so that we can find the ELF header at the
      // start. We cannot benefit from overlapping p_offset ranges with the
      // previous segment anyway.
      if (ctx.arg.zSeparate == SeparateSegmentKind::Loadable ||
          (ctx.arg.zSeparate == SeparateSegmentKind::Code && prev &&
           (prev->p_flags & PF_X) != (p->p_flags & PF_X)) ||
          cmd->type == SHT_LLVM_PART_EHDR)
        cmd->addrExpr = [&ctx = this->ctx] {
          return alignToPowerOf2(ctx.script->getDot(), ctx.arg.maxPageSize);
        };
      // PT_TLS is at the start of the first RW PT_LOAD. If `p` includes PT_TLS,
      // it must be the RW. Align to p_align(PT_TLS) to make sure
      // p_vaddr(PT_LOAD)%p_align(PT_LOAD) = 0. Otherwise, if
      // sh_addralign(.tdata) < sh_addralign(.tbss), we will set p_align(PT_TLS)
      // to sh_addralign(.tbss), while p_vaddr(PT_TLS)=p_vaddr(PT_LOAD) may not
      // be congruent to 0 modulo p_align(PT_TLS).
      //
      // Technically this is not required, but as of 2019, some dynamic loaders
      // don't handle p_vaddr%p_align != 0 correctly, e.g. glibc (i386 and
      // x86-64) doesn't make runtime address congruent to p_vaddr modulo
      // p_align for dynamic TLS blocks (PR/24606), FreeBSD rtld has the same
      // bug, musl (TLS Variant 1 architectures) before 1.1.23 handled TLS
      // blocks correctly. We need to keep the workaround for a while.
      else if (ctx.tlsPhdr && ctx.tlsPhdr->firstSec == p->firstSec)
        cmd->addrExpr = [&ctx] {
          return alignToPowerOf2(ctx.script->getDot(), ctx.arg.maxPageSize) +
                 alignToPowerOf2(ctx.script->getDot() % ctx.arg.maxPageSize,
                                 ctx.tlsPhdr->p_align);
        };
      else
        cmd->addrExpr = [&ctx] {
          return alignToPowerOf2(ctx.script->getDot(), ctx.arg.maxPageSize) +
                 ctx.script->getDot() % ctx.arg.maxPageSize;
        };
    }
  };

  // Apply the address expression to the first section of every non-empty
  // PT_LOAD, tracking the previous PT_LOAD for the -z separate-code check.
  for (Partition &part : ctx.partitions) {
    prev = nullptr;
    for (auto &p : part.phdrs)
      if (p->p_type == PT_LOAD && p->firstSec) {
        pageAlign(p.get());
        prev = p.get();
      }
  }
}
2595
// Compute an in-file position for a given section. The file offset must be the
// same with its virtual address modulo the page size, so that the loader can
// load executables without any address adjustment.
//
// NOTE: the order of the checks below matters; e.g. the leading-section check
// must run before the NOBITS check so that the first section of a PT_LOAD gets
// the congruent offset even if it is NOBITS.
static uint64_t computeFileOffset(Ctx &ctx, OutputSection *os, uint64_t off) {
  // The first section in a PT_LOAD has to have congruent offset and address
  // modulo the maximum page size.
  if (os->ptLoad && os->ptLoad->firstSec == os)
    return alignTo(Value: off, Align: os->ptLoad->p_align, Skew: os->addr);

  // File offsets are not significant for .bss sections other than the first one
  // in a PT_LOAD/PT_TLS. By convention, we keep section offsets monotonically
  // increasing rather than setting to zero.
  if (os->type == SHT_NOBITS && (!ctx.tlsPhdr || ctx.tlsPhdr->firstSec != os))
    return off;

  // If the section is not in a PT_LOAD, we just have to align it.
  if (!os->ptLoad)
    return alignToPowerOf2(Value: off, Align: os->addralign);

  // If two sections share the same PT_LOAD the file offset is calculated
  // using this formula: Off2 = Off1 + (VA2 - VA1).
  OutputSection *first = os->ptLoad->firstSec;
  return first->offset + os->addr - first->addr;
}
2620
2621template <class ELFT> void Writer<ELFT>::assignFileOffsetsBinary() {
2622 // Compute the minimum LMA of all non-empty non-NOBITS sections as minAddr.
2623 auto needsOffset = [](OutputSection &sec) {
2624 return sec.type != SHT_NOBITS && (sec.flags & SHF_ALLOC) && sec.size > 0;
2625 };
2626 uint64_t minAddr = UINT64_MAX;
2627 for (OutputSection *sec : ctx.outputSections)
2628 if (needsOffset(*sec)) {
2629 sec->offset = sec->getLMA();
2630 minAddr = std::min(a: minAddr, b: sec->offset);
2631 }
2632
2633 // Sections are laid out at LMA minus minAddr.
2634 fileSize = 0;
2635 for (OutputSection *sec : ctx.outputSections)
2636 if (needsOffset(*sec)) {
2637 sec->offset -= minAddr;
2638 fileSize = std::max(a: fileSize, b: sec->offset + sec->size);
2639 }
2640}
2641
2642static std::string rangeToString(uint64_t addr, uint64_t len) {
2643 return "[0x" + utohexstr(X: addr) + ", 0x" + utohexstr(X: addr + len - 1) + "]";
2644}
2645
// Assign file offsets to output sections.
template <class ELFT> void Writer<ELFT>::assignFileOffsets() {
  // The program headers immediately follow the ELF header in the file.
  ctx.out.programHeaders->offset = ctx.out.elfHeader->size;
  uint64_t off = ctx.out.elfHeader->size + ctx.out.programHeaders->size;

  // Find the last executable PT_LOAD across all partitions; it is used below
  // to decide where -z separate-code padding must be inserted.
  PhdrEntry *lastRX = nullptr;
  for (Partition &part : ctx.partitions)
    for (auto &p : part.phdrs)
      if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
        lastRX = p.get();

  // Layout SHF_ALLOC sections before non-SHF_ALLOC sections. A non-SHF_ALLOC
  // will not occupy file offsets contained by a PT_LOAD.
  for (OutputSection *sec : ctx.outputSections) {
    if (!(sec->flags & SHF_ALLOC))
      continue;
    off = computeFileOffset(ctx, os: sec, off);
    sec->offset = off;
    // NOBITS sections occupy no file space, so only advance for others.
    if (sec->type != SHT_NOBITS)
      off += sec->size;

    // If this is a last section of the last executable segment and that
    // segment is the last loadable segment, align the offset of the
    // following section to avoid loading non-segments parts of the file.
    if (ctx.arg.zSeparate != SeparateSegmentKind::None && lastRX &&
        lastRX->lastSec == sec)
      off = alignToPowerOf2(Value: off, Align: ctx.arg.maxPageSize);
  }
  // Non-allocated sections follow, each simply aligned to its addralign.
  for (OutputSection *osec : ctx.outputSections) {
    if (osec->flags & SHF_ALLOC)
      continue;
    osec->offset = alignToPowerOf2(Value: off, Align: osec->addralign);
    off = osec->offset + osec->size;
  }

  // The section header table comes last; the +1 accounts for the null
  // section header at index 0.
  sectionHeaderOff = alignToPowerOf2(Value: off, Align: ctx.arg.wordsize);
  fileSize =
      sectionHeaderOff + (ctx.outputSections.size() + 1) * sizeof(Elf_Shdr);

  // Our logic assumes that sections have rising VA within the same segment.
  // With use of linker scripts it is possible to violate this rule and get file
  // offset overlaps or overflows. That should never happen with a valid script
  // which does not move the location counter backwards and usually scripts do
  // not do that. Unfortunately, there are apps in the wild, for example, Linux
  // kernel, which control segment distribution explicitly and move the counter
  // backwards, so we have to allow doing that to support linking them. We
  // perform non-critical checks for overlaps in checkSectionOverlap(), but here
  // we want to prevent file size overflows because it would crash the linker.
  for (OutputSection *sec : ctx.outputSections) {
    if (sec->type == SHT_NOBITS)
      continue;
    if ((sec->offset > fileSize) || (sec->offset + sec->size > fileSize))
      ErrAlways(ctx) << "unable to place section " << sec->name
                     << " at file offset "
                     << rangeToString(addr: sec->offset, len: sec->size)
                     << "; check your linker script for overflows";
  }
}
2704
// Finalize the program headers. We call this function after we assign
// file offsets and VAs to all sections.
template <class ELFT> void Writer<ELFT>::setPhdrs(Partition &part) {
  for (std::unique_ptr<PhdrEntry> &p : part.phdrs) {
    OutputSection *first = p->firstSec;
    OutputSection *last = p->lastSec;

    // .ARM.exidx sections may not be within a single .ARM.exidx
    // output section. We always want to describe just the
    // SyntheticSection.
    if (part.armExidx && p->p_type == PT_ARM_EXIDX) {
      p->p_filesz = part.armExidx->getSize();
      p->p_memsz = p->p_filesz;
      p->p_offset = first->offset + part.armExidx->outSecOff;
      p->p_vaddr = first->addr + part.armExidx->outSecOff;
      p->p_align = part.armExidx->addralign;
      if (part.elfHeader)
        p->p_offset -= part.elfHeader->getParent()->offset;

      if (!p->hasLMA)
        p->p_paddr = first->getLMA() + part.armExidx->outSecOff;
      // NOTE(review): this returns (not continues), skipping any phdrs after
      // PT_ARM_EXIDX — presumably PT_ARM_EXIDX is always appended last (see
      // addPhdrForSection); confirm before relying on other ordering.
      return;
    }

    // Entries with no section (e.g. PT_GNU_STACK) keep whatever p_* values
    // were set when they were created.
    if (first) {
      // p_filesz excludes a trailing NOBITS section's size; p_memsz is the
      // full VA span including NOBITS.
      p->p_filesz = last->offset - first->offset;
      if (last->type != SHT_NOBITS)
        p->p_filesz += last->size;

      p->p_memsz = last->addr + last->size - first->addr;
      p->p_offset = first->offset;
      p->p_vaddr = first->addr;

      // File offsets in partitions other than the main partition are relative
      // to the offset of the ELF headers. Perform that adjustment now.
      if (part.elfHeader)
        p->p_offset -= part.elfHeader->getParent()->offset;

      if (!p->hasLMA)
        p->p_paddr = first->getLMA();
    }
  }
}
2748
// A helper struct for checkSectionOverlap.
namespace {
struct SectionOffset {
  // Section being checked for overlap.
  OutputSection *sec;
  // Start position for this check: a file offset, virtual address, or load
  // address, depending on which overlap pass is being run.
  uint64_t offset;
};
} // namespace
2756
2757// Check whether sections overlap for a specific address range (file offsets,
2758// load and virtual addresses).
2759static void checkOverlap(Ctx &ctx, StringRef name,
2760 std::vector<SectionOffset> &sections,
2761 bool isVirtualAddr) {
2762 llvm::sort(C&: sections, Comp: [=](const SectionOffset &a, const SectionOffset &b) {
2763 return a.offset < b.offset;
2764 });
2765
2766 // Finding overlap is easy given a vector is sorted by start position.
2767 // If an element starts before the end of the previous element, they overlap.
2768 for (size_t i = 1, end = sections.size(); i < end; ++i) {
2769 SectionOffset a = sections[i - 1];
2770 SectionOffset b = sections[i];
2771 if (b.offset >= a.offset + a.sec->size)
2772 continue;
2773
2774 // If both sections are in OVERLAY we allow the overlapping of virtual
2775 // addresses, because it is what OVERLAY was designed for.
2776 if (isVirtualAddr && a.sec->inOverlay && b.sec->inOverlay)
2777 continue;
2778
2779 Err(ctx) << "section " << a.sec->name << " " << name
2780 << " range overlaps with " << b.sec->name << "\n>>> "
2781 << a.sec->name << " range is "
2782 << rangeToString(addr: a.offset, len: a.sec->size) << "\n>>> " << b.sec->name
2783 << " range is " << rangeToString(addr: b.offset, len: b.sec->size);
2784 }
2785}
2786
// Check for overlapping sections and address overflows.
//
// In this function we check that none of the output sections have overlapping
// file offsets. For SHF_ALLOC sections we also check that the load address
// ranges and the virtual address ranges don't overlap
template <class ELFT> void Writer<ELFT>::checkSections() {
  // First, check that section's VAs fit in available address space for target.
  // The first condition catches 64-bit wraparound; the second catches 32-bit
  // targets whose sections extend past 4 GiB.
  for (OutputSection *os : ctx.outputSections)
    if ((os->addr + os->size < os->addr) ||
        (!ELFT::Is64Bits && os->addr + os->size > uint64_t(UINT32_MAX) + 1))
      Err(ctx) << "section " << os->name << " at 0x"
               << utohexstr(X: os->addr, LowerCase: true) << " of size 0x"
               << utohexstr(X: os->size, LowerCase: true)
               << " exceeds available address space";

  // Check for overlapping file offsets. In this case we need to skip any
  // section marked as SHT_NOBITS. These sections don't actually occupy space in
  // the file so Sec->Offset + Sec->Size can overlap with others. If --oformat
  // binary is specified only add SHF_ALLOC sections are added to the output
  // file so we skip any non-allocated sections in that case.
  std::vector<SectionOffset> fileOffs;
  for (OutputSection *sec : ctx.outputSections)
    if (sec->size > 0 && sec->type != SHT_NOBITS &&
        (!ctx.arg.oFormatBinary || (sec->flags & SHF_ALLOC)))
      fileOffs.push_back(x: {.sec: sec, .offset: sec->offset});
  checkOverlap(ctx, name: "file", sections&: fileOffs, isVirtualAddr: false);

  // When linking with -r there is no need to check for overlapping virtual/load
  // addresses since those addresses will only be assigned when the final
  // executable/shared object is created.
  if (ctx.arg.relocatable)
    return;

  // Checking for overlapping virtual and load addresses only needs to take
  // into account SHF_ALLOC sections since others will not be loaded.
  // Furthermore, we also need to skip SHF_TLS sections since these will be
  // mapped to other addresses at runtime and can therefore have overlapping
  // ranges in the file.
  std::vector<SectionOffset> vmas;
  for (OutputSection *sec : ctx.outputSections)
    if (sec->size > 0 && (sec->flags & SHF_ALLOC) && !(sec->flags & SHF_TLS))
      vmas.push_back(x: {.sec: sec, .offset: sec->addr});
  checkOverlap(ctx, name: "virtual address", sections&: vmas, isVirtualAddr: true);

  // Finally, check that the load addresses don't overlap. This will usually be
  // the same as the virtual addresses but can be different when using a linker
  // script with AT().
  std::vector<SectionOffset> lmas;
  for (OutputSection *sec : ctx.outputSections)
    if (sec->size > 0 && (sec->flags & SHF_ALLOC) && !(sec->flags & SHF_TLS))
      lmas.push_back(x: {.sec: sec, .offset: sec->getLMA()});
  checkOverlap(ctx, name: "load address", sections&: lmas, isVirtualAddr: false);
}
2840
2841// The entry point address is chosen in the following ways.
2842//
2843// 1. the '-e' entry command-line option;
2844// 2. the ENTRY(symbol) command in a linker control script;
2845// 3. the value of the symbol _start, if present;
2846// 4. the number represented by the entry symbol, if it is a number;
2847// 5. the address 0.
2848static uint64_t getEntryAddr(Ctx &ctx) {
2849 // Case 1, 2 or 3
2850 if (Symbol *b = ctx.symtab->find(name: ctx.arg.entry))
2851 return b->getVA(ctx);
2852
2853 // Case 4
2854 uint64_t addr;
2855 if (to_integer(S: ctx.arg.entry, Num&: addr))
2856 return addr;
2857
2858 // Case 5
2859 if (ctx.arg.warnMissingEntry)
2860 Warn(ctx) << "cannot find entry symbol " << ctx.arg.entry
2861 << "; not setting start address";
2862 return 0;
2863}
2864
2865static uint16_t getELFType(Ctx &ctx) {
2866 if (ctx.arg.isPic)
2867 return ET_DYN;
2868 if (ctx.arg.relocatable)
2869 return ET_REL;
2870 return ET_EXEC;
2871}
2872
// Write the ELF header, program headers, and the section header table for the
// main partition into the output buffer.
template <class ELFT> void Writer<ELFT>::writeHeader() {
  writeEhdr<ELFT>(ctx, ctx.bufferStart, *ctx.mainPart);
  writePhdrs<ELFT>(ctx.bufferStart + sizeof(Elf_Ehdr), *ctx.mainPart);

  auto *eHdr = reinterpret_cast<Elf_Ehdr *>(ctx.bufferStart);
  eHdr->e_type = getELFType(ctx);
  eHdr->e_entry = getEntryAddr(ctx);

  // If -z nosectionheader is specified, omit the section header table.
  if (!ctx.in.shStrTab)
    return;
  eHdr->e_shoff = sectionHeaderOff;

  // Write the section header table.
  //
  // The ELF header can only store numbers up to SHN_LORESERVE in the e_shnum
  // and e_shstrndx fields. When the value of one of these fields exceeds
  // SHN_LORESERVE ELF requires us to put sentinel values in the ELF header and
  // use fields in the section header at index 0 to store
  // the value. The sentinel values and fields are:
  // e_shnum = 0, SHdrs[0].sh_size = number of sections.
  // e_shstrndx = SHN_XINDEX, SHdrs[0].sh_link = .shstrtab section index.
  auto *sHdrs = reinterpret_cast<Elf_Shdr *>(ctx.bufferStart + eHdr->e_shoff);
  // +1 accounts for the null section header at index 0.
  size_t num = ctx.outputSections.size() + 1;
  if (num >= SHN_LORESERVE)
    sHdrs->sh_size = num;
  else
    eHdr->e_shnum = num;

  uint32_t strTabIndex = ctx.in.shStrTab->getParent()->sectionIndex;
  if (strTabIndex >= SHN_LORESERVE) {
    sHdrs->sh_link = strTabIndex;
    eHdr->e_shstrndx = SHN_XINDEX;
  } else {
    eHdr->e_shstrndx = strTabIndex;
  }

  // Emit one section header per output section, starting at index 1 (index 0
  // is the null/escape header written above).
  for (OutputSection *sec : ctx.outputSections)
    sec->writeHeaderTo<ELFT>(++sHdrs);
}
2913
2914// Open a result file.
2915template <class ELFT> void Writer<ELFT>::openFile() {
2916 uint64_t maxSize = ctx.arg.is64 ? INT64_MAX : UINT32_MAX;
2917 if (fileSize != size_t(fileSize) || maxSize < fileSize) {
2918 std::string msg;
2919 raw_string_ostream s(msg);
2920 s << "output file too large: " << fileSize << " bytes\n"
2921 << "section sizes:\n";
2922 for (OutputSection *os : ctx.outputSections)
2923 s << os->name << ' ' << os->size << "\n";
2924 ErrAlways(ctx) << msg;
2925 return;
2926 }
2927
2928 unlinkAsync(path: ctx.arg.outputFile);
2929 unsigned flags = 0;
2930 if (!ctx.arg.relocatable)
2931 flags |= FileOutputBuffer::F_executable;
2932 if (ctx.arg.mmapOutputFile)
2933 flags |= FileOutputBuffer::F_mmap;
2934 Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
2935 FileOutputBuffer::create(FilePath: ctx.arg.outputFile, Size: fileSize, Flags: flags);
2936
2937 if (!bufferOrErr) {
2938 ErrAlways(ctx) << "failed to open " << ctx.arg.outputFile << ": "
2939 << bufferOrErr.takeError();
2940 return;
2941 }
2942 buffer = std::move(*bufferOrErr);
2943 ctx.bufferStart = buffer->getBufferStart();
2944}
2945
2946template <class ELFT> void Writer<ELFT>::writeSectionsBinary() {
2947 parallel::TaskGroup tg;
2948 for (OutputSection *sec : ctx.outputSections)
2949 if (sec->flags & SHF_ALLOC)
2950 sec->writeTo<ELFT>(ctx, ctx.bufferStart + sec->offset, tg);
2951}
2952
// Stamp whole copies of the 4-byte trap instruction over [i, end). A trailing
// remainder of fewer than 4 bytes is left untouched.
static void fillTrap(std::array<uint8_t, 4> trapInstr, uint8_t *i,
                     uint8_t *end) {
  while (end - i >= 4) {
    memcpy(i, trapInstr.data(), 4);
    i += 4;
  }
}
2958
// Fill executable segments with trap instructions. This includes both the
// gaps between sections (due to alignment) and the tail padding to the page
// boundary. Even though it is not required by any standard, it is in general
// a good thing to do for security reasons.
template <class ELFT> void Writer<ELFT>::writeTrapInstr() {
  for (Partition &part : ctx.partitions) {
    // Fill gaps between consecutive sections in the same executable segment.
    OutputSection *prev = nullptr;
    for (OutputSection *sec : ctx.outputSections) {
      PhdrEntry *p = sec->ptLoad;
      if (!p || !(p->p_flags & PF_X))
        continue;
      // Gap start is rounded down to 4 so only whole trap instructions are
      // written (fillTrap copies 4 bytes at a time).
      if (prev && prev->ptLoad == p)
        fillTrap(trapInstr: ctx.target->trapInstr,
                 i: ctx.bufferStart + alignDown(Value: prev->offset + prev->size, Align: 4),
                 end: ctx.bufferStart + sec->offset);
      prev = sec;
    }

    // Fill the last page.
    for (std::unique_ptr<PhdrEntry> &p : part.phdrs)
      if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
        fillTrap(
            trapInstr: ctx.target->trapInstr,
            i: ctx.bufferStart + alignDown(Value: p->firstSec->offset + p->p_filesz, Align: 4),
            end: ctx.bufferStart + alignToPowerOf2(Value: p->firstSec->offset + p->p_filesz,
                                             Align: ctx.arg.maxPageSize));

    // Round up the file size of the last segment to the page boundary iff it is
    // an executable segment to ensure that other tools don't accidentally
    // trim the instruction padding (e.g. when stripping the file).
    PhdrEntry *last = nullptr;
    for (std::unique_ptr<PhdrEntry> &p : part.phdrs)
      if (p->p_type == PT_LOAD)
        last = p.get();

    if (last && (last->p_flags & PF_X)) {
      last->p_filesz = alignToPowerOf2(Value: last->p_filesz, Align: ctx.arg.maxPageSize);
      // p_memsz might be larger than the aligned p_filesz due to trailing BSS
      // sections. Don't decrease it.
      last->p_memsz = std::max(a: last->p_memsz, b: last->p_filesz);
    }
  }
}
3003
3004// Write section contents to a mmap'ed file.
3005template <class ELFT> void Writer<ELFT>::writeSections() {
3006 llvm::TimeTraceScope timeScope("Write sections");
3007
3008 {
3009 // In -r or --emit-relocs mode, write the relocation sections first as in
3010 // ELf_Rel targets we might find out that we need to modify the relocated
3011 // section while doing it.
3012 parallel::TaskGroup tg;
3013 for (OutputSection *sec : ctx.outputSections)
3014 if (isStaticRelSecType(type: sec->type))
3015 sec->writeTo<ELFT>(ctx, ctx.bufferStart + sec->offset, tg);
3016 }
3017 {
3018 parallel::TaskGroup tg;
3019 for (OutputSection *sec : ctx.outputSections)
3020 if (!isStaticRelSecType(type: sec->type))
3021 sec->writeTo<ELFT>(ctx, ctx.bufferStart + sec->offset, tg);
3022 }
3023
3024 // Finally, check that all dynamic relocation addends were written correctly.
3025 if (ctx.arg.checkDynamicRelocs && ctx.arg.writeAddends) {
3026 for (OutputSection *sec : ctx.outputSections)
3027 if (isStaticRelSecType(type: sec->type))
3028 sec->checkDynRelAddends(ctx);
3029 }
3030}
3031
3032// Computes a hash value of Data using a given hash function.
3033// In order to utilize multiple cores, we first split data into 1MB
3034// chunks, compute a hash for each chunk, and then compute a hash value
3035// of the hash values.
3036static void
3037computeHash(llvm::MutableArrayRef<uint8_t> hashBuf,
3038 llvm::ArrayRef<uint8_t> data,
3039 std::function<void(uint8_t *dest, ArrayRef<uint8_t> arr)> hashFn) {
3040 std::vector<ArrayRef<uint8_t>> chunks = split(arr: data, chunkSize: 1024 * 1024);
3041 const size_t hashesSize = chunks.size() * hashBuf.size();
3042 std::unique_ptr<uint8_t[]> hashes(new uint8_t[hashesSize]);
3043
3044 // Compute hash values.
3045 parallelFor(Begin: 0, End: chunks.size(), Fn: [&](size_t i) {
3046 hashFn(hashes.get() + i * hashBuf.size(), chunks[i]);
3047 });
3048
3049 // Write to the final output buffer.
3050 hashFn(hashBuf.data(), ArrayRef(hashes.get(), hashesSize));
3051}
3052
// Compute and write the build ID note for every partition. The hash covers
// the entire output buffer as written so far.
template <class ELFT> void Writer<ELFT>::writeBuildId() {
  if (!ctx.mainPart->buildId || !ctx.mainPart->buildId->getParent())
    return;

  // --build-id=0x... : the user supplied the bytes verbatim; no hashing.
  if (ctx.arg.buildId == BuildIdKind::Hexstring) {
    for (Partition &part : ctx.partitions)
      part.buildId->writeBuildId(buf: ctx.arg.buildIdVector);
    return;
  }

  // Compute a hash of all sections of the output file.
  size_t hashSize = ctx.mainPart->buildId->hashSize;
  std::unique_ptr<uint8_t[]> buildId(new uint8_t[hashSize]);
  MutableArrayRef<uint8_t> output(buildId.get(), hashSize);
  llvm::ArrayRef<uint8_t> input{ctx.bufferStart, size_t(fileSize)};

  // Fedora introduced build ID as "approximation of true uniqueness across all
  // binaries that might be used by overlapping sets of people". It does not
  // need some security goals that some hash algorithms strive to provide, e.g.
  // (second-)preimage and collision resistance. In practice people use 'md5'
  // and 'sha1' just for different lengths. Implement them with the more
  // efficient BLAKE3.
  switch (ctx.arg.buildId) {
  case BuildIdKind::Fast:
    computeHash(output, input, [](uint8_t *dest, ArrayRef<uint8_t> arr) {
      write64le(P: dest, V: xxh3_64bits(data: arr));
    });
    break;
  case BuildIdKind::Md5:
    // BLAKE3 truncated to 16 bytes, matching md5's length.
    computeHash(output, input, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
      memcpy(dest: dest, src: BLAKE3::hash<16>(Data: arr).data(), n: hashSize);
    });
    break;
  case BuildIdKind::Sha1:
    // BLAKE3 truncated to 20 bytes, matching sha1's length.
    computeHash(output, input, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
      memcpy(dest: dest, src: BLAKE3::hash<20>(Data: arr).data(), n: hashSize);
    });
    break;
  case BuildIdKind::Uuid:
    if (auto ec = llvm::getRandomBytes(Buffer: buildId.get(), Size: hashSize))
      ErrAlways(ctx) << "entropy source failure: " << ec.message();
    break;
  default:
    llvm_unreachable("unknown BuildIdKind");
  }
  // Every partition gets a copy of the same build ID.
  for (Partition &part : ctx.partitions)
    part.buildId->writeBuildId(buf: output);
}
3101
// Explicit instantiations of writeResult for each supported ELF layout, so
// the template definition can live in this translation unit.
template void elf::writeResult<ELF32LE>(Ctx &);
template void elf::writeResult<ELF32BE>(Ctx &);
template void elf::writeResult<ELF64LE>(Ctx &);
template void elf::writeResult<ELF64BE>(Ctx &);
3106