//===- Writer.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
8
9#include "Writer.h"
10#include "AArch64ErrataFix.h"
11#include "ARMErrataFix.h"
12#include "BPSectionOrderer.h"
13#include "CallGraphSort.h"
14#include "Config.h"
15#include "InputFiles.h"
16#include "LinkerScript.h"
17#include "MapFile.h"
18#include "OutputSections.h"
19#include "Relocations.h"
20#include "SymbolTable.h"
21#include "Symbols.h"
22#include "SyntheticSections.h"
23#include "Target.h"
24#include "lld/Common/Arrays.h"
25#include "lld/Common/CommonLinkerContext.h"
26#include "lld/Common/Filesystem.h"
27#include "lld/Common/Strings.h"
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/StringMap.h"
30#include "llvm/Support/BLAKE3.h"
31#include "llvm/Support/Parallel.h"
32#include "llvm/Support/RandomNumberGenerator.h"
33#include "llvm/Support/TimeProfiler.h"
34#include "llvm/Support/xxhash.h"
35#include <climits>
36
37#define DEBUG_TYPE "lld"
38
39using namespace llvm;
40using namespace llvm::ELF;
41using namespace llvm::object;
42using namespace llvm::support;
43using namespace llvm::support::endian;
44using namespace lld;
45using namespace lld::elf;
46
47namespace {
48// The writer writes a SymbolTable result to a file.
49template <class ELFT> class Writer {
50public:
51 LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
52
53 Writer(Ctx &ctx) : ctx(ctx), buffer(ctx.e.outputBuffer), tc(ctx) {}
54
55 void run();
56
57private:
58 void addSectionSymbols();
59 void sortSections();
60 void resolveShfLinkOrder();
61 void finalizeAddressDependentContent();
62 void optimizeBasicBlockJumps();
63 void sortInputSections();
64 void sortOrphanSections();
65 void finalizeSections();
66 void checkExecuteOnly();
67 void checkExecuteOnlyReport();
68 void setReservedSymbolSections();
69
70 SmallVector<std::unique_ptr<PhdrEntry>, 0> createPhdrs(Partition &part);
71 void addPhdrForSection(Partition &part, unsigned shType, unsigned pType,
72 unsigned pFlags);
73 void assignFileOffsets();
74 void assignFileOffsetsBinary();
75 void setPhdrs(Partition &part);
76 void checkSections();
77 void fixSectionAlignments();
78 void openFile();
79 void writeTrapInstr();
80 void writeHeader();
81 void writeSections();
82 void writeSectionsBinary();
83 void writeBuildId();
84
85 Ctx &ctx;
86 std::unique_ptr<FileOutputBuffer> &buffer;
87 // ThunkCreator holds Thunks that are used at writeTo time.
88 ThunkCreator tc;
89
90 void addRelIpltSymbols();
91 void addStartEndSymbols();
92 void addStartStopSymbols(OutputSection &osec);
93
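  // The total size of the output file and the file offset of the section
  // header table.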
94 uint64_t fileSize;
95 uint64_t sectionHeaderOff;
96};
97} // anonymous namespace
98
99template <class ELFT> void elf::writeResult(Ctx &ctx) {
100 Writer<ELFT>(ctx).run();
101}
102
static void
removeEmptyPTLoad(Ctx &ctx, SmallVector<std::unique_ptr<PhdrEntry>, 0> &phdrs) {
  auto it = std::stable_partition(phdrs.begin(), phdrs.end(), [&](auto &p) {
    if (p->p_type != PT_LOAD)
      return true;
    if (!p->firstSec)
      return false;
    uint64_t size = p->lastSec->addr + p->lastSec->size - p->firstSec->addr;
    return size != 0;
  });

  // Clear OutputSection::ptLoad for sections contained in removed
  // segments.
  DenseSet<PhdrEntry *> removed;
  for (auto it2 = it; it2 != phdrs.end(); ++it2)
    removed.insert(it2->get());
  for (OutputSection *sec : ctx.outputSections)
    if (removed.contains(sec->ptLoad))
      sec->ptLoad = nullptr;
  phdrs.erase(it, phdrs.end());
}
124
void elf::copySectionsIntoPartitions(Ctx &ctx) {
  SmallVector<InputSectionBase *, 0> newSections;
  const size_t ehSize = ctx.ehInputSections.size();
  for (unsigned part = 2; part != ctx.partitions.size() + 1; ++part) {
    for (InputSectionBase *s : ctx.inputSections) {
      if (!(s->flags & SHF_ALLOC) || !s->isLive() || s->type != SHT_NOTE)
        continue;
      auto *copy = make<InputSection>(cast<InputSection>(*s));
      copy->partition = part;
      newSections.push_back(copy);
    }
    for (size_t i = 0; i != ehSize; ++i) {
      assert(ctx.ehInputSections[i]->isLive());
      auto *copy = make<EhInputSection>(*ctx.ehInputSections[i]);
      copy->partition = part;
      ctx.ehInputSections.push_back(copy);
    }
  }

  ctx.inputSections.insert(ctx.inputSections.end(), newSections.begin(),
                           newSections.end());
}
147
static Defined *addOptionalRegular(Ctx &ctx, StringRef name, SectionBase *sec,
                                   uint64_t val, uint8_t stOther = STV_HIDDEN) {
  Symbol *s = ctx.symtab->find(name);
  if (!s || s->isDefined() || s->isCommon())
    return nullptr;

  ctx.synthesizedSymbols.push_back(s);
  s->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
                          stOther, STT_NOTYPE, val,
                          /*size=*/0, sec});
  s->isUsedInRegularObj = true;
  return cast<Defined>(s);
}
161
162// The linker is expected to define some symbols depending on
163// the linking result. This function defines such symbols.
164void elf::addReservedSymbols(Ctx &ctx) {
165 if (ctx.arg.emachine == EM_MIPS) {
166 auto addAbsolute = [&](StringRef name) {
      Symbol *sym =
          ctx.symtab->addSymbol(Defined{ctx, ctx.internalFile, name, STB_GLOBAL,
                                        STV_HIDDEN, STT_NOTYPE, 0, 0, nullptr});
      sym->isUsedInRegularObj = true;
      return cast<Defined>(sym);
172 };
173 // Define _gp for MIPS. st_value of _gp symbol will be updated by Writer
174 // so that it points to an absolute address which by default is relative
175 // to GOT. Default offset is 0x7ff0.
176 // See "Global Data Symbols" in Chapter 6 in the following document:
177 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
178 ctx.sym.mipsGp = addAbsolute("_gp");
179
    // On the MIPS O32 ABI, _gp_disp is a magic symbol that designates the
    // offset between the start of a function and the 'gp' pointer into the GOT.
    if (ctx.symtab->find("_gp_disp"))
      ctx.sym.mipsGpDisp = addAbsolute("_gp_disp");
184
185 // The __gnu_local_gp is a magic symbol equal to the current value of 'gp'
186 // pointer. This symbol is used in the code generated by .cpload pseudo-op
187 // in case of using -mno-shared option.
188 // https://sourceware.org/ml/binutils/2004-12/msg00094.html
    if (ctx.symtab->find("__gnu_local_gp"))
      ctx.sym.mipsLocalGp = addAbsolute("__gnu_local_gp");
191 } else if (ctx.arg.emachine == EM_PPC) {
    // glibc *crt1.o has an undefined reference to _SDA_BASE_. Since we don't
    // support the Small Data Area, define it arbitrarily as 0.
    addOptionalRegular(ctx, "_SDA_BASE_", nullptr, 0, STV_HIDDEN);
195 } else if (ctx.arg.emachine == EM_PPC64) {
196 addPPC64SaveRestore(ctx);
197 }
198
199 // The Power Architecture 64-bit v2 ABI defines a TableOfContents (TOC) which
200 // combines the typical ELF GOT with the small data sections. It commonly
201 // includes .got .toc .sdata .sbss. The .TOC. symbol replaces both
202 // _GLOBAL_OFFSET_TABLE_ and _SDA_BASE_ from the 32-bit ABI. It is used to
203 // represent the TOC base which is offset by 0x8000 bytes from the start of
204 // the .got section.
205 // We do not allow _GLOBAL_OFFSET_TABLE_ to be defined by input objects as the
206 // correctness of some relocations depends on its value.
207 StringRef gotSymName =
208 (ctx.arg.emachine == EM_PPC64) ? ".TOC." : "_GLOBAL_OFFSET_TABLE_";
209
  if (Symbol *s = ctx.symtab->find(gotSymName)) {
211 if (s->isDefined()) {
212 ErrAlways(ctx) << s->file << " cannot redefine linker defined symbol '"
213 << gotSymName << "'";
214 return;
215 }
216
217 uint64_t gotOff = 0;
218 if (ctx.arg.emachine == EM_PPC64)
219 gotOff = 0x8000;
220
    s->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
                            STV_HIDDEN, STT_NOTYPE, gotOff, /*size=*/0,
                            ctx.out.elfHeader.get()});
    ctx.sym.globalOffsetTable = cast<Defined>(s);
225 }
226
  // __ehdr_start is the location of the ELF file headers. Note that we define
  // this symbol unconditionally even when using a linker script, which
  // differs from the behavior of the GNU linker, which only defines this
  // symbol if the ELF headers are in a memory mapped segment.
  addOptionalRegular(ctx, "__ehdr_start", ctx.out.elfHeader.get(), 0,
                     STV_HIDDEN);
233
234 // __executable_start is not documented, but the expectation of at
235 // least the Android libc is that it points to the ELF header.
  addOptionalRegular(ctx, "__executable_start", ctx.out.elfHeader.get(), 0,
                     STV_HIDDEN);
238
  // The __dso_handle symbol is passed to __cxa_finalize as a marker to
  // identify each DSO. The address of the symbol doesn't matter as long as it
  // is different in different DSOs, so we choose the start address of the DSO.
  addOptionalRegular(ctx, "__dso_handle", ctx.out.elfHeader.get(), 0,
                     STV_HIDDEN);
244
  // If a linker script does the layout, we do not need to create any standard
  // symbols.
246 if (ctx.script->hasSectionsCommand)
247 return;
248
249 auto add = [&](StringRef s, int64_t pos) {
    return addOptionalRegular(ctx, s, ctx.out.elfHeader.get(), pos,
                              STV_DEFAULT);
252 };
253
254 ctx.sym.bss = add("__bss_start", 0);
255 ctx.sym.end1 = add("end", -1);
256 ctx.sym.end2 = add("_end", -1);
257 ctx.sym.etext1 = add("etext", -1);
258 ctx.sym.etext2 = add("_etext", -1);
259 ctx.sym.edata1 = add("edata", -1);
260 ctx.sym.edata2 = add("_edata", -1);
261}
262
static void demoteDefined(Defined &sym, DenseMap<SectionBase *, size_t> &map) {
  if (map.empty())
    for (auto [i, sec] : llvm::enumerate(sym.file->getSections()))
      map.try_emplace(sec, i);
  // Change WEAK to GLOBAL so that if a scanned relocation references sym,
  // maybeReportUndefined will report an error.
  uint8_t binding = sym.isWeak() ? uint8_t(STB_GLOBAL) : sym.binding;
  Undefined(sym.file, sym.getName(), binding, sym.stOther, sym.type,
            /*discardedSecIdx=*/map.lookup(sym.section))
      .overwrite(sym);
  // Eliminate from the symbol table, otherwise we would leave an undefined
  // symbol if the symbol is unreferenced in the absence of GC.
  sym.isUsedInRegularObj = false;
}
277
// If all references to a DSO happen to be weak, the DSO is not added to
// DT_NEEDED. If that happens, replace SharedSymbol with Undefined to avoid
// dangling references to an unneeded DSO. Use a weak binding to avoid
// --no-allow-shlib-undefined diagnostics. Similarly, demote lazy symbols.
//
// In addition, demote symbols defined in discarded sections, so that
// references to /DISCARD/ discarded symbols will lead to errors.
static void demoteSymbolsAndComputeIsPreemptible(Ctx &ctx) {
  llvm::TimeTraceScope timeScope("Demote symbols");
  DenseMap<InputFile *, DenseMap<SectionBase *, size_t>> sectionIndexMap;
  for (Symbol *sym : ctx.symtab->getSymbols()) {
    if (auto *d = dyn_cast<Defined>(sym)) {
      if (d->section && !d->section->isLive())
        demoteDefined(*d, sectionIndexMap[d->file]);
    } else {
      auto *s = dyn_cast<SharedSymbol>(sym);
      if (sym->isLazy() || (s && !cast<SharedFile>(s->file)->isNeeded)) {
        uint8_t binding = sym->isLazy() ? sym->binding : uint8_t(STB_WEAK);
        Undefined(ctx.internalFile, sym->getName(), binding, sym->stOther,
                  sym->type)
            .overwrite(*sym);
        sym->versionId = VER_NDX_GLOBAL;
      }
    }

    sym->isPreemptible = (sym->isUndefined() || sym->isExported) &&
                         computeIsPreemptible(ctx, *sym);
  }
}
307
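// Returns the output section with the given name in the given partition, or
// null if there is no such section.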
static OutputSection *findSection(Ctx &ctx, StringRef name,
                                  unsigned partition = 1) {
  for (SectionCommand *cmd : ctx.script->sectionCommands)
    if (auto *osd = dyn_cast<OutputDesc>(cmd))
      if (osd->osec.name == name && osd->osec.partition == partition)
        return &osd->osec;
  return nullptr;
}
316
317// The main function of the writer.
318template <class ELFT> void Writer<ELFT>::run() {
319 // Now that we have a complete set of output sections. This function
320 // completes section contents. For example, we need to add strings
321 // to the string table, and add entries to .got and .plt.
322 // finalizeSections does that.
323 finalizeSections();
324 checkExecuteOnly();
325 checkExecuteOnlyReport();
326
327 // If --compressed-debug-sections is specified, compress .debug_* sections.
328 // Do it right now because it changes the size of output sections.
329 for (OutputSection *sec : ctx.outputSections)
330 sec->maybeCompress<ELFT>(ctx);
331
332 if (ctx.script->hasSectionsCommand)
    ctx.script->allocateHeaders(ctx.mainPart->phdrs);
334
335 // Remove empty PT_LOAD to avoid causing the dynamic linker to try to mmap a
336 // 0 sized region. This has to be done late since only after assignAddresses
337 // we know the size of the sections.
338 for (Partition &part : ctx.partitions)
    removeEmptyPTLoad(ctx, part.phdrs);
340
341 if (!ctx.arg.oFormatBinary)
342 assignFileOffsets();
343 else
344 assignFileOffsetsBinary();
345
346 for (Partition &part : ctx.partitions)
347 setPhdrs(part);
348
349 // Handle --print-map(-M)/--Map and --cref. Dump them before checkSections()
350 // because the files may be useful in case checkSections() or openFile()
351 // fails, for example, due to an erroneous file size.
352 writeMapAndCref(ctx);
353
354 // Handle --print-memory-usage option.
355 if (ctx.arg.printMemoryUsage)
    ctx.script->printMemoryUsage(ctx.e.outs());
357
358 if (ctx.arg.checkSections)
359 checkSections();
360
  // It does not make sense to try to open the file if we already have errors.
362 if (errCount(ctx))
363 return;
364
365 {
366 llvm::TimeTraceScope timeScope("Write output file");
367 // Write the result down to a file.
368 openFile();
369 if (errCount(ctx))
370 return;
371
372 if (!ctx.arg.oFormatBinary) {
373 if (ctx.arg.zSeparate != SeparateSegmentKind::None)
374 writeTrapInstr();
375 writeHeader();
376 writeSections();
377 } else {
378 writeSectionsBinary();
379 }
380
381 // Backfill .note.gnu.build-id section content. This is done at last
382 // because the content is usually a hash value of the entire output file.
383 writeBuildId();
384 if (errCount(ctx))
385 return;
386
387 if (!ctx.e.disableOutput) {
388 if (auto e = buffer->commit())
389 Err(ctx) << "failed to write output '" << buffer->getPath()
390 << "': " << std::move(e);
391 }
392
393 if (!ctx.arg.cmseOutputLib.empty())
394 writeARMCmseImportLib<ELFT>(ctx);
395 }
396}
397
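// Mark local symbols targeted by the given relocation records as used so that
// they are retained in the symbol table.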
398template <class ELFT, class RelTy>
399static void markUsedLocalSymbolsImpl(ObjFile<ELFT> *file,
400 llvm::ArrayRef<RelTy> rels) {
401 for (const RelTy &rel : rels) {
402 Symbol &sym = file->getRelocTargetSym(rel);
403 if (sym.isLocal())
404 sym.used = true;
405 }
406}
407
408// The function ensures that the "used" field of local symbols reflects the fact
409// that the symbol is used in a relocation from a live section.
410template <class ELFT> static void markUsedLocalSymbols(Ctx &ctx) {
411 // With --gc-sections, the field is already filled.
412 // See MarkLive<ELFT>::resolveReloc().
413 if (ctx.arg.gcSections)
414 return;
415 for (ELFFileBase *file : ctx.objectFiles) {
416 ObjFile<ELFT> *f = cast<ObjFile<ELFT>>(file);
417 for (InputSectionBase *s : f->getSections()) {
      InputSection *isec = dyn_cast_or_null<InputSection>(s);
419 if (!isec)
420 continue;
421 if (isec->type == SHT_REL) {
422 markUsedLocalSymbolsImpl(f, isec->getDataAs<typename ELFT::Rel>());
423 } else if (isec->type == SHT_RELA) {
424 markUsedLocalSymbolsImpl(f, isec->getDataAs<typename ELFT::Rela>());
425 } else if (isec->type == SHT_CREL) {
426 // The is64=true variant also works with ELF32 since only the r_symidx
427 // member is used.
428 for (Elf_Crel_Impl<true> r : RelocsCrel<true>(isec->content_)) {
          Symbol &sym = file->getSymbol(r.r_symidx);
430 if (sym.isLocal())
431 sym.used = true;
432 }
433 }
434 }
435 }
436}
437
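// Returns true if a local Defined symbol should be copied to the output symbol
// table, taking --discard-all/--discard-locals and a few target-specific cases
// into account.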
438static bool shouldKeepInSymtab(Ctx &ctx, const Defined &sym) {
439 if (sym.isSection())
440 return false;
441
442 // If --emit-reloc or -r is given, preserve symbols referenced by relocations
443 // from live sections.
444 if (sym.used && ctx.arg.copyRelocs)
445 return true;
446
447 // Exclude local symbols pointing to .ARM.exidx sections.
448 // They are probably mapping symbols "$d", which are optional for these
449 // sections. After merging the .ARM.exidx sections, some of these symbols
450 // may become dangling. The easiest way to avoid the issue is not to add
451 // them to the symbol table from the beginning.
452 if (ctx.arg.emachine == EM_ARM && sym.section &&
453 sym.section->type == SHT_ARM_EXIDX)
454 return false;
455
456 if (ctx.arg.discard == DiscardPolicy::None)
457 return true;
458 if (ctx.arg.discard == DiscardPolicy::All)
459 return false;
460
461 // In ELF assembly .L symbols are normally discarded by the assembler.
462 // If the assembler fails to do so, the linker discards them if
463 // * --discard-locals is used.
464 // * The symbol is in a SHF_MERGE section, which is normally the reason for
465 // the assembler keeping the .L symbol.
  if (sym.getName().starts_with(".L") &&
467 (ctx.arg.discard == DiscardPolicy::Locals ||
468 (sym.section && (sym.section->flags & SHF_MERGE))))
469 return false;
470 return true;
471}
472
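// Returns true if the symbol should appear in the symbol table: Defined
// symbols are included unless they belong to a dead merge-section piece; other
// symbols are included if they are used or --gc-sections is disabled.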
473bool elf::includeInSymtab(Ctx &ctx, const Symbol &b) {
  if (auto *d = dyn_cast<Defined>(&b)) {
475 // Always include absolute symbols.
476 SectionBase *sec = d->section;
477 if (!sec)
478 return true;
479 assert(sec->isLive());
480
    if (auto *s = dyn_cast<MergeInputSection>(sec))
      return s->getSectionPiece(d->value).live;
483 return true;
484 }
485 return b.used || !ctx.arg.gcSections;
486}
487
488// Scan local symbols to:
489//
490// - demote symbols defined relative to /DISCARD/ discarded input sections so
491// that relocations referencing them will lead to errors.
// - copy eligible symbols to .symtab
493static void demoteAndCopyLocalSymbols(Ctx &ctx) {
494 llvm::TimeTraceScope timeScope("Add local symbols");
495 for (ELFFileBase *file : ctx.objectFiles) {
496 DenseMap<SectionBase *, size_t> sectionIndexMap;
497 for (Symbol *b : file->getLocalSymbols()) {
498 assert(b->isLocal() && "should have been caught in initializeSymbols()");
      auto *dr = dyn_cast<Defined>(b);
500 if (!dr)
501 continue;
502
      if (dr->section && !dr->section->isLive())
        demoteDefined(*dr, sectionIndexMap);
      else if (ctx.in.symTab && includeInSymtab(ctx, *b) &&
               shouldKeepInSymtab(ctx, *dr))
        ctx.in.symTab->addSymbol(b);
508 }
509 }
510}
511
512// Create a section symbol for each output section so that we can represent
513// relocations that point to the section. If we know that no relocation is
514// referring to a section (that happens if the section is a synthetic one), we
515// don't create a section symbol for that section.
516template <class ELFT> void Writer<ELFT>::addSectionSymbols() {
517 for (SectionCommand *cmd : ctx.script->sectionCommands) {
    auto *osd = dyn_cast<OutputDesc>(cmd);
519 if (!osd)
520 continue;
521 OutputSection &osec = osd->osec;
522 InputSectionBase *isec = nullptr;
523 // Iterate over all input sections and add a STT_SECTION symbol if any input
524 // section may be a relocation target.
525 for (SectionCommand *cmd : osec.commands) {
      auto *isd = dyn_cast<InputSectionDescription>(cmd);
527 if (!isd)
528 continue;
529 for (InputSectionBase *s : isd->sections) {
530 // Relocations are not using REL[A] section symbols.
        if (isStaticRelSecType(s->type))
532 continue;
533
534 // Unlike other synthetic sections, mergeable output sections contain
535 // data copied from input sections, and there may be a relocation
536 // pointing to its contents if -r or --emit-reloc is given.
        if (isa<SyntheticSection>(s) && !(s->flags & SHF_MERGE))
538 continue;
539
540 isec = s;
541 break;
542 }
543 }
544 if (!isec)
545 continue;
546
547 // Set the symbol to be relative to the output section so that its st_value
548 // equals the output section address. Note, there may be a gap between the
549 // start of the output section and isec.
    ctx.in.symTab->addSymbol(makeDefined(ctx, isec->file, "", STB_LOCAL,
                                         /*stOther=*/0, STT_SECTION,
                                         /*value=*/0, /*size=*/0, &osec));
553 }
554}
555
556// Returns true if this is a variant of .data.rel.ro.
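// For example, ".data.rel.ro" always matches, while ".data.rel.ro.hot" and
// ".data.rel.ro.unlikely" match only when -z keep-data-section-prefix is used.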
557static bool isRelRoDataSection(Ctx &ctx, StringRef secName) {
  if (!secName.consume_front(".data.rel.ro"))
559 return false;
560 if (secName.empty())
561 return true;
562 // If -z keep-data-section-prefix is specified, additionally allow
563 // '.data.rel.ro.hot' and '.data.rel.ro.unlikely'.
564 if (ctx.arg.zKeepDataSectionPrefix)
565 return secName == ".hot" || secName == ".unlikely";
566 return false;
567}
568
569// Today's loaders have a feature to make segments read-only after
570// processing dynamic relocations to enhance security. PT_GNU_RELRO
571// is defined for that.
572//
573// This function returns true if a section needs to be put into a
574// PT_GNU_RELRO segment.
575static bool isRelroSection(Ctx &ctx, const OutputSection *sec) {
576 if (!ctx.arg.zRelro)
577 return false;
578 if (sec->relro)
579 return true;
580
581 uint64_t flags = sec->flags;
582
583 // Non-allocatable or non-writable sections don't need RELRO because
584 // they are not writable or not even mapped to memory in the first place.
585 // RELRO is for sections that are essentially read-only but need to
586 // be writable only at process startup to allow dynamic linker to
587 // apply relocations.
588 if (!(flags & SHF_ALLOC) || !(flags & SHF_WRITE))
589 return false;
590
591 // Once initialized, TLS data segments are used as data templates
  // for thread-local storage. For each new thread, the runtime
  // allocates memory for a TLS block and copies the templates there. No thread
  // is supposed to use the templates directly. Thus, they can be in RELRO.
595 if (flags & SHF_TLS)
596 return true;
597
598 // .init_array, .preinit_array and .fini_array contain pointers to
599 // functions that are executed on process startup or exit. These
600 // pointers are set by the static linker, and they are not expected
601 // to change at runtime. But if you are an attacker, you could do
602 // interesting things by manipulating pointers in .fini_array, for
603 // example. So they are put into RELRO.
604 uint32_t type = sec->type;
605 if (type == SHT_INIT_ARRAY || type == SHT_FINI_ARRAY ||
606 type == SHT_PREINIT_ARRAY)
607 return true;
608
609 // .got contains pointers to external symbols. They are resolved by
610 // the dynamic linker when a module is loaded into memory, and after
611 // that they are not expected to change. So, it can be in RELRO.
612 if (ctx.in.got && sec == ctx.in.got->getParent())
613 return true;
614
615 // .toc is a GOT-ish section for PowerPC64. Their contents are accessed
616 // through r2 register, which is reserved for that purpose. Since r2 is used
617 // for accessing .got as well, .got and .toc need to be close enough in the
618 // virtual address space. Usually, .toc comes just after .got. Since we place
619 // .got into RELRO, .toc needs to be placed into RELRO too.
620 if (sec->name == ".toc")
621 return true;
622
623 // .got.plt contains pointers to external function symbols. They are
624 // by default resolved lazily, so we usually cannot put it into RELRO.
625 // However, if "-z now" is given, the lazy symbol resolution is
626 // disabled, which enables us to put it into RELRO.
627 if (sec == ctx.in.gotPlt->getParent())
628 return ctx.arg.zNow;
629
630 if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent())
631 return true;
632
633 // .dynamic section contains data for the dynamic linker, and
634 // there's no need to write to it at runtime, so it's better to put
635 // it into RELRO.
636 if (sec->name == ".dynamic")
637 return true;
638
639 // Sections with some special names are put into RELRO. This is a
640 // bit unfortunate because section names shouldn't be significant in
641 // ELF in spirit. But in reality many linker features depend on
642 // magic section names.
643 StringRef s = sec->name;
644
  bool abiAgnostic = isRelRoDataSection(ctx, s) || s == ".bss.rel.ro" ||
646 s == ".ctors" || s == ".dtors" || s == ".jcr" ||
647 s == ".eh_frame" || s == ".fini_array" ||
648 s == ".init_array" || s == ".preinit_array";
649
650 bool abiSpecific =
651 ctx.arg.osabi == ELFOSABI_OPENBSD && s == ".openbsd.randomdata";
652
653 return abiAgnostic || abiSpecific;
654}
655
656// We compute a rank for each section. The rank indicates where the
657// section should be placed in the file. Instead of using simple
658// numbers (0,1,2...), we use a series of flags. One for each decision
659// point when placing the section.
660// Using flags has two key properties:
// * It is easy to check if a given branch was taken.
// * It is easy to see how similar two ranks are (see getRankProximity).
663enum RankFlags {
664 RF_NOT_ADDR_SET = 1 << 27,
665 RF_NOT_ALLOC = 1 << 26,
666 RF_PARTITION = 1 << 18, // Partition number (8 bits)
667 RF_LARGE_EXEC_WRITE = 1 << 16,
668 RF_LARGE_ALT = 1 << 15,
669 RF_WRITE = 1 << 14,
670 RF_EXEC_WRITE = 1 << 13,
671 RF_EXEC = 1 << 12,
672 RF_RODATA = 1 << 11,
673 RF_LARGE_EXEC = 1 << 10,
674 RF_LARGE = 1 << 9,
675 RF_NOT_RELRO = 1 << 8,
676 RF_NOT_TLS = 1 << 7,
677 RF_BSS = 1 << 6,
678};
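// For example, an allocatable, writable, non-TLS, non-RELRO PROGBITS section
// in partition 1 (with no address fixed by -T/--section-start) receives
// RF_PARTITION | RF_NOT_ADDR_SET | RF_WRITE | RF_NOT_TLS | RF_NOT_RELRO.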
679
680unsigned elf::getSectionRank(Ctx &ctx, OutputSection &osec) {
681 unsigned rank = osec.partition * RF_PARTITION;
682
683 // We want to put section specified by -T option first, so we
684 // can start assigning VA starting from them later.
  if (ctx.arg.sectionStartMap.contains(osec.name))
686 return rank;
687 rank |= RF_NOT_ADDR_SET;
688
689 // Allocatable sections go first to reduce the total PT_LOAD size and
690 // so debug info doesn't change addresses in actual code.
691 if (!(osec.flags & SHF_ALLOC))
692 return rank | RF_NOT_ALLOC;
693
694 // Sort sections based on their access permission in the following
695 // order: R, RX, RXW, RW(RELRO), RW(non-RELRO).
696 //
697 // Read-only sections come first such that they go in the PT_LOAD covering the
698 // program headers at the start of the file.
699 //
700 // The layout for writable sections is PT_LOAD(PT_GNU_RELRO(.data.rel.ro
701 // .bss.rel.ro) | .data .bss), where | marks where page alignment happens.
702 // An alternative ordering is PT_LOAD(.data | PT_GNU_RELRO( .data.rel.ro
703 // .bss.rel.ro) | .bss), but it may waste more bytes due to 2 alignment
704 // places.
705 bool isExec = osec.flags & SHF_EXECINSTR;
706 bool isWrite = osec.flags & SHF_WRITE;
707 bool isLarge = osec.flags & SHF_X86_64_LARGE && ctx.arg.emachine == EM_X86_64;
708
709 if (!isWrite && !isExec) {
710 // Among PROGBITS sections, place .lrodata further from .text.
711 // For -z lrodata-after-bss, place .lrodata after .lbss like GNU ld. This
712 // layout has one extra PT_LOAD, but alleviates relocation overflow
713 // pressure for absolute relocations referencing small data from -fno-pic
714 // relocatable files.
715 if (isLarge)
716 rank |= ctx.arg.zLrodataAfterBss ? RF_LARGE_ALT : 0;
717 else
718 rank |= ctx.arg.zLrodataAfterBss ? 0 : RF_LARGE;
719
720 if (osec.type == SHT_LLVM_PART_EHDR)
721 ;
722 else if (osec.type == SHT_LLVM_PART_PHDR)
723 rank |= 1;
724 else if (osec.name == ".interp")
725 rank |= 2;
726 // Put .note sections at the beginning so that they are likely to be
  // included in a truncated core file. In particular, .note.gnu.build-id, if
728 // available, can identify the object file.
729 else if (osec.type == SHT_NOTE)
730 rank |= 3;
731 // Make PROGBITS sections (e.g .rodata .eh_frame) closer to .text to
732 // alleviate relocation overflow pressure. Large special sections such as
733 // .dynstr and .dynsym can be away from .text.
734 else if (osec.type != SHT_PROGBITS)
735 rank |= 4;
736 else
737 rank |= RF_RODATA;
738 } else if (isExec) {
739 // Place readonly .ltext before .lrodata and writable .ltext after .lbss to
740 // keep writable and readonly segments separate.
741 if (isLarge) {
742 rank |= isWrite ? RF_LARGE_EXEC_WRITE : RF_LARGE_EXEC;
743 } else {
744 rank |= isWrite ? RF_EXEC_WRITE : RF_EXEC;
745 }
746 } else {
747 rank |= RF_WRITE;
748 // The TLS initialization block needs to be a single contiguous block. Place
749 // TLS sections directly before the other RELRO sections.
750 if (!(osec.flags & SHF_TLS))
751 rank |= RF_NOT_TLS;
    if (isRelroSection(ctx, &osec))
753 osec.relro = true;
754 else
755 rank |= RF_NOT_RELRO;
756 // Place .ldata and .lbss after .bss. Making .bss closer to .text
757 // alleviates relocation overflow pressure.
758 // For -z lrodata-after-bss, place .lbss/.lrodata/.ldata after .bss.
759 // .bss/.lbss being adjacent reuses the NOBITS size optimization.
760 if (isLarge) {
761 rank |= ctx.arg.zLrodataAfterBss
762 ? (osec.type == SHT_NOBITS ? 1 : RF_LARGE_ALT)
763 : RF_LARGE;
764 }
765 }
766
767 // Within TLS sections, or within other RelRo sections, or within non-RelRo
768 // sections, place non-NOBITS sections first.
769 if (osec.type == SHT_NOBITS)
770 rank |= RF_BSS;
771
772 // Some architectures have additional ordering restrictions for sections
773 // within the same PT_LOAD.
774 if (ctx.arg.emachine == EM_PPC64) {
775 // PPC64 has a number of special SHT_PROGBITS+SHF_ALLOC+SHF_WRITE sections
    // that we would like to make sure appear in a specific order to maximize
777 // their coverage by a single signed 16-bit offset from the TOC base
778 // pointer.
779 StringRef name = osec.name;
780 if (name == ".got")
781 rank |= 1;
782 else if (name == ".toc")
783 rank |= 2;
784 }
785
786 if (ctx.arg.emachine == EM_MIPS) {
787 if (osec.name != ".got")
788 rank |= 1;
789 // All sections with SHF_MIPS_GPREL flag should be grouped together
790 // because data in these sections is addressable with a gp relative address.
791 if (osec.flags & SHF_MIPS_GPREL)
792 rank |= 2;
793 }
794
795 if (ctx.arg.emachine == EM_RISCV) {
796 // .sdata and .sbss are placed closer to make GP relaxation more profitable
797 // and match GNU ld.
798 StringRef name = osec.name;
799 if (name == ".sdata" || (osec.type == SHT_NOBITS && name != ".sbss"))
800 rank |= 1;
801 }
802
803 return rank;
804}
805
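// Compares output section commands by sort rank; sections whose addresses were
// fixed with -T/--section-start are further ordered by the requested start
// address.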
806static bool compareSections(Ctx &ctx, const SectionCommand *aCmd,
807 const SectionCommand *bCmd) {
  const OutputSection *a = &cast<OutputDesc>(aCmd)->osec;
  const OutputSection *b = &cast<OutputDesc>(bCmd)->osec;
810
811 if (a->sortRank != b->sortRank)
812 return a->sortRank < b->sortRank;
813
814 if (!(a->sortRank & RF_NOT_ADDR_SET))
    return ctx.arg.sectionStartMap.lookup(a->name) <
           ctx.arg.sectionStartMap.lookup(b->name);
817 return false;
818}
819
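// Extends the segment to cover sec: updates the first/last section pointers,
// raises p_align, and, for PT_LOAD, records this segment as the section's
// owning load segment.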
820void PhdrEntry::add(OutputSection *sec) {
821 lastSec = sec;
822 if (!firstSec)
823 firstSec = sec;
  p_align = std::max(p_align, sec->addralign);
825 if (p_type == PT_LOAD)
826 sec->ptLoad = this;
827}
828
829// A statically linked position-dependent executable should only contain
830// IRELATIVE relocations and no other dynamic relocations. Encapsulation symbols
831// __rel[a]_iplt_{start,end} will be defined for .rel[a].dyn, to be
832// processed by the libc runtime. Other executables or DSOs use dynamic tags
833// instead.
834template <class ELFT> void Writer<ELFT>::addRelIpltSymbols() {
835 if (ctx.arg.isPic)
836 return;
837
838 // __rela_iplt_{start,end} are initially defined relative to dummy section 0.
839 // We'll override ctx.out.elfHeader with relaDyn later when we are sure that
840 // .rela.dyn will be present in the output.
841 std::string name = ctx.arg.isRela ? "__rela_iplt_start" : "__rel_iplt_start";
  ctx.sym.relaIpltStart =
      addOptionalRegular(ctx, name, ctx.out.elfHeader.get(), 0, STV_HIDDEN);
  name.replace(name.size() - 5, 5, "end");
  ctx.sym.relaIpltEnd =
      addOptionalRegular(ctx, name, ctx.out.elfHeader.get(), 0, STV_HIDDEN);
847}
848
849// This function generates assignments for predefined symbols (e.g. _end or
850// _etext) and inserts them into the commands sequence to be processed at the
851// appropriate time. This ensures that the value is going to be correct by the
852// time any references to these symbols are processed and is equivalent to
853// defining these symbols explicitly in the linker script.
854template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
855 if (ctx.sym.globalOffsetTable) {
856 // The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually
857 // to the start of the .got or .got.plt section.
858 InputSection *sec = ctx.in.gotPlt.get();
859 if (!ctx.target->gotBaseSymInGotPlt)
      sec = ctx.in.mipsGot ? cast<InputSection>(ctx.in.mipsGot.get())
                           : cast<InputSection>(ctx.in.got.get());
862 ctx.sym.globalOffsetTable->section = sec;
863 }
864
  // __rel[a]_iplt_{start,end} mark the start and the end of the section
  // containing
866 // IRELATIVE relocations.
867 if (ctx.sym.relaIpltStart) {
868 auto &dyn = getIRelativeSection(ctx);
869 if (dyn.isNeeded()) {
870 ctx.sym.relaIpltStart->section = &dyn;
871 ctx.sym.relaIpltEnd->section = &dyn;
872 ctx.sym.relaIpltEnd->value = dyn.getSize();
873 }
874 }
875
876 PhdrEntry *last = nullptr;
877 OutputSection *lastRO = nullptr;
878 auto isLarge = [&ctx = ctx](OutputSection *osec) {
879 return ctx.arg.emachine == EM_X86_64 && osec->flags & SHF_X86_64_LARGE;
880 };
881 for (Partition &part : ctx.partitions) {
882 for (auto &p : part.phdrs) {
883 if (p->p_type != PT_LOAD)
884 continue;
885 last = p.get();
886 if (!(p->p_flags & PF_W) && p->lastSec && !isLarge(p->lastSec))
887 lastRO = p->lastSec;
888 }
889 }
890
891 if (lastRO) {
892 // _etext is the first location after the last read-only loadable segment
893 // that does not contain large sections.
894 if (ctx.sym.etext1)
895 ctx.sym.etext1->section = lastRO;
896 if (ctx.sym.etext2)
897 ctx.sym.etext2->section = lastRO;
898 }
899
900 if (last) {
901 // _edata points to the end of the last non-large mapped initialized
902 // section.
903 OutputSection *edata = nullptr;
904 for (OutputSection *os : ctx.outputSections) {
905 if (os->type != SHT_NOBITS && !isLarge(os))
906 edata = os;
907 if (os == last->lastSec)
908 break;
909 }
910
911 if (ctx.sym.edata1)
912 ctx.sym.edata1->section = edata;
913 if (ctx.sym.edata2)
914 ctx.sym.edata2->section = edata;
915
916 // _end is the first location after the uninitialized data region.
917 if (ctx.sym.end1)
918 ctx.sym.end1->section = last->lastSec;
919 if (ctx.sym.end2)
920 ctx.sym.end2->section = last->lastSec;
921 }
922
923 if (ctx.sym.bss) {
924 // On RISC-V, set __bss_start to the start of .sbss if present.
925 OutputSection *sbss =
        ctx.arg.emachine == EM_RISCV ? findSection(ctx, ".sbss") : nullptr;
    ctx.sym.bss->section = sbss ? sbss : findSection(ctx, ".bss");
928 }
929
930 // Setup MIPS _gp_disp/__gnu_local_gp symbols which should
931 // be equal to the _gp symbol's value.
932 if (ctx.sym.mipsGp) {
933 // Find GP-relative section with the lowest address
934 // and use this address to calculate default _gp value.
935 for (OutputSection *os : ctx.outputSections) {
936 if (os->flags & SHF_MIPS_GPREL) {
937 ctx.sym.mipsGp->section = os;
938 ctx.sym.mipsGp->value = 0x7ff0;
939 break;
940 }
941 }
942 }
943}
944
945// We want to find how similar two ranks are.
946// The more branches in getSectionRank that match, the more similar they are.
947// Since each branch corresponds to a bit flag, we can just use
948// countLeadingZeros.
949static int getRankProximity(OutputSection *a, SectionCommand *b) {
  auto *osd = dyn_cast<OutputDesc>(b);
  return (osd && osd->osec.hasInputSections)
             ? llvm::countl_zero(a->sortRank ^ osd->osec.sortRank)
             : -1;
954}
955
956// When placing orphan sections, we want to place them after symbol assignments
957// so that an orphan after
958// begin_foo = .;
959// foo : { *(foo) }
960// end_foo = .;
961// doesn't break the intended meaning of the begin/end symbols.
962// We don't want to go over sections since findOrphanPos is the
963// one in charge of deciding the order of the sections.
964// We don't want to go over changes to '.', since doing so in
965// rx_sec : { *(rx_sec) }
966// . = ALIGN(0x1000);
967// /* The RW PT_LOAD starts here*/
968// rw_sec : { *(rw_sec) }
969// would mean that the RW PT_LOAD would become unaligned.
970static bool shouldSkip(SectionCommand *cmd) {
  if (auto *assign = dyn_cast<SymbolAssignment>(cmd))
972 return assign->name != ".";
973 return false;
974}
975
976// We want to place orphan sections so that they share as much
977// characteristics with their neighbors as possible. For example, if
978// both are rw, or both are tls.
979static SmallVectorImpl<SectionCommand *>::iterator
980findOrphanPos(Ctx &ctx, SmallVectorImpl<SectionCommand *>::iterator b,
981 SmallVectorImpl<SectionCommand *>::iterator e) {
982 // Place non-alloc orphan sections at the end. This matches how we assign file
983 // offsets to non-alloc sections.
  OutputSection *sec = &cast<OutputDesc>(*e)->osec;
985 if (!(sec->flags & SHF_ALLOC))
986 return e;
987
988 // As a special case, place .relro_padding before the SymbolAssignment using
989 // DATA_SEGMENT_RELRO_END, if present.
990 if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent()) {
    auto i = std::find_if(b, e, [=](SectionCommand *a) {
      if (auto *assign = dyn_cast<SymbolAssignment>(a))
993 return assign->dataSegmentRelroEnd;
994 return false;
995 });
996 if (i != e)
997 return i;
998 }
999
1000 // Find the most similar output section as the anchor. Rank Proximity is a
1001 // value in the range [-1, 32] where [0, 32] indicates potential anchors (0:
1002 // least similar; 32: identical). -1 means not an anchor.
1003 //
1004 // In the event of proximity ties, we select the first or last section
1005 // depending on whether the orphan's rank is smaller.
1006 int maxP = 0;
1007 auto i = e;
1008 for (auto j = b; j != e; ++j) {
    int p = getRankProximity(sec, *j);
    if (p > maxP ||
        (p == maxP && cast<OutputDesc>(*j)->osec.sortRank <= sec->sortRank)) {
1012 maxP = p;
1013 i = j;
1014 }
1015 }
1016 if (i == e)
1017 return e;
1018
1019 auto isOutputSecWithInputSections = [](SectionCommand *cmd) {
    auto *osd = dyn_cast<OutputDesc>(cmd);
1021 return osd && osd->osec.hasInputSections;
1022 };
1023
1024 // Then, scan backward or forward through the script for a suitable insertion
1025 // point. If i's rank is larger, the orphan section can be placed before i.
1026 //
1027 // However, don't do this if custom program headers are defined. Otherwise,
1028 // adding the orphan to a previous segment can change its flags, for example,
1029 // making a read-only segment writable. If memory regions are defined, an
1030 // orphan section should continue the same region as the found section to
1031 // better resemble the behavior of GNU ld.
1032 bool mustAfter =
1033 ctx.script->hasPhdrsCommands() || !ctx.script->memoryRegions.empty();
  if (cast<OutputDesc>(*i)->osec.sortRank <= sec->sortRank || mustAfter) {
    for (auto j = ++i; j != e; ++j) {
      if (!isOutputSecWithInputSections(*j))
        continue;
      if (getRankProximity(sec, *j) != maxP)
1039 break;
1040 i = j + 1;
1041 }
1042 } else {
1043 for (; i != b; --i)
1044 if (isOutputSecWithInputSections(i[-1]))
1045 break;
1046 }
1047
1048 // As a special case, if the orphan section is the last section, put
1049 // it at the very end, past any other commands.
1050 // This matches bfd's behavior and is convenient when the linker script fully
1051 // specifies the start of the file, but doesn't care about the end (the non
1052 // alloc sections for example).
  if (std::none_of(i, e, isOutputSecWithInputSections))
    return e;

  while (i != e && shouldSkip(*i))
1057 ++i;
1058 return i;
1059}
1060
1061// Adds random priorities to sections not already in the map.
1062static void maybeShuffle(Ctx &ctx,
1063 DenseMap<const InputSectionBase *, int> &order) {
1064 if (ctx.arg.shuffleSections.empty())
1065 return;
1066
1067 SmallVector<InputSectionBase *, 0> matched, sections = ctx.inputSections;
  matched.reserve(sections.size());
1069 for (const auto &patAndSeed : ctx.arg.shuffleSections) {
1070 matched.clear();
1071 for (InputSectionBase *sec : sections)
      if (patAndSeed.first.match(sec->name))
        matched.push_back(sec);
1074 const uint32_t seed = patAndSeed.second;
1075 if (seed == UINT32_MAX) {
1076 // If --shuffle-sections <section-glob>=-1, reverse the section order. The
1077 // section order is stable even if the number of sections changes. This is
1078 // useful to catch issues like static initialization order fiasco
1079 // reliably.
      std::reverse(matched.begin(), matched.end());
    } else {
      std::mt19937 g(seed ? seed : std::random_device()());
      llvm::shuffle(matched.begin(), matched.end(), g);
1084 }
1085 size_t i = 0;
1086 for (InputSectionBase *&sec : sections)
      if (patAndSeed.first.match(sec->name))
1088 sec = matched[i++];
1089 }
1090
1091 // Existing priorities are < 0, so use priorities >= 0 for the missing
1092 // sections.
1093 int prio = 0;
1094 for (InputSectionBase *sec : sections) {
    if (order.try_emplace(sec, prio).second)
1096 ++prio;
1097 }
1098}
1099
1100// Return section order within an InputSectionDescription.
1101// If both --symbol-ordering-file and call graph profile are present, the order
1102// file takes precedence, but the call graph profile is still used for symbols
1103// that don't appear in the order file.
1104static DenseMap<const InputSectionBase *, int> buildSectionOrder(Ctx &ctx) {
1105 DenseMap<const InputSectionBase *, int> sectionOrder;
1106 if (ctx.arg.bpStartupFunctionSort || ctx.arg.bpFunctionOrderForCompression ||
1107 ctx.arg.bpDataOrderForCompression) {
1108 TimeTraceScope timeScope("Balanced Partitioning Section Orderer");
    sectionOrder = runBalancedPartitioning(
        ctx, ctx.arg.bpStartupFunctionSort ? ctx.arg.irpgoProfilePath : "",
        ctx.arg.bpFunctionOrderForCompression,
        ctx.arg.bpDataOrderForCompression,
        ctx.arg.bpCompressionSortStartupFunctions,
        ctx.arg.bpVerboseSectionOrderer);
1115 } else if (!ctx.arg.callGraphProfile.empty()) {
1116 sectionOrder = computeCallGraphProfileOrder(ctx);
1117 }
1118
1119 if (ctx.arg.symbolOrderingFile.empty())
1120 return sectionOrder;
1121
1122 struct SymbolOrderEntry {
1123 int priority;
1124 bool present;
1125 };
1126
1127 // Build a map from symbols to their priorities. Symbols that didn't
1128 // appear in the symbol ordering file have the lowest priority 0.
1129 // All explicitly mentioned symbols have negative (higher) priorities.
1130 DenseMap<CachedHashStringRef, SymbolOrderEntry> symbolOrder;
1131 int priority = -sectionOrder.size() - ctx.arg.symbolOrderingFile.size();
1132 for (StringRef s : ctx.arg.symbolOrderingFile)
    symbolOrder.insert({CachedHashStringRef(s), {priority++, false}});
1134
1135 // Build a map from sections to their priorities.
1136 auto addSym = [&](Symbol &sym) {
    auto it = symbolOrder.find(CachedHashStringRef(sym.getName()));
1138 if (it == symbolOrder.end())
1139 return;
1140 SymbolOrderEntry &ent = it->second;
1141 ent.present = true;
1142
    maybeWarnUnorderableSymbol(ctx, &sym);

    if (auto *d = dyn_cast<Defined>(&sym)) {
      if (auto *sec = dyn_cast_or_null<InputSectionBase>(d->section)) {
        int &priority = sectionOrder[cast<InputSectionBase>(sec)];
        priority = std::min(priority, ent.priority);
1149 }
1150 }
1151 };
1152
1153 // We want both global and local symbols. We get the global ones from the
1154 // symbol table and iterate the object files for the local ones.
1155 for (Symbol *sym : ctx.symtab->getSymbols())
1156 addSym(*sym);
1157
1158 for (ELFFileBase *file : ctx.objectFiles)
1159 for (Symbol *sym : file->getLocalSymbols())
1160 addSym(*sym);
1161
1162 if (ctx.arg.warnSymbolOrdering)
1163 for (auto orderEntry : symbolOrder)
1164 if (!orderEntry.second.present)
1165 Warn(ctx) << "symbol ordering file: no such symbol: "
1166 << orderEntry.first.val();
1167
1168 return sectionOrder;
1169}
1170
1171// Sorts the sections in ISD according to the provided section order.
1172static void
1173sortISDBySectionOrder(Ctx &ctx, InputSectionDescription *isd,
1174 const DenseMap<const InputSectionBase *, int> &order,
1175 bool executableOutputSection) {
1176 SmallVector<InputSection *, 0> unorderedSections;
1177 SmallVector<std::pair<InputSection *, int>, 0> orderedSections;
1178 uint64_t unorderedSize = 0;
1179 uint64_t totalSize = 0;
1180
1181 for (InputSection *isec : isd->sections) {
1182 if (executableOutputSection)
1183 totalSize += isec->getSize();
    auto i = order.find(isec);
    if (i == order.end()) {
      unorderedSections.push_back(isec);
      unorderedSize += isec->getSize();
      continue;
    }
    orderedSections.push_back({isec, i->second});
  }
  llvm::sort(orderedSections, llvm::less_second());
1193
1194 // Find an insertion point for the ordered section list in the unordered
1195 // section list. On targets with limited-range branches, this is the mid-point
1196 // of the unordered section list. This decreases the likelihood that a range
1197 // extension thunk will be needed to enter or exit the ordered region. If the
1198 // ordered section list is a list of hot functions, we can generally expect
1199 // the ordered functions to be called more often than the unordered functions,
1200 // making it more likely that any particular call will be within range, and
1201 // therefore reducing the number of thunks required.
1202 //
1203 // For example, imagine that you have 8MB of hot code and 32MB of cold code.
1204 // If the layout is:
1205 //
1206 // 8MB hot
1207 // 32MB cold
1208 //
1209 // only the first 8-16MB of the cold code (depending on which hot function it
1210 // is actually calling) can call the hot code without a range extension thunk.
1211 // However, if we use this layout:
1212 //
1213 // 16MB cold
1214 // 8MB hot
1215 // 16MB cold
1216 //
1217 // both the last 8-16MB of the first block of cold code and the first 8-16MB
1218 // of the second block of cold code can call the hot code without a thunk. So
1219 // we effectively double the amount of code that could potentially call into
1220 // the hot code without a thunk.
1221 //
1222 // The above is not necessary if total size of input sections in this "isd"
1223 // is small. Note that we assume all input sections are executable if the
1224 // output section is executable (which is not always true but supposed to
1225 // cover most cases).
1226 size_t insPt = 0;
1227 if (executableOutputSection && !orderedSections.empty() &&
1228 ctx.target->getThunkSectionSpacing() &&
1229 totalSize >= ctx.target->getThunkSectionSpacing()) {
1230 uint64_t unorderedPos = 0;
1231 for (; insPt != unorderedSections.size(); ++insPt) {
1232 unorderedPos += unorderedSections[insPt]->getSize();
1233 if (unorderedPos > unorderedSize / 2)
1234 break;
1235 }
1236 }
1237
1238 isd->sections.clear();
  for (InputSection *isec : ArrayRef(unorderedSections).slice(0, insPt))
    isd->sections.push_back(isec);
  for (std::pair<InputSection *, int> p : orderedSections)
    isd->sections.push_back(p.first);
  for (InputSection *isec : ArrayRef(unorderedSections).slice(insPt))
    isd->sections.push_back(isec);
1245}
1246
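// Sorts input sections within the output section according to the provided
// order, and applies the default .init_array/.fini_array/.ctors/.dtors and
// PPC64 .toc sorting when no SECTIONS command is used.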
1247static void sortSection(Ctx &ctx, OutputSection &osec,
1248 const DenseMap<const InputSectionBase *, int> &order) {
1249 StringRef name = osec.name;
1250
1251 // Never sort these.
1252 if (name == ".init" || name == ".fini")
1253 return;
1254
1255 // Sort input sections by priority using the list provided by
1256 // --symbol-ordering-file or --shuffle-sections=. This is a least significant
1257 // digit radix sort. The sections may be sorted stably again by a more
1258 // significant key.
1259 if (!order.empty())
1260 for (SectionCommand *b : osec.commands)
      if (auto *isd = dyn_cast<InputSectionDescription>(b))
        sortISDBySectionOrder(ctx, isd, order, osec.flags & SHF_EXECINSTR);
1263
1264 if (ctx.script->hasSectionsCommand)
1265 return;
1266
1267 if (name == ".init_array" || name == ".fini_array") {
1268 osec.sortInitFini();
1269 } else if (name == ".ctors" || name == ".dtors") {
1270 osec.sortCtorsDtors();
1271 } else if (ctx.arg.emachine == EM_PPC64 && name == ".toc") {
1272 // .toc is allocated just after .got and is accessed using GOT-relative
1273 // relocations. Object files compiled with small code model have an
1274 // addressable range of [.got, .got + 0xFFFC] for GOT-relative relocations.
1275 // To reduce the risk of relocation overflow, .toc contents are sorted so
1276 // that sections having smaller relocation offsets are at beginning of .toc
1277 assert(osec.commands.size() == 1);
    auto *isd = cast<InputSectionDescription>(osec.commands[0]);
    llvm::stable_sort(isd->sections,
                      [](const InputSection *a, const InputSection *b) -> bool {
                        return a->file->ppc64SmallCodeModelTocRelocs &&
                               !b->file->ppc64SmallCodeModelTocRelocs;
                      });
1284 }
1285}
1286
1287// Sort sections within each InputSectionDescription.
1288template <class ELFT> void Writer<ELFT>::sortInputSections() {
1289 // Assign negative priorities.
1290 DenseMap<const InputSectionBase *, int> order = buildSectionOrder(ctx);
1291 // Assign non-negative priorities due to --shuffle-sections.
1292 maybeShuffle(ctx, order);
1293 for (SectionCommand *cmd : ctx.script->sectionCommands)
    if (auto *osd = dyn_cast<OutputDesc>(cmd))
      sortSection(ctx, osd->osec, order);
1296}
1297
1298template <class ELFT> void Writer<ELFT>::sortSections() {
1299 llvm::TimeTraceScope timeScope("Sort sections");
1300
1301 // Don't sort if using -r. It is not necessary and we want to preserve the
1302 // relative order for SHF_LINK_ORDER sections.
1303 if (ctx.arg.relocatable) {
1304 ctx.script->adjustOutputSections();
1305 return;
1306 }
1307
1308 sortInputSections();
1309
1310 for (SectionCommand *cmd : ctx.script->sectionCommands)
    if (auto *osd = dyn_cast<OutputDesc>(cmd))
      osd->osec.sortRank = getSectionRank(ctx, osd->osec);
1313 if (!ctx.script->hasSectionsCommand) {
1314 // OutputDescs are mostly contiguous, but may be interleaved with
1315 // SymbolAssignments in the presence of INSERT commands.
1316 auto mid = std::stable_partition(
1317 ctx.script->sectionCommands.begin(), ctx.script->sectionCommands.end(),
        [](SectionCommand *cmd) { return isa<OutputDesc>(cmd); });
1319 std::stable_sort(
1320 ctx.script->sectionCommands.begin(), mid,
1321 [&ctx = ctx](auto *l, auto *r) { return compareSections(ctx, l, r); });
1322 }
1323
1324 // Process INSERT commands and update output section attributes. From this
1325 // point onwards the order of script->sectionCommands is fixed.
1326 ctx.script->processInsertCommands();
1327 ctx.script->adjustOutputSections();
1328
1329 if (ctx.script->hasSectionsCommand)
1330 sortOrphanSections();
1331
1332 ctx.script->adjustSectionsAfterSorting();
1333}
1334
1335template <class ELFT> void Writer<ELFT>::sortOrphanSections() {
1336 // Orphan sections are sections present in the input files which are
1337 // not explicitly placed into the output file by the linker script.
1338 //
1339 // The sections in the linker script are already in the correct
  // order. We have to figure out where to insert the orphan
1341 // sections.
1342 //
1343 // The order of the sections in the script is arbitrary and may not agree with
1344 // compareSections. This means that we cannot easily define a strict weak
1345 // ordering. To see why, consider a comparison of a section in the script and
  // one not in the script. We have two simple options:
1347 // * Make them equivalent (a is not less than b, and b is not less than a).
1348 // The problem is then that equivalence has to be transitive and we can
1349 // have sections a, b and c with only b in a script and a less than c
1350 // which breaks this property.
1351 // * Use compareSectionsNonScript. Given that the script order doesn't have
1352 // to match, we can end up with sections a, b, c, d where b and c are in the
1353 // script and c is compareSectionsNonScript less than b. In which case d
1354 // can be equivalent to c, a to b and d < a. As a concrete example:
1355 // .a (rx) # not in script
1356 // .b (rx) # in script
1357 // .c (ro) # in script
1358 // .d (ro) # not in script
1359 //
1360 // The way we define an order then is:
1361 // * Sort only the orphan sections. They are in the end right now.
1362 // * Move each orphan section to its preferred position. We try
1363 // to put each section in the last position where it can share
1364 // a PT_LOAD.
1365 //
1366 // There is some ambiguity as to where exactly a new entry should be
1367 // inserted, because Commands contains not only output section
1368 // commands but also other types of commands such as symbol assignment
1369 // expressions. There's no correct answer here due to the lack of the
1370 // formal specification of the linker script. We use heuristics to
1371 // determine whether a new output command should be added before or
1372 // after another commands. For the details, look at shouldSkip
1373 // function.
1374
1375 auto i = ctx.script->sectionCommands.begin();
1376 auto e = ctx.script->sectionCommands.end();
1377 auto nonScriptI = std::find_if(i, e, [](SectionCommand *cmd) {
    if (auto *osd = dyn_cast<OutputDesc>(cmd))
1379 return osd->osec.sectionIndex == UINT32_MAX;
1380 return false;
1381 });
1382
1383 // Sort the orphan sections.
1384 std::stable_sort(nonScriptI, e, [&ctx = ctx](auto *l, auto *r) {
1385 return compareSections(ctx, l, r);
1386 });
1387
1388 // As a horrible special case, skip the first . assignment if it is before any
1389 // section. We do this because it is common to set a load address by starting
1390 // the script with ". = 0xabcd" and the expectation is that every section is
1391 // after that.
1392 auto firstSectionOrDotAssignment =
1393 std::find_if(i, e, [](SectionCommand *cmd) { return !shouldSkip(cmd); });
1394 if (firstSectionOrDotAssignment != e &&
1395 isa<SymbolAssignment>(**firstSectionOrDotAssignment))
1396 ++firstSectionOrDotAssignment;
1397 i = firstSectionOrDotAssignment;
1398
1399 while (nonScriptI != e) {
1400 auto pos = findOrphanPos(ctx, i, nonScriptI);
1401 OutputSection *orphan = &cast<OutputDesc>(*nonScriptI)->osec;
1402
1403 // As an optimization, find all sections with the same sort rank
1404 // and insert them with one rotate.
1405 unsigned rank = orphan->sortRank;
1406 auto end = std::find_if(nonScriptI + 1, e, [=](SectionCommand *cmd) {
      return cast<OutputDesc>(cmd)->osec.sortRank != rank;
1408 });
1409 std::rotate(pos, nonScriptI, end);
1410 nonScriptI = end;
1411 }
1412}
1413
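// Orders SHF_LINK_ORDER sections by the position of the sections they point to
// via sh_link.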
1414static bool compareByFilePosition(InputSection *a, InputSection *b) {
1415 InputSection *la = a->flags & SHF_LINK_ORDER ? a->getLinkOrderDep() : nullptr;
1416 InputSection *lb = b->flags & SHF_LINK_ORDER ? b->getLinkOrderDep() : nullptr;
1417 // SHF_LINK_ORDER sections with non-zero sh_link are ordered before
1418 // non-SHF_LINK_ORDER sections and SHF_LINK_ORDER sections with zero sh_link.
1419 if (!la || !lb)
1420 return la && !lb;
1421 OutputSection *aOut = la->getParent();
1422 OutputSection *bOut = lb->getParent();
1423
1424 if (aOut == bOut)
1425 return la->outSecOff < lb->outSecOff;
1426 if (aOut->addr == bOut->addr)
1427 return aOut->sectionIndex < bOut->sectionIndex;
1428 return aOut->addr < bOut->addr;
1429}
1430
1431template <class ELFT> void Writer<ELFT>::resolveShfLinkOrder() {
1432 llvm::TimeTraceScope timeScope("Resolve SHF_LINK_ORDER");
1433 for (OutputSection *sec : ctx.outputSections) {
1434 if (!(sec->flags & SHF_LINK_ORDER))
1435 continue;
1436
    // The .ARM.exidx section uses SHF_LINK_ORDER, but we have consolidated
    // this processing inside ARMExidxSyntheticSection::finalizeContents().
1439 if (!ctx.arg.relocatable && ctx.arg.emachine == EM_ARM &&
1440 sec->type == SHT_ARM_EXIDX)
1441 continue;
1442
1443 // Link order may be distributed across several InputSectionDescriptions;
1444 // sorting is performed separately within each one.
1445 SmallVector<InputSection **, 0> scriptSections;
1446 SmallVector<InputSection *, 0> sections;
1447 for (SectionCommand *cmd : sec->commands) {
1448 auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
1449 if (!isd)
1450 continue;
1451 bool hasLinkOrder = false;
1452 scriptSections.clear();
1453 sections.clear();
1454 for (InputSection *&isec : isd->sections) {
1455 if (isec->flags & SHF_LINK_ORDER) {
1456 InputSection *link = isec->getLinkOrderDep();
1457 if (link && !link->getParent())
1458 ErrAlways(ctx) << isec << ": sh_link points to discarded section "
1459 << link;
1460 hasLinkOrder = true;
1461 }
1462 scriptSections.push_back(Elt: &isec);
1463 sections.push_back(Elt: isec);
1464 }
1465 if (hasLinkOrder && errCount(ctx) == 0) {
1466 llvm::stable_sort(Range&: sections, C: compareByFilePosition);
1467 for (int i = 0, n = sections.size(); i != n; ++i)
1468 *scriptSections[i] = sections[i];
1469 }
1470 }
1471 }
1472}
1473
1474static void finalizeSynthetic(Ctx &ctx, SyntheticSection *sec) {
1475 if (sec && sec->isNeeded() && sec->getParent()) {
1476 llvm::TimeTraceScope timeScope("Finalize synthetic sections", sec->name);
1477 sec->finalizeContents();
1478 }
1479}
1480
1481static bool canInsertPadding(OutputSection *sec) {
1482 StringRef s = sec->name;
1483 return s == ".bss" || s == ".data" || s == ".data.rel.ro" || s == ".lbss" ||
1484 s == ".ldata" || s == ".lrodata" || s == ".ltext" || s == ".rodata" ||
1485 s.starts_with(Prefix: ".text");
1486}
1487
1488static void randomizeSectionPadding(Ctx &ctx) {
1489 std::mt19937 g(*ctx.arg.randomizeSectionPadding);
1490 PhdrEntry *curPtLoad = nullptr;
1491 for (OutputSection *os : ctx.outputSections) {
1492 if (!canInsertPadding(sec: os))
1493 continue;
1494 for (SectionCommand *bc : os->commands) {
1495 if (auto *isd = dyn_cast<InputSectionDescription>(Val: bc)) {
1496 SmallVector<InputSection *, 0> tmp;
1497 if (os->ptLoad != curPtLoad) {
1498 tmp.push_back(
1499 Elt: make<PaddingSection>(args&: ctx, args: g() % ctx.arg.maxPageSize, args&: os));
1500 curPtLoad = os->ptLoad;
1501 }
1502 for (InputSection *isec : isd->sections) {
1503 // Probability of inserting padding is 1 in 16.
1504 if (g() % 16 == 0)
1505 tmp.push_back(Elt: make<PaddingSection>(args&: ctx, args&: isec->addralign, args&: os));
1506 tmp.push_back(Elt: isec);
1507 }
1508 isd->sections = std::move(tmp);
1509 }
1510 }
1511 }
1512}
1513
1514// We need to generate and finalize the content that depends on the address of
1515// InputSections. As the generation of the content may also alter InputSection
1516 // addresses, we must converge to a fixed point. We do that here. See the comment
1517// in Writer<ELFT>::finalizeSections().
1518template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
1519 llvm::TimeTraceScope timeScope("Finalize address dependent content");
1520 AArch64Err843419Patcher a64p(ctx);
1521 ARMErr657417Patcher a32p(ctx);
1522 ctx.script->assignAddresses();
1523
1524 // .ARM.exidx and SHF_LINK_ORDER do not require precise addresses, but they
1525 // do require the relative addresses of OutputSections because linker scripts
1526 // can assign Virtual Addresses to OutputSections that are not monotonically
1527 // increasing. Anything here must be repeatable, since spilling may change
1528 // section order.
1529 const auto finalizeOrderDependentContent = [this] {
1530 for (Partition &part : ctx.partitions)
1531 finalizeSynthetic(ctx, sec: part.armExidx.get());
1532 resolveShfLinkOrder();
1533 };
1534 finalizeOrderDependentContent();
1535
1536 // Converts call x@GDPLT to call __tls_get_addr
1537 if (ctx.arg.emachine == EM_HEXAGON)
1538 hexagonTLSSymbolUpdate(ctx);
1539
1540 if (ctx.arg.randomizeSectionPadding)
1541 randomizeSectionPadding(ctx);
1542
1543 // Iterate until a fixed point is reached, skipping relocatable links since
1544 // the final addresses are unavailable.
1545 uint32_t pass = 0, assignPasses = 0;
1546 while (!ctx.arg.relocatable) {
1547 bool changed = ctx.target->needsThunks
1548 ? tc.createThunks(pass, outputSections: ctx.outputSections)
1549 : ctx.target->relaxOnce(pass);
1550 bool spilled = ctx.script->spillSections();
1551 changed |= spilled;
1552 ++pass;
1553
1554 // Since thunk sizes are much smaller than branch ranges, we expect to
1555 // converge quickly; if we reach 30 passes, something has gone wrong.
1556 if (changed && pass >= 30) {
1557 Err(ctx) << "address assignment did not converge";
1558 break;
1559 }
1560
1561 if (ctx.arg.fixCortexA53Errata843419) {
1562 if (changed)
1563 ctx.script->assignAddresses();
1564 changed |= a64p.createFixes();
1565 }
1566 if (ctx.arg.fixCortexA8) {
1567 if (changed)
1568 ctx.script->assignAddresses();
1569 changed |= a32p.createFixes();
1570 }
1571
1572 finalizeSynthetic(ctx, sec: ctx.in.got.get());
1573 if (ctx.in.mipsGot)
1574 ctx.in.mipsGot->updateAllocSize(ctx);
1575
1576 for (Partition &part : ctx.partitions) {
1577 // R_AARCH64_AUTH_RELATIVE has a smaller addend field because bits [63:32]
1578 // encode the signing schema. We've put relocations in .relr.auth.dyn
1579 // during RelocationScanner::processAux, but the target VA for some of
1580 // them might be wider than 32 bits. We can only know the final VA at this
1581 // point, so move relocations with large values from .relr.auth.dyn to
1582 // .rela.dyn. See also AArch64::relocate.
1583 if (part.relrAuthDyn) {
1584 auto it = llvm::remove_if(
1585 part.relrAuthDyn->relocs, [this, &part](const RelativeReloc &elem) {
1586 Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
1587 if (isInt<32>(x: reloc.sym->getVA(ctx, addend: reloc.addend)))
1588 return false;
1589 reloc.expr = R_NONE;
1590 part.relaDyn->addReloc(reloc: {R_AARCH64_AUTH_RELATIVE, elem.inputSec,
1591 reloc.offset, false, *reloc.sym,
1592 reloc.addend, R_ABS});
1593 return true;
1594 });
1595 changed |= (it != part.relrAuthDyn->relocs.end());
1596 part.relrAuthDyn->relocs.erase(it, part.relrAuthDyn->relocs.end());
1597 }
1598 if (part.relaDyn)
1599 changed |= part.relaDyn->updateAllocSize(ctx);
1600 if (part.relrDyn)
1601 changed |= part.relrDyn->updateAllocSize(ctx);
1602 if (part.relrAuthDyn)
1603 changed |= part.relrAuthDyn->updateAllocSize(ctx);
1604 if (part.memtagGlobalDescriptors)
1605 changed |= part.memtagGlobalDescriptors->updateAllocSize(ctx);
1606 }
1607
1608 std::pair<const OutputSection *, const Defined *> changes =
1609 ctx.script->assignAddresses();
1610 if (!changed) {
1611 // Some symbols may be dependent on section addresses. When we break the
1612 // loop, the symbol values are finalized because a previous
1613 // assignAddresses() finalized section addresses.
1614 if (!changes.first && !changes.second)
1615 break;
1616 if (++assignPasses == 5) {
1617 if (changes.first)
1618 Err(ctx) << "address (0x" << Twine::utohexstr(Val: changes.first->addr)
1619 << ") of section '" << changes.first->name
1620 << "' does not converge";
1621 if (changes.second)
1622 Err(ctx) << "assignment to symbol " << changes.second
1623 << " does not converge";
1624 break;
1625 }
1626 } else if (spilled) {
1627 // Spilling can change relative section order.
1628 finalizeOrderDependentContent();
1629 }
1630 }
1631 if (!ctx.arg.relocatable)
1632 ctx.target->finalizeRelax(passes: pass);
1633
1634 if (ctx.arg.relocatable)
1635 for (OutputSection *sec : ctx.outputSections)
1636 sec->addr = 0;
1637
1638 uint64_t imageBase = ctx.script->hasSectionsCommand || ctx.arg.relocatable
1639 ? 0
1640 : ctx.target->getImageBase();
1641 for (SectionCommand *cmd : ctx.script->sectionCommands) {
1642 auto *osd = dyn_cast<OutputDesc>(Val: cmd);
1643 if (!osd)
1644 continue;
1645 OutputSection *osec = &osd->osec;
1646 // Error if the address is below the image base when SECTIONS is absent
1647 // (e.g. when -Ttext is specified and smaller than the default target image
1648 // base for no-pie).
1649 if (osec->addr < imageBase && (osec->flags & SHF_ALLOC)) {
1650 Err(ctx) << "section '" << osec->name << "' address (0x"
1651 << Twine::utohexstr(Val: osec->addr)
1652 << ") is smaller than image base (0x"
1653 << Twine::utohexstr(Val: imageBase) << "); specify --image-base";
1654 }
1655
1656 // If addrExpr is set, the address may not be a multiple of the alignment.
1657 // Warn because this is error-prone.
1658 if (osec->addr % osec->addralign != 0)
1659 Warn(ctx) << "address (0x" << Twine::utohexstr(Val: osec->addr)
1660 << ") of section " << osec->name
1661 << " is not a multiple of alignment (" << osec->addralign
1662 << ")";
1663 }
1664
1665 // Sizes are no longer allowed to grow, so all allowable spills have been
1666 // taken. Remove any leftover potential spills.
1667 ctx.script->erasePotentialSpillSections();
1668}
1669
1670 // If input sections have been shrunk (basic block sections), then update
1671 // the symbol values and sizes associated with these sections. With basic
1672// block sections, input sections can shrink when the jump instructions at
1673// the end of the section are relaxed.
1674static void fixSymbolsAfterShrinking(Ctx &ctx) {
1675 for (InputFile *File : ctx.objectFiles) {
1676 parallelForEach(R: File->getSymbols(), Fn: [&](Symbol *Sym) {
1677 auto *def = dyn_cast<Defined>(Val: Sym);
1678 if (!def)
1679 return;
1680
1681 const SectionBase *sec = def->section;
1682 if (!sec)
1683 return;
1684
1685 const InputSectionBase *inputSec = dyn_cast<InputSectionBase>(Val: sec);
1686 if (!inputSec || !inputSec->bytesDropped)
1687 return;
1688
1689 const size_t OldSize = inputSec->content().size();
1690 const size_t NewSize = OldSize - inputSec->bytesDropped;
1691
1692 if (def->value > NewSize && def->value <= OldSize) {
1693 LLVM_DEBUG(llvm::dbgs()
1694 << "Moving symbol " << Sym->getName() << " from "
1695 << def->value << " to "
1696 << def->value - inputSec->bytesDropped << " bytes\n");
1697 def->value -= inputSec->bytesDropped;
1698 return;
1699 }
1700
1701 if (def->value + def->size > NewSize && def->value <= OldSize &&
1702 def->value + def->size <= OldSize) {
1703 LLVM_DEBUG(llvm::dbgs()
1704 << "Shrinking symbol " << Sym->getName() << " from "
1705 << def->size << " to " << def->size - inputSec->bytesDropped
1706 << " bytes\n");
1707 def->size -= inputSec->bytesDropped;
1708 }
1709 });
1710 }
1711}
1712
1713 // If basic block sections exist, there are opportunities to delete fall-through
1714 // jumps and shrink jump instructions after basic block reordering. This
1715 // relaxation pass does that. It is only enabled when the --optimize-bb-jumps
1716 // option is used.
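 // A minimal illustration: if an input section ends with an unconditional jump
 // to the section that reordering happens to place immediately after it, that
 // jump is a fall-through and can be deleted, shrinking the section.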
1717template <class ELFT> void Writer<ELFT>::optimizeBasicBlockJumps() {
1718 assert(ctx.arg.optimizeBBJumps);
1719 SmallVector<InputSection *, 0> storage;
1720
1721 ctx.script->assignAddresses();
1722 // For every output section that has executable input sections, this
1723 // does the following:
1724 // 1. Deletes all direct jump instructions in input sections that
1725 // jump to the following section, as they are not required.
1726 // 2. If there are two consecutive jump instructions, it checks
1727 // if they can be flipped and one can be deleted.
1728 for (OutputSection *osec : ctx.outputSections) {
1729 if (!(osec->flags & SHF_EXECINSTR))
1730 continue;
1731 ArrayRef<InputSection *> sections = getInputSections(os: *osec, storage);
1732 size_t numDeleted = 0;
1733 // Delete all fall through jump instructions. Also, check if two
1734 // consecutive jump instructions can be flipped so that a fall
1735 // through jmp instruction can be deleted.
1736 for (size_t i = 0, e = sections.size(); i != e; ++i) {
1737 InputSection *next = i + 1 < sections.size() ? sections[i + 1] : nullptr;
1738 InputSection &sec = *sections[i];
1739 numDeleted += ctx.target->deleteFallThruJmpInsn(is&: sec, file: sec.file, nextIS: next);
1740 }
1741 if (numDeleted > 0) {
1742 ctx.script->assignAddresses();
1743 LLVM_DEBUG(llvm::dbgs()
1744 << "Removing " << numDeleted << " fall through jumps\n");
1745 }
1746 }
1747
1748 fixSymbolsAfterShrinking(ctx);
1749
1750 for (OutputSection *osec : ctx.outputSections)
1751 for (InputSection *is : getInputSections(os: *osec, storage))
1752 is->trim();
1753}
1754
1755// In order to allow users to manipulate linker-synthesized sections,
1756// we had to add synthetic sections to the input section list early,
1757// even before we make decisions whether they are needed. This allows
1758// users to write scripts like this: ".mygot : { .got }".
1759//
1760 // Doing this has an unintended side effect. If it turns out that we
1761// don't need a .got (for example) at all because there's no
1762// relocation that needs a .got, we don't want to emit .got.
1763//
1764// To deal with the above problem, this function is called after
1765// scanRelocations is called to remove synthetic sections that turn
1766// out to be empty.
1767static void removeUnusedSyntheticSections(Ctx &ctx) {
1768 // All input synthetic sections that can be empty are placed after
1769 // all regular ones. Reverse iterate to find the first synthetic section
1770 // after a non-synthetic one which will be our starting point.
1771 auto start =
1772 llvm::find_if(Range: llvm::reverse(C&: ctx.inputSections), P: [](InputSectionBase *s) {
1773 return !isa<SyntheticSection>(Val: s);
1774 }).base();
1775
1776 // Remove unused synthetic sections from ctx.inputSections;
1777 DenseSet<InputSectionBase *> unused;
1778 auto end =
1779 std::remove_if(first: start, last: ctx.inputSections.end(), pred: [&](InputSectionBase *s) {
1780 auto *sec = cast<SyntheticSection>(Val: s);
1781 if (sec->getParent() && sec->isNeeded())
1782 return false;
1783 // .relr.auth.dyn relocations may be moved to .rela.dyn in
1784 // finalizeAddressDependentContent, making .rela.dyn no longer empty.
1785 // Conservatively keep .rela.dyn. .relr.auth.dyn can be made empty, but
1786 // we would fail to remove it here.
1787 if (ctx.arg.emachine == EM_AARCH64 && ctx.arg.relrPackDynRelocs &&
1788 sec == ctx.mainPart->relaDyn.get())
1789 return false;
1790 unused.insert(V: sec);
1791 return true;
1792 });
1793 ctx.inputSections.erase(CS: end, CE: ctx.inputSections.end());
1794
1795 // Remove unused synthetic sections from the corresponding input section
1796 // description and orphanSections.
1797 for (auto *sec : unused)
1798 if (OutputSection *osec = cast<SyntheticSection>(Val: sec)->getParent())
1799 for (SectionCommand *cmd : osec->commands)
1800 if (auto *isd = dyn_cast<InputSectionDescription>(Val: cmd))
1801 llvm::erase_if(C&: isd->sections, P: [&](InputSection *isec) {
1802 return unused.contains(V: isec);
1803 });
1804 llvm::erase_if(C&: ctx.script->orphanSections, P: [&](const InputSectionBase *sec) {
1805 return unused.contains(V: sec);
1806 });
1807}
1808
1809// Create output section objects and add them to OutputSections.
1810template <class ELFT> void Writer<ELFT>::finalizeSections() {
1811 if (!ctx.arg.relocatable) {
1812 ctx.out.preinitArray = findSection(ctx, name: ".preinit_array");
1813 ctx.out.initArray = findSection(ctx, name: ".init_array");
1814 ctx.out.finiArray = findSection(ctx, name: ".fini_array");
1815
1816 // The linker needs to define SECNAME_start/SECNAME_end (and, for C-identifier
1817 // section names, __start_SECNAME/__stop_SECNAME) symbols so that the runtime
1818 // can get the start and end addresses of each section by name. Add such symbols.
1819 addStartEndSymbols();
1820 for (SectionCommand *cmd : ctx.script->sectionCommands)
1821 if (auto *osd = dyn_cast<OutputDesc>(Val: cmd))
1822 addStartStopSymbols(osec&: osd->osec);
1823
1824 // Add _DYNAMIC symbol. Unlike GNU gold, our _DYNAMIC symbol has no type.
1825 // It should be okay as no one seems to care about the type.
1826 // Even the author of gold doesn't remember why gold behaves that way.
1827 // https://sourceware.org/ml/binutils/2002-03/msg00360.html
1828 if (ctx.mainPart->dynamic->parent) {
1829 Symbol *s = ctx.symtab->addSymbol(newSym: Defined{
1830 ctx, ctx.internalFile, "_DYNAMIC", STB_WEAK, STV_HIDDEN, STT_NOTYPE,
1831 /*value=*/0, /*size=*/0, ctx.mainPart->dynamic.get()});
1832 s->isUsedInRegularObj = true;
1833 }
1834
1835 // Define __rel[a]_iplt_{start,end} symbols if needed.
1836 addRelIpltSymbols();
1837
1838 // RISC-V's gp can address +/- 2 KiB, so set it to .sdata + 0x800. This symbol
1839 // should only be defined in an executable. If .sdata does not exist, its
1840 // value/section does not matter but it has to be relative, so set its
1841 // st_shndx arbitrarily to 1 (ctx.out.elfHeader).
1842 if (ctx.arg.emachine == EM_RISCV) {
1843 if (!ctx.arg.shared) {
1844 OutputSection *sec = findSection(ctx, name: ".sdata");
1845 addOptionalRegular(ctx, name: "__global_pointer$",
1846 sec: sec ? sec : ctx.out.elfHeader.get(), val: 0x800,
1847 stOther: STV_DEFAULT);
1848 // Set riscvGlobalPointer to be used by the optional global pointer
1849 // relaxation.
1850 if (ctx.arg.relaxGP) {
1851 Symbol *s = ctx.symtab->find(name: "__global_pointer$");
1852 if (s && s->isDefined())
1853 ctx.sym.riscvGlobalPointer = cast<Defined>(Val: s);
1854 }
1855 }
1856 }
1857
1858 if (ctx.arg.emachine == EM_386 || ctx.arg.emachine == EM_X86_64) {
1859 // On targets that support TLSDESC, _TLS_MODULE_BASE_ is defined in such a
1860 // way that:
1861 //
1862 // 1) Without relaxation: it produces a dynamic TLSDESC relocation that
1863 // computes 0.
1864 // 2) With LD->LE relaxation: _TLS_MODULE_BASE_@tpoff = 0 (lowest address
1865 // in the TLS block).
1866 //
1867 // 2) is special cased in @tpoff computation. To satisfy 1), we define it
1868 // as an absolute symbol of zero. This is different from GNU linkers which
1869 // define _TLS_MODULE_BASE_ relative to the first TLS section.
1870 Symbol *s = ctx.symtab->find(name: "_TLS_MODULE_BASE_");
1871 if (s && s->isUndefined()) {
1872 s->resolve(ctx, other: Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
1873 STV_HIDDEN, STT_TLS, /*value=*/0, 0,
1874 /*section=*/nullptr});
1875 ctx.sym.tlsModuleBase = cast<Defined>(Val: s);
1876 }
1877 }
1878
1879 // This is responsible for splitting up the .eh_frame section into
1880 // pieces. The relocation scan uses those pieces, so this has to happen
1881 // earlier.
1882 {
1883 llvm::TimeTraceScope timeScope("Finalize .eh_frame");
1884 for (Partition &part : ctx.partitions)
1885 finalizeSynthetic(ctx, sec: part.ehFrame.get());
1886 }
1887 }
1888
1889 // If the previous code block defines any non-hidden symbols (e.g.
1890 // __global_pointer$), they may be exported.
1891 if (ctx.arg.exportDynamic)
1892 for (Symbol *sym : ctx.synthesizedSymbols)
1893 if (sym->computeBinding(ctx) != STB_LOCAL)
1894 sym->isExported = true;
1895
1896 demoteSymbolsAndComputeIsPreemptible(ctx);
1897
1898 if (ctx.arg.copyRelocs && ctx.arg.discard != DiscardPolicy::None)
1899 markUsedLocalSymbols<ELFT>(ctx);
1900 demoteAndCopyLocalSymbols(ctx);
1901
1902 if (ctx.arg.copyRelocs)
1903 addSectionSymbols();
1904
1905 // Change values of linker-script-defined symbols from placeholders (assigned
1906 // by declareSymbols) to actual definitions.
1907 ctx.script->processSymbolAssignments();
1908
1909 if (!ctx.arg.relocatable) {
1910 llvm::TimeTraceScope timeScope("Scan relocations");
1911 // Scan relocations. This must be done after every symbol is declared so
1912 // that we can correctly decide if a dynamic relocation is needed. This is
1913 // called after processSymbolAssignments() because it needs to know whether
1914 // a linker-script-defined symbol is absolute.
1915 scanRelocations<ELFT>(ctx);
1916 reportUndefinedSymbols(ctx);
1917 postScanRelocations(ctx);
1918
1919 if (ctx.in.plt && ctx.in.plt->isNeeded())
1920 ctx.in.plt->addSymbols();
1921 if (ctx.in.iplt && ctx.in.iplt->isNeeded())
1922 ctx.in.iplt->addSymbols();
1923
1924 if (ctx.arg.unresolvedSymbolsInShlib != UnresolvedPolicy::Ignore) {
1925 auto diag =
1926 ctx.arg.unresolvedSymbolsInShlib == UnresolvedPolicy::ReportError &&
1927 !ctx.arg.noinhibitExec
1928 ? DiagLevel::Err
1929 : DiagLevel::Warn;
1930 // Error on undefined symbols in a shared object, if all of its DT_NEEDED
1931 // entries are seen. These cases would otherwise lead to runtime errors
1932 // reported by the dynamic linker.
1933 //
1934 // ld.bfd traces all DT_NEEDED to emulate the logic of the dynamic linker
1935 // to catch more cases. That is too much for us. Our approach resembles
1936 // the one used in ld.gold; it achieves a good balance of being useful but
1937 // not too smart.
1938 //
1939 // If a DSO reference is resolved by a SharedSymbol, but the SharedSymbol
1940 // is overridden by a hidden visibility Defined (which is later discarded
1941 // due to GC), don't report the diagnostic. However, this may indicate an
1942 // unintended SharedSymbol.
1943 for (SharedFile *file : ctx.sharedFiles) {
1944 bool allNeededIsKnown =
1945 llvm::all_of(file->dtNeeded, [&](StringRef needed) {
1946 return ctx.symtab->soNames.contains(Val: CachedHashStringRef(needed));
1947 });
1948 if (!allNeededIsKnown)
1949 continue;
1950 for (Symbol *sym : file->requiredSymbols) {
1951 if (sym->dsoDefined)
1952 continue;
1953 if (sym->isUndefined() && !sym->isWeak()) {
1954 ELFSyncStream(ctx, diag)
1955 << "undefined reference: " << sym << "\n>>> referenced by "
1956 << file << " (disallowed by --no-allow-shlib-undefined)";
1957 } else if (sym->isDefined() &&
1958 sym->computeBinding(ctx) == STB_LOCAL) {
1959 ELFSyncStream(ctx, diag)
1960 << "non-exported symbol '" << sym << "' in '" << sym->file
1961 << "' is referenced by DSO '" << file << "'";
1962 }
1963 }
1964 }
1965 }
1966 }
1967
1968 {
1969 llvm::TimeTraceScope timeScope("Add symbols to symtabs");
1970 // Now that we have defined all possible global symbols, including linker-
1971 // synthesized ones, visit all symbols to give them the finishing touches.
1972 for (Symbol *sym : ctx.symtab->getSymbols()) {
1973 if (!sym->isUsedInRegularObj || !includeInSymtab(ctx, b: *sym))
1974 continue;
1975 if (!ctx.arg.relocatable)
1976 sym->binding = sym->computeBinding(ctx);
1977 if (ctx.in.symTab)
1978 ctx.in.symTab->addSymbol(sym);
1979
1980 // computeBinding might localize a symbol that was considered exported
1981 // but then synthesized as hidden (e.g. _DYNAMIC).
1982 if ((sym->isExported || sym->isPreemptible) && !sym->isLocal()) {
1983 ctx.partitions[sym->partition - 1].dynSymTab->addSymbol(sym);
1984 if (auto *file = dyn_cast<SharedFile>(Val: sym->file))
1985 if (file->isNeeded && !sym->isUndefined())
1986 addVerneed(ctx, ss&: *sym);
1987 }
1988 }
1989
1990 // We also need to scan the dynamic relocation tables of the other
1991 // partitions and add any referenced symbols to the partition's dynsym.
1992 for (Partition &part :
1993 MutableArrayRef<Partition>(ctx.partitions).slice(N: 1)) {
1994 DenseSet<Symbol *> syms;
1995 for (const SymbolTableEntry &e : part.dynSymTab->getSymbols())
1996 syms.insert(V: e.sym);
1997 for (DynamicReloc &reloc : part.relaDyn->relocs)
1998 if (reloc.sym && reloc.needsDynSymIndex() &&
1999 syms.insert(V: reloc.sym).second)
2000 part.dynSymTab->addSymbol(sym: reloc.sym);
2001 }
2002 }
2003
2004 if (ctx.in.mipsGot)
2005 ctx.in.mipsGot->build();
2006
2007 removeUnusedSyntheticSections(ctx);
2008 ctx.script->diagnoseOrphanHandling();
2009 ctx.script->diagnoseMissingSGSectionAddress();
2010
2011 sortSections();
2012
2013 // Create a list of OutputSections, assign sectionIndex, and populate
2014 // ctx.in.shStrTab. If -z nosectionheader is specified, drop non-ALLOC
2015 // sections.
2016 for (SectionCommand *cmd : ctx.script->sectionCommands)
2017 if (auto *osd = dyn_cast<OutputDesc>(Val: cmd)) {
2018 OutputSection *osec = &osd->osec;
2019 if (!ctx.in.shStrTab && !(osec->flags & SHF_ALLOC))
2020 continue;
2021 ctx.outputSections.push_back(Elt: osec);
2022 osec->sectionIndex = ctx.outputSections.size();
2023 if (ctx.in.shStrTab)
2024 osec->shName = ctx.in.shStrTab->addString(s: osec->name);
2025 }
2026
2027 // Prefer command line supplied address over other constraints.
2028 for (OutputSection *sec : ctx.outputSections) {
2029 auto i = ctx.arg.sectionStartMap.find(Key: sec->name);
2030 if (i != ctx.arg.sectionStartMap.end())
2031 sec->addrExpr = [=] { return i->second; };
2032 }
2033
2034 // With the ctx.outputSections available check for GDPLT relocations
2035 // and add __tls_get_addr symbol if needed.
2036 if (ctx.arg.emachine == EM_HEXAGON &&
2037 hexagonNeedsTLSSymbol(outputSections: ctx.outputSections)) {
2038 Symbol *sym =
2039 ctx.symtab->addSymbol(newSym: Undefined{ctx.internalFile, "__tls_get_addr",
2040 STB_GLOBAL, STV_DEFAULT, STT_NOTYPE});
2041 sym->isPreemptible = true;
2042 ctx.partitions[0].dynSymTab->addSymbol(sym);
2043 }
2044
2045 // This is a bit of a hack. A value of 0 means undef, so we set it
2046 // to 1 to make __ehdr_start defined. The section number is not
2047 // particularly relevant.
2048 ctx.out.elfHeader->sectionIndex = 1;
2049 ctx.out.elfHeader->size = sizeof(typename ELFT::Ehdr);
2050
2051 // Binary and relocatable output does not have PHDRS.
2052 // The headers have to be created before finalization, as they can influence
2053 // the image base, and the dynamic section on MIPS includes the image base.
2054 if (!ctx.arg.relocatable && !ctx.arg.oFormatBinary) {
2055 for (Partition &part : ctx.partitions) {
2056 part.phdrs = ctx.script->hasPhdrsCommands() ? ctx.script->createPhdrs()
2057 : createPhdrs(part);
2058 if (ctx.arg.emachine == EM_ARM) {
2059 // PT_ARM_EXIDX is the ARM EHABI equivalent of PT_GNU_EH_FRAME
2060 addPhdrForSection(part, shType: SHT_ARM_EXIDX, pType: PT_ARM_EXIDX, pFlags: PF_R);
2061 }
2062 if (ctx.arg.emachine == EM_MIPS) {
2063 // Add separate segments for MIPS-specific sections.
2064 addPhdrForSection(part, shType: SHT_MIPS_REGINFO, pType: PT_MIPS_REGINFO, pFlags: PF_R);
2065 addPhdrForSection(part, shType: SHT_MIPS_OPTIONS, pType: PT_MIPS_OPTIONS, pFlags: PF_R);
2066 addPhdrForSection(part, shType: SHT_MIPS_ABIFLAGS, pType: PT_MIPS_ABIFLAGS, pFlags: PF_R);
2067 }
2068 if (ctx.arg.emachine == EM_RISCV)
2069 addPhdrForSection(part, shType: SHT_RISCV_ATTRIBUTES, pType: PT_RISCV_ATTRIBUTES,
2070 pFlags: PF_R);
2071 }
2072 ctx.out.programHeaders->size =
2073 sizeof(Elf_Phdr) * ctx.mainPart->phdrs.size();
2074
2075 // Find the TLS segment. This happens before the section layout loop so that
2076 // Android relocation packing can look up TLS symbol addresses. We only need
2077 // to care about the main partition here because all TLS symbols were moved
2078 // to the main partition (see MarkLive.cpp).
2079 for (auto &p : ctx.mainPart->phdrs)
2080 if (p->p_type == PT_TLS)
2081 ctx.tlsPhdr = p.get();
2082 }
2083
2084 // Some symbols are defined in terms of program headers. Now that we
2085 // have the headers, we can find out which sections they point to.
2086 setReservedSymbolSections();
2087
2088 if (ctx.script->noCrossRefs.size()) {
2089 llvm::TimeTraceScope timeScope("Check NOCROSSREFS");
2090 checkNoCrossRefs<ELFT>(ctx);
2091 }
2092
2093 {
2094 llvm::TimeTraceScope timeScope("Finalize synthetic sections");
2095
2096 finalizeSynthetic(ctx, sec: ctx.in.bss.get());
2097 finalizeSynthetic(ctx, sec: ctx.in.bssRelRo.get());
2098 finalizeSynthetic(ctx, sec: ctx.in.symTabShndx.get());
2099 finalizeSynthetic(ctx, sec: ctx.in.shStrTab.get());
2100 finalizeSynthetic(ctx, sec: ctx.in.strTab.get());
2101 finalizeSynthetic(ctx, sec: ctx.in.got.get());
2102 finalizeSynthetic(ctx, sec: ctx.in.mipsGot.get());
2103 finalizeSynthetic(ctx, sec: ctx.in.igotPlt.get());
2104 finalizeSynthetic(ctx, sec: ctx.in.gotPlt.get());
2105 finalizeSynthetic(ctx, sec: ctx.in.relaPlt.get());
2106 finalizeSynthetic(ctx, sec: ctx.in.plt.get());
2107 finalizeSynthetic(ctx, sec: ctx.in.iplt.get());
2108 finalizeSynthetic(ctx, sec: ctx.in.ppc32Got2.get());
2109 finalizeSynthetic(ctx, sec: ctx.in.partIndex.get());
2110
2111 // Dynamic section must be the last one in this list and dynamic
2112 // symbol table section (dynSymTab) must be the first one.
2113 for (Partition &part : ctx.partitions) {
2114 finalizeSynthetic(ctx, sec: part.relaDyn.get());
2115 finalizeSynthetic(ctx, sec: part.relrDyn.get());
2116 finalizeSynthetic(ctx, sec: part.relrAuthDyn.get());
2117
2118 finalizeSynthetic(ctx, sec: part.dynSymTab.get());
2119 finalizeSynthetic(ctx, sec: part.gnuHashTab.get());
2120 finalizeSynthetic(ctx, sec: part.hashTab.get());
2121 finalizeSynthetic(ctx, sec: part.verDef.get());
2122 finalizeSynthetic(ctx, sec: part.ehFrameHdr.get());
2123 finalizeSynthetic(ctx, sec: part.verSym.get());
2124 finalizeSynthetic(ctx, sec: part.verNeed.get());
2125 finalizeSynthetic(ctx, sec: part.dynamic.get());
2126 }
2127 }
2128
2129 if (!ctx.script->hasSectionsCommand && !ctx.arg.relocatable)
2130 fixSectionAlignments();
2131
2132 // This is used to:
2133 // 1) Create "thunks":
2134 // Jump instructions in many ISAs have small displacements, and therefore
2135 // they cannot jump to arbitrary addresses in memory. For example, RISC-V
2136 // JAL instruction can target only +-1 MiB from PC. It is a linker's
2137 // responsibility to create and insert small pieces of code between
2138 // sections to extend the ranges if jump targets are out of range. Such
2139 // code pieces are called "thunks".
2140 //
2141 // We add thunks at this stage. We couldn't do this before this point
2142 // because this is the earliest point where we know sizes of sections and
2143 // their layouts (that are needed to determine if jump targets are in
2144 // range).
2145 //
2146 // 2) Update the sections. We need to generate content that depends on the
2147 // address of InputSections. For example, MIPS GOT section content or
2148 // android packed relocations sections content.
2149 //
2150 // 3) Assign the final values for the linker script symbols. Linker scripts
2151 // sometimes use forward symbol declarations. We want to set the correct
2152 // values. They also might change after adding the thunks.
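 // Illustrative example (AArch64): a `bl` branch can reach roughly +/-128 MiB.
 // If a call target ends up farther away after layout, a small thunk that
 // materializes the full target address and branches to it is inserted between
 // sections, and the original call is redirected to the thunk.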
2153 finalizeAddressDependentContent();
2154
2155 // All information needed for OutputSection part of Map file is available.
2156 if (errCount(ctx))
2157 return;
2158
2159 {
2160 llvm::TimeTraceScope timeScope("Finalize synthetic sections");
2161 // finalizeAddressDependentContent may have added local symbols to the
2162 // static symbol table.
2163 finalizeSynthetic(ctx, sec: ctx.in.symTab.get());
2164 finalizeSynthetic(ctx, sec: ctx.in.debugNames.get());
2165 finalizeSynthetic(ctx, sec: ctx.in.ppc64LongBranchTarget.get());
2166 finalizeSynthetic(ctx, sec: ctx.in.armCmseSGSection.get());
2167 }
2168
2169 // Relaxation to delete inter-basic block jumps created by basic block
2170 // sections. Run after ctx.in.symTab is finalized as optimizeBasicBlockJumps
2171 // can relax jump instructions based on symbol offset.
2172 if (ctx.arg.optimizeBBJumps)
2173 optimizeBasicBlockJumps();
2174
2175 // Fill other section headers. The dynamic table is finalized
2176 // at the end because some tags like RELSZ depend on result
2177 // of finalizing other sections.
2178 for (OutputSection *sec : ctx.outputSections)
2179 sec->finalize(ctx);
2180
2181 ctx.script->checkFinalScriptConditions();
2182
2183 if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8) {
2184 addArmInputSectionMappingSymbols(ctx);
2185 sortArmMappingSymbols(ctx);
2186 }
2187}
2188
2189// Ensure data sections are not mixed with executable sections when
2190 // --execute-only is used. --execute-only makes pages executable but not
2191// readable.
2192template <class ELFT> void Writer<ELFT>::checkExecuteOnly() {
2193 if (!ctx.arg.executeOnly)
2194 return;
2195
2196 SmallVector<InputSection *, 0> storage;
2197 for (OutputSection *osec : ctx.outputSections)
2198 if (osec->flags & SHF_EXECINSTR)
2199 for (InputSection *isec : getInputSections(os: *osec, storage))
2200 if (!(isec->flags & SHF_EXECINSTR))
2201 ErrAlways(ctx) << "cannot place " << isec << " into " << osec->name
2202 << ": --execute-only does not support intermingling "
2203 "data and code";
2204}
2205
2206// Check which input sections of RX output sections don't have the
2207// SHF_AARCH64_PURECODE or SHF_ARM_PURECODE flag set.
2208template <class ELFT> void Writer<ELFT>::checkExecuteOnlyReport() {
2209 if (ctx.arg.zExecuteOnlyReport == ReportPolicy::None)
2210 return;
2211
2212 auto reportUnless = [&](bool cond) -> ELFSyncStream {
2213 if (cond)
2214 return {ctx, DiagLevel::None};
2215 return {ctx, toDiagLevel(policy: ctx.arg.zExecuteOnlyReport)};
2216 };
2217
2218 uint64_t purecodeFlag =
2219 ctx.arg.emachine == EM_AARCH64 ? SHF_AARCH64_PURECODE : SHF_ARM_PURECODE;
2220 StringRef purecodeFlagName = ctx.arg.emachine == EM_AARCH64
2221 ? "SHF_AARCH64_PURECODE"
2222 : "SHF_ARM_PURECODE";
2223 SmallVector<InputSection *, 0> storage;
2224 for (OutputSection *osec : ctx.outputSections) {
2225 if (osec->getPhdrFlags() != (PF_R | PF_X))
2226 continue;
2227 for (InputSection *sec : getInputSections(os: *osec, storage)) {
2228 if (isa<SyntheticSection>(Val: sec))
2229 continue;
2230 reportUnless(sec->flags & purecodeFlag)
2231 << "-z execute-only-report: " << sec << " does not have "
2232 << purecodeFlagName << " flag set";
2233 }
2234 }
2235}
2236
2237// The linker is expected to define SECNAME_start and SECNAME_end
2238// symbols for a few sections. This function defines them.
2239template <class ELFT> void Writer<ELFT>::addStartEndSymbols() {
2240 // If the associated output section does not exist, there is ambiguity as to
2241 // how we define _start and _end symbols for an init/fini section. Users
2242 // expect no "undefined symbol" linker errors and loaders expect equal
2243 // st_value but do not particularly care whether the symbols are defined or
2244 // not. We retain the output section so that the section indexes will be
2245 // correct.
2246 auto define = [=](StringRef start, StringRef end, OutputSection *os) {
2247 if (os) {
2248 Defined *startSym = addOptionalRegular(ctx, name: start, sec: os, val: 0);
2249 Defined *stopSym = addOptionalRegular(ctx, name: end, sec: os, val: -1);
2250 if (startSym || stopSym)
2251 os->usedInExpression = true;
2252 } else {
2253 addOptionalRegular(ctx, name: start, sec: ctx.out.elfHeader.get(), val: 0);
2254 addOptionalRegular(ctx, name: end, sec: ctx.out.elfHeader.get(), val: 0);
2255 }
2256 };
2257
2258 define("__preinit_array_start", "__preinit_array_end", ctx.out.preinitArray);
2259 define("__init_array_start", "__init_array_end", ctx.out.initArray);
2260 define("__fini_array_start", "__fini_array_end", ctx.out.finiArray);
2261
2262 // As a special case, don't unnecessarily retain .ARM.exidx, which would
2263 // create an empty PT_ARM_EXIDX.
2264 if (OutputSection *sec = findSection(ctx, name: ".ARM.exidx"))
2265 define("__exidx_start", "__exidx_end", sec);
2266}
2267
2268// If a section name is valid as a C identifier (which is rare because of
2269// the leading '.'), linkers are expected to define __start_<secname> and
2270 // __stop_<secname> symbols. They are at the beginning and end of the section,
2271 // respectively. This is not required by the ELF standard, but GNU ld and
2272 // gold provide the feature, and it is used by many programs.
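 // For example (illustrative): for an output section named `init_fns`, the
 // linker defines __start_init_fns and __stop_init_fns, which C code can
 // reference as `extern char __start_init_fns[], __stop_init_fns[];` to walk
 // the section's contents at runtime.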
2273template <class ELFT>
2274void Writer<ELFT>::addStartStopSymbols(OutputSection &osec) {
2275 StringRef s = osec.name;
2276 if (!isValidCIdentifier(s))
2277 return;
2278 StringSaver &ss = ctx.saver;
2279 Defined *startSym = addOptionalRegular(ctx, name: ss.save(S: "__start_" + s), sec: &osec, val: 0,
2280 stOther: ctx.arg.zStartStopVisibility);
2281 Defined *stopSym = addOptionalRegular(ctx, name: ss.save(S: "__stop_" + s), sec: &osec, val: -1,
2282 stOther: ctx.arg.zStartStopVisibility);
2283 if (startSym || stopSym)
2284 osec.usedInExpression = true;
2285}
2286
2287static bool needsPtLoad(OutputSection *sec) {
2288 if (!(sec->flags & SHF_ALLOC))
2289 return false;
2290
2291 // Don't allocate VA space for TLS NOBITS sections. The PT_TLS PHDR is
2292 // responsible for allocating space for them, not the PT_LOAD that
2293 // contains the TLS initialization image.
2294 if ((sec->flags & SHF_TLS) && sec->type == SHT_NOBITS)
2295 return false;
2296 return true;
2297}
2298
2299// Adjust phdr flags according to certain options.
2300static uint64_t computeFlags(Ctx &ctx, uint64_t flags) {
2301 if (ctx.arg.omagic)
2302 return PF_R | PF_W | PF_X;
2303 if (ctx.arg.executeOnly && (flags & PF_X))
2304 return flags & ~PF_R;
2305 return flags;
2306}
2307
2308// Decide which program headers to create and which sections to include in each
2309// one.
2310template <class ELFT>
2311SmallVector<std::unique_ptr<PhdrEntry>, 0>
2312Writer<ELFT>::createPhdrs(Partition &part) {
2313 SmallVector<std::unique_ptr<PhdrEntry>, 0> ret;
2314 auto addHdr = [&, &ctx = ctx](unsigned type, unsigned flags) -> PhdrEntry * {
2315 ret.push_back(Elt: std::make_unique<PhdrEntry>(args&: ctx, args&: type, args&: flags));
2316 return ret.back().get();
2317 };
2318
2319 unsigned partNo = part.getNumber(ctx);
2320 bool isMain = partNo == 1;
2321
2322 // Add the first PT_LOAD segment for regular output sections.
2323 uint64_t flags = computeFlags(ctx, flags: PF_R);
2324 PhdrEntry *load = nullptr;
2325
2326 // nmagic or omagic output does not have PT_PHDR, PT_INTERP, or the readonly
2327 // PT_LOAD.
2328 if (!ctx.arg.nmagic && !ctx.arg.omagic) {
2329 // The first phdr entry is PT_PHDR which describes the program header
2330 // itself.
2331 if (isMain)
2332 addHdr(PT_PHDR, PF_R)->add(ctx.out.programHeaders.get());
2333 else
2334 addHdr(PT_PHDR, PF_R)->add(part.programHeaders->getParent());
2335
2336 // PT_INTERP must be the second entry if exists.
2337 if (OutputSection *cmd = findSection(ctx, name: ".interp", partition: partNo))
2338 addHdr(PT_INTERP, cmd->getPhdrFlags())->add(cmd);
2339
2340 // Add the headers. We will remove them if they don't fit.
2341 // In the other partitions the headers are ordinary sections, so they don't
2342 // need to be added here.
2343 if (isMain) {
2344 load = addHdr(PT_LOAD, flags);
2345 load->add(sec: ctx.out.elfHeader.get());
2346 load->add(sec: ctx.out.programHeaders.get());
2347 }
2348 }
2349
2350 // PT_GNU_RELRO includes all sections that should be marked as
2351 // read-only by the dynamic linker after processing relocations.
2352 // Current dynamic loaders only support one PT_GNU_RELRO PHDR, so give
2353 // an error message if more than one PT_GNU_RELRO PHDR is required.
2354 auto relRo = std::make_unique<PhdrEntry>(args&: ctx, args: PT_GNU_RELRO, args: PF_R);
2355 bool inRelroPhdr = false;
2356 OutputSection *relroEnd = nullptr;
2357 for (OutputSection *sec : ctx.outputSections) {
2358 if (sec->partition != partNo || !needsPtLoad(sec))
2359 continue;
2360 if (isRelroSection(ctx, sec)) {
2361 inRelroPhdr = true;
2362 if (!relroEnd)
2363 relRo->add(sec);
2364 else
2365 ErrAlways(ctx) << "section: " << sec->name
2366 << " is not contiguous with other relro" << " sections";
2367 } else if (inRelroPhdr) {
2368 inRelroPhdr = false;
2369 relroEnd = sec;
2370 }
2371 }
2372 relRo->p_align = 1;
2373
2374 for (OutputSection *sec : ctx.outputSections) {
2375 if (!needsPtLoad(sec))
2376 continue;
2377
2378 // Normally, sections in partitions other than the current partition are
2379 // ignored. But partition number 255 is a special case: it contains the
2380 // partition end marker (.part.end). It needs to be added to the main
2381 // partition so that a segment is created for it in the main partition,
2382 // which will cause the dynamic loader to reserve space for the other
2383 // partitions.
2384 if (sec->partition != partNo) {
2385 if (isMain && sec->partition == 255)
2386 addHdr(PT_LOAD, computeFlags(ctx, flags: sec->getPhdrFlags()))->add(sec);
2387 continue;
2388 }
2389
2390 // Segments are contiguous memory regions that have the same attributes
2391 // (e.g. executable or writable). There is one phdr for each segment.
2392 // Therefore, we need to create a new phdr when the next section has
2393 // incompatible flags or is loaded at a discontiguous address or memory
2394 // region using AT or AT> linker script command, respectively.
2395 //
2396 // As an exception, we don't create a separate load segment for the ELF
2397 // headers, even if the first "real" output has an AT or AT> attribute.
2398 //
2399 // In addition, NOBITS sections should only be placed at the end of a LOAD
2400 // segment (since it's represented as p_filesz < p_memsz). If we have a
2401 // not-NOBITS section after a NOBITS, we create a new LOAD for the latter
2402 // even if flags match, so as not to require actually writing the
2403 // supposed-to-be-NOBITS section to the output file. (However, we cannot do
2404 // so when hasSectionsCommand, since we cannot introduce the extra alignment
2405 // needed to create a new LOAD)
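 // For illustration: if the current segment's flags are PF_R|PF_X and the next
 // section needs PF_R|PF_W, the XOR below leaves PF_X|PF_W set, so the section
 // starts a new PT_LOAD.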
2406 uint64_t newFlags = computeFlags(ctx, flags: sec->getPhdrFlags());
2407 uint64_t incompatible = flags ^ newFlags;
2408 if (!(newFlags & PF_W)) {
2409 // When --no-rosegment is specified, RO and RX sections are compatible.
2410 if (ctx.arg.singleRoRx)
2411 incompatible &= ~PF_X;
2412 // When --no-xosegment is specified (the default), XO and RX sections are
2413 // compatible.
2414 if (ctx.arg.singleXoRx)
2415 incompatible &= ~PF_R;
2416 }
2417 if (incompatible)
2418 load = nullptr;
2419
2420 bool sameLMARegion =
2421 load && !sec->lmaExpr && sec->lmaRegion == load->firstSec->lmaRegion;
2422 if (load && sec != relroEnd &&
2423 sec->memRegion == load->firstSec->memRegion &&
2424 (sameLMARegion || load->lastSec == ctx.out.programHeaders.get()) &&
2425 (ctx.script->hasSectionsCommand || sec->type == SHT_NOBITS ||
2426 load->lastSec->type != SHT_NOBITS)) {
2427 load->p_flags |= newFlags;
2428 } else {
2429 load = addHdr(PT_LOAD, newFlags);
2430 flags = newFlags;
2431 }
2432
2433 load->add(sec);
2434 }
2435
2436 // Add a TLS segment if any.
2437 auto tlsHdr = std::make_unique<PhdrEntry>(args&: ctx, args: PT_TLS, args: PF_R);
2438 for (OutputSection *sec : ctx.outputSections)
2439 if (sec->partition == partNo && sec->flags & SHF_TLS)
2440 tlsHdr->add(sec);
2441 if (tlsHdr->firstSec)
2442 ret.push_back(Elt: std::move(tlsHdr));
2443
2444 // Add an entry for .dynamic.
2445 if (OutputSection *sec = part.dynamic->getParent())
2446 addHdr(PT_DYNAMIC, sec->getPhdrFlags())->add(sec);
2447
2448 if (relRo->firstSec)
2449 ret.push_back(Elt: std::move(relRo));
2450
2451 // PT_GNU_EH_FRAME is a special segment pointing to .eh_frame_hdr.
2452 if (part.ehFrame->isNeeded() && part.ehFrameHdr &&
2453 part.ehFrame->getParent() && part.ehFrameHdr->getParent())
2454 addHdr(PT_GNU_EH_FRAME, part.ehFrameHdr->getParent()->getPhdrFlags())
2455 ->add(part.ehFrameHdr->getParent());
2456
2457 if (ctx.arg.osabi == ELFOSABI_OPENBSD) {
2458 // PT_OPENBSD_MUTABLE makes the dynamic linker fill the segment with
2459 // zero data, like bss, but it can be treated differently.
2460 if (OutputSection *cmd = findSection(ctx, name: ".openbsd.mutable", partition: partNo))
2461 addHdr(PT_OPENBSD_MUTABLE, cmd->getPhdrFlags())->add(cmd);
2462
2463 // PT_OPENBSD_RANDOMIZE makes the dynamic linker fill the segment
2464 // with random data.
2465 if (OutputSection *cmd = findSection(ctx, name: ".openbsd.randomdata", partition: partNo))
2466 addHdr(PT_OPENBSD_RANDOMIZE, cmd->getPhdrFlags())->add(cmd);
2467
2468 // PT_OPENBSD_SYSCALLS makes the kernel and dynamic linker register
2469 // system call sites.
2470 if (OutputSection *cmd = findSection(ctx, name: ".openbsd.syscalls", partition: partNo))
2471 addHdr(PT_OPENBSD_SYSCALLS, cmd->getPhdrFlags())->add(cmd);
2472 }
2473
2474 if (ctx.arg.zGnustack != GnuStackKind::None) {
2475 // PT_GNU_STACK is a special segment to tell the loader to make the
2476 // pages for the stack non-executable. If you really want an executable
2477 // stack, you can pass -z execstack, but that's not recommended for
2478 // security reasons.
2479 unsigned perm = PF_R | PF_W;
2480 if (ctx.arg.zGnustack == GnuStackKind::Exec)
2481 perm |= PF_X;
2482 addHdr(PT_GNU_STACK, perm)->p_memsz = ctx.arg.zStackSize;
2483 }
2484
2485 // PT_OPENBSD_NOBTCFI is an OpenBSD-specific header to mark that the
2486 // executable is expected to violate branch-target CFI checks.
2487 if (ctx.arg.zNoBtCfi)
2488 addHdr(PT_OPENBSD_NOBTCFI, PF_X);
2489
2490 // PT_OPENBSD_WXNEEDED is an OpenBSD-specific header to mark that the
2491 // executable is expected to perform W^X violations, such as calling mprotect(2) or
2492 // mmap(2) with PROT_WRITE | PROT_EXEC, which is prohibited by default on
2493 // OpenBSD.
2494 if (ctx.arg.zWxneeded)
2495 addHdr(PT_OPENBSD_WXNEEDED, PF_X);
2496
2497 if (OutputSection *cmd = findSection(ctx, name: ".note.gnu.property", partition: partNo))
2498 addHdr(PT_GNU_PROPERTY, PF_R)->add(cmd);
2499
2500 // Create one PT_NOTE per group of contiguous SHT_NOTE sections with the
2501 // same alignment.
2502 PhdrEntry *note = nullptr;
2503 for (OutputSection *sec : ctx.outputSections) {
2504 if (sec->partition != partNo)
2505 continue;
2506 if (sec->type == SHT_NOTE && (sec->flags & SHF_ALLOC)) {
2507 if (!note || sec->lmaExpr || note->lastSec->addralign != sec->addralign)
2508 note = addHdr(PT_NOTE, PF_R);
2509 note->add(sec);
2510 } else {
2511 note = nullptr;
2512 }
2513 }
2514 return ret;
2515}
2516
2517template <class ELFT>
2518void Writer<ELFT>::addPhdrForSection(Partition &part, unsigned shType,
2519 unsigned pType, unsigned pFlags) {
2520 unsigned partNo = part.getNumber(ctx);
2521 auto i = llvm::find_if(ctx.outputSections, [=](OutputSection *cmd) {
2522 return cmd->partition == partNo && cmd->type == shType;
2523 });
2524 if (i == ctx.outputSections.end())
2525 return;
2526
2527 auto entry = std::make_unique<PhdrEntry>(args&: ctx, args&: pType, args&: pFlags);
2528 entry->add(sec: *i);
2529 part.phdrs.push_back(Elt: std::move(entry));
2530}
2531
2532 // Place the first section of each PT_LOAD on a different page (of maxPageSize).
2533// This is achieved by assigning an alignment expression to addrExpr of each
2534// such section.
2535template <class ELFT> void Writer<ELFT>::fixSectionAlignments() {
2536 const PhdrEntry *prev;
2537 auto pageAlign = [&, &ctx = this->ctx](const PhdrEntry *p) {
2538 OutputSection *cmd = p->firstSec;
2539 if (!cmd)
2540 return;
2541 cmd->alignExpr = [align = cmd->addralign]() { return align; };
2542 if (!cmd->addrExpr) {
2543 // Prefer advancing to align(dot, maxPageSize) + dot%maxPageSize to avoid
2544 // padding in the file contents.
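 // For example (illustrative values): with maxPageSize = 0x1000 and
 // dot = 0x4321, this yields align(0x4321, 0x1000) + 0x4321 % 0x1000
 // = 0x5000 + 0x321 = 0x5321, so the address keeps its offset within the
 // page and up to a page of zero padding in the output file is avoided.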
2545 //
2546 // When -z separate-code is used we must not have any overlap in pages
2547 // between an executable segment and a non-executable segment. We align to
2548 // the next maximum page size boundary on transitions between executable
2549 // and non-executable segments.
2550 //
2551 // SHT_LLVM_PART_EHDR marks the start of a partition. The partition
2552 // sections will be extracted to a separate file. Align to the next
2553 // maximum page size boundary so that we can find the ELF header at the
2554 // start. We cannot benefit from overlapping p_offset ranges with the
2555 // previous segment anyway.
2556 if (ctx.arg.zSeparate == SeparateSegmentKind::Loadable ||
2557 (ctx.arg.zSeparate == SeparateSegmentKind::Code && prev &&
2558 (prev->p_flags & PF_X) != (p->p_flags & PF_X)) ||
2559 cmd->type == SHT_LLVM_PART_EHDR)
2560 cmd->addrExpr = [&ctx = this->ctx] {
2561 return alignToPowerOf2(ctx.script->getDot(), ctx.arg.maxPageSize);
2562 };
2563 // PT_TLS is at the start of the first RW PT_LOAD. If `p` includes PT_TLS,
2564 // it must be the RW. Align to p_align(PT_TLS) to make sure
2565 // p_vaddr(PT_LOAD)%p_align(PT_LOAD) = 0. Otherwise, if
2566 // sh_addralign(.tdata) < sh_addralign(.tbss), we will set p_align(PT_TLS)
2567 // to sh_addralign(.tbss), while p_vaddr(PT_TLS)=p_vaddr(PT_LOAD) may not
2568 // be congruent to 0 modulo p_align(PT_TLS).
2569 //
2570 // Technically this is not required, but as of 2019, some dynamic loaders
2571 // don't handle p_vaddr%p_align != 0 correctly, e.g. glibc (i386 and
2572 // x86-64) doesn't make runtime address congruent to p_vaddr modulo
2573 // p_align for dynamic TLS blocks (PR/24606), FreeBSD rtld has the same
2574 // bug, and musl (TLS Variant 1 architectures) before 1.1.23 did not handle
2575 // such TLS blocks correctly. We need to keep the workaround for a while.
2576 else if (ctx.tlsPhdr && ctx.tlsPhdr->firstSec == p->firstSec)
2577 cmd->addrExpr = [&ctx] {
2578 return alignToPowerOf2(ctx.script->getDot(), ctx.arg.maxPageSize) +
2579 alignToPowerOf2(ctx.script->getDot() % ctx.arg.maxPageSize,
2580 ctx.tlsPhdr->p_align);
2581 };
2582 else
2583 cmd->addrExpr = [&ctx] {
2584 return alignToPowerOf2(ctx.script->getDot(), ctx.arg.maxPageSize) +
2585 ctx.script->getDot() % ctx.arg.maxPageSize;
2586 };
2587 }
2588 };
2589
2590 for (Partition &part : ctx.partitions) {
2591 prev = nullptr;
2592 for (auto &p : part.phdrs)
2593 if (p->p_type == PT_LOAD && p->firstSec) {
2594 pageAlign(p.get());
2595 prev = p.get();
2596 }
2597 }
2598}
2599
2600 // Compute an in-file position for a given section. The file offset must be
2601 // congruent to its virtual address modulo the page size, so that the loader can
2602// load executables without any address adjustment.
2603static uint64_t computeFileOffset(Ctx &ctx, OutputSection *os, uint64_t off) {
2604 // The first section in a PT_LOAD has to have congruent offset and address
2605 // modulo the maximum page size.
2606 if (os->ptLoad && os->ptLoad->firstSec == os)
2607 return alignTo(Value: off, Align: os->ptLoad->p_align, Skew: os->addr);
2608
2609 // File offsets are not significant for .bss sections other than the first one
2610 // in a PT_LOAD/PT_TLS. By convention, we keep section offsets monotonically
2611 // increasing rather than setting them to zero.
2612 if (os->type == SHT_NOBITS && (!ctx.tlsPhdr || ctx.tlsPhdr->firstSec != os))
2613 return off;
2614
2615 // If the section is not in a PT_LOAD, we just have to align it.
2616 if (!os->ptLoad)
2617 return alignToPowerOf2(Value: off, Align: os->addralign);
2618
2619 // If two sections share the same PT_LOAD the file offset is calculated
2620 // using this formula: Off2 = Off1 + (VA2 - VA1).
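 // For example (illustrative values): if the first section of the PT_LOAD is at
 // VA1 = 0x401000 with Off1 = 0x1000 and this section is at VA2 = 0x402340, its
 // offset becomes 0x1000 + (0x402340 - 0x401000) = 0x2340.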
2621 OutputSection *first = os->ptLoad->firstSec;
2622 return first->offset + os->addr - first->addr;
2623}
2624
2625template <class ELFT> void Writer<ELFT>::assignFileOffsetsBinary() {
2626 // Compute the minimum LMA of all non-empty non-NOBITS sections as minAddr.
2627 auto needsOffset = [](OutputSection &sec) {
2628 return sec.type != SHT_NOBITS && (sec.flags & SHF_ALLOC) && sec.size > 0;
2629 };
2630 uint64_t minAddr = UINT64_MAX;
2631 for (OutputSection *sec : ctx.outputSections)
2632 if (needsOffset(*sec)) {
2633 sec->offset = sec->getLMA();
2634 minAddr = std::min(a: minAddr, b: sec->offset);
2635 }
2636
2637 // Sections are laid out at LMA minus minAddr.
2638 fileSize = 0;
2639 for (OutputSection *sec : ctx.outputSections)
2640 if (needsOffset(*sec)) {
2641 sec->offset -= minAddr;
2642 fileSize = std::max(a: fileSize, b: sec->offset + sec->size);
2643 }
2644}
2645
2646static std::string rangeToString(uint64_t addr, uint64_t len) {
2647 return "[0x" + utohexstr(X: addr) + ", 0x" + utohexstr(X: addr + len - 1) + "]";
2648}
2649
2650// Assign file offsets to output sections.
2651template <class ELFT> void Writer<ELFT>::assignFileOffsets() {
2652 ctx.out.programHeaders->offset = ctx.out.elfHeader->size;
2653 uint64_t off = ctx.out.elfHeader->size + ctx.out.programHeaders->size;
2654
2655 PhdrEntry *lastRX = nullptr;
2656 for (Partition &part : ctx.partitions)
2657 for (auto &p : part.phdrs)
2658 if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
2659 lastRX = p.get();
2660
2661 // Lay out SHF_ALLOC sections before non-SHF_ALLOC sections. A non-SHF_ALLOC
2662 // section will not occupy file offsets contained by a PT_LOAD.
2663 for (OutputSection *sec : ctx.outputSections) {
2664 if (!(sec->flags & SHF_ALLOC))
2665 continue;
2666 off = computeFileOffset(ctx, os: sec, off);
2667 sec->offset = off;
2668 if (sec->type != SHT_NOBITS)
2669 off += sec->size;
2670
2671 // If this is the last section of the last executable segment and that
2672 // segment is the last loadable segment, align the offset of the
2673 // following section to avoid loading non-segment parts of the file.
2674 if (ctx.arg.zSeparate != SeparateSegmentKind::None && lastRX &&
2675 lastRX->lastSec == sec)
2676 off = alignToPowerOf2(Value: off, Align: ctx.arg.maxPageSize);
2677 }
2678 for (OutputSection *osec : ctx.outputSections) {
2679 if (osec->flags & SHF_ALLOC)
2680 continue;
2681 osec->offset = alignToPowerOf2(Value: off, Align: osec->addralign);
2682 off = osec->offset + osec->size;
2683 }
2684
2685 sectionHeaderOff = alignToPowerOf2(Value: off, Align: ctx.arg.wordsize);
2686 fileSize =
2687 sectionHeaderOff + (ctx.outputSections.size() + 1) * sizeof(Elf_Shdr);
2688
2689 // Our logic assumes that sections have rising VAs within the same segment.
2690 // With the use of linker scripts it is possible to violate this rule and get
2691 // file offset overlaps or overflows. That should never happen with a valid
2692 // script, which does not move the location counter backwards, and usually
2693 // scripts do not do that. Unfortunately, there are apps in the wild, for
2694 // example the Linux kernel, which control segment distribution explicitly
2695 // and move the counter backwards, so we have to allow that to support linking them. We
2696 // perform non-critical checks for overlaps in checkSectionOverlap(), but here
2697 // we want to prevent file size overflows because it would crash the linker.
2698 for (OutputSection *sec : ctx.outputSections) {
2699 if (sec->type == SHT_NOBITS)
2700 continue;
2701 if ((sec->offset > fileSize) || (sec->offset + sec->size > fileSize))
2702 ErrAlways(ctx) << "unable to place section " << sec->name
2703 << " at file offset "
2704 << rangeToString(addr: sec->offset, len: sec->size)
2705 << "; check your linker script for overflows";
2706 }
2707}
2708
2709// Finalize the program headers. We call this function after we assign
2710// file offsets and VAs to all sections.
2711template <class ELFT> void Writer<ELFT>::setPhdrs(Partition &part) {
2712 for (std::unique_ptr<PhdrEntry> &p : part.phdrs) {
2713 OutputSection *first = p->firstSec;
2714 OutputSection *last = p->lastSec;
2715
2716 // .ARM.exidx sections may not be within a single .ARM.exidx
2717 // output section. We always want to describe just the
2718 // SyntheticSection.
2719 if (part.armExidx && p->p_type == PT_ARM_EXIDX) {
2720 p->p_filesz = part.armExidx->getSize();
2721 p->p_memsz = p->p_filesz;
2722 p->p_offset = first->offset + part.armExidx->outSecOff;
2723 p->p_vaddr = first->addr + part.armExidx->outSecOff;
2724 p->p_align = part.armExidx->addralign;
2725 if (part.elfHeader)
2726 p->p_offset -= part.elfHeader->getParent()->offset;
2727
2728 if (!p->hasLMA)
2729 p->p_paddr = first->getLMA() + part.armExidx->outSecOff;
2730 return;
2731 }
2732
2733 if (first) {
2734 p->p_filesz = last->offset - first->offset;
2735 if (last->type != SHT_NOBITS)
2736 p->p_filesz += last->size;
2737
2738 p->p_memsz = last->addr + last->size - first->addr;
2739 p->p_offset = first->offset;
2740 p->p_vaddr = first->addr;
2741
2742 // File offsets in partitions other than the main partition are relative
2743 // to the offset of the ELF headers. Perform that adjustment now.
2744 if (part.elfHeader)
2745 p->p_offset -= part.elfHeader->getParent()->offset;
2746
2747 if (!p->hasLMA)
2748 p->p_paddr = first->getLMA();
2749 }
2750 }
2751}
2752
2753// A helper struct for checkSectionOverlap.
2754namespace {
2755struct SectionOffset {
2756 OutputSection *sec;
2757 uint64_t offset;
2758};
2759} // namespace
2760
2761// Check whether sections overlap for a specific address range (file offsets,
2762// load and virtual addresses).
2763static void checkOverlap(Ctx &ctx, StringRef name,
2764 std::vector<SectionOffset> &sections,
2765 bool isVirtualAddr) {
2766 llvm::sort(C&: sections, Comp: [=](const SectionOffset &a, const SectionOffset &b) {
2767 return a.offset < b.offset;
2768 });
2769
2770 // Finding overlap is easy given the vector is sorted by start position.
2771 // If an element starts before the end of the previous element, they overlap.
2772 for (size_t i = 1, end = sections.size(); i < end; ++i) {
2773 SectionOffset a = sections[i - 1];
2774 SectionOffset b = sections[i];
2775 if (b.offset >= a.offset + a.sec->size)
2776 continue;
2777
2778 // If both sections are in OVERLAY we allow the overlapping of virtual
2779 // addresses, because it is what OVERLAY was designed for.
2780 if (isVirtualAddr && a.sec->inOverlay && b.sec->inOverlay)
2781 continue;
2782
2783 Err(ctx) << "section " << a.sec->name << " " << name
2784 << " range overlaps with " << b.sec->name << "\n>>> "
2785 << a.sec->name << " range is "
2786 << rangeToString(addr: a.offset, len: a.sec->size) << "\n>>> " << b.sec->name
2787 << " range is " << rangeToString(addr: b.offset, len: b.sec->size);
2788 }
2789}
2790
2791// Check for overlapping sections and address overflows.
2792//
2793// In this function we check that none of the output sections have overlapping
2794// file offsets. For SHF_ALLOC sections we also check that the load address
2795// ranges and the virtual address ranges don't overlap
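//
// For illustration (hypothetical linker script): with
//   OVERLAY 0x1000 : AT (0x4000) { .ov1 { *(.ov1) } .ov2 { *(.ov2) } }
// .ov1 and .ov2 share the virtual address 0x1000 but get distinct load
// addresses, so only the virtual-address overlap is tolerated here.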
template <class ELFT> void Writer<ELFT>::checkSections() {
  // First, check that each section's VAs fit in the available address space
  // for the target.
  for (OutputSection *os : ctx.outputSections)
    if ((os->addr + os->size < os->addr) ||
        (!ELFT::Is64Bits && os->addr + os->size > uint64_t(UINT32_MAX) + 1))
      Err(ctx) << "section " << os->name << " at 0x"
               << utohexstr(os->addr, true) << " of size 0x"
               << utohexstr(os->size, true)
               << " exceeds available address space";

  // Check for overlapping file offsets. In this case we need to skip any
  // section marked as SHT_NOBITS. These sections don't actually occupy space
  // in the file, so Sec->Offset + Sec->Size can overlap with others. If
  // --oformat binary is specified, only SHF_ALLOC sections are added to the
  // output file, so we skip any non-allocated sections in that case.
  std::vector<SectionOffset> fileOffs;
  for (OutputSection *sec : ctx.outputSections)
    if (sec->size > 0 && sec->type != SHT_NOBITS &&
        (!ctx.arg.oFormatBinary || (sec->flags & SHF_ALLOC)))
      fileOffs.push_back({sec, sec->offset});
  checkOverlap(ctx, "file", fileOffs, false);

  // When linking with -r there is no need to check for overlapping
  // virtual/load addresses since those addresses will only be assigned when
  // the final executable/shared object is created.
  if (ctx.arg.relocatable)
    return;

  // Checking for overlapping virtual and load addresses only needs to take
  // into account SHF_ALLOC sections since others will not be loaded.
  // Furthermore, we also need to skip SHF_TLS sections since these will be
  // mapped to other addresses at runtime and can therefore have overlapping
  // ranges in the file.
  std::vector<SectionOffset> vmas;
  for (OutputSection *sec : ctx.outputSections)
    if (sec->size > 0 && (sec->flags & SHF_ALLOC) && !(sec->flags & SHF_TLS))
      vmas.push_back({sec, sec->addr});
  checkOverlap(ctx, "virtual address", vmas, true);

  // Finally, check that the load addresses don't overlap. This will usually be
  // the same as the virtual addresses but can be different when using a linker
  // script with AT().
  std::vector<SectionOffset> lmas;
  for (OutputSection *sec : ctx.outputSections)
    if (sec->size > 0 && (sec->flags & SHF_ALLOC) && !(sec->flags & SHF_TLS))
      lmas.push_back({sec, sec->getLMA()});
  checkOverlap(ctx, "load address", lmas, false);
}

// The entry point address is chosen in the following ways.
//
// 1. the '-e' entry command-line option;
// 2. the ENTRY(symbol) command in a linker control script;
// 3. the value of the symbol _start, if present;
// 4. the number represented by the entry symbol, if it is a number;
// 5. the address 0.
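//
// For example (hypothetical invocations): `-e _begin` resolves the symbol
// _begin via the symbol table lookup below, while `-e 0x1000` matches no
// symbol and is parsed as the numeric address 0x1000 (case 4).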
static uint64_t getEntryAddr(Ctx &ctx) {
  // Case 1, 2 or 3
  if (Symbol *b = ctx.symtab->find(ctx.arg.entry))
    return b->getVA(ctx);

  // Case 4
  uint64_t addr;
  if (to_integer(ctx.arg.entry, addr))
    return addr;

  // Case 5
  if (ctx.arg.warnMissingEntry)
    Warn(ctx) << "cannot find entry symbol " << ctx.arg.entry
              << "; not setting start address";
  return 0;
}

static uint16_t getELFType(Ctx &ctx) {
  if (ctx.arg.isPic)
    return ET_DYN;
  if (ctx.arg.relocatable)
    return ET_REL;
  return ET_EXEC;
}

template <class ELFT> void Writer<ELFT>::writeHeader() {
  writeEhdr<ELFT>(ctx, ctx.bufferStart, *ctx.mainPart);
  writePhdrs<ELFT>(ctx.bufferStart + sizeof(Elf_Ehdr), *ctx.mainPart);

  auto *eHdr = reinterpret_cast<Elf_Ehdr *>(ctx.bufferStart);
  eHdr->e_type = getELFType(ctx);
  eHdr->e_entry = getEntryAddr(ctx);

  // If -z nosectionheader is specified, omit the section header table.
  if (!ctx.in.shStrTab)
    return;
  eHdr->e_shoff = sectionHeaderOff;

  // Write the section header table.
  //
  // The ELF header can only store numbers up to SHN_LORESERVE in the e_shnum
  // and e_shstrndx fields. When the value of one of these fields exceeds
  // SHN_LORESERVE, ELF requires us to put sentinel values in the ELF header
  // and use fields in the section header at index 0 to store the value.
  // The sentinel values and fields are:
  // e_shnum = 0, SHdrs[0].sh_size = number of sections.
  // e_shstrndx = SHN_XINDEX, SHdrs[0].sh_link = .shstrtab section index.
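  //
  // For illustration (hypothetical count): with 70,000 output sections, num
  // below is 70,001, which is >= SHN_LORESERVE (0xff00), so the real count is
  // stored in SHdrs[0].sh_size rather than in e_shnum.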
  auto *sHdrs = reinterpret_cast<Elf_Shdr *>(ctx.bufferStart + eHdr->e_shoff);
  size_t num = ctx.outputSections.size() + 1;
  if (num >= SHN_LORESERVE)
    sHdrs->sh_size = num;
  else
    eHdr->e_shnum = num;

  uint32_t strTabIndex = ctx.in.shStrTab->getParent()->sectionIndex;
  if (strTabIndex >= SHN_LORESERVE) {
    sHdrs->sh_link = strTabIndex;
    eHdr->e_shstrndx = SHN_XINDEX;
  } else {
    eHdr->e_shstrndx = strTabIndex;
  }

  for (OutputSection *sec : ctx.outputSections)
    sec->writeHeaderTo<ELFT>(++sHdrs);
}

// Open a result file.
template <class ELFT> void Writer<ELFT>::openFile() {
  uint64_t maxSize = ctx.arg.is64 ? INT64_MAX : UINT32_MAX;
  if (fileSize != size_t(fileSize) || maxSize < fileSize) {
    std::string msg;
    raw_string_ostream s(msg);
    s << "output file too large: " << fileSize << " bytes\n"
      << "section sizes:\n";
    for (OutputSection *os : ctx.outputSections)
      s << os->name << ' ' << os->size << "\n";
    ErrAlways(ctx) << msg;
    return;
  }

  unlinkAsync(ctx.arg.outputFile);
  unsigned flags = 0;
  if (!ctx.arg.relocatable)
    flags |= FileOutputBuffer::F_executable;
  if (ctx.arg.mmapOutputFile)
    flags |= FileOutputBuffer::F_mmap;
  Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
      FileOutputBuffer::create(ctx.arg.outputFile, fileSize, flags);

  if (!bufferOrErr) {
    ErrAlways(ctx) << "failed to open " << ctx.arg.outputFile << ": "
                   << bufferOrErr.takeError();
    return;
  }
  buffer = std::move(*bufferOrErr);
  ctx.bufferStart = buffer->getBufferStart();
}

template <class ELFT> void Writer<ELFT>::writeSectionsBinary() {
  parallel::TaskGroup tg;
  for (OutputSection *sec : ctx.outputSections)
    if (sec->flags & SHF_ALLOC)
      sec->writeTo<ELFT>(ctx, ctx.bufferStart + sec->offset, tg);
}

static void fillTrap(std::array<uint8_t, 4> trapInstr, uint8_t *i,
                     uint8_t *end) {
  for (; i + 4 <= end; i += 4)
    memcpy(i, trapInstr.data(), 4);
}

// Fill executable segments with trap instructions. This includes both the
// gaps between sections (due to alignment) and the tail padding to the page
// boundary. Even though it is not required by any standard, it is in general
// a good thing to do for security reasons.
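//
// Worked example (illustrative numbers): on x86-64 the trap instruction is
// INT3 (0xcc). For the last PF_X PT_LOAD starting at a page-aligned file
// offset of 0x1000 with p_filesz 0x1464 and a max page size of 0x1000, the
// trap fill covers file offsets [0x2464, 0x3000) and p_filesz is then rounded
// up to 0x2000 so the padding is not treated as removable slack.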
template <class ELFT> void Writer<ELFT>::writeTrapInstr() {
  for (Partition &part : ctx.partitions) {
    // Fill gaps between consecutive sections in the same executable segment.
    OutputSection *prev = nullptr;
    for (OutputSection *sec : ctx.outputSections) {
      PhdrEntry *p = sec->ptLoad;
      if (!p || !(p->p_flags & PF_X))
        continue;
      if (prev && prev->ptLoad == p)
        fillTrap(ctx.target->trapInstr,
                 ctx.bufferStart + alignDown(prev->offset + prev->size, 4),
                 ctx.bufferStart + sec->offset);
      prev = sec;
    }

    // Fill the last page.
    for (std::unique_ptr<PhdrEntry> &p : part.phdrs)
      if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
        fillTrap(
            ctx.target->trapInstr,
            ctx.bufferStart + alignDown(p->firstSec->offset + p->p_filesz, 4),
            ctx.bufferStart + alignToPowerOf2(p->firstSec->offset + p->p_filesz,
                                              ctx.arg.maxPageSize));

    // Round up the file size of the last segment to the page boundary if it is
    // an executable segment to ensure that other tools don't accidentally
    // trim the instruction padding (e.g. when stripping the file).
    PhdrEntry *last = nullptr;
    for (std::unique_ptr<PhdrEntry> &p : part.phdrs)
      if (p->p_type == PT_LOAD)
        last = p.get();

    if (last && (last->p_flags & PF_X)) {
      last->p_filesz = alignToPowerOf2(last->p_filesz, ctx.arg.maxPageSize);
      // p_memsz might be larger than the aligned p_filesz due to trailing BSS
      // sections. Don't decrease it.
      last->p_memsz = std::max(last->p_memsz, last->p_filesz);
    }
  }
}

// Write section contents to a mmap'ed file.
template <class ELFT> void Writer<ELFT>::writeSections() {
  llvm::TimeTraceScope timeScope("Write sections");

  {
    // In -r or --emit-relocs mode, write the relocation sections first, as on
    // Elf_Rel targets we might find out that we need to modify the relocated
    // section while doing so.
    parallel::TaskGroup tg;
    for (OutputSection *sec : ctx.outputSections)
      if (isStaticRelSecType(sec->type))
        sec->writeTo<ELFT>(ctx, ctx.bufferStart + sec->offset, tg);
  }
  {
    parallel::TaskGroup tg;
    for (OutputSection *sec : ctx.outputSections)
      if (!isStaticRelSecType(sec->type))
        sec->writeTo<ELFT>(ctx, ctx.bufferStart + sec->offset, tg);
  }

  // Finally, check that all dynamic relocation addends were written correctly.
  if (ctx.arg.checkDynamicRelocs && ctx.arg.writeAddends) {
    for (OutputSection *sec : ctx.outputSections)
      if (isStaticRelSecType(sec->type))
        sec->checkDynRelAddends(ctx);
  }
}

// Computes a hash value of Data using a given hash function.
// In order to utilize multiple cores, we first split data into 1MB
// chunks, compute a hash for each chunk, and then compute a hash value
// of the hash values.
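//
// For example (hypothetical sizes): hashing a 10 MiB output with a 16-byte
// hash yields ten 1 MiB chunks; the ten 16-byte chunk hashes are computed in
// parallel and then hashed once more into hashBuf.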
static void
computeHash(llvm::MutableArrayRef<uint8_t> hashBuf,
            llvm::ArrayRef<uint8_t> data,
            std::function<void(uint8_t *dest, ArrayRef<uint8_t> arr)> hashFn) {
  std::vector<ArrayRef<uint8_t>> chunks = split(data, 1024 * 1024);
  const size_t hashesSize = chunks.size() * hashBuf.size();
  std::unique_ptr<uint8_t[]> hashes(new uint8_t[hashesSize]);

  // Compute hash values.
  parallelFor(0, chunks.size(), [&](size_t i) {
    hashFn(hashes.get() + i * hashBuf.size(), chunks[i]);
  });

  // Write to the final output buffer.
  hashFn(hashBuf.data(), ArrayRef(hashes.get(), hashesSize));
}

template <class ELFT> void Writer<ELFT>::writeBuildId() {
  if (!ctx.mainPart->buildId || !ctx.mainPart->buildId->getParent())
    return;

  if (ctx.arg.buildId == BuildIdKind::Hexstring) {
    for (Partition &part : ctx.partitions)
      part.buildId->writeBuildId(ctx.arg.buildIdVector);
    return;
  }

  // Compute a hash of all sections of the output file.
  size_t hashSize = ctx.mainPart->buildId->hashSize;
  std::unique_ptr<uint8_t[]> buildId(new uint8_t[hashSize]);
  MutableArrayRef<uint8_t> output(buildId.get(), hashSize);
  llvm::ArrayRef<uint8_t> input{ctx.bufferStart, size_t(fileSize)};

  // Fedora introduced the build ID as an "approximation of true uniqueness
  // across all binaries that might be used by overlapping sets of people". It
  // does not need the security guarantees that some hash algorithms strive to
  // provide, e.g. (second-)preimage and collision resistance. In practice
  // people use 'md5' and 'sha1' just for different lengths. Implement them
  // with the more efficient BLAKE3.
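  //
  // Consequently (sizes implied by the code below): --build-id=fast writes an
  // 8-byte xxh3 value, while --build-id=md5 and --build-id=sha1 write 16- and
  // 20-byte BLAKE3-based digests truncated to the conventional MD5/SHA1
  // lengths.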
  switch (ctx.arg.buildId) {
  case BuildIdKind::Fast:
    computeHash(output, input, [](uint8_t *dest, ArrayRef<uint8_t> arr) {
      write64le(dest, xxh3_64bits(arr));
    });
    break;
  case BuildIdKind::Md5:
    computeHash(output, input, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
      memcpy(dest, BLAKE3::hash<16>(arr).data(), hashSize);
    });
    break;
  case BuildIdKind::Sha1:
    computeHash(output, input, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
      memcpy(dest, BLAKE3::hash<20>(arr).data(), hashSize);
    });
    break;
  case BuildIdKind::Uuid:
    if (auto ec = llvm::getRandomBytes(buildId.get(), hashSize))
      ErrAlways(ctx) << "entropy source failure: " << ec.message();
    break;
  default:
    llvm_unreachable("unknown BuildIdKind");
  }
  for (Partition &part : ctx.partitions)
    part.buildId->writeBuildId(output);
}

template void elf::writeResult<ELF32LE>(Ctx &);
template void elf::writeResult<ELF32BE>(Ctx &);
template void elf::writeResult<ELF64LE>(Ctx &);
template void elf::writeResult<ELF64BE>(Ctx &);