1//===- Relocations.cpp ----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains platform-independent functions to process relocations.
10// I'll describe the overview of this file here.
11//
12// Simple relocations are easy to handle for the linker. For example,
13// for R_X86_64_PC64 relocs, the linker just has to fix up locations
14// with the relative offsets to the target symbols. It would just be
15// reading records from relocation sections and applying them to output.
16//
17// But not all relocations are that easy to handle. For example, for
18// R_386_GOTOFF relocs, the linker has to create new GOT entries for
19// symbols if they don't exist, and fix up locations with GOT entry
20// offsets from the beginning of GOT section. So there is more than
21// fixing addresses in relocation processing.
22//
23// ELF defines a large number of complex relocations.
24//
// The functions in this file analyze relocations and do whatever needs
// to be done. It includes, but is not limited to, the following.
27//
28// - create GOT/PLT entries
29// - create new relocations in .dynsym to let the dynamic linker resolve
30// them at runtime (since ELF supports dynamic linking, not all
31// relocations can be resolved at link-time)
32// - create COPY relocs and reserve space in .bss
33// - replace expensive relocs (in terms of runtime cost) with cheap ones
// - error out on infeasible combinations such as PIC and non-relative relocs
35//
// Note that the functions in this file don't actually apply relocations
// because they don't know about the output file or the output file buffer.
// They instead store Relocation objects in InputSection's Relocations
// vector so they can be applied later in InputSection::writeTo.
40//
41//===----------------------------------------------------------------------===//
42
43#include "Relocations.h"
44#include "Config.h"
45#include "InputFiles.h"
46#include "LinkerScript.h"
47#include "OutputSections.h"
48#include "SymbolTable.h"
49#include "Symbols.h"
50#include "SyntheticSections.h"
51#include "Target.h"
52#include "Thunks.h"
53#include "lld/Common/ErrorHandler.h"
54#include "lld/Common/Memory.h"
55#include "llvm/ADT/SmallSet.h"
56#include "llvm/BinaryFormat/ELF.h"
57#include "llvm/Demangle/Demangle.h"
58#include <algorithm>
59
60using namespace llvm;
61using namespace llvm::ELF;
62using namespace llvm::object;
63using namespace llvm::support::endian;
64using namespace lld;
65using namespace lld::elf;
66
// Append the ">>> defined in <file>" trailer line shared by several
// diagnostics in this file.
static void printDefinedLocation(ELFSyncStream &s, const Symbol &sym) {
  s << "\n>>> defined in " << sym.file;
}
70
71// Construct a message in the following format.
72//
73// >>> defined in /home/alice/src/foo.o
74// >>> referenced by bar.c:12 (/home/alice/src/bar.c:12)
75// >>> /home/alice/src/bar.o:(.text+0x1)
76static void printLocation(ELFSyncStream &s, InputSectionBase &sec,
77 const Symbol &sym, uint64_t off) {
78 printDefinedLocation(s, sym);
79 s << "\n>>> referenced by ";
80 auto tell = s.tell();
81 s << sec.getSrcMsg(sym, offset: off);
82 if (tell != s.tell())
83 s << "\n>>> ";
84 s << sec.getObjMsg(offset: off);
85}
86
// Report a relocation whose computed value v does not fit the encodable
// range [min, max], adding symbol/section context to help locate the problem.
void elf::reportRangeError(Ctx &ctx, uint8_t *loc, const Relocation &rel,
                           const Twine &v, int64_t min, uint64_t max) {
  ErrorPlace errPlace = getErrorPlace(ctx, loc);
  auto diag = Err(ctx);
  diag << errPlace.loc << "relocation " << rel.type
       << " out of range: " << v.str() << " is not in [" << min << ", " << max
       << ']';

  if (rel.sym) {
    // Name what the relocation points at: the symbol itself, or, for section
    // symbols, the containing section.
    if (!rel.sym->isSection())
      diag << "; references '" << rel.sym << '\'';
    else if (auto *d = dyn_cast<Defined>(Val: rel.sym))
      diag << "; references section '" << d->section->name << "'";

    // Extra hint when an x86-64 PC32 relocation targets a section marked
    // SHF_X86_64_LARGE.
    if (ctx.arg.emachine == EM_X86_64 && rel.type == R_X86_64_PC32 &&
        rel.sym->getOutputSection() &&
        (rel.sym->getOutputSection()->flags & SHF_X86_64_LARGE)) {
      diag << "; R_X86_64_PC32 should not reference a section marked "
              "SHF_X86_64_LARGE";
    }
  }
  if (!errPlace.srcLoc.empty())
    diag << "\n>>> referenced by " << errPlace.srcLoc;
  if (rel.sym && !rel.sym->isSection())
    printDefinedLocation(s&: diag, sym: *rel.sym);

  // Debug sections are a frequent source of overflow; suggest a mitigation.
  if (errPlace.isec && errPlace.isec->name.starts_with(Prefix: ".debug"))
    diag << "; consider recompiling with -fdebug-types-section to reduce size "
            "of debug sections";
}
117
118void elf::reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n,
119 const Symbol &sym, const Twine &msg) {
120 auto diag = Err(ctx);
121 diag << getErrorPlace(ctx, loc).loc << msg << " is out of range: " << v
122 << " is not in [" << llvm::minIntN(N: n) << ", " << llvm::maxIntN(N: n) << "]";
123 if (!sym.getName().empty()) {
124 diag << "; references '" << &sym << '\'';
125 printDefinedLocation(s&: diag, sym);
126 }
127}
128
// Build a bitmask with one bit set for each argument whose value lies in
// [0, 64); values outside that range contribute nothing. An empty argument
// list yields 0.
template <typename... Vals>
static constexpr uint64_t buildMask(Vals... vals) {
  return (uint64_t(0) | ... |
          (0 <= int(vals) && int(vals) < 64 ? uint64_t(1) << int(vals) : 0));
}
137
// Return true if `Expr` is one of `Exprs`.
// There are more than 64 but less than 128 RelExprs, so we divide the set of
// exprs into [0, 64) and [64, 128) and represent each range as a constant
// 64-bit mask. Then we decide which mask to test depending on the value of
// expr and use a simple shift and bitwise-and to test for membership.
template <RelExpr... Exprs> static bool oneof(RelExpr expr) {
  assert(0 <= expr && (int)expr < 128 &&
         "RelExpr is too large for 128-bit mask!");

  // buildMask ignores values outside [0, 64), so Exprs from the other half
  // simply do not contribute to the mask being tested.
  if (expr >= 64)
    return (uint64_t(1) << (expr - 64)) & buildMask((Exprs - 64)...);
  return (uint64_t(1) << expr) & buildMask(Exprs...);
}
151
152static RelType getMipsPairType(RelType type, bool isLocal) {
153 switch (type) {
154 case R_MIPS_HI16:
155 return R_MIPS_LO16;
156 case R_MIPS_GOT16:
157 // In case of global symbol, the R_MIPS_GOT16 relocation does not
158 // have a pair. Each global symbol has a unique entry in the GOT
159 // and a corresponding instruction with help of the R_MIPS_GOT16
160 // relocation loads an address of the symbol. In case of local
161 // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold
162 // the high 16 bits of the symbol's value. A paired R_MIPS_LO16
163 // relocations handle low 16 bits of the address. That allows
164 // to allocate only one GOT entry for every 64 KiB of local data.
165 return isLocal ? R_MIPS_LO16 : R_MIPS_NONE;
166 case R_MICROMIPS_GOT16:
167 return isLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE;
168 case R_MIPS_PCHI16:
169 return R_MIPS_PCLO16;
170 case R_MICROMIPS_HI16:
171 return R_MICROMIPS_LO16;
172 default:
173 return R_MIPS_NONE;
174 }
175}
176
177// True if non-preemptable symbol always has the same value regardless of where
178// the DSO is loaded.
179static bool isAbsolute(const Symbol &sym) {
180 if (sym.isUndefined())
181 return true;
182 if (const auto *dr = dyn_cast<Defined>(Val: &sym))
183 return dr->section == nullptr; // Absolute symbol.
184 return false;
185}
186
187static bool isAbsoluteValue(const Symbol &sym) {
188 return isAbsolute(sym) || sym.isTls();
189}
190
// Returns true if expr refers to a PLT entry: relocations with these
// expressions resolve to the symbol's PLT stub rather than the symbol itself.
static bool needsPlt(RelExpr expr) {
  return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL,
               R_GOTPLT_PC, RE_LOONGARCH_PLT_PAGE_PC, RE_PPC32_PLTREL,
               RE_PPC64_CALL_PLT>(expr);
}
197
198bool lld::elf::needsGot(RelExpr expr) {
199 return oneof<R_GOT, RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, R_GOT_OFF,
200 RE_MIPS_GOT_LOCAL_PAGE, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32,
201 RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC,
202 RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC, R_GOTPLT,
203 RE_AARCH64_GOT_PAGE, RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>(
204 expr);
205}
206
// True if this expression is of the form Sym - X, where X is a position in the
// file (PC, or GOT for example). For instance, R_PC computes Sym relative to
// the place being relocated.
static bool isRelExpr(RelExpr expr) {
  return oneof<R_PC, R_GOTREL, R_GOTPLTREL, RE_ARM_PCA, RE_MIPS_GOTREL,
               RE_PPC64_CALL, RE_PPC64_RELAX_TOC, RE_AARCH64_PAGE_PC,
               R_RELAX_GOT_PC, RE_RISCV_PC_INDIRECT, RE_PPC64_RELAX_GOT_PC,
               RE_LOONGARCH_PAGE_PC>(expr);
}
215
216static RelExpr toPlt(RelExpr expr) {
217 switch (expr) {
218 case RE_LOONGARCH_PAGE_PC:
219 return RE_LOONGARCH_PLT_PAGE_PC;
220 case RE_PPC64_CALL:
221 return RE_PPC64_CALL_PLT;
222 case R_PC:
223 return R_PLT_PC;
224 case R_ABS:
225 return R_PLT;
226 case R_GOTREL:
227 return R_PLT_GOTREL;
228 default:
229 return expr;
230 }
231}
232
233static RelExpr fromPlt(RelExpr expr) {
234 // We decided not to use a plt. Optimize a reference to the plt to a
235 // reference to the symbol itself.
236 switch (expr) {
237 case R_PLT_PC:
238 case RE_PPC32_PLTREL:
239 return R_PC;
240 case RE_LOONGARCH_PLT_PAGE_PC:
241 return RE_LOONGARCH_PAGE_PC;
242 case RE_PPC64_CALL_PLT:
243 return RE_PPC64_CALL;
244 case R_PLT:
245 return R_ABS;
246 case R_PLT_GOTPLT:
247 return R_GOTPLTREL;
248 case R_PLT_GOTREL:
249 return R_GOTREL;
250 default:
251 return expr;
252 }
253}
254
255// Returns true if a given shared symbol is in a read-only segment in a DSO.
256template <class ELFT> static bool isReadOnly(SharedSymbol &ss) {
257 using Elf_Phdr = typename ELFT::Phdr;
258
259 // Determine if the symbol is read-only by scanning the DSO's program headers.
260 const auto &file = cast<SharedFile>(Val&: *ss.file);
261 for (const Elf_Phdr &phdr :
262 check(file.template getObj<ELFT>().program_headers()))
263 if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) &&
264 !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr &&
265 ss.value < phdr.p_vaddr + phdr.p_memsz)
266 return true;
267 return false;
268}
269
// Returns symbols at the same offset as a given symbol, including SS itself.
//
// If two or more symbols are at the same offset, and at least one of
// them are copied by a copy relocation, all of them need to be copied.
// Otherwise, they would refer to different places at runtime.
template <class ELFT>
static SmallSet<SharedSymbol *, 4> getSymbolsAt(Ctx &ctx, SharedSymbol &ss) {
  using Elf_Sym = typename ELFT::Sym;

  const auto &file = cast<SharedFile>(Val&: *ss.file);

  // Scan the DSO's global dynamic symbols for aliases defined at the same
  // st_value as ss.
  SmallSet<SharedSymbol *, 4> ret;
  for (const Elf_Sym &s : file.template getGlobalELFSyms<ELFT>()) {
    // Skip undefined/absolute/TLS symbols and symbols at other addresses.
    if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS ||
        s.getType() == STT_TLS || s.st_value != ss.value)
      continue;
    StringRef name = check(s.getName(file.getStringTable()));
    // Only aliases present in our symbol table as SharedSymbols matter.
    Symbol *sym = ctx.symtab->find(name);
    if (auto *alias = dyn_cast_or_null<SharedSymbol>(Val: sym))
      ret.insert(Ptr: alias);
  }

  // The loop does not check SHT_GNU_verneed, so ret does not contain
  // non-default version symbols. If ss has a non-default version, ret won't
  // contain ss. Just add ss unconditionally. If a non-default version alias is
  // separately copy relocated, it and ss will have different addresses.
  // Fortunately this case is impractical and fails with GNU ld as well.
  ret.insert(Ptr: &ss);
  return ret;
}
300
// When a symbol is copy relocated or we create a canonical plt entry, it is
// effectively a defined symbol. In the case of copy relocation the symbol is
// in .bss and in the case of a canonical plt entry it is in .plt. This function
// replaces the existing symbol with a Defined pointing to the appropriate
// location.
static void replaceWithDefined(Ctx &ctx, Symbol &sym, SectionBase &sec,
                               uint64_t value, uint64_t size) {
  // Keep a copy so attributes that must survive the overwrite can be
  // restored below.
  Symbol old = sym;
  Defined(ctx, sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value,
          size, &sec)
      .overwrite(sym);

  sym.versionId = old.versionId;
  sym.isUsedInRegularObj = true;
  // A copy relocated alias may need a GOT entry.
  // Only the NEEDS_GOT bit is carried over; all other flag bits are reset.
  sym.flags.store(i: old.flags.load(m: std::memory_order_relaxed) & NEEDS_GOT,
                  m: std::memory_order_relaxed);
}
319
320// Reserve space in .bss or .bss.rel.ro for copy relocation.
321//
322// The copy relocation is pretty much a hack. If you use a copy relocation
323// in your program, not only the symbol name but the symbol's size, RW/RO
324// bit and alignment become part of the ABI. In addition to that, if the
325// symbol has aliases, the aliases become part of the ABI. That's subtle,
326// but if you violate that implicit ABI, that can cause very counter-
327// intuitive consequences.
328//
329// So, what is the copy relocation? It's for linking non-position
330// independent code to DSOs. In an ideal world, all references to data
331// exported by DSOs should go indirectly through GOT. But if object files
332// are compiled as non-PIC, all data references are direct. There is no
333// way for the linker to transform the code to use GOT, as machine
334// instructions are already set in stone in object files. This is where
335// the copy relocation takes a role.
336//
337// A copy relocation instructs the dynamic linker to copy data from a DSO
338// to a specified address (which is usually in .bss) at load-time. If the
339// static linker (that's us) finds a direct data reference to a DSO
340// symbol, it creates a copy relocation, so that the symbol can be
341// resolved as if it were in .bss rather than in a DSO.
342//
343// As you can see in this function, we create a copy relocation for the
344// dynamic linker, and the relocation contains not only symbol name but
345// various other information about the symbol. So, such attributes become a
346// part of the ABI.
347//
348// Note for application developers: I can give you a piece of advice if
349// you are writing a shared library. You probably should export only
350// functions from your library. You shouldn't export variables.
351//
352// As an example what can happen when you export variables without knowing
353// the semantics of copy relocations, assume that you have an exported
354// variable of type T. It is an ABI-breaking change to add new members at
355// end of T even though doing that doesn't change the layout of the
356// existing members. That's because the space for the new members are not
357// reserved in .bss unless you recompile the main program. That means they
358// are likely to overlap with other data that happens to be laid out next
359// to the variable in .bss. This kind of issue is sometimes very hard to
360// debug. What's a solution? Instead of exporting a variable V from a DSO,
361// define an accessor getV().
template <class ELFT> static void addCopyRelSymbol(Ctx &ctx, SharedSymbol &ss) {
  // Copy relocation against zero-sized symbol doesn't make sense.
  uint64_t symSize = ss.getSize();
  if (symSize == 0 || ss.alignment == 0)
    Err(ctx) << "cannot create a copy relocation for symbol " << &ss;

  // See if this symbol is in a read-only segment. If so, preserve the symbol's
  // memory protection by reserving space in the .bss.rel.ro section.
  bool isRO = isReadOnly<ELFT>(ss);
  BssSection *sec = make<BssSection>(args&: ctx, args: isRO ? ".bss.rel.ro" : ".bss",
                                     args&: symSize, args&: ss.alignment);
  OutputSection *osec = (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent();

  // At this point, sectionBases has been migrated to sections. Append sec to
  // sections.
  if (osec->commands.empty() ||
      !isa<InputSectionDescription>(Val: osec->commands.back()))
    osec->commands.push_back(Elt: make<InputSectionDescription>(args: ""));
  auto *isd = cast<InputSectionDescription>(Val: osec->commands.back());
  isd->sections.push_back(Elt: sec);
  osec->commitSection(isec: sec);

  // Look through the DSO's dynamic symbol table for aliases and create a
  // dynamic symbol for each one. This causes the copy relocation to correctly
  // interpose any aliases.
  for (SharedSymbol *sym : getSymbolsAt<ELFT>(ctx, ss))
    replaceWithDefined(ctx, sym&: *sym, sec&: *sec, value: 0, size: sym->size);

  // Emit the dynamic COPY relocation that instructs the loader to copy the
  // symbol's data from the DSO into the reserved space at load time.
  ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->copyRel, isec&: *sec, offsetInSec: 0, sym&: ss);
}
392
393// .eh_frame sections are mergeable input sections, so their input
394// offsets are not linearly mapped to output section. For each input
395// offset, we need to find a section piece containing the offset and
396// add the piece's base address to the input offset to compute the
397// output offset. That isn't cheap.
398//
399// This class is to speed up the offset computation. When we process
400// relocations, we access offsets in the monotonically increasing
401// order. So we can optimize for that access pattern.
402//
403// For sections other than .eh_frame, this class doesn't do anything.
404namespace {
class OffsetGetter {
public:
  OffsetGetter() = default;
  explicit OffsetGetter(InputSectionBase &sec) {
    // Only .eh_frame sections need offset translation; for all other
    // sections `cies` stays empty and get() is the identity function.
    if (auto *eh = dyn_cast<EhInputSection>(Val: &sec)) {
      cies = eh->cies;
      fdes = eh->fdes;
      i = cies.begin();
      j = fdes.begin();
    }
  }

  // Translates offsets in input sections to offsets in output sections.
  // Given offset must increase monotonically. We assume that Piece is
  // sorted by inputOff.
  uint64_t get(Ctx &ctx, uint64_t off) {
    if (cies.empty())
      return off;

    // Advance the FDE cursor past every piece starting at or before off.
    while (j != fdes.end() && j->inputOff <= off)
      ++j;
    auto it = j;
    // If off is not inside the FDE piece just passed, fall back to the CIEs.
    if (j == fdes.begin() || j[-1].inputOff + j[-1].size <= off) {
      while (i != cies.end() && i->inputOff <= off)
        ++i;
      if (i == cies.begin() || i[-1].inputOff + i[-1].size <= off) {
        Err(ctx) << ".eh_frame: relocation is not in any piece";
        return 0;
      }
      it = i;
    }

    // Offset -1 means that the piece is dead (i.e. garbage collected).
    if (it[-1].outputOff == -1)
      return -1;
    return it[-1].outputOff + (off - it[-1].inputOff);
  }

private:
  // CIE and FDE pieces of the .eh_frame section, each sorted by inputOff.
  ArrayRef<EhSectionPiece> cies, fdes;
  // Monotonic cursors into cies/fdes, exploiting the caller's increasing
  // offset access pattern.
  ArrayRef<EhSectionPiece>::iterator i, j;
};
447
// This class encapsulates states needed to scan relocations for one
// InputSectionBase.
class RelocationScanner {
public:
  RelocationScanner(Ctx &ctx) : ctx(ctx) {}
  // Entry point: scan all relocations of section s. isEH marks .eh_frame
  // input.
  template <class ELFT>
  void scanSection(InputSectionBase &s, bool isEH = false);

private:
  Ctx &ctx;
  // Section whose relocations are currently being scanned.
  InputSectionBase *sec;
  // Maps input offsets to output offsets (non-trivial only for .eh_frame).
  OffsetGetter getter;

  // End of relocations, used by Mips/PPC64.
  const void *end = nullptr;

  // Merge an N32 series of relocations sharing one r_offset into a single
  // RelType (advances rel past the series); defined below.
  template <class RelTy> RelType getMipsN32RelType(RelTy *&rel) const;
  // Compute the addend carried by MIPS "paired" relocations; defined below.
  template <class ELFT, class RelTy>
  int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const;
  // Whether the relocation value is a link-time constant; defined later in
  // this file.
  bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
                                uint64_t relOff) const;
  // Auxiliary processing for a scanned relocation; defined later in this file.
  void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym,
                  int64_t addend) const;
  // TLS-specific relocation handling; defined later in this file.
  unsigned handleTlsRelocation(RelExpr expr, RelType type, uint64_t offset,
                               Symbol &sym, int64_t addend);

  template <class ELFT, class RelTy>
  void scanOne(typename Relocs<RelTy>::const_iterator &i);
  template <class ELFT, class RelTy> void scan(Relocs<RelTy> rels);
};
478} // namespace
479
// MIPS has an odd notion of "paired" relocations to calculate addends.
// For example, if a relocation is of R_MIPS_HI16, there must be a
// R_MIPS_LO16 relocation after that, and an addend is calculated using
// the two relocations.
template <class ELFT, class RelTy>
int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr,
                                             bool isLocal) const {
  // For GOT-relative references to local symbols, the defining object file's
  // GP0 value serves as the addend.
  if (expr == RE_MIPS_GOTREL && isLocal)
    return sec->getFile<ELFT>()->mipsGp0;

  // The ABI says that the paired relocation is used only for REL.
  // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  // This generalises to relocation types with implicit addends.
  if (RelTy::HasAddend)
    return 0;

  RelType type = rel.getType(ctx.arg.isMips64EL);
  RelType pairTy = getMipsPairType(type, isLocal);
  if (pairTy == R_MIPS_NONE)
    return 0;

  const uint8_t *buf = sec->content().data();
  uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL);

  // To make things worse, paired relocations might not be contiguous in
  // the relocation table, so we need to do linear search. *sigh*
  for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri)
    if (ri->getType(ctx.arg.isMips64EL) == pairTy &&
        ri->getSymbol(ctx.arg.isMips64EL) == symIndex)
      return ctx.target->getImplicitAddend(buf: buf + ri->r_offset, type: pairTy);

  // A missing pair is tolerated: warn and fall back to a zero addend.
  Warn(ctx) << "can't find matching " << pairTy << " relocation for " << type;
  return 0;
}
514
// Custom error message if Sym is defined in a discarded section.
template <class ELFT>
static void maybeReportDiscarded(Ctx &ctx, ELFSyncStream &msg, Undefined &sym) {
  // Only object files record a discarded-section index; otherwise leave msg
  // empty so the caller falls back to the generic "undefined symbol" message.
  auto *file = dyn_cast<ObjFile<ELFT>>(sym.file);
  if (!file || !sym.discardedSecIdx)
    return;
  ArrayRef<typename ELFT::Shdr> objSections =
      file->template getELFShdrs<ELFT>();

  if (sym.type == ELF::STT_SECTION) {
    msg << "relocation refers to a discarded section: ";
    msg << CHECK2(
        file->getObj().getSectionName(objSections[sym.discardedSecIdx]), file);
  } else {
    msg << "relocation refers to a symbol in a discarded section: " << &sym;
  }
  msg << "\n>>> defined in " << file;

  // NOTE(review): the lookup above indexes objSections with discardedSecIdx
  // while this one uses discardedSecIdx - 1 — confirm the intended index base
  // before changing either.
  Elf_Shdr_Impl<ELFT> elfSec = objSections[sym.discardedSecIdx - 1];
  if (elfSec.sh_type != SHT_GROUP)
    return;

  // If the discarded section is a COMDAT.
  StringRef signature = file->getShtGroupSignature(objSections, elfSec);
  if (const InputFile *prevailing =
          ctx.symtab->comdatGroups.lookup(Val: CachedHashStringRef(signature))) {
    msg << "\n>>> section group signature: " << signature
        << "\n>>> prevailing definition is in " << prevailing;
    if (sym.nonPrevailing) {
      msg << "\n>>> or the symbol in the prevailing group had STB_WEAK "
             "binding and the symbol in a non-prevailing group had STB_GLOBAL "
             "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding "
             "signature is not supported";
    }
  }
}
551
552// Check whether the definition name def is a mangled function name that matches
553// the reference name ref.
554static bool canSuggestExternCForCXX(StringRef ref, StringRef def) {
555 llvm::ItaniumPartialDemangler d;
556 std::string name = def.str();
557 if (d.partialDemangle(MangledName: name.c_str()))
558 return false;
559 char *buf = d.getFunctionName(Buf: nullptr, N: nullptr);
560 if (!buf)
561 return false;
562 bool ret = ref == buf;
563 free(ptr: buf);
564 return ret;
565}
566
// Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns
// the suggested symbol, which is either in the symbol table, or in the same
// file of sym. pre_hint/post_hint are filled with text to print around the
// suggestion.
static const Symbol *getAlternativeSpelling(Ctx &ctx, const Undefined &sym,
                                            std::string &pre_hint,
                                            std::string &post_hint) {
  // Candidate pool of local defined symbols from the referencing file.
  DenseMap<StringRef, const Symbol *> map;
  if (sym.file->kind() == InputFile::ObjKind) {
    auto *file = cast<ELFFileBase>(Val: sym.file);
    // If sym is a symbol defined in a discarded section, maybeReportDiscarded()
    // will give an error. Don't suggest an alternative spelling.
    if (sym.discardedSecIdx != 0 &&
        file->getSections()[sym.discardedSecIdx] == &InputSection::discarded)
      return nullptr;

    // Build a map of local defined symbols.
    for (const Symbol *s : sym.file->getSymbols())
      if (s->isLocal() && s->isDefined() && !s->getName().empty())
        map.try_emplace(Key: s->getName(), Args&: s);
  }

  // Return a symbol named newName if one is defined locally in sym's file or
  // is present (and not undefined) in the global symbol table.
  auto suggest = [&](StringRef newName) -> const Symbol * {
    // If defined locally.
    if (const Symbol *s = map.lookup(Val: newName))
      return s;

    // If in the symbol table and not undefined.
    if (const Symbol *s = ctx.symtab->find(name: newName))
      if (!s->isUndefined())
        return s;

    return nullptr;
  };

  // This loop enumerates all strings of Levenshtein distance 1 as typo
  // correction candidates and suggests the one that exists as a non-undefined
  // symbol.
  StringRef name = sym.getName();
  for (size_t i = 0, e = name.size(); i != e + 1; ++i) {
    // Insert a character before name[i].
    std::string newName = (name.substr(Start: 0, N: i) + "0" + name.substr(Start: i)).str();
    for (char c = '0'; c <= 'z'; ++c) {
      newName[i] = c;
      if (const Symbol *s = suggest(newName))
        return s;
    }
    if (i == e)
      break;

    // Substitute name[i].
    newName = std::string(name);
    for (char c = '0'; c <= 'z'; ++c) {
      newName[i] = c;
      if (const Symbol *s = suggest(newName))
        return s;
    }

    // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is
    // common.
    if (i + 1 < e) {
      newName[i] = name[i + 1];
      newName[i + 1] = name[i];
      if (const Symbol *s = suggest(newName))
        return s;
    }

    // Delete name[i].
    newName = (name.substr(Start: 0, N: i) + name.substr(Start: i + 1)).str();
    if (const Symbol *s = suggest(newName))
      return s;
  }

  // Case mismatch, e.g. Foo vs FOO.
  for (auto &it : map)
    if (name.equals_insensitive(RHS: it.first))
      return it.second;
  for (Symbol *sym : ctx.symtab->getSymbols())
    if (!sym->isUndefined() && name.equals_insensitive(RHS: sym->getName()))
      return sym;

  // The reference may be a mangled name while the definition is not. Suggest a
  // missing extern "C".
  if (name.starts_with(Prefix: "_Z")) {
    std::string buf = name.str();
    llvm::ItaniumPartialDemangler d;
    if (!d.partialDemangle(MangledName: buf.c_str()))
      if (char *buf = d.getFunctionName(Buf: nullptr, N: nullptr)) {
        const Symbol *s = suggest(buf);
        free(ptr: buf);
        if (s) {
          pre_hint = ": extern \"C\" ";
          return s;
        }
      }
  } else {
    // The reference is unmangled; look for a mangled C++ definition whose
    // demangled function name matches it.
    const Symbol *s = nullptr;
    for (auto &it : map)
      if (canSuggestExternCForCXX(ref: name, def: it.first)) {
        s = it.second;
        break;
      }
    if (!s)
      for (Symbol *sym : ctx.symtab->getSymbols())
        if (canSuggestExternCForCXX(ref: name, def: sym->getName())) {
          s = sym;
          break;
        }
    if (s) {
      pre_hint = " to declare ";
      post_hint = " as extern \"C\"?";
      return s;
    }
  }

  return nullptr;
}
683
684static void reportUndefinedSymbol(Ctx &ctx, const UndefinedDiag &undef,
685 bool correctSpelling) {
686 Undefined &sym = *undef.sym;
687 ELFSyncStream msg(ctx, DiagLevel::None);
688
689 auto visibility = [&]() {
690 switch (sym.visibility()) {
691 case STV_INTERNAL:
692 return "internal ";
693 case STV_HIDDEN:
694 return "hidden ";
695 case STV_PROTECTED:
696 return "protected ";
697 default:
698 return "";
699 }
700 };
701
702 switch (ctx.arg.ekind) {
703 case ELF32LEKind:
704 maybeReportDiscarded<ELF32LE>(ctx, msg, sym);
705 break;
706 case ELF32BEKind:
707 maybeReportDiscarded<ELF32BE>(ctx, msg, sym);
708 break;
709 case ELF64LEKind:
710 maybeReportDiscarded<ELF64LE>(ctx, msg, sym);
711 break;
712 case ELF64BEKind:
713 maybeReportDiscarded<ELF64BE>(ctx, msg, sym);
714 break;
715 default:
716 llvm_unreachable("");
717 }
718 if (msg.str().empty())
719 msg << "undefined " << visibility() << "symbol: " << &sym;
720
721 const size_t maxUndefReferences = 3;
722 for (UndefinedDiag::Loc l :
723 ArrayRef(undef.locs).take_front(N: maxUndefReferences)) {
724 InputSectionBase &sec = *l.sec;
725 uint64_t offset = l.offset;
726
727 msg << "\n>>> referenced by ";
728 // In the absence of line number information, utilize DW_TAG_variable (if
729 // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`).
730 Symbol *enclosing = sec.getEnclosingSymbol(offset);
731
732 ELFSyncStream msg1(ctx, DiagLevel::None);
733 auto tell = msg.tell();
734 msg << sec.getSrcMsg(sym: enclosing ? *enclosing : sym, offset);
735 if (tell != msg.tell())
736 msg << "\n>>> ";
737 msg << sec.getObjMsg(offset);
738 }
739
740 if (maxUndefReferences < undef.locs.size())
741 msg << "\n>>> referenced " << (undef.locs.size() - maxUndefReferences)
742 << " more times";
743
744 if (correctSpelling) {
745 std::string pre_hint = ": ", post_hint;
746 if (const Symbol *corrected =
747 getAlternativeSpelling(ctx, sym, pre_hint, post_hint)) {
748 msg << "\n>>> did you mean" << pre_hint << corrected << post_hint
749 << "\n>>> defined in: " << corrected->file;
750 }
751 }
752
753 if (sym.getName().starts_with(Prefix: "_ZTV"))
754 msg << "\n>>> the vtable symbol may be undefined because the class is "
755 "missing its key function "
756 "(see https://lld.llvm.org/missingkeyfunction)";
757 if (ctx.arg.gcSections && ctx.arg.zStartStopGC &&
758 sym.getName().starts_with(Prefix: "__start_")) {
759 msg << "\n>>> the encapsulation symbol needs to be retained under "
760 "--gc-sections properly; consider -z nostart-stop-gc "
761 "(see https://lld.llvm.org/ELF/start-stop-gc)";
762 }
763
764 if (undef.isWarning)
765 Warn(ctx) << msg.str();
766 else
767 ctx.e.error(msg: msg.str(), tag: ErrorTag::SymbolNotFound, args: {sym.getName()});
768}
769
770void elf::reportUndefinedSymbols(Ctx &ctx) {
771 // Find the first "undefined symbol" diagnostic for each diagnostic, and
772 // collect all "referenced from" lines at the first diagnostic.
773 DenseMap<Symbol *, UndefinedDiag *> firstRef;
774 for (UndefinedDiag &undef : ctx.undefErrs) {
775 assert(undef.locs.size() == 1);
776 if (UndefinedDiag *canon = firstRef.lookup(Val: undef.sym)) {
777 canon->locs.push_back(Elt: undef.locs[0]);
778 undef.locs.clear();
779 } else
780 firstRef[undef.sym] = &undef;
781 }
782
783 // Enable spell corrector for the first 2 diagnostics.
784 for (auto [i, undef] : llvm::enumerate(First&: ctx.undefErrs))
785 if (!undef.locs.empty())
786 reportUndefinedSymbol(ctx, undef, correctSpelling: i < 2);
787}
788
// Report an undefined symbol if necessary.
// Returns true if the undefined symbol will produce an error message.
static bool maybeReportUndefined(Ctx &ctx, Undefined &sym,
                                 InputSectionBase &sec, uint64_t offset) {
  // ctx.relocMutex serializes concurrent updates to ctx.undefErrs.
  std::lock_guard<std::mutex> lock(ctx.relocMutex);
  // If versioned, issue an error (even if the symbol is weak) because we don't
  // know the defining filename which is required to construct a Verneed entry.
  if (sym.hasVersionSuffix) {
    ctx.undefErrs.push_back(Elt: {.sym: &sym, .locs: {{.sec: &sec, .offset: offset}}, .isWarning: false});
    return true;
  }
  // Weak undefined symbols are not diagnosed.
  if (sym.isWeak())
    return false;

  bool canBeExternal = !sym.isLocal() && sym.visibility() == STV_DEFAULT;
  if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal)
    return false;

  // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc
  // which references a switch table in a discarded .rodata/.text section. The
  // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF
  // spec says references from outside the group to a STB_LOCAL symbol are not
  // allowed. Work around the bug.
  //
  // PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible
  // because .LC0-.LTOC is not representable if the two labels are in different
  // .got2
  if (sym.discardedSecIdx != 0 && (sec.name == ".got2" || sec.name == ".toc"))
    return false;

  // A diagnostic is still recorded for the "warning" policies, but then it
  // does not count as an error for the return value.
  bool isWarning =
      (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) ||
      ctx.arg.noinhibitExec;
  ctx.undefErrs.push_back(Elt: {.sym: &sym, .locs: {{.sec: &sec, .offset: offset}}, .isWarning: isWarning});
  return !isWarning;
}
825
826// MIPS N32 ABI treats series of successive relocations with the same offset
827// as a single relocation. The similar approach used by N64 ABI, but this ABI
828// packs all relocations into the single relocation record. Here we emulate
829// this for the N32 ABI. Iterate over relocation with the same offset and put
830// theirs types into the single bit-set.
831template <class RelTy>
832RelType RelocationScanner::getMipsN32RelType(RelTy *&rel) const {
833 uint32_t type = 0;
834 uint64_t offset = rel->r_offset;
835
836 int n = 0;
837 while (rel != static_cast<const RelTy *>(end) && rel->r_offset == offset)
838 type |= (rel++)->getType(ctx.arg.isMips64EL) << (8 * n++);
839 return type;
840}
841
// Record a relative dynamic relocation (target value = load base + addend)
// at isec+offsetInSec. With shard=true, additions go to per-thread vectors
// (indexed by parallel::getThreadIndex()) so concurrent scanners don't
// contend on one list.
template <bool shard = false>
static void addRelativeReloc(Ctx &ctx, InputSectionBase &isec,
                             uint64_t offsetInSec, Symbol &sym, int64_t addend,
                             RelExpr expr, RelType type) {
  Partition &part = isec.getPartition(ctx);

  if (sym.isTagged()) {
    part.relaDyn->addRelativeReloc<shard>(ctx.target->relativeRel, isec,
                                          offsetInSec, sym, addend, type, expr);
    // With MTE globals, we always want to derive the address tag by `ldg`-ing
    // the symbol. When we have a RELATIVE relocation though, we no longer have
    // a reference to the symbol. Because of this, when we have an addend that
    // puts the result of the RELATIVE relocation out-of-bounds of the symbol
    // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI
    // says we should store the offset to the start of the symbol in the target
    // field. This is described in further detail in:
    // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative
    if (addend < 0 || static_cast<uint64_t>(addend) >= sym.getSize())
      isec.relocations.push_back(Elt: {.expr: expr, .type: type, .offset: offsetInSec, .addend: addend, .sym: &sym});
    return;
  }

  // Add a relative relocation. If relrDyn section is enabled, and the
  // relocation offset is guaranteed to be even, add the relocation to
  // the relrDyn section, otherwise add it to the relaDyn section.
  // relrDyn sections don't support odd offsets. Also, relrDyn sections
  // don't store the addend values, so we must write it to the relocated
  // address.
  if (part.relrDyn && isec.addralign >= 2 && offsetInSec % 2 == 0) {
    isec.addReloc(r: {.expr: expr, .type: type, .offset: offsetInSec, .addend: addend, .sym: &sym});
    if (shard)
      part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back(
          Elt: {.inputSec: &isec, .relocIdx: isec.relocs().size() - 1});
    else
      part.relrDyn->relocs.push_back(Elt: {.inputSec: &isec, .relocIdx: isec.relocs().size() - 1});
    return;
  }
  // Fallback: a regular .rela.dyn relative relocation.
  part.relaDyn->addRelativeReloc<shard>(ctx.target->relativeRel, isec,
                                        offsetInSec, sym, addend, type, expr);
}
882
883template <class PltSection, class GotPltSection>
884static void addPltEntry(Ctx &ctx, PltSection &plt, GotPltSection &gotPlt,
885 RelocationBaseSection &rel, RelType type, Symbol &sym) {
886 plt.addEntry(sym);
887 gotPlt.addEntry(sym);
888 rel.addReloc({type, &gotPlt, sym.getGotPltOffset(ctx),
889 sym.isPreemptible ? DynamicReloc::AgainstSymbol
890 : DynamicReloc::AddendOnlyWithTargetVA,
891 sym, 0, R_ABS});
892}
893
// Allocate a .got entry for sym and arrange for it to receive the right
// value: a GLOB_DAT dynamic relocation if preemptible, a link-time constant
// if non-PIC or absolute, or a relative dynamic relocation otherwise.
void elf::addGotEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);

  // If preemptible, emit a GLOB_DAT relocation.
  if (sym.isPreemptible) {
    ctx.mainPart->relaDyn->addReloc(reloc: {ctx.target->gotRel, ctx.in.got.get(), off,
                                    DynamicReloc::AgainstSymbol, sym, 0,
                                    R_ABS});
    return;
  }

  // Otherwise, the value is either a link-time constant or the load base
  // plus a constant.
  if (!ctx.arg.isPic || isAbsolute(sym))
    ctx.in.got->addConstant(r: {.expr: R_ABS, .type: ctx.target->symbolicRel, .offset: off, .addend: 0, .sym: &sym});
  else
    addRelativeReloc(ctx, isec&: *ctx.in.got, offsetInSec: off, sym, addend: 0, expr: R_ABS,
                     type: ctx.target->symbolicRel);
}
914
// Like addGotEntry, but for an AArch64 PAuth signed GOT entry. A signed
// entry always needs a dynamic relocation (AUTH_GLOB_DAT when preemptible,
// AUTH_RELATIVE otherwise); it is never a pure link-time constant.
static void addGotAuthEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  ctx.in.got->addAuthEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);

  // If preemptible, emit a GLOB_DAT relocation.
  if (sym.isPreemptible) {
    ctx.mainPart->relaDyn->addReloc(reloc: {R_AARCH64_AUTH_GLOB_DAT, ctx.in.got.get(),
                                    off, DynamicReloc::AgainstSymbol, sym, 0,
                                    R_ABS});
    return;
  }

  // Signed GOT requires dynamic relocation.
  ctx.in.got->getPartition(ctx).relaDyn->addReloc(
      reloc: {R_AARCH64_AUTH_RELATIVE, ctx.in.got.get(), off,
       DynamicReloc::AddendOnlyWithTargetVA, sym, 0, R_ABS});
}
933
// Allocate a GOT entry holding sym's TP-relative offset. If the offset is
// known at link time (non-preemptible symbol, not -shared) store it as a
// constant; otherwise emit a TLS GOT dynamic relocation.
static void addTpOffsetGotEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);
  if (!sym.isPreemptible && !ctx.arg.shared) {
    ctx.in.got->addConstant(r: {.expr: R_TPREL, .type: ctx.target->symbolicRel, .offset: off, .addend: 0, .sym: &sym});
    return;
  }
  ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
      dynType: ctx.target->tlsGotRel, isec&: *ctx.in.got, offsetInSec: off, sym, addendRelType: ctx.target->symbolicRel);
}
944
945// Return true if we can define a symbol in the executable that
946// contains the value/function of a symbol defined in a shared
947// library.
948static bool canDefineSymbolInExecutable(Ctx &ctx, Symbol &sym) {
949 // If the symbol has default visibility the symbol defined in the
950 // executable will preempt it.
951 // Note that we want the visibility of the shared symbol itself, not
952 // the visibility of the symbol in the output file we are producing.
953 if (!sym.dsoProtected)
954 return true;
955
956 // If we are allowed to break address equality of functions, defining
957 // a plt entry will allow the program to call the function in the
958 // .so, but the .so and the executable will no agree on the address
959 // of the function. Similar logic for objects.
960 return ((sym.isFunc() && ctx.arg.ignoreFunctionAddressEquality) ||
961 (sym.isObject() && ctx.arg.ignoreDataAddressEquality));
962}
963
// Returns true if a given relocation can be computed at link-time.
// This only handles relocation types expected in processAux.
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers to a
// non-interposable function in the same executable. This function
// will return true for such a relocation.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
                                                 const Symbol &sym,
                                                 uint64_t relOff) const {
  // These expressions always compute a constant
  if (oneof<
          R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, RE_MIPS_GOT_LOCAL_PAGE,
          RE_MIPS_GOTREL, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32, RE_MIPS_GOT_GP_PC,
          RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC,
          R_GOTONLY_PC, R_GOTPLTONLY_PC, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT,
          R_GOTPLT_GOTREL, R_GOTPLT_PC, RE_PPC32_PLTREL, RE_PPC64_CALL_PLT,
          RE_PPC64_RELAX_TOC, RE_RISCV_ADD, RE_AARCH64_GOT_PAGE,
          RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, RE_LOONGARCH_PLT_PAGE_PC,
          RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>(expr: e))
    return true;

  // These never do, except if the entire file is position dependent or if
  // only the low bits are used.
  if (e == R_GOT || e == R_PLT)
    return ctx.target->usesOnlyLowPageBits(type) || !ctx.arg.isPic;
  // R_AARCH64_AUTH_ABS64 requires a dynamic relocation.
  if (e == RE_AARCH64_AUTH)
    return false;

  // The behavior of an undefined weak reference is implementation defined.
  // (We treat undefined non-weak the same as undefined weak.) For static
  // -no-pie linking, dynamic relocations are generally avoided (except
  // IRELATIVE). Emitting dynamic relocations for -shared aligns with its -z
  // undefs default. Dynamic -no-pie linking and -pie allow flexibility.
  if (sym.isPreemptible)
    return sym.isUndefined() && !ctx.arg.isPic;
  if (!ctx.arg.isPic)
    return true;

  // Constant when referencing a non-preemptible symbol.
  if (e == R_SIZE || e == RE_RISCV_LEB128)
    return true;

  // For the target and the relocation, we want to know if they are
  // absolute or relative.
  bool absVal = isAbsoluteValue(sym) && e != RE_PPC64_TOCBASE;
  bool relE = isRelExpr(expr: e);
  if (absVal && !relE)
    return true;
  if (!absVal && relE)
    return true;
  if (!absVal && !relE)
    return ctx.target->usesOnlyLowPageBits(type);

  // Remaining case: a relative expression applied to an absolute symbol.
  assert(absVal && relE);

  // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
  // in PIC mode. This is a little strange, but it allows us to link function
  // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
  // Normally such a call will be guarded with a comparison, which will load a
  // zero from the GOT.
  if (sym.isUndefined())
    return true;

  // We set the final symbols values for linker script defined symbols later.
  // They always can be computed as a link time constant.
  if (sym.scriptDefined)
    return true;

  // Infeasible combination: report it, and return true so the caller does
  // not also emit a dynamic relocation for the same location.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot refer to absolute symbol: " << &sym;
  printLocation(s&: diag, sec&: *sec, sym, off: relOff);
  return true;
}
1042
// The reason we have to do this early scan is as follows
// * To mmap the output file, we need to know the size
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by outputting the file with write:
// * Write the allocated output sections, computing addresses.
// * Apply relocations, recording which ones require a dynamic reloc.
// * Write the dynamic relocations.
// * Write the rest of the file.
// This would have some drawbacks. For example, we would only know if .rela.dyn
// is needed after applying relocations. If it is, it will go after rw and rx
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
                                   Symbol &sym, int64_t addend) const {
  // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT
  // indirection.
  const bool isIfunc = sym.isGnuIFunc();
  if (!sym.isPreemptible && (!isIfunc || ctx.arg.zIfuncNoplt)) {
    if (expr != R_GOT_PC) {
      // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call
      // stub type. It should be ignored if optimized to R_PC.
      if (ctx.arg.emachine == EM_PPC && expr == RE_PPC32_PLTREL)
        addend &= ~0x8000;
      // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into
      // call __tls_get_addr even if the symbol is non-preemptible.
      if (!(ctx.arg.emachine == EM_HEXAGON &&
            (type == R_HEX_GD_PLT_B22_PCREL ||
             type == R_HEX_GD_PLT_B22_PCREL_X ||
             type == R_HEX_GD_PLT_B32_PCREL_X)))
        expr = fromPlt(expr);
    } else if (!isAbsoluteValue(sym) ||
               (type == R_PPC64_PCREL_OPT && ctx.arg.emachine == EM_PPC64)) {
      expr = ctx.target->adjustGotPcExpr(type, addend,
                                         loc: sec->content().data() + offset);
      // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
      // needing the GOT if we can't relax everything.
      if (expr == R_RELAX_GOT_PC)
        ctx.in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);
    }
  }

  // We were asked not to generate PLT entries for ifuncs. Instead, pass the
  // direct relocation on through.
  if (LLVM_UNLIKELY(isIfunc) && ctx.arg.zIfuncNoplt) {
    std::lock_guard<std::mutex> lock(ctx.relocMutex);
    sym.isExported = true;
    ctx.mainPart->relaDyn->addSymbolReloc(dynType: type, isec&: *sec, offsetInSec: offset, sym, addend,
                                          addendRelType: type);
    return;
  }

  // Record which GOT/PLT entries the expression requires via NEEDS_* flags;
  // the entries themselves are created elsewhere from these flags.
  if (needsGot(expr)) {
    if (ctx.arg.emachine == EM_MIPS) {
      // MIPS ABI has special rules to process GOT entries and doesn't
      // require relocation entries for them. A special case is TLS
      // relocations. In that case dynamic loader applies dynamic
      // relocations to initialize TLS GOT entries.
      // See "Global Offset Table" in Chapter 5 in the following document
      // for detailed description:
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
      ctx.in.mipsGot->addEntry(file&: *sec->file, sym, addend, expr);
    } else if (!sym.isTls() || ctx.arg.emachine != EM_LOONGARCH) {
      // Many LoongArch TLS relocs reuse the RE_LOONGARCH_GOT type, in which
      // case the NEEDS_GOT flag shouldn't get set.
      if (expr == RE_AARCH64_AUTH_GOT || expr == RE_AARCH64_AUTH_GOT_PAGE_PC ||
          expr == RE_AARCH64_AUTH_GOT_PC)
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_AUTH);
      else
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_NONAUTH);
    }
  } else if (needsPlt(expr)) {
    sym.setFlags(NEEDS_PLT);
  } else if (LLVM_UNLIKELY(isIfunc)) {
    sym.setFlags(HAS_DIRECT_RELOC);
  }

  // If the relocation is known to be a link-time constant, we know no dynamic
  // relocation will be created, pass the control to relocateAlloc() or
  // relocateNonAlloc() to resolve it.
  if (isStaticLinkTimeConstant(e: expr, type, sym, relOff: offset)) {
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return;
  }

  // Use a simple -z notext rule that treats all sections except .eh_frame as
  // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
  // SectionBase::getOffset would incorrectly adjust the offset).
  //
  // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
  // conversion. We still emit a dynamic relocation.
  bool canWrite = (sec->flags & SHF_WRITE) ||
                  !(ctx.arg.zText ||
                    (isa<EhInputSection>(Val: sec) && ctx.arg.emachine != EM_MIPS));
  if (canWrite) {
    RelType rel = ctx.target->getDynRel(type);
    if (oneof<R_GOT, RE_LOONGARCH_GOT>(expr) ||
        (rel == ctx.target->symbolicRel && !sym.isPreemptible)) {
      addRelativeReloc<true>(ctx, isec&: *sec, offsetInSec: offset, sym, addend, expr, type);
      return;
    }
    if (rel != 0) {
      if (ctx.arg.emachine == EM_MIPS && rel == ctx.target->symbolicRel)
        rel = ctx.target->relativeRel;
      std::lock_guard<std::mutex> lock(ctx.relocMutex);
      Partition &part = sec->getPartition(ctx);
      if (ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64) {
        // For a preemptible symbol, we can't use a relative relocation. For an
        // undefined symbol, we can't compute offset at link-time and use a
        // relative relocation. Use a symbolic relocation instead.
        if (sym.isPreemptible) {
          part.relaDyn->addSymbolReloc(dynType: type, isec&: *sec, offsetInSec: offset, sym, addend, addendRelType: type);
        } else if (part.relrAuthDyn && sec->addralign >= 2 && offset % 2 == 0) {
          // When symbol values are determined in
          // finalizeAddressDependentContent, some .relr.auth.dyn relocations
          // may be moved to .rela.dyn.
          sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
          part.relrAuthDyn->relocs.push_back(Elt: {.inputSec: sec, .relocIdx: sec->relocs().size() - 1});
        } else {
          part.relaDyn->addReloc(reloc: {R_AARCH64_AUTH_RELATIVE, sec, offset,
                                  DynamicReloc::AddendOnlyWithTargetVA, sym,
                                  addend, R_ABS});
        }
        return;
      }
      part.relaDyn->addSymbolReloc(dynType: rel, isec&: *sec, offsetInSec: offset, sym, addend, addendRelType: type);

      // The MIPS ABI turns the use of GOT and dynamic relocations inside out.
      // While the regular ABI uses dynamic relocations to fill up GOT entries,
      // the MIPS ABI requires the dynamic linker to fill up GOT entries using
      // a specially sorted dynamic symbol table. This affects even dynamic
      // relocations against symbols which do not require GOT entries
      // creation explicitly, i.e. do not have any GOT-relocations. So if
      // a preemptible symbol has a dynamic relocation we anyway have
      // to create a GOT entry for it.
      // If a non-preemptible symbol has a dynamic relocation against it,
      // the dynamic linker takes its st_value, adds the offset and writes down
      // the result of the dynamic relocation. In case of a preemptible symbol
      // the dynamic linker performs symbol resolution, writes the symbol value
      // to the GOT entry and reads the GOT entry when it needs to perform
      // a dynamic relocation.
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
      if (ctx.arg.emachine == EM_MIPS)
        ctx.in.mipsGot->addEntry(file&: *sec->file, sym, addend, expr);
      return;
    }
  }

  // When producing an executable, we can perform copy relocations (for
  // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO.
  // Copy relocations/canonical PLT entries are unsupported for
  // R_AARCH64_AUTH_ABS64.
  if (!ctx.arg.shared && sym.isShared() &&
      !(ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64)) {
    if (!canDefineSymbolInExecutable(ctx, sym)) {
      auto diag = Err(ctx);
      diag << "cannot preempt symbol: " << &sym;
      printLocation(s&: diag, sec&: *sec, sym, off: offset);
      return;
    }

    if (sym.isObject()) {
      // Produce a copy relocation.
      if (auto *ss = dyn_cast<SharedSymbol>(Val: &sym)) {
        if (!ctx.arg.zCopyreloc) {
          auto diag = Err(ctx);
          diag << "unresolvable relocation " << type << " against symbol '"
               << ss << "'; recompile with -fPIC or remove '-z nocopyreloc'";
          printLocation(s&: diag, sec&: *sec, sym, off: offset);
        }
        sym.setFlags(NEEDS_COPY);
      }
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return;
    }

    // This handles a non PIC program call to function in a shared library. In
    // an ideal world, we could just report an error saying the relocation can
    // overflow at runtime. In the real world with glibc, crt1.o has a
    // R_X86_64_PC32 pointing to libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry and
    // use that as the function value.
    //
    // For the static linking part, we just return a plt expr and everything
    // else will use the PLT entry as the address.
    //
    // The remaining problem is making sure pointer equality still works. We
    // need the help of the dynamic linker for that. We let it know that we have
    // a direct reference to a so symbol by creating an undefined symbol with a
    // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
    // the value of the symbol we created. This is true even for got entries, so
    // pointer equality is maintained. To avoid an infinite loop, the only entry
    // that points to the real function is a dedicated got entry used by the
    // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).

    // For position independent executable on i386, the plt entry requires ebx
    // to be set. This causes two problems:
    // * If some code has a direct reference to a function, it was probably
    //   compiled without -fPIE/-fPIC and doesn't maintain ebx.
    // * If a library definition gets preempted to the executable, it will have
    //   the wrong ebx value.
    if (sym.isFunc()) {
      if (ctx.arg.pie && ctx.arg.emachine == EM_386) {
        auto diag = Err(ctx);
        diag << "symbol '" << &sym
             << "' cannot be preempted; recompile with -fPIE";
        printLocation(s&: diag, sec&: *sec, sym, off: offset);
      }
      // Canonical PLT: the PLT entry becomes the symbol's address.
      sym.setFlags(NEEDS_COPY | NEEDS_PLT);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return;
    }
  }

  // No way left to express this relocation: report an error.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot be used against ";
  if (sym.getName().empty())
    diag << "local symbol";
  else
    diag << "symbol '" << &sym << "'";
  diag << "; recompile with -fPIC";
  printLocation(s&: diag, sec&: *sec, sym, off: offset);
}
1268
// This function is similar to `handleTlsRelocation`. MIPS does not
// support any relaxations for TLS relocations, so by factoring out the MIPS
// handling into a separate function we can simplify the code and avoid
// polluting `handleTlsRelocation` with MIPS `if` statements.
// Mips has a custom MipsGotSection that handles the writing of GOT entries
// without dynamic relocations.
//
// Returns the number of relocations processed (0 means not handled here).
static unsigned handleMipsTlsRelocation(Ctx &ctx, RelType type, Symbol &sym,
                                        InputSectionBase &c, uint64_t offset,
                                        int64_t addend, RelExpr expr) {
  // Local-Dynamic: one module-index entry per file.
  if (expr == RE_MIPS_TLSLD) {
    ctx.in.mipsGot->addTlsIndex(file&: *c.file);
    c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }
  // General-Dynamic: a module-index/offset entry pair per symbol.
  if (expr == RE_MIPS_TLSGD) {
    ctx.in.mipsGot->addDynTlsEntry(file&: *c.file, sym);
    c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }
  return 0;
}
1290
// Handle AArch64 PAuth (signed) TLSDESC relocations. Returns the number of
// relocations processed; 0 means the relocation was not consumed here and
// the generic TLS handling should continue.
static unsigned handleAArch64PAuthTlsRelocation(InputSectionBase *sec,
                                                RelExpr expr, RelType type,
                                                uint64_t offset, Symbol &sym,
                                                int64_t addend) {
  // Do not optimize signed TLSDESC to LE/IE (as described in pauthabielf64).
  // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#general-restrictions
  // > PAUTHELF64 only supports the descriptor based TLS (TLSDESC).
  if (oneof<RE_AARCH64_AUTH_TLSDESC_PAGE, RE_AARCH64_AUTH_TLSDESC>(expr)) {
    sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_AUTH);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // TLSDESC_CALL hint relocation should not be emitted by compiler with signed
  // TLSDESC enabled.
  if (expr == R_TLSDESC_CALL)
    sym.setFlags(NEEDS_TLSDESC_NONAUTH);

  return 0;
}
1311
// Notes about General Dynamic and Local Dynamic TLS models below. They may
// require the generation of a pair of GOT entries that have associated dynamic
// relocations. The pair of GOT entries created are of the form GOT[e0] Module
// Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of
// symbol in TLS block.
//
// Returns the number of relocations processed.
unsigned RelocationScanner::handleTlsRelocation(RelExpr expr, RelType type,
                                                uint64_t offset, Symbol &sym,
                                                int64_t addend) {
  bool isAArch64 = ctx.arg.emachine == EM_AARCH64;

  // Signed (PAuth) TLSDESC has its own restrictions; try that path first.
  if (isAArch64)
    if (unsigned processed = handleAArch64PAuthTlsRelocation(
            sec, expr, type, offset, sym, addend))
      return processed;

  // Local-Exec access is invalid in a shared object: the TP offset of the
  // symbol is not a link-time constant there.
  if (expr == R_TPREL || expr == R_TPREL_NEG) {
    if (ctx.arg.shared) {
      auto diag = Err(ctx);
      diag << "relocation " << type << " against " << &sym
           << " cannot be used with -shared";
      printLocation(s&: diag, sec&: *sec, sym, off: offset);
      return 1;
    }
    return 0;
  }

  if (ctx.arg.emachine == EM_MIPS)
    return handleMipsTlsRelocation(ctx, type, sym, c&: *sec, offset, addend, expr);

  bool isRISCV = ctx.arg.emachine == EM_RISCV;

  if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
            R_TLSDESC_GOTPLT, RE_LOONGARCH_TLSDESC_PAGE_PC>(expr) &&
      ctx.arg.shared) {
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not
    // set NEEDS_TLSDESC on the label.
    if (expr != R_TLSDESC_CALL) {
      if (isAArch64)
        sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_NONAUTH);
      else if (!isRISCV || type == R_RISCV_TLSDESC_HI20)
        sym.setFlags(NEEDS_TLSDESC);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return 1;
  }

  // LoongArch supports IE to LE, DESC GD/LD to IE/LE optimizations in
  // non-extreme code model.
  bool execOptimizeInLoongArch =
      ctx.arg.emachine == EM_LOONGARCH &&
      (type == R_LARCH_TLS_IE_PC_HI20 || type == R_LARCH_TLS_IE_PC_LO12 ||
       type == R_LARCH_TLS_DESC_PC_HI20 || type == R_LARCH_TLS_DESC_PC_LO12 ||
       type == R_LARCH_TLS_DESC_LD || type == R_LARCH_TLS_DESC_CALL ||
       type == R_LARCH_TLS_DESC_PCREL20_S2);

  // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE
  // optimizations.
  // RISC-V supports TLSDESC to IE/LE optimizations.
  // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable
  // optimization as well.
  bool execOptimize =
      !ctx.arg.shared && ctx.arg.emachine != EM_ARM &&
      ctx.arg.emachine != EM_HEXAGON &&
      (ctx.arg.emachine != EM_LOONGARCH || execOptimizeInLoongArch) &&
      !(isRISCV && expr != R_TLSDESC_PC && expr != R_TLSDESC_CALL) &&
      !sec->file->ppc64DisableTLSRelax;

  // If we are producing an executable and the symbol is non-preemptible, it
  // must be defined and the code sequence can be optimized to use
  // Local-Exec.
  //
  // ARM and RISC-V do not support any relaxations for TLS relocations, however,
  // we can omit the DTPMOD dynamic relocations and resolve them at link time
  // because they are always 1. This may be necessary for static linking as
  // DTPMOD may not be expected at load time.
  bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;

  // Local Dynamic is for access to module local TLS variables, while still
  // being suitable for being dynamically loaded via dlopen. GOT[e0] is the
  // module index, with a special value of 0 for the current module. GOT[e1] is
  // unused. There only needs to be one module index entry.
  if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) {
    // Local-Dynamic relocs can be optimized to Local-Exec.
    if (execOptimize) {
      sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE),
                     .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return ctx.target->getTlsGdRelaxSkip(type);
    }
    if (expr == R_TLSLD_HINT)
      return 1;
    ctx.needsTlsLd.store(i: true, m: std::memory_order_relaxed);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // Local-Dynamic relocs can be optimized to Local-Exec.
  if (expr == R_DTPREL) {
    if (execOptimize)
      expr = ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // Local-Dynamic sequence where offset of tls variable relative to dynamic
  // thread pointer is stored in the got. This cannot be optimized to
  // Local-Exec.
  if (expr == R_TLSLD_GOT_OFF) {
    sym.setFlags(NEEDS_GOT_DTPREL);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // LoongArch does not support transition from TLSDESC to LE/IE in the extreme
  // code model, in which NEEDS_TLSDESC should be set, rather than NEEDS_TLSGD.
  // So we check independently.
  if (ctx.arg.emachine == EM_LOONGARCH &&
      oneof<RE_LOONGARCH_TLSDESC_PAGE_PC, R_TLSDESC, R_TLSDESC_PC,
            R_TLSDESC_CALL>(expr) &&
      !execOptimize) {
    if (expr != R_TLSDESC_CALL) {
      sym.setFlags(NEEDS_TLSDESC);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return 1;
  }

  if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
            R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC,
            RE_LOONGARCH_TLSGD_PAGE_PC, RE_LOONGARCH_TLSDESC_PAGE_PC>(expr)) {
    if (!execOptimize) {
      sym.setFlags(NEEDS_TLSGD);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return 1;
    }

    // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec
    // depending on the symbol being locally defined or not.
    //
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible
    // label, so TLSDESC=>IE will be categorized as R_RELAX_TLS_GD_TO_LE. We fix
    // the categorization in RISCV::relocateAlloc().
    if (sym.isPreemptible) {
      sym.setFlags(NEEDS_TLSGD_TO_IE);
      sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_IE),
                     .type: type, .offset: offset, .addend: addend, .sym: &sym});
    } else {
      sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_LE),
                     .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return ctx.target->getTlsGdRelaxSkip(type);
  }

  if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, RE_AARCH64_GOT_PAGE_PC,
            RE_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) {
    ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed);
    // Initial-Exec relocs can be optimized to Local-Exec if the symbol is
    // locally defined. This is not supported on SystemZ.
    if (execOptimize && isLocalInExecutable && ctx.arg.emachine != EM_S390) {
      sec->addReloc(r: {.expr: R_RELAX_TLS_IE_TO_LE, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    } else if (expr != R_TLSIE_HINT) {
      sym.setFlags(NEEDS_TLSIE);
      // R_GOT needs a relative relocation for PIC on i386 and Hexagon.
      if (expr == R_GOT && ctx.arg.isPic &&
          !ctx.target->usesOnlyLowPageBits(type))
        addRelativeReloc<true>(ctx, isec&: *sec, offsetInSec: offset, sym, addend, expr, type);
      else
        sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return 1;
  }

  // LoongArch TLS GD/LD relocs reuse the RE_LOONGARCH_GOT, in which
  // NEEDS_TLSIE shouldn't be set. So we check independently.
  if (ctx.arg.emachine == EM_LOONGARCH && expr == RE_LOONGARCH_GOT &&
      execOptimize && isLocalInExecutable) {
    ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed);
    sec->addReloc(r: {.expr: R_RELAX_TLS_IE_TO_LE, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  return 0;
}
1496
// Scan a single relocation record: read the symbol, type and addend, compute
// the RelExpr, apply target-specific addend fixups (MIPS/PPC64), and hand the
// relocation to the TLS handler or to processAux. Advances `i` past the
// record(s) consumed: MIPS N32 pairs and TLS sequences may consume more than
// one record.
template <class ELFT, class RelTy>
void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) {
  const RelTy &rel = *i;
  uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL);
  Symbol &sym = sec->getFile<ELFT>()->getSymbol(symIndex);
  RelType type;
  if constexpr (ELFT::Is64Bits || RelTy::IsCrel) {
    type = rel.getType(ctx.arg.isMips64EL);
    ++i;
  } else {
    // CREL is unsupported for MIPS N32.
    if (ctx.arg.mipsN32Abi) {
      // N32 encodes up to three relocation types in consecutive records;
      // getMipsN32RelType advances `i` itself.
      type = getMipsN32RelType(i);
    } else {
      type = rel.getType(ctx.arg.isMips64EL);
      ++i;
    }
  }
  // Get an offset in an output section this relocation is applied to.
  uint64_t offset = getter.get(ctx, off: rel.r_offset);
  // -1 marks a relocation that must not be processed (presumably the
  // containing piece was discarded — the getter's contract; confirm in
  // OffsetGetter).
  if (offset == uint64_t(-1))
    return;

  RelExpr expr =
      ctx.target->getRelExpr(type, s: sym, loc: sec->content().data() + offset);
  // REL-format targets store the addend in the relocated bytes themselves.
  int64_t addend = RelTy::HasAddend
                       ? getAddend<ELFT>(rel)
                       : ctx.target->getImplicitAddend(
                             buf: sec->content().data() + rel.r_offset, type);
  if (LLVM_UNLIKELY(ctx.arg.emachine == EM_MIPS))
    addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal());
  else if (ctx.arg.emachine == EM_PPC64 && ctx.arg.isPic && type == R_PPC64_TOC)
    addend += getPPC64TocBase(ctx);

  // Ignore R_*_NONE and other marker relocations.
  if (expr == R_NONE)
    return;

  // Error if the target symbol is undefined. Symbol index 0 may be used by
  // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them.
  if (sym.isUndefined() && symIndex != 0 &&
      maybeReportUndefined(ctx, sym&: cast<Undefined>(Val&: sym), sec&: *sec, offset))
    return;

  if (ctx.arg.emachine == EM_PPC64) {
    // We can separate the small code model relocations into 2 categories:
    // 1) Those that access the compiler generated .toc sections.
    // 2) Those that access the linker allocated got entries.
    // lld allocates got entries to symbols on demand. Since we don't try to
    // sort the got entries in any way, we don't have to track which objects
    // have got-based small code model relocs. The .toc sections get placed
    // after the end of the linker allocated .got section and we do sort those
    // so sections addressed with small code model relocations come first.
    if (type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS)
      sec->file->ppc64SmallCodeModelTocRelocs = true;

    // Record the TOC entry (.toc + addend) as not relaxable. See the comment in
    // InputSectionBase::relocateAlloc().
    if (type == R_PPC64_TOC16_LO && sym.isSection() && isa<Defined>(Val: sym) &&
        cast<Defined>(Val&: sym).section->name == ".toc")
      ctx.ppc64noTocRelax.insert(V: {&sym, addend});

    if ((type == R_PPC64_TLSGD && expr == R_TLSDESC_CALL) ||
        (type == R_PPC64_TLSLD && expr == R_TLSLD_HINT)) {
      // Skip the error check for CREL, which does not set `end`.
      if constexpr (!RelTy::IsCrel) {
        // The marker must be followed by the `bl __tls_get_addr` relocation,
        // so it cannot be the final record.
        if (i == end) {
          auto diag = Err(ctx);
          diag << "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last "
                  "relocation";
          printLocation(s&: diag, sec&: *sec, sym, off: offset);
          return;
        }
      }

      // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC
      // case, so we can discern it later from the toc-case.
      if (i->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC)
        ++offset;
    }
  }

  // If the relocation does not emit a GOT or GOTPLT entry but its computation
  // uses their addresses, we need GOT or GOTPLT to be created.
  //
  // The 5 GOTPLT-relative types are all x86 and x86-64 specific.
  if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT,
            R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) {
    ctx.in.gotPlt->hasGotPltOffRel.store(i: true, m: std::memory_order_relaxed);
  } else if (oneof<R_GOTONLY_PC, R_GOTREL, RE_PPC32_PLTREL, RE_PPC64_TOCBASE,
                   RE_PPC64_RELAX_TOC>(expr)) {
    ctx.in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);
  }

  // Process TLS relocations, including TLS optimizations. Note that
  // R_TPREL and R_TPREL_NEG relocations are resolved in processAux.
  //
  // Some RISCV TLSDESC relocations reference a local NOTYPE symbol,
  // but we need to process them in handleTlsRelocation.
  if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) {
    if (unsigned processed =
            handleTlsRelocation(expr, type, offset, sym, addend)) {
      // handleTlsRelocation reports how many records the sequence spans;
      // `i` was already advanced by one above.
      i += processed - 1;
      return;
    }
  }

  processAux(expr, type, offset, sym, addend);
}
1606
1607// R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for
1608// General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is
1609// found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the
1610// instructions are generated by very old IBM XL compilers. Work around the
1611// issue by disabling GD/LD to IE/LE relaxation.
1612template <class RelTy>
1613static void checkPPC64TLSRelax(InputSectionBase &sec, Relocs<RelTy> rels) {
1614 // Skip if sec is synthetic (sec.file is null) or if sec has been marked.
1615 if (!sec.file || sec.file->ppc64DisableTLSRelax)
1616 return;
1617 bool hasGDLD = false;
1618 for (const RelTy &rel : rels) {
1619 RelType type = rel.getType(false);
1620 switch (type) {
1621 case R_PPC64_TLSGD:
1622 case R_PPC64_TLSLD:
1623 return; // Found a marker
1624 case R_PPC64_GOT_TLSGD16:
1625 case R_PPC64_GOT_TLSGD16_HA:
1626 case R_PPC64_GOT_TLSGD16_HI:
1627 case R_PPC64_GOT_TLSGD16_LO:
1628 case R_PPC64_GOT_TLSLD16:
1629 case R_PPC64_GOT_TLSLD16_HA:
1630 case R_PPC64_GOT_TLSLD16_HI:
1631 case R_PPC64_GOT_TLSLD16_LO:
1632 hasGDLD = true;
1633 break;
1634 }
1635 }
1636 if (hasGDLD) {
1637 sec.file->ppc64DisableTLSRelax = true;
1638 Warn(ctx&: sec.file->ctx)
1639 << sec.file
1640 << ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations "
1641 "without "
1642 "R_PPC64_TLSGD/R_PPC64_TLSLD relocations";
1643 }
1644}
1645
// Scan every relocation in `rels` against the current section, dispatching
// each record to scanOne. Handles up-front sorting requirements
// (EhInputSection, SystemZ) and the offset-sorted output some targets need
// afterwards.
template <class ELFT, class RelTy>
void RelocationScanner::scan(Relocs<RelTy> rels) {
  // Not all relocations end up in Sec->Relocations, but a lot do.
  sec->relocations.reserve(N: rels.size());

  if (ctx.arg.emachine == EM_PPC64)
    checkPPC64TLSRelax<RelTy>(*sec, rels);

  // For EhInputSection, OffsetGetter expects the relocations to be sorted by
  // r_offset. In rare cases (.eh_frame pieces are reordered by a linker
  // script), the relocations may be unordered.
  // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
  // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
  SmallVector<RelTy, 0> storage;
  if (isa<EhInputSection>(Val: sec) || ctx.arg.emachine == EM_S390)
    rels = sortRels(rels, storage);

  if constexpr (RelTy::IsCrel) {
    for (auto i = rels.begin(); i != rels.end();)
      scanOne<ELFT, RelTy>(i);
  } else {
    // The non-CREL code path has additional check for PPC64 TLS.
    // `end` is a member consulted by scanOne's look-ahead.
    end = static_cast<const void *>(rels.end());
    for (auto i = rels.begin(); i != end;)
      scanOne<ELFT, RelTy>(i);
  }

  // Sort relocations by offset for more efficient searching for
  // R_RISCV_PCREL_HI20, R_PPC64_ADDR64 and the branch-to-branch optimization.
  if (ctx.arg.emachine == EM_RISCV ||
      (ctx.arg.emachine == EM_PPC64 && sec->name == ".toc") ||
      ctx.arg.branchToBranch)
    llvm::stable_sort(sec->relocs(),
                      [](const Relocation &lhs, const Relocation &rhs) {
                        return lhs.offset < rhs.offset;
                      });
}
1683
1684template <class ELFT>
1685void RelocationScanner::scanSection(InputSectionBase &s, bool isEH) {
1686 sec = &s;
1687 getter = OffsetGetter(s);
1688 const RelsOrRelas<ELFT> rels = s.template relsOrRelas<ELFT>(!isEH);
1689 if (rels.areRelocsCrel())
1690 scan<ELFT>(rels.crels);
1691 else if (rels.areRelocsRel())
1692 scan<ELFT>(rels.rels);
1693 else
1694 scan<ELFT>(rels.relas);
1695}
1696
template <class ELFT> void elf::scanRelocations(Ctx &ctx) {
  // Scan all relocations. Each relocation goes through a series of tests to
  // determine if it needs special treatment, such as creating GOT, PLT,
  // copy relocations, etc. Note that relocations for non-alloc sections are
  // directly processed by InputSection::relocateNonAlloc.

  // Deterministic parallelism needs sorting relocations which is unsuitable
  // for -z nocombreloc. MIPS and PPC64 use global states which are not suitable
  // for parallelism.
  bool serial = !ctx.arg.zCombreloc || ctx.arg.emachine == EM_MIPS ||
                ctx.arg.emachine == EM_PPC64;
  parallel::TaskGroup tg;
  auto outerFn = [&]() {
    for (ELFFileBase *f : ctx.objectFiles) {
      // Scan one object file's regular, live, allocated sections; either
      // inline (serial) or as one task per file.
      auto fn = [f, &ctx]() {
        RelocationScanner scanner(ctx);
        for (InputSectionBase *s : f->getSections()) {
          // ARM SHT_ARM_EXIDX sections are handled with the partitions'
          // exidx sections in scanEH below, not here.
          if (s && s->kind() == SectionBase::Regular && s->isLive() &&
              (s->flags & SHF_ALLOC) &&
              !(s->type == SHT_ARM_EXIDX && ctx.arg.emachine == EM_ARM))
            scanner.template scanSection<ELFT>(*s);
        }
      };
      if (serial)
        fn();
      else
        tg.spawn(f: fn);
    }
    // .eh_frame and ARM exidx sections are owned by partitions, so scan them
    // separately from the per-file loop above.
    auto scanEH = [&] {
      RelocationScanner scanner(ctx);
      for (Partition &part : ctx.partitions) {
        for (EhInputSection *sec : part.ehFrame->sections)
          scanner.template scanSection<ELFT>(*sec, /*isEH=*/true);
        if (part.armExidx && part.armExidx->isLive())
          for (InputSection *sec : part.armExidx->exidxSections)
            if (sec->isLive())
              scanner.template scanSection<ELFT>(*sec);
      }
    };
    if (serial)
      scanEH();
    else
      tg.spawn(f: scanEH);
  };
  // If `serial` is true, call `spawn` to ensure that `scanner` runs in a thread
  // with valid getThreadIndex().
  if (serial)
    tg.spawn(f: outerFn);
  else
    outerFn();
}
1748
1749RelocationBaseSection &elf::getIRelativeSection(Ctx &ctx) {
1750 // Prior to Android V, there was a bug that caused RELR relocations to be
1751 // applied after packed relocations. This meant that resolvers referenced by
1752 // IRELATIVE relocations in the packed relocation section would read
1753 // unrelocated globals with RELR relocations when
1754 // --pack-relative-relocs=android+relr is enabled. Work around this by placing
1755 // IRELATIVE in .rela.plt.
1756 return ctx.arg.androidPackDynRelocs ? *ctx.in.relaPlt
1757 : *ctx.mainPart->relaDyn;
1758}
1759
// Returns true if `sym` is a non-preemptible ifunc that this function fully
// handled (so the caller must not process it further), false otherwise.
static bool handleNonPreemptibleIfunc(Ctx &ctx, Symbol &sym, uint16_t flags) {
  // Handle a reference to a non-preemptible ifunc. These are special in a
  // few ways:
  //
  // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have
  //   a fixed value. But assuming that all references to the ifunc are
  //   GOT-generating or PLT-generating, the handling of an ifunc is
  //   relatively straightforward. We create a PLT entry in Iplt, which is
  //   usually at the end of .plt, which makes an indirect call using a
  //   matching GOT entry in igotPlt, which is usually at the end of .got.plt.
  //   The GOT entry is relocated using an IRELATIVE relocation in relaDyn,
  //   which is usually at the end of .rela.dyn.
  //
  // - Despite the fact that an ifunc does not have a fixed value, compilers
  //   that are not passed -fPIC will assume that they do, and will emit
  //   direct (non-GOT-generating, non-PLT-generating) relocations to the
  //   symbol. This means that if a direct relocation to the symbol is
  //   seen, the linker must set a value for the symbol, and this value must
  //   be consistent no matter what type of reference is made to the symbol.
  //   This can be done by creating a PLT entry for the symbol in the way
  //   described above and making it canonical, that is, making all references
  //   point to the PLT entry instead of the resolver. In lld we also store
  //   the address of the PLT entry in the dynamic symbol table, which means
  //   that the symbol will also have the same value in other modules.
  //   Because the value loaded from the GOT needs to be consistent with
  //   the value computed using a direct relocation, a non-preemptible ifunc
  //   may end up with two GOT entries, one in .got.plt that points to the
  //   address returned by the resolver and is used only by the PLT entry,
  //   and another in .got that points to the PLT entry and is used by
  //   GOT-generating relocations.
  //
  // - The fact that these symbols do not have a fixed value makes them an
  //   exception to the general rule that a statically linked executable does
  //   not require any form of dynamic relocation. To handle these relocations
  //   correctly, the IRELATIVE relocations are stored in an array which a
  //   statically linked executable's startup code must enumerate using the
  //   linker-defined symbols __rela?_iplt_{start,end}.
  if (!sym.isGnuIFunc() || sym.isPreemptible || ctx.arg.zIfuncNoplt)
    return false;
  // Skip unreferenced non-preemptible ifunc.
  if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC)))
    return true;

  sym.isInIplt = true;

  // Create an Iplt and the associated IRELATIVE relocation pointing to the
  // original section/value pairs. For non-GOT non-PLT relocation case below, we
  // may alter section/value, so create a copy of the symbol to make
  // section/value fixed.
  auto *directSym = makeDefined(args&: cast<Defined>(Val&: sym));
  directSym->allocateAux(ctx);
  auto &dyn = getIRelativeSection(ctx);
  addPltEntry(ctx, plt&: *ctx.in.iplt, gotPlt&: *ctx.in.igotPlt, rel&: dyn, type: ctx.target->iRelativeRel,
              sym&: *directSym);
  sym.allocateAux(ctx);
  // `sym` and its unmodified copy `directSym` share the same PLT entry.
  ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx;

  if (flags & HAS_DIRECT_RELOC) {
    // Change the value to the IPLT and redirect all references to it.
    auto &d = cast<Defined>(Val&: sym);
    d.section = ctx.in.iplt.get();
    d.value = d.getPltIdx(ctx) * ctx.target->ipltEntrySize;
    d.size = 0;
    // It's important to set the symbol type here so that dynamic loaders
    // don't try to call the PLT as if it were an ifunc resolver.
    d.type = STT_FUNC;

    if (flags & NEEDS_GOT) {
      assert(!(flags & NEEDS_GOT_AUTH) &&
             "R_AARCH64_AUTH_IRELATIVE is not supported yet");
      addGotEntry(ctx, sym);
    }
  } else if (flags & NEEDS_GOT) {
    // Redirect GOT accesses to point to the Igot.
    sym.gotInIgot = true;
  }
  return true;
}
1838
// After relocation scanning, walk every symbol (global first, then local) and
// materialize whatever its accumulated NEEDS_* flags demand: GOT/PLT entries,
// copy relocations, canonical PLTs, and the TLS GOT entries and dynamic
// relocations.
void elf::postScanRelocations(Ctx &ctx) {
  auto fn = [&](Symbol &sym) {
    auto flags = sym.flags.load(m: std::memory_order_relaxed);
    if (handleNonPreemptibleIfunc(ctx, sym, flags))
      return;

    if (sym.isTagged() && sym.isDefined())
      ctx.mainPart->memtagGlobalDescriptors->addSymbol(sym);

    if (!sym.needsDynReloc())
      return;
    sym.allocateAux(ctx);

    if (flags & NEEDS_GOT) {
      // AUTH and non-AUTH references would require two different GOT slot
      // encodings for the same symbol; reject the combination.
      if ((flags & NEEDS_GOT_AUTH) && (flags & NEEDS_GOT_NONAUTH)) {
        auto diag = Err(ctx);
        diag << "both AUTH and non-AUTH GOT entries for '" << sym.getName()
             << "' requested, but only one type of GOT entry per symbol is "
                "supported";
        return;
      }
      if (flags & NEEDS_GOT_AUTH)
        addGotAuthEntry(ctx, sym);
      else
        addGotEntry(ctx, sym);
    }
    if (flags & NEEDS_PLT)
      addPltEntry(ctx, plt&: *ctx.in.plt, gotPlt&: *ctx.in.gotPlt, rel&: *ctx.in.relaPlt,
                  type: ctx.target->pltRel, sym);
    if (flags & NEEDS_COPY) {
      if (sym.isObject()) {
        invokeELFT(addCopyRelSymbol, ctx, cast<SharedSymbol>(sym));
        // NEEDS_COPY is cleared for sym and its aliases so that in
        // later iterations aliases won't cause redundant copies.
        assert(!sym.hasFlag(NEEDS_COPY));
      } else {
        // NEEDS_COPY on a function means a canonical PLT: give the symbol a
        // defined value at its PLT entry so direct references resolve there.
        assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
        if (!sym.isDefined()) {
          replaceWithDefined(ctx, sym, sec&: *ctx.in.plt,
                             value: ctx.target->pltHeaderSize +
                                 ctx.target->pltEntrySize * sym.getPltIdx(ctx),
                             size: 0);
          sym.setFlags(NEEDS_COPY);
          if (ctx.arg.emachine == EM_PPC) {
            // PPC32 canonical PLT entries are at the beginning of .glink
            cast<Defined>(Val&: sym).value = ctx.in.plt->headerSize;
            ctx.in.plt->headerSize += 16;
            cast<PPC32GlinkSection>(Val&: *ctx.in.plt).canonical_plts.push_back(Elt: &sym);
          }
        }
      }
    }

    // Everything below applies only to TLS symbols.
    if (!sym.isTls())
      return;
    bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;
    GotSection *got = ctx.in.got.get();

    if (flags & NEEDS_TLSDESC) {
      // As with GOT entries, a symbol cannot have both AUTH and non-AUTH
      // TLSDESC entries.
      if ((flags & NEEDS_TLSDESC_AUTH) && (flags & NEEDS_TLSDESC_NONAUTH)) {
        Err(ctx)
            << "both AUTH and non-AUTH TLSDESC entries for '" << sym.getName()
            << "' requested, but only one type of TLSDESC entry per symbol is "
               "supported";
        return;
      }
      got->addTlsDescEntry(sym);
      RelType tlsDescRel = ctx.target->tlsDescRel;
      if (flags & NEEDS_TLSDESC_AUTH) {
        got->addTlsDescAuthEntry();
        tlsDescRel = ELF::R_AARCH64_AUTH_TLSDESC;
      }
      ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
          dynType: tlsDescRel, isec&: *got, offsetInSec: got->getTlsDescOffset(sym), sym, addendRelType: tlsDescRel);
    }
    if (flags & NEEDS_TLSGD) {
      got->addDynTlsEntry(sym);
      uint64_t off = got->getGlobalDynOffset(b: sym);
      if (isLocalInExecutable)
        // Write one to the GOT slot.
        got->addConstant(r: {.expr: R_ADDEND, .type: ctx.target->symbolicRel, .offset: off, .addend: 1, .sym: &sym});
      else
        ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsModuleIndexRel,
                                              isec&: *got, offsetInSec: off, sym);

      // If the symbol is preemptible we need the dynamic linker to write
      // the offset too.
      uint64_t offsetOff = off + ctx.arg.wordsize;
      if (sym.isPreemptible)
        ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsOffsetRel, isec&: *got,
                                              offsetInSec: offsetOff, sym);
      else
        got->addConstant(r: {.expr: R_ABS, .type: ctx.target->tlsOffsetRel, .offset: offsetOff, .addend: 0, .sym: &sym});
    }
    if (flags & NEEDS_TLSGD_TO_IE) {
      got->addEntry(sym);
      ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsGotRel, isec&: *got,
                                            offsetInSec: sym.getGotOffset(ctx), sym);
    }
    if (flags & NEEDS_GOT_DTPREL) {
      got->addEntry(sym);
      got->addConstant(
          r: {.expr: R_ABS, .type: ctx.target->tlsOffsetRel, .offset: sym.getGotOffset(ctx), .addend: 0, .sym: &sym});
    }

    if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
      addTpOffsetGotEntry(ctx, sym);
  };

  // TLS local-dynamic: one shared module-index GOT entry for the whole link.
  GotSection *got = ctx.in.got.get();
  if (ctx.needsTlsLd.load(m: std::memory_order_relaxed) && got->addTlsIndex()) {
    static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
    if (ctx.arg.shared)
      ctx.mainPart->relaDyn->addReloc(
          reloc: {ctx.target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
    else
      got->addConstant(r: {.expr: R_ADDEND, .type: ctx.target->symbolicRel,
                          .offset: got->getTlsIndexOff(), .addend: 1, .sym: &dummy});
  }

  assert(ctx.symAux.size() == 1);
  for (Symbol *sym : ctx.symtab->getSymbols())
    fn(*sym);

  // Local symbols may need the aforementioned non-preemptible ifunc and GOT
  // handling. They don't need regular PLT.
  for (ELFFileBase *file : ctx.objectFiles)
    for (Symbol *sym : file->getLocalSymbols())
      fn(*sym);

  if (ctx.arg.branchToBranch)
    ctx.target->applyBranchToBranchOpt();
}
1972
1973static bool mergeCmp(const InputSection *a, const InputSection *b) {
1974 // std::merge requires a strict weak ordering.
1975 if (a->outSecOff < b->outSecOff)
1976 return true;
1977
1978 // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection.
1979 if (a->outSecOff == b->outSecOff && a != b) {
1980 auto *ta = dyn_cast<ThunkSection>(Val: a);
1981 auto *tb = dyn_cast<ThunkSection>(Val: b);
1982
1983 // Check if Thunk is immediately before any specific Target
1984 // InputSection for example Mips LA25 Thunks.
1985 if (ta && ta->getTargetInputSection() == b)
1986 return true;
1987
1988 // Place Thunk Sections without specific targets before
1989 // non-Thunk Sections.
1990 if (ta && !tb && !ta->getTargetInputSection())
1991 return true;
1992 }
1993
1994 return false;
1995}
1996
1997// Call Fn on every executable InputSection accessed via the linker script
1998// InputSectionDescription::Sections.
1999static void forEachInputSectionDescription(
2000 ArrayRef<OutputSection *> outputSections,
2001 llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) {
2002 for (OutputSection *os : outputSections) {
2003 if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR))
2004 continue;
2005 for (SectionCommand *bc : os->commands)
2006 if (auto *isd = dyn_cast<InputSectionDescription>(Val: bc))
2007 fn(os, isd);
2008 }
2009}
2010
2011ThunkCreator::ThunkCreator(Ctx &ctx) : ctx(ctx) {}
2012
2013ThunkCreator::~ThunkCreator() {}
2014
2015// Thunk Implementation
2016//
2017// Thunks (sometimes called stubs, veneers or branch islands) are small pieces
// of code that the linker inserts in between a caller and a callee. The thunks
2019// are added at link time rather than compile time as the decision on whether
2020// a thunk is needed, such as the caller and callee being out of range, can only
2021// be made at link time.
2022//
2023// It is straightforward to tell given the current state of the program when a
2024// thunk is needed for a particular call. The more difficult part is that
2025// the thunk needs to be placed in the program such that the caller can reach
2026// the thunk and the thunk can reach the callee; furthermore, adding thunks to
2027// the program alters addresses, which can mean more thunks etc.
2028//
2029// In lld we have a synthetic ThunkSection that can hold many Thunks.
2030// The decision to have a ThunkSection act as a container means that we can
2031// more easily handle the most common case of a single block of contiguous
2032// Thunks by inserting just a single ThunkSection.
2033//
2034// The implementation of Thunks in lld is split across these areas
2035// Relocations.cpp : Framework for creating and placing thunks
2036// Thunks.cpp : The code generated for each supported thunk
2037// Target.cpp : Target specific hooks that the framework uses to decide when
2038// a thunk is used
2039// Synthetic.cpp : Implementation of ThunkSection
2040// Writer.cpp : Iteratively call framework until no more Thunks added
2041//
2042// Thunk placement requirements:
2043// Mips LA25 thunks. These must be placed immediately before the callee section
2044// We can assume that the caller is in range of the Thunk. These are modelled
2045// by Thunks that return the section they must precede with
2046// getTargetInputSection().
2047//
2048// ARM interworking and range extension thunks. These thunks must be placed
2049// within range of the caller. All implemented ARM thunks can always reach the
2050// callee as they use an indirect jump via a register that has no range
2051// restrictions.
2052//
2053// Thunk placement algorithm:
2054// For Mips LA25 ThunkSections; the placement is explicit, it has to be before
2055// getTargetInputSection().
2056//
2057// For thunks that must be placed within range of the caller there are many
2058// possible choices given that the maximum range from the caller is usually
2059// much larger than the average InputSection size. Desirable properties include:
2060// - Maximize reuse of thunks by multiple callers
2061// - Minimize number of ThunkSections to simplify insertion
2062// - Handle impact of already added Thunks on addresses
2063// - Simple to understand and implement
2064//
2065// In lld for the first pass, we pre-create one or more ThunkSections per
2066// InputSectionDescription at Target specific intervals. A ThunkSection is
2067// placed so that the estimated end of the ThunkSection is within range of the
2068// start of the InputSectionDescription or the previous ThunkSection. For
2069// example:
2070// InputSectionDescription
2071// Section 0
2072// ...
2073// Section N
2074// ThunkSection 0
2075// Section N + 1
2076// ...
2077// Section N + K
2078// Thunk Section 1
2079//
2080// The intention is that we can add a Thunk to a ThunkSection that is well
2081// spaced enough to service a number of callers without having to do a lot
2082// of work. An important principle is that it is not an error if a Thunk cannot
2083// be placed in a pre-created ThunkSection; when this happens we create a new
2084// ThunkSection placed next to the caller. This allows us to handle the vast
2085// majority of thunks simply, but also handle rare cases where the branch range
2086// is smaller than the target specific spacing.
2087//
2088// The algorithm is expected to create all the thunks that are needed in a
2089// single pass, with a small number of programs needing a second pass due to
2090// the insertion of thunks in the first pass increasing the offset between
2091// callers and callees that were only just in range.
2092//
2093// A consequence of allowing new ThunkSections to be created outside of the
2094// pre-created ThunkSections is that in rare cases calls to Thunks that were in
2095// range in pass K, are out of range in some pass > K due to the insertion of
2096// more Thunks in between the caller and callee. When this happens we retarget
2097// the relocation back to the original target and create another Thunk.
2098
// Remove ThunkSections that are empty; this should only be the initial set
// precreated on pass 0.
2101
2102// Insert the Thunks for OutputSection OS into their designated place
2103// in the Sections vector, and recalculate the InputSection output section
2104// offsets.
2105// This may invalidate any output section offsets stored outside of InputSection
void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) {
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->thunkSections.empty())
          return;

        // Remove any zero sized precreated Thunks.
        llvm::erase_if(C&: isd->thunkSections,
                       P: [](const std::pair<ThunkSection *, uint32_t> &ts) {
                         return ts.first->getSize() == 0;
                       });

        // ISD->ThunkSections contains all created ThunkSections, including
        // those inserted in previous passes. Extract the Thunks created this
        // pass and order them in ascending outSecOff.
        std::vector<ThunkSection *> newThunks;
        for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections)
          if (ts.second == pass)
            newThunks.push_back(x: ts.first);
        llvm::stable_sort(Range&: newThunks,
                          C: [](const ThunkSection *a, const ThunkSection *b) {
                            return a->outSecOff < b->outSecOff;
                          });

        // Merge sorted vectors of Thunks and InputSections by outSecOff
        // (ThunkSections from earlier passes already live in isd->sections).
        SmallVector<InputSection *, 0> tmp;
        tmp.reserve(N: isd->sections.size() + newThunks.size());

        std::merge(first1: isd->sections.begin(), last1: isd->sections.end(),
                   first2: newThunks.begin(), last2: newThunks.end(), result: std::back_inserter(x&: tmp),
                   comp: mergeCmp);

        isd->sections = std::move(tmp);
      });
}
2141
2142static int64_t getPCBias(Ctx &ctx, RelType type) {
2143 if (ctx.arg.emachine != EM_ARM)
2144 return 0;
2145 switch (type) {
2146 case R_ARM_THM_JUMP19:
2147 case R_ARM_THM_JUMP24:
2148 case R_ARM_THM_CALL:
2149 return 4;
2150 default:
2151 return 8;
2152 }
2153}
2154
2155// Find or create a ThunkSection within the InputSectionDescription (ISD) that
2156// is in range of Src. An ISD maps to a range of InputSections described by a
2157// linker script section pattern such as { .text .text.* }.
2158ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os,
2159 InputSection *isec,
2160 InputSectionDescription *isd,
2161 const Relocation &rel,
2162 uint64_t src) {
2163 // See the comment in getThunk for -pcBias below.
2164 const int64_t pcBias = getPCBias(ctx, type: rel.type);
2165 for (std::pair<ThunkSection *, uint32_t> tp : isd->thunkSections) {
2166 ThunkSection *ts = tp.first;
2167 uint64_t tsBase = os->addr + ts->outSecOff - pcBias;
2168 uint64_t tsLimit = tsBase + ts->getSize();
2169 if (ctx.target->inBranchRange(type: rel.type, src,
2170 dst: (src > tsLimit) ? tsBase : tsLimit))
2171 return ts;
2172 }
2173
2174 // No suitable ThunkSection exists. This can happen when there is a branch
2175 // with lower range than the ThunkSection spacing or when there are too
2176 // many Thunks. Create a new ThunkSection as close to the InputSection as
2177 // possible. Error if InputSection is so large we cannot place ThunkSection
2178 // anywhere in Range.
2179 uint64_t thunkSecOff = isec->outSecOff;
2180 if (!ctx.target->inBranchRange(type: rel.type, src,
2181 dst: os->addr + thunkSecOff + rel.addend)) {
2182 thunkSecOff = isec->outSecOff + isec->getSize();
2183 if (!ctx.target->inBranchRange(type: rel.type, src,
2184 dst: os->addr + thunkSecOff + rel.addend))
2185 Fatal(ctx) << "InputSection too large for range extension thunk "
2186 << isec->getObjMsg(offset: src - (os->addr << isec->outSecOff));
2187 }
2188 return addThunkSection(os, isd, off: thunkSecOff);
2189}
2190
2191// Add a Thunk that needs to be placed in a ThunkSection that immediately
2192// precedes its Target.
ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) {
  // Reuse a ThunkSection already created to precede this InputSection.
  ThunkSection *ts = thunkedSections.lookup(Val: isec);
  if (ts)
    return ts;

  // Find InputSectionRange within Target Output Section (TOS) that the
  // InputSection (IS) that we need to precede is in.
  OutputSection *tos = isec->getParent();
  for (SectionCommand *bc : tos->commands) {
    auto *isd = dyn_cast<InputSectionDescription>(Val: bc);
    if (!isd || isd->sections.empty())
      continue;

    InputSection *first = isd->sections.front();
    InputSection *last = isd->sections.back();

    // isec must fall inside this description's [first, last] offset range.
    if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff)
      continue;

    // Place the new ThunkSection exactly at isec's offset so it ends up
    // immediately before isec, and cache the association.
    ts = addThunkSection(os: tos, isd, off: isec->outSecOff);
    thunkedSections[isec] = ts;
    return ts;
  }

  // isec was not found in any InputSectionDescription of its parent.
  return nullptr;
}
2219
2220// Create one or more ThunkSections per OS that can be used to place Thunks.
2221// We attempt to place the ThunkSections using the following desirable
2222// properties:
2223// - Within range of the maximum number of callers
2224// - Minimise the number of ThunkSections
2225//
2226// We follow a simple but conservative heuristic to place ThunkSections at
2227// offsets that are multiples of a Target specific branch range.
2228// For an InputSectionDescription that is smaller than the range, a single
2229// ThunkSection at the end of the range will do.
2230//
2231// For an InputSectionDescription that is more than twice the size of the range,
2232// we place the last ThunkSection at range bytes from the end of the
2233// InputSectionDescription in order to increase the likelihood that the
2234// distance from a thunk to its target will be sufficiently small to
2235// allow for the creation of a short thunk.
void ThunkCreator::createInitialThunkSections(
    ArrayRef<OutputSection *> outputSections) {
  uint32_t thunkSectionSpacing = ctx.target->getThunkSectionSpacing();
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->sections.empty())
          return;

        // [isdBegin, isdEnd) is the output-section offset range covered by
        // this InputSectionDescription.
        uint32_t isdBegin = isd->sections.front()->outSecOff;
        uint32_t isdEnd =
            isd->sections.back()->outSecOff + isd->sections.back()->getSize();
        // For descriptions larger than twice the spacing, stop placing
        // interior ThunkSections one spacing before the end; the final
        // addThunkSection below covers the tail (see function comment).
        uint32_t lastThunkLowerBound = -1;
        if (isdEnd - isdBegin > thunkSectionSpacing * 2)
          lastThunkLowerBound = isdEnd - thunkSectionSpacing;

        // isecLimit is assigned on the first loop iteration; the description
        // is known to be non-empty here.
        uint32_t isecLimit;
        uint32_t prevIsecLimit = isdBegin;
        uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;

        for (const InputSection *isec : isd->sections) {
          isecLimit = isec->outSecOff + isec->getSize();
          // This section crosses the spacing boundary: place a ThunkSection
          // right after the previous section.
          if (isecLimit > thunkUpperBound) {
            addThunkSection(os, isd, off: prevIsecLimit);
            thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
          }
          if (isecLimit > lastThunkLowerBound)
            break;
          prevIsecLimit = isecLimit;
        }
        // Final ThunkSection at the end of the last section scanned.
        addThunkSection(os, isd, off: isecLimit);
      });
}
2268
// Create a new ThunkSection at offset `off` within `os`, register it in
// `isd` tagged with the current pass number, and return it.
ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
                                            InputSectionDescription *isd,
                                            uint64_t off) {
  auto *ts = make<ThunkSection>(args&: ctx, args&: os, args&: off);
  ts->partition = os->partition;
  if ((ctx.arg.fixCortexA53Errata843419 || ctx.arg.fixCortexA8) &&
      !isd->sections.empty()) {
    // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
    // thunks we disturb the base addresses of sections placed after the thunks
    // this makes patches we have generated redundant, and may cause us to
    // generate more patches as different instructions are now in sensitive
    // locations. When we generate more patches we may force more branches to
    // go out of range, causing more thunks to be generated. In pathological
    // cases this can cause the address dependent content pass not to converge.
    // We fix this by rounding up the size of the ThunkSection to 4KiB, this
    // limits the insertion of a ThunkSection on the addresses modulo 4 KiB,
    // which means that adding Thunks to the section does not invalidate
    // errata patches for following code.
    // Rounding up the size to 4KiB has consequences for code-size and can
    // trip up linker script defined assertions. For example the linux kernel
    // has an assertion that what LLD represents as an InputSectionDescription
    // does not exceed 4 KiB even if the overall OutputSection is > 128 Mib.
    // We use the heuristic of rounding up the size when both of the following
    // conditions are true:
    // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
    //     accounts for the case where no single InputSectionDescription is
    //     larger than the OutputSection size. This is conservative but simple.
    // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
    //     any assertion failures that an InputSectionDescription is < 4 KiB
    //     in size.
    uint64_t isdSize = isd->sections.back()->outSecOff +
                       isd->sections.back()->getSize() -
                       isd->sections.front()->outSecOff;
    if (os->size > ctx.target->getThunkSectionSpacing() && isdSize > 4096)
      ts->roundUpSizeForErrata = true;
  }
  isd->thunkSections.push_back(Elt: {ts, pass});
  return ts;
}
2308
2309static bool isThunkSectionCompatible(InputSection *source,
2310 SectionBase *target) {
2311 // We can't reuse thunks in different loadable partitions because they might
2312 // not be loaded. But partition 1 (the main partition) will always be loaded.
2313 if (source->partition != target->partition)
2314 return target->partition == 1;
2315 return true;
2316}
2317
// Find a reusable, compatible, in-range Thunk for rel (a relocation in isec
// at address src), or create a new one. Returns {thunk, true} if the Thunk
// was newly created, {thunk, false} if an existing one is reused.
std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
                                                Relocation &rel, uint64_t src) {
  SmallVector<std::unique_ptr<Thunk>, 0> *thunkVec = nullptr;
  // Arm and Thumb have a PC Bias of 8 and 4 respectively, this is cancelled
  // out in the relocation addend. We compensate for the PC bias so that
  // an Arm and Thumb relocation to the same destination get the same keyAddend,
  // which is usually 0.
  const int64_t pcBias = getPCBias(ctx, type: rel.type);
  const int64_t keyAddend = rel.addend + pcBias;

  // We use a ((section, offset), addend) pair to find the thunk position if
  // possible so that we create only one thunk for aliased symbols or ICFed
  // sections. There may be multiple relocations sharing the same (section,
  // offset + addend) pair. We may revert the relocation back to its original
  // non-Thunk target, so we cannot fold offset + addend.
  if (auto *d = dyn_cast<Defined>(Val: rel.sym))
    if (!d->isInPlt(ctx) && d->section)
      thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
                                                    keyAddend}];
  // Fallback key for symbols without a (section, value) location, e.g.
  // symbols that resolve via the PLT.
  if (!thunkVec)
    thunkVec = &thunkedSymbols[{rel.sym, keyAddend}];

  // Check existing Thunks for Sym to see if they can be reused
  for (auto &t : *thunkVec)
    if (isThunkSectionCompatible(source: isec, target: t->getThunkTargetSym()->section) &&
        t->isCompatibleWith(*isec, rel) &&
        ctx.target->inBranchRange(type: rel.type, src,
                                  dst: t->getThunkTargetSym()->getVA(ctx, addend: -pcBias)))
      return std::make_pair(x: t.get(), y: false);

  // No existing compatible Thunk in range, create a new one
  thunkVec->push_back(Elt: addThunk(ctx, isec: *isec, rel));
  return std::make_pair(x: thunkVec->back().get(), y: true);
}
2352
// Return the landing-pad thunk cached for ((d.section, d.value), a), creating
// it on first request. The bool in the result is true when a new landing pad
// was created this call.
std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d,
                                                              int64_t a) {
  auto [it, isNew] = landingPadsBySectionAndAddend.try_emplace(
      Key: {{d.section, d.value}, a}, Args: nullptr);
  if (isNew)
    it->second = addLandingPadThunk(ctx, s&: d, a);
  return {it->second.get(), isNew};
}
2361
// Return true if the relocation target is an in range Thunk.
// Return false if the relocation is not to a Thunk. If the relocation target
// was originally to a Thunk, but is no longer in range we revert the
// relocation back to its original non-Thunk target.
bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
  if (Thunk *t = thunks.lookup(Val: rel.sym)) {
    if (ctx.target->inBranchRange(type: rel.type, src,
                                  dst: rel.sym->getVA(ctx, addend: rel.addend)))
      return true;
    // Out of range: restore the original destination and addend so a new,
    // closer Thunk can be created for this relocation on this pass.
    rel.sym = &t->destination;
    rel.addend = t->addend;
    // The original destination may resolve via the PLT; re-apply the PLT
    // expression that fromPlt() stripped when the Thunk was installed.
    if (rel.sym->isInPlt(ctx))
      rel.expr = toPlt(expr: rel.expr);
  }
  return false;
}
2378
2379// When indirect branches are restricted, such as AArch64 BTI Thunks may need
2380// to target a linker generated landing pad instead of the target. This needs
2381// to be done once per pass as the need for a BTI thunk is dependent whether
2382// a thunk is short or long. We iterate over all the thunks to make sure we
2383// catch thunks that have been created but are no longer live. Non-live thunks
2384// are not reachable via normalizeExistingThunk() but are still written.
2385bool ThunkCreator::addSyntheticLandingPads() {
2386 bool addressesChanged = false;
2387 for (Thunk *t : allThunks) {
2388 if (!t->needsSyntheticLandingPad())
2389 continue;
2390 Thunk *lpt;
2391 bool isNew;
2392 auto &dr = cast<Defined>(Val&: t->destination);
2393 std::tie(args&: lpt, args&: isNew) = getSyntheticLandingPad(d&: dr, a: t->addend);
2394 if (isNew) {
2395 addressesChanged = true;
2396 getISThunkSec(isec: cast<InputSection>(Val: dr.section))->addThunk(t: lpt);
2397 }
2398 t->landingPad = lpt->getThunkTargetSym();
2399 }
2400 return addressesChanged;
2401}
2402
// Process all relocations from the InputSections that have been assigned
// to InputSectionDescriptions and redirect through Thunks if needed. The
// function should be called iteratively until it returns false.
//
// PreConditions:
// All InputSections that may need a Thunk are reachable from
// OutputSectionCommands.
//
// All OutputSections have an address and all InputSections have an offset
// within the OutputSection.
//
// The offsets between caller (relocation place) and callee
// (relocation target) will not be modified outside of createThunks().
//
// PostConditions:
// If return value is true then ThunkSections have been inserted into
// OutputSections. All relocations that needed a Thunk based on the information
// available to createThunks() on entry have been redirected to a Thunk. Note
// that adding Thunks changes offsets between caller and callee so more Thunks
// may be required.
//
// If return value is false then no more Thunks are needed, and createThunks has
// made no changes. If the target requires range extension thunks, currently
// ARM, then any future change in offset between caller and callee risks a
// relocation out of range error.
bool ThunkCreator::createThunks(uint32_t pass,
                                ArrayRef<OutputSection *> outputSections) {
  this->pass = pass;
  bool addressesChanged = false;

  // Pre-create spaced ThunkSections on the first pass only; later passes
  // reuse them or create targeted ones via getISDThunkSec()/getISThunkSec().
  if (pass == 0 && ctx.target->getThunkSectionSpacing())
    createInitialThunkSections(outputSections);

  if (ctx.arg.emachine == EM_AARCH64)
    addressesChanged = addSyntheticLandingPads();

  // Create all the Thunks and insert them into synthetic ThunkSections. The
  // ThunkSections are later inserted back into InputSectionDescriptions.
  // We separate the creation of ThunkSections from the insertion of the
  // ThunkSections as ThunkSections are not always inserted into the same
  // InputSectionDescription as the caller.
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs()) {
            uint64_t src = isec->getVA(offset: rel.offset);

            // If we are a relocation to an existing Thunk, check if it is
            // still in range. If not then Rel will be altered to point to its
            // original target so another Thunk can be generated.
            if (pass > 0 && normalizeExistingThunk(rel, src))
              continue;

            if (!ctx.target->needsThunk(expr: rel.expr, relocType: rel.type, file: isec->file, branchAddr: src,
                                        s: *rel.sym, a: rel.addend))
              continue;

            Thunk *t;
            bool isNew;
            std::tie(args&: t, args&: isNew) = getThunk(isec, rel, src);

            if (isNew) {
              // Find or create a ThunkSection for the new Thunk
              ThunkSection *ts;
              if (auto *tis = t->getTargetInputSection())
                ts = getISThunkSec(isec: tis);
              else
                ts = getISDThunkSec(os, isec, isd, rel, src);
              ts->addThunk(t);
              // Record the thunk so normalizeExistingThunk() can find it by
              // its target symbol on subsequent passes.
              thunks[t->getThunkTargetSym()] = t;
              allThunks.push_back(x: t);
            }

            // Redirect relocation to Thunk, we never go via the PLT to a Thunk
            rel.sym = t->getThunkTargetSym();
            rel.expr = fromPlt(expr: rel.expr);

            // On AArch64 and PPC, a jump/call relocation may be encoded as
            // STT_SECTION + non-zero addend, clear the addend after
            // redirection.
            if (ctx.arg.emachine != EM_MIPS)
              rel.addend = -getPCBias(ctx, type: rel.type);
          }

        // Lay out the thunks within each ThunkSection; a size change means
        // addresses moved and another pass is required.
        for (auto &p : isd->thunkSections)
          addressesChanged |= p.first->assignOffsets();
      });

  for (auto &p : thunkedSections)
    addressesChanged |= p.second->assignOffsets();

  // Merge all created synthetic ThunkSections back into OutputSection
  mergeThunks(outputSections);
  return addressesChanged;
}
2498
2499// The following aid in the conversion of call x@GDPLT to call __tls_get_addr
2500// hexagonNeedsTLSSymbol scans for relocations would require a call to
2501// __tls_get_addr.
2502// hexagonTLSSymbolUpdate rebinds the relocation to __tls_get_addr.
2503bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) {
2504 bool needTlsSymbol = false;
2505 forEachInputSectionDescription(
2506 outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
2507 for (InputSection *isec : isd->sections)
2508 for (Relocation &rel : isec->relocs())
2509 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
2510 needTlsSymbol = true;
2511 return;
2512 }
2513 });
2514 return needTlsSymbol;
2515}
2516
// Rebind every `call x@GDPLT`-style relocation (STT_TLS symbol with R_PLT_PC)
// to __tls_get_addr, allocating a single PLT entry for it on first use.
// No-op if __tls_get_addr is not present in the symbol table.
void elf::hexagonTLSSymbolUpdate(Ctx &ctx) {
  Symbol *sym = ctx.symtab->find(name: "__tls_get_addr");
  if (!sym)
    return;
  bool needEntry = true;
  forEachInputSectionDescription(
      outputSections: ctx.outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs())
            if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
              // Create the PLT entry lazily, exactly once, when the first
              // matching relocation is seen.
              if (needEntry) {
                sym->allocateAux(ctx);
                addPltEntry(ctx, plt&: *ctx.in.plt, gotPlt&: *ctx.in.gotPlt, rel&: *ctx.in.relaPlt,
                            type: ctx.target->pltRel, sym&: *sym);
                needEntry = false;
              }
              rel.sym = sym;
            }
      });
}
2537
2538static bool matchesRefTo(const NoCrossRefCommand &cmd, StringRef osec) {
2539 if (cmd.toFirst)
2540 return cmd.outputSections[0] == osec;
2541 return llvm::is_contained(Range: cmd.outputSections, Element: osec);
2542}
2543
// Emit an error for each relocation in sec (placed in osec) whose target
// symbol resolves into an output section that cmd prohibits referencing
// from osec.
template <class ELFT, class Rels>
static void scanCrossRefs(Ctx &ctx, const NoCrossRefCommand &cmd,
                          OutputSection *osec, InputSection *sec, Rels rels) {
  for (const auto &r : rels) {
    Symbol &sym = sec->file->getSymbol(symbolIndex: r.getSymbol(ctx.arg.isMips64EL));
    // A legal cross-reference is when the destination output section is
    // nullptr, osec for a self-reference, or a section that is described by the
    // NOCROSSREFS/NOCROSSREFS_TO command.
    auto *dstOsec = sym.getOutputSection();
    if (!dstOsec || dstOsec == osec || !matchesRefTo(cmd, osec: dstOsec->name))
      continue;

    // Name the offending symbol in the diagnostic; for a section symbol,
    // report the containing section's name instead.
    std::string toSymName;
    if (!sym.isSection())
      toSymName = toStr(ctx, sym);
    else if (auto *d = dyn_cast<Defined>(Val: &sym))
      toSymName = d->section->name;
    Err(ctx) << sec->getLocation(offset: r.r_offset)
             << ": prohibited cross reference from '" << osec->name << "' to '"
             << toSymName << "' in '" << dstOsec->name << "'";
  }
}
2566
// For each output section described by at least one NOCROSSREFS(_TO) command,
// scan relocations from its input sections for prohibited cross references.
template <class ELFT> void elf::checkNoCrossRefs(Ctx &ctx) {
  for (OutputSection *osec : ctx.outputSections) {
    for (const NoCrossRefCommand &noxref : ctx.script->noCrossRefs) {
      // Skip commands that don't restrict references *from* osec: osec must
      // be listed, and under NOCROSSREFS_TO (toFirst) the first entry is only
      // a destination, so references originating from it are allowed.
      if (!llvm::is_contained(Range: noxref.outputSections, Element: osec->name) ||
          (noxref.toFirst && noxref.outputSections[0] == osec->name))
        continue;
      for (SectionCommand *cmd : osec->commands) {
        auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
        if (!isd)
          continue;
        // Each input section's relocations are scanned independently, so the
        // work can run in parallel; scanCrossRefs only reads shared state and
        // reports errors.
        parallelForEach(isd->sections, [&](InputSection *sec) {
          invokeOnRelocs(*sec, scanCrossRefs<ELFT>, ctx, noxref, osec, sec);
        });
      }
    }
  }
}
2586
// Explicit instantiations for each supported ELF layout so the template
// definitions can stay in this translation unit.
template void elf::scanRelocations<ELF32LE>(Ctx &);
template void elf::scanRelocations<ELF32BE>(Ctx &);
template void elf::scanRelocations<ELF64LE>(Ctx &);
template void elf::scanRelocations<ELF64BE>(Ctx &);

template void elf::checkNoCrossRefs<ELF32LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF32BE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64BE>(Ctx &);
2596