1//===- Relocations.cpp ----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains platform-independent functions to process relocations.
// Here is an overview of this file.
//
// Simple relocations are easy for the linker to handle. For example,
// for R_X86_64_PC64 relocs, the linker just has to fix up locations
// with the relative offsets to the target symbols. It simply reads
// records from relocation sections and applies them to the output.
//
// But not all relocations are that easy to handle. For example, for
// R_386_GOTOFF relocs, the linker has to create new GOT entries for
// symbols if they don't exist, and fix up locations with GOT entry
// offsets from the beginning of the GOT section. So there is more to
// relocation processing than fixing up addresses.
22//
23// ELF defines a large number of complex relocations.
24//
// The functions in this file analyze relocations and do whatever needs
// to be done. That includes, but is not limited to, the following.
//
//  - create GOT/PLT entries
//  - create new relocations in .rela.dyn to let the dynamic linker resolve
//    them at runtime (since ELF supports dynamic linking, not all
//    relocations can be resolved at link-time)
//  - create COPY relocs and reserve space in .bss
//  - replace expensive relocs (in terms of runtime cost) with cheap ones
//  - report errors for infeasible combinations such as PIC and non-relative
//    relocs
//
// Note that the functions in this file don't actually apply relocations
// because they don't know about the output file or the output file buffer.
// They instead store Relocation objects in InputSection's relocations
// vector so that they can be applied later in InputSection::writeTo.
40//
41//===----------------------------------------------------------------------===//
42
43#include "Relocations.h"
44#include "Config.h"
45#include "InputFiles.h"
46#include "LinkerScript.h"
47#include "OutputSections.h"
48#include "SymbolTable.h"
49#include "Symbols.h"
50#include "SyntheticSections.h"
51#include "Target.h"
52#include "Thunks.h"
53#include "lld/Common/ErrorHandler.h"
54#include "lld/Common/Memory.h"
55#include "llvm/ADT/SmallSet.h"
56#include "llvm/BinaryFormat/ELF.h"
57#include "llvm/Demangle/Demangle.h"
58#include "llvm/Support/Endian.h"
59#include <algorithm>
60
61using namespace llvm;
62using namespace llvm::ELF;
63using namespace llvm::object;
64using namespace llvm::support::endian;
65using namespace lld;
66using namespace lld::elf;
67
68static std::optional<std::string> getLinkerScriptLocation(const Symbol &sym) {
69 for (SectionCommand *cmd : script->sectionCommands)
70 if (auto *assign = dyn_cast<SymbolAssignment>(Val: cmd))
71 if (assign->sym == &sym)
72 return assign->location;
73 return std::nullopt;
74}
75
76static std::string getDefinedLocation(const Symbol &sym) {
77 const char msg[] = "\n>>> defined in ";
78 if (sym.file)
79 return msg + toString(f: sym.file);
80 if (std::optional<std::string> loc = getLinkerScriptLocation(sym))
81 return msg + *loc;
82 return "";
83}
84
85// Construct a message in the following format.
86//
87// >>> defined in /home/alice/src/foo.o
88// >>> referenced by bar.c:12 (/home/alice/src/bar.c:12)
89// >>> /home/alice/src/bar.o:(.text+0x1)
90static std::string getLocation(InputSectionBase &s, const Symbol &sym,
91 uint64_t off) {
92 std::string msg = getDefinedLocation(sym) + "\n>>> referenced by ";
93 std::string src = s.getSrcMsg(sym, offset: off);
94 if (!src.empty())
95 msg += src + "\n>>> ";
96 return msg + s.getObjMsg(offset: off);
97}
98
99void elf::reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v,
100 int64_t min, uint64_t max) {
101 ErrorPlace errPlace = getErrorPlace(loc);
102 std::string hint;
103 if (rel.sym) {
104 if (!rel.sym->isSection())
105 hint = "; references '" + lld::toString(*rel.sym) + '\'';
106 else if (auto *d = dyn_cast<Defined>(Val: rel.sym))
107 hint = ("; references section '" + d->section->name + "'").str();
108
109 if (config->emachine == EM_X86_64 && rel.type == R_X86_64_PC32 &&
110 rel.sym->getOutputSection() &&
111 (rel.sym->getOutputSection()->flags & SHF_X86_64_LARGE)) {
112 hint += "; R_X86_64_PC32 should not reference a section marked "
113 "SHF_X86_64_LARGE";
114 }
115 }
116 if (!errPlace.srcLoc.empty())
117 hint += "\n>>> referenced by " + errPlace.srcLoc;
118 if (rel.sym && !rel.sym->isSection())
119 hint += getDefinedLocation(sym: *rel.sym);
120
121 if (errPlace.isec && errPlace.isec->name.starts_with(Prefix: ".debug"))
122 hint += "; consider recompiling with -fdebug-types-section to reduce size "
123 "of debug sections";
124
125 errorOrWarn(msg: errPlace.loc + "relocation " + lld::toString(type: rel.type) +
126 " out of range: " + v.str() + " is not in [" + Twine(min).str() +
127 ", " + Twine(max).str() + "]" + hint);
128}
129
130void elf::reportRangeError(uint8_t *loc, int64_t v, int n, const Symbol &sym,
131 const Twine &msg) {
132 ErrorPlace errPlace = getErrorPlace(loc);
133 std::string hint;
134 if (!sym.getName().empty())
135 hint =
136 "; references '" + lld::toString(sym) + '\'' + getDefinedLocation(sym);
137 errorOrWarn(msg: errPlace.loc + msg + " is out of range: " + Twine(v) +
138 " is not in [" + Twine(llvm::minIntN(N: n)) + ", " +
139 Twine(llvm::maxIntN(N: n)) + "]" + hint);
140}
141
// Build a bitmask with one bit set for each RelExpr in the argument list
// that falls in [0, 64); values outside that range are ignored.
143static constexpr uint64_t buildMask() { return 0; }
144
145template <typename... Tails>
146static constexpr uint64_t buildMask(int head, Tails... tails) {
147 return (0 <= head && head < 64 ? uint64_t(1) << head : 0) |
148 buildMask(tails...);
149}
150
151// Return true if `Expr` is one of `Exprs`.
152// There are more than 64 but less than 128 RelExprs, so we divide the set of
153// exprs into [0, 64) and [64, 128) and represent each range as a constant
154// 64-bit mask. Then we decide which mask to test depending on the value of
155// expr and use a simple shift and bitwise-and to test for membership.
156template <RelExpr... Exprs> static bool oneof(RelExpr expr) {
157 assert(0 <= expr && (int)expr < 128 &&
158 "RelExpr is too large for 128-bit mask!");
159
160 if (expr >= 64)
161 return (uint64_t(1) << (expr - 64)) & buildMask((Exprs - 64)...);
162 return (uint64_t(1) << expr) & buildMask(Exprs...);
163}
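
// Illustrative sketch only (hypothetical RelExpr values): assuming R_PC == 5
// and R_GOT == 70, oneof<R_PC, R_GOT>(expr) reduces to a test against two
// constant 64-bit masks, roughly:
//   expr < 64 ? (uint64_t(1) << expr) & (uint64_t(1) << 5)
//             : (uint64_t(1) << (expr - 64)) & (uint64_t(1) << (70 - 64));
// buildMask() drops values outside the active [0, 64) window, so each
// instantiation boils down to a shift and a bitwise AND.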
164
165static RelType getMipsPairType(RelType type, bool isLocal) {
166 switch (type) {
167 case R_MIPS_HI16:
168 return R_MIPS_LO16;
169 case R_MIPS_GOT16:
    // In the case of a global symbol, the R_MIPS_GOT16 relocation does not
    // have a pair. Each global symbol has a unique entry in the GOT, and a
    // corresponding instruction loads the symbol's address from that entry
    // with the help of the R_MIPS_GOT16 relocation. In the case of a local
    // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold the
    // high 16 bits of the symbol's value, and a paired R_MIPS_LO16
    // relocation handles the low 16 bits of the address. That allows
    // allocating only one GOT entry for every 64 KiB of local data.
178 return isLocal ? R_MIPS_LO16 : R_MIPS_NONE;
179 case R_MICROMIPS_GOT16:
180 return isLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE;
181 case R_MIPS_PCHI16:
182 return R_MIPS_PCLO16;
183 case R_MICROMIPS_HI16:
184 return R_MICROMIPS_LO16;
185 default:
186 return R_MIPS_NONE;
187 }
188}
189
// True if a non-preemptible symbol always has the same value regardless of
// where the DSO is loaded.
192static bool isAbsolute(const Symbol &sym) {
193 if (sym.isUndefWeak())
194 return true;
195 if (const auto *dr = dyn_cast<Defined>(Val: &sym))
196 return dr->section == nullptr; // Absolute symbol.
197 return false;
198}
199
200static bool isAbsoluteValue(const Symbol &sym) {
201 return isAbsolute(sym) || sym.isTls();
202}
203
// Returns true if Expr refers to a PLT entry.
205static bool needsPlt(RelExpr expr) {
206 return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL,
207 R_GOTPLT_PC, R_LOONGARCH_PLT_PAGE_PC, R_PPC32_PLTREL,
208 R_PPC64_CALL_PLT>(expr);
209}
210
211bool lld::elf::needsGot(RelExpr expr) {
212 return oneof<R_GOT, R_GOT_OFF, R_MIPS_GOT_LOCAL_PAGE, R_MIPS_GOT_OFF,
213 R_MIPS_GOT_OFF32, R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTPLT,
214 R_AARCH64_GOT_PAGE, R_LOONGARCH_GOT, R_LOONGARCH_GOT_PAGE_PC>(
215 expr);
216}
217
218// True if this expression is of the form Sym - X, where X is a position in the
219// file (PC, or GOT for example).
220static bool isRelExpr(RelExpr expr) {
221 return oneof<R_PC, R_GOTREL, R_GOTPLTREL, R_ARM_PCA, R_MIPS_GOTREL,
222 R_PPC64_CALL, R_PPC64_RELAX_TOC, R_AARCH64_PAGE_PC,
223 R_RELAX_GOT_PC, R_RISCV_PC_INDIRECT, R_PPC64_RELAX_GOT_PC,
224 R_LOONGARCH_PAGE_PC>(expr);
225}
226
227static RelExpr toPlt(RelExpr expr) {
228 switch (expr) {
229 case R_LOONGARCH_PAGE_PC:
230 return R_LOONGARCH_PLT_PAGE_PC;
231 case R_PPC64_CALL:
232 return R_PPC64_CALL_PLT;
233 case R_PC:
234 return R_PLT_PC;
235 case R_ABS:
236 return R_PLT;
237 case R_GOTREL:
238 return R_PLT_GOTREL;
239 default:
240 return expr;
241 }
242}
243
244static RelExpr fromPlt(RelExpr expr) {
245 // We decided not to use a plt. Optimize a reference to the plt to a
246 // reference to the symbol itself.
247 switch (expr) {
248 case R_PLT_PC:
249 case R_PPC32_PLTREL:
250 return R_PC;
251 case R_LOONGARCH_PLT_PAGE_PC:
252 return R_LOONGARCH_PAGE_PC;
253 case R_PPC64_CALL_PLT:
254 return R_PPC64_CALL;
255 case R_PLT:
256 return R_ABS;
257 case R_PLT_GOTPLT:
258 return R_GOTPLTREL;
259 case R_PLT_GOTREL:
260 return R_GOTREL;
261 default:
262 return expr;
263 }
264}
265
266// Returns true if a given shared symbol is in a read-only segment in a DSO.
267template <class ELFT> static bool isReadOnly(SharedSymbol &ss) {
268 using Elf_Phdr = typename ELFT::Phdr;
269
270 // Determine if the symbol is read-only by scanning the DSO's program headers.
271 const auto &file = cast<SharedFile>(Val&: *ss.file);
272 for (const Elf_Phdr &phdr :
273 check(file.template getObj<ELFT>().program_headers()))
274 if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) &&
275 !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr &&
276 ss.value < phdr.p_vaddr + phdr.p_memsz)
277 return true;
278 return false;
279}
280
// Returns the symbols at the same offset as a given symbol, including ss
// itself.
//
// If two or more symbols are at the same offset, and at least one of
// them is copied by a copy relocation, all of them need to be copied.
// Otherwise, they would refer to different places at runtime.
286template <class ELFT>
287static SmallSet<SharedSymbol *, 4> getSymbolsAt(SharedSymbol &ss) {
288 using Elf_Sym = typename ELFT::Sym;
289
290 const auto &file = cast<SharedFile>(Val&: *ss.file);
291
292 SmallSet<SharedSymbol *, 4> ret;
293 for (const Elf_Sym &s : file.template getGlobalELFSyms<ELFT>()) {
294 if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS ||
295 s.getType() == STT_TLS || s.st_value != ss.value)
296 continue;
297 StringRef name = check(s.getName(file.getStringTable()));
298 Symbol *sym = symtab.find(name);
299 if (auto *alias = dyn_cast_or_null<SharedSymbol>(Val: sym))
300 ret.insert(Ptr: alias);
301 }
302
303 // The loop does not check SHT_GNU_verneed, so ret does not contain
304 // non-default version symbols. If ss has a non-default version, ret won't
305 // contain ss. Just add ss unconditionally. If a non-default version alias is
306 // separately copy relocated, it and ss will have different addresses.
307 // Fortunately this case is impractical and fails with GNU ld as well.
308 ret.insert(Ptr: &ss);
309 return ret;
310}
311
312// When a symbol is copy relocated or we create a canonical plt entry, it is
313// effectively a defined symbol. In the case of copy relocation the symbol is
314// in .bss and in the case of a canonical plt entry it is in .plt. This function
315// replaces the existing symbol with a Defined pointing to the appropriate
316// location.
317static void replaceWithDefined(Symbol &sym, SectionBase &sec, uint64_t value,
318 uint64_t size) {
319 Symbol old = sym;
320 Defined(sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value,
321 size, &sec)
322 .overwrite(sym);
323
324 sym.versionId = old.versionId;
325 sym.exportDynamic = true;
326 sym.isUsedInRegularObj = true;
327 // A copy relocated alias may need a GOT entry.
328 sym.flags.store(i: old.flags.load(m: std::memory_order_relaxed) & NEEDS_GOT,
329 m: std::memory_order_relaxed);
330}
331
// Reserve space in .bss or .bss.rel.ro for a copy relocation.
//
// The copy relocation is pretty much a hack. If you use a copy relocation
// in your program, not only the symbol name but also the symbol's size,
// RW/RO bit and alignment become part of the ABI. In addition to that, if
// the symbol has aliases, the aliases become part of the ABI too. That's
// subtle, but violating that implicit ABI can have very counter-intuitive
// consequences.
340//
341// So, what is the copy relocation? It's for linking non-position
342// independent code to DSOs. In an ideal world, all references to data
343// exported by DSOs should go indirectly through GOT. But if object files
344// are compiled as non-PIC, all data references are direct. There is no
345// way for the linker to transform the code to use GOT, as machine
346// instructions are already set in stone in object files. This is where
347// the copy relocation takes a role.
348//
349// A copy relocation instructs the dynamic linker to copy data from a DSO
350// to a specified address (which is usually in .bss) at load-time. If the
351// static linker (that's us) finds a direct data reference to a DSO
352// symbol, it creates a copy relocation, so that the symbol can be
353// resolved as if it were in .bss rather than in a DSO.
354//
355// As you can see in this function, we create a copy relocation for the
356// dynamic linker, and the relocation contains not only symbol name but
357// various other information about the symbol. So, such attributes become a
358// part of the ABI.
359//
360// Note for application developers: I can give you a piece of advice if
361// you are writing a shared library. You probably should export only
362// functions from your library. You shouldn't export variables.
363//
// As an example of what can happen when you export variables without knowing
// the semantics of copy relocations, assume that you have an exported
// variable of type T. It is an ABI-breaking change to add new members at the
// end of T even though doing that doesn't change the layout of the
// existing members. That's because the space for the new members is not
// reserved in .bss unless you recompile the main program. That means they
370// are likely to overlap with other data that happens to be laid out next
371// to the variable in .bss. This kind of issue is sometimes very hard to
372// debug. What's a solution? Instead of exporting a variable V from a DSO,
373// define an accessor getV().
374template <class ELFT> static void addCopyRelSymbol(SharedSymbol &ss) {
  // A copy relocation against a zero-sized symbol doesn't make sense.
376 uint64_t symSize = ss.getSize();
377 if (symSize == 0 || ss.alignment == 0)
378 fatal(msg: "cannot create a copy relocation for symbol " + toString(ss));
379
380 // See if this symbol is in a read-only segment. If so, preserve the symbol's
381 // memory protection by reserving space in the .bss.rel.ro section.
382 bool isRO = isReadOnly<ELFT>(ss);
383 BssSection *sec =
384 make<BssSection>(args: isRO ? ".bss.rel.ro" : ".bss", args&: symSize, args&: ss.alignment);
385 OutputSection *osec = (isRO ? in.bssRelRo : in.bss)->getParent();
386
387 // At this point, sectionBases has been migrated to sections. Append sec to
388 // sections.
389 if (osec->commands.empty() ||
390 !isa<InputSectionDescription>(Val: osec->commands.back()))
391 osec->commands.push_back(Elt: make<InputSectionDescription>(args: ""));
392 auto *isd = cast<InputSectionDescription>(Val: osec->commands.back());
393 isd->sections.push_back(Elt: sec);
394 osec->commitSection(isec: sec);
395
396 // Look through the DSO's dynamic symbol table for aliases and create a
397 // dynamic symbol for each one. This causes the copy relocation to correctly
398 // interpose any aliases.
399 for (SharedSymbol *sym : getSymbolsAt<ELFT>(ss))
400 replaceWithDefined(sym&: *sym, sec&: *sec, value: 0, size: sym->size);
401
402 mainPart->relaDyn->addSymbolReloc(dynType: target->copyRel, isec&: *sec, offsetInSec: 0, sym&: ss);
403}
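
// A hypothetical illustration of the ABI hazard described above (not part of
// the linker logic): suppose a DSO exports
//   struct T { int a; };
//   extern T t;   // referenced directly by a non-PIC executable
// The executable reserves sizeof(T) bytes in .bss and a copy relocation fills
// them at load time. If the DSO later grows T (say, adds `int b;`) without
// the executable being relinked, only the old, smaller copy is reserved, and
// the extra bytes silently alias whatever follows in .bss. Exporting an
// accessor such as `T &getT();` instead avoids baking the size into the ABI.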
404
// .eh_frame sections are mergeable input sections, so their input
// offsets are not linearly mapped to the output section. For each input
// offset, we need to find the section piece containing the offset and
// add the piece's base address to the input offset to compute the
// output offset. That isn't cheap.
//
// This class speeds up the offset computation. When we process
// relocations, we access offsets in monotonically increasing order,
// so we can optimize for that access pattern.
//
// For sections other than .eh_frame, this class doesn't do anything.
416namespace {
417class OffsetGetter {
418public:
419 OffsetGetter() = default;
420 explicit OffsetGetter(InputSectionBase &sec) {
421 if (auto *eh = dyn_cast<EhInputSection>(Val: &sec)) {
422 cies = eh->cies;
423 fdes = eh->fdes;
424 i = cies.begin();
425 j = fdes.begin();
426 }
427 }
428
  // Translates offsets in input sections to offsets in output sections.
  // The given offset must increase monotonically across calls. We assume
  // that the pieces are sorted by inputOff.
432 uint64_t get(uint64_t off) {
433 if (cies.empty())
434 return off;
435
436 while (j != fdes.end() && j->inputOff <= off)
437 ++j;
438 auto it = j;
439 if (j == fdes.begin() || j[-1].inputOff + j[-1].size <= off) {
440 while (i != cies.end() && i->inputOff <= off)
441 ++i;
442 if (i == cies.begin() || i[-1].inputOff + i[-1].size <= off)
443 fatal(msg: ".eh_frame: relocation is not in any piece");
444 it = i;
445 }
446
447 // Offset -1 means that the piece is dead (i.e. garbage collected).
448 if (it[-1].outputOff == -1)
449 return -1;
450 return it[-1].outputOff + (off - it[-1].inputOff);
451 }
452
453private:
454 ArrayRef<EhSectionPiece> cies, fdes;
455 ArrayRef<EhSectionPiece>::iterator i, j;
456};
457
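// For illustration only (hypothetical numbers): if fdes contains a piece
// {inputOff = 32, size = 24, outputOff = 16}, then get(36) finds that piece
// and returns 16 + (36 - 32) = 20; had the piece been garbage collected
// (outputOff == -1), -1 would be returned instead.
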
// This class encapsulates the state needed to scan relocations for one
// InputSectionBase.
460class RelocationScanner {
461public:
462 template <class ELFT>
463 void scanSection(InputSectionBase &s, bool isEH = false);
464
465private:
466 InputSectionBase *sec;
467 OffsetGetter getter;
468
469 // End of relocations, used by Mips/PPC64.
470 const void *end = nullptr;
471
472 template <class RelTy> RelType getMipsN32RelType(RelTy *&rel) const;
473 template <class ELFT, class RelTy>
474 int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const;
475 bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
476 uint64_t relOff) const;
477 void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym,
478 int64_t addend) const;
479 template <class ELFT, class RelTy>
480 void scanOne(typename Relocs<RelTy>::const_iterator &i);
481 template <class ELFT, class RelTy> void scan(Relocs<RelTy> rels);
482};
483} // namespace
484
// MIPS has an odd notion of "paired" relocations to calculate addends.
// For example, if a relocation is R_MIPS_HI16, there must be an
// R_MIPS_LO16 relocation after it, and the addend is calculated using
// the two relocations.
489template <class ELFT, class RelTy>
490int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr,
491 bool isLocal) const {
492 if (expr == R_MIPS_GOTREL && isLocal)
493 return sec->getFile<ELFT>()->mipsGp0;
494
495 // The ABI says that the paired relocation is used only for REL.
496 // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
497 // This generalises to relocation types with implicit addends.
498 if (RelTy::HasAddend)
499 return 0;
500
501 RelType type = rel.getType(config->isMips64EL);
502 uint32_t pairTy = getMipsPairType(type, isLocal);
503 if (pairTy == R_MIPS_NONE)
504 return 0;
505
506 const uint8_t *buf = sec->content().data();
507 uint32_t symIndex = rel.getSymbol(config->isMips64EL);
508
  // To make things worse, paired relocations might not be contiguous in
  // the relocation table, so we need to do a linear search. *sigh*
511 for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri)
512 if (ri->getType(config->isMips64EL) == pairTy &&
513 ri->getSymbol(config->isMips64EL) == symIndex)
514 return target->getImplicitAddend(buf: buf + ri->r_offset, type: pairTy);
515
516 warn(msg: "can't find matching " + toString(type: pairTy) + " relocation for " +
517 toString(type));
518 return 0;
519}
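
// For example (illustration only), a REL-format pair such as
//   lui   $t0, %hi(sym)       # R_MIPS_HI16
//   addiu $t0, $t0, %lo(sym)  # R_MIPS_LO16
// keeps its addend split across the two instructions' immediate fields; the
// paired R_MIPS_LO16 holds the low part, which this function reads back via
// getImplicitAddend.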
520
521// Custom error message if Sym is defined in a discarded section.
522template <class ELFT>
523static std::string maybeReportDiscarded(Undefined &sym) {
524 auto *file = dyn_cast_or_null<ObjFile<ELFT>>(sym.file);
525 if (!file || !sym.discardedSecIdx)
526 return "";
527 ArrayRef<typename ELFT::Shdr> objSections =
528 file->template getELFShdrs<ELFT>();
529
530 std::string msg;
531 if (sym.type == ELF::STT_SECTION) {
532 msg = "relocation refers to a discarded section: ";
533 msg += CHECK(
534 file->getObj().getSectionName(objSections[sym.discardedSecIdx]), file);
535 } else {
536 msg = "relocation refers to a symbol in a discarded section: " +
537 toString(sym);
538 }
539 msg += "\n>>> defined in " + toString(file);
540
541 Elf_Shdr_Impl<ELFT> elfSec = objSections[sym.discardedSecIdx - 1];
542 if (elfSec.sh_type != SHT_GROUP)
543 return msg;
544
545 // If the discarded section is a COMDAT.
546 StringRef signature = file->getShtGroupSignature(objSections, elfSec);
547 if (const InputFile *prevailing =
548 symtab.comdatGroups.lookup(Val: CachedHashStringRef(signature))) {
549 msg += "\n>>> section group signature: " + signature.str() +
550 "\n>>> prevailing definition is in " + toString(f: prevailing);
551 if (sym.nonPrevailing) {
552 msg += "\n>>> or the symbol in the prevailing group had STB_WEAK "
553 "binding and the symbol in a non-prevailing group had STB_GLOBAL "
554 "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding "
555 "signature is not supported";
556 }
557 }
558 return msg;
559}
560
561namespace {
562// Undefined diagnostics are collected in a vector and emitted once all of
563// them are known, so that some postprocessing on the list of undefined symbols
564// can happen before lld emits diagnostics.
565struct UndefinedDiag {
566 Undefined *sym;
567 struct Loc {
568 InputSectionBase *sec;
569 uint64_t offset;
570 };
571 std::vector<Loc> locs;
572 bool isWarning;
573};
574
575std::vector<UndefinedDiag> undefs;
576std::mutex relocMutex;
577}
578
579// Check whether the definition name def is a mangled function name that matches
580// the reference name ref.
581static bool canSuggestExternCForCXX(StringRef ref, StringRef def) {
582 llvm::ItaniumPartialDemangler d;
583 std::string name = def.str();
584 if (d.partialDemangle(MangledName: name.c_str()))
585 return false;
586 char *buf = d.getFunctionName(Buf: nullptr, N: nullptr);
587 if (!buf)
588 return false;
589 bool ret = ref == buf;
590 free(ptr: buf);
591 return ret;
592}
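
// For example, canSuggestExternCForCXX("foo", "_Z3foov") is expected to
// return true: "_Z3foov" demangles to "foo()", whose base name matches the
// unmangled reference "foo".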
593
// Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns
// the suggested symbol, which is either in the symbol table, or in the same
// file as sym.
597static const Symbol *getAlternativeSpelling(const Undefined &sym,
598 std::string &pre_hint,
599 std::string &post_hint) {
600 DenseMap<StringRef, const Symbol *> map;
601 if (sym.file && sym.file->kind() == InputFile::ObjKind) {
602 auto *file = cast<ELFFileBase>(Val: sym.file);
603 // If sym is a symbol defined in a discarded section, maybeReportDiscarded()
604 // will give an error. Don't suggest an alternative spelling.
605 if (file && sym.discardedSecIdx != 0 &&
606 file->getSections()[sym.discardedSecIdx] == &InputSection::discarded)
607 return nullptr;
608
609 // Build a map of local defined symbols.
610 for (const Symbol *s : sym.file->getSymbols())
611 if (s->isLocal() && s->isDefined() && !s->getName().empty())
612 map.try_emplace(Key: s->getName(), Args&: s);
613 }
614
615 auto suggest = [&](StringRef newName) -> const Symbol * {
616 // If defined locally.
617 if (const Symbol *s = map.lookup(Val: newName))
618 return s;
619
620 // If in the symbol table and not undefined.
621 if (const Symbol *s = symtab.find(name: newName))
622 if (!s->isUndefined())
623 return s;
624
625 return nullptr;
626 };
627
628 // This loop enumerates all strings of Levenshtein distance 1 as typo
629 // correction candidates and suggests the one that exists as a non-undefined
630 // symbol.
631 StringRef name = sym.getName();
632 for (size_t i = 0, e = name.size(); i != e + 1; ++i) {
633 // Insert a character before name[i].
634 std::string newName = (name.substr(Start: 0, N: i) + "0" + name.substr(Start: i)).str();
635 for (char c = '0'; c <= 'z'; ++c) {
636 newName[i] = c;
637 if (const Symbol *s = suggest(newName))
638 return s;
639 }
640 if (i == e)
641 break;
642
643 // Substitute name[i].
644 newName = std::string(name);
645 for (char c = '0'; c <= 'z'; ++c) {
646 newName[i] = c;
647 if (const Symbol *s = suggest(newName))
648 return s;
649 }
650
651 // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is
652 // common.
653 if (i + 1 < e) {
654 newName[i] = name[i + 1];
655 newName[i + 1] = name[i];
656 if (const Symbol *s = suggest(newName))
657 return s;
658 }
659
660 // Delete name[i].
661 newName = (name.substr(Start: 0, N: i) + name.substr(Start: i + 1)).str();
662 if (const Symbol *s = suggest(newName))
663 return s;
664 }
665
666 // Case mismatch, e.g. Foo vs FOO.
667 for (auto &it : map)
668 if (name.equals_insensitive(RHS: it.first))
669 return it.second;
670 for (Symbol *sym : symtab.getSymbols())
671 if (!sym->isUndefined() && name.equals_insensitive(RHS: sym->getName()))
672 return sym;
673
674 // The reference may be a mangled name while the definition is not. Suggest a
675 // missing extern "C".
676 if (name.starts_with(Prefix: "_Z")) {
677 std::string buf = name.str();
678 llvm::ItaniumPartialDemangler d;
679 if (!d.partialDemangle(MangledName: buf.c_str()))
680 if (char *buf = d.getFunctionName(Buf: nullptr, N: nullptr)) {
681 const Symbol *s = suggest(buf);
682 free(ptr: buf);
683 if (s) {
684 pre_hint = ": extern \"C\" ";
685 return s;
686 }
687 }
688 } else {
689 const Symbol *s = nullptr;
690 for (auto &it : map)
691 if (canSuggestExternCForCXX(ref: name, def: it.first)) {
692 s = it.second;
693 break;
694 }
695 if (!s)
696 for (Symbol *sym : symtab.getSymbols())
697 if (canSuggestExternCForCXX(ref: name, def: sym->getName())) {
698 s = sym;
699 break;
700 }
701 if (s) {
702 pre_hint = " to declare ";
703 post_hint = " as extern \"C\"?";
704 return s;
705 }
706 }
707
708 return nullptr;
709}
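
// As a usage sketch: given an undefined reference to `foo_barr` and a defined
// symbol `foo_bar`, the deletion pass above (remove name[i]) produces
// "foo_bar", suggest() finds it, and the diagnostic gains a
// ">>> did you mean: foo_bar" hint (see reportUndefinedSymbol below).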
710
711static void reportUndefinedSymbol(const UndefinedDiag &undef,
712 bool correctSpelling) {
713 Undefined &sym = *undef.sym;
714
715 auto visibility = [&]() -> std::string {
716 switch (sym.visibility()) {
717 case STV_INTERNAL:
718 return "internal ";
719 case STV_HIDDEN:
720 return "hidden ";
721 case STV_PROTECTED:
722 return "protected ";
723 default:
724 return "";
725 }
726 };
727
728 std::string msg;
729 switch (config->ekind) {
730 case ELF32LEKind:
731 msg = maybeReportDiscarded<ELF32LE>(sym);
732 break;
733 case ELF32BEKind:
734 msg = maybeReportDiscarded<ELF32BE>(sym);
735 break;
736 case ELF64LEKind:
737 msg = maybeReportDiscarded<ELF64LE>(sym);
738 break;
739 case ELF64BEKind:
740 msg = maybeReportDiscarded<ELF64BE>(sym);
741 break;
742 default:
743 llvm_unreachable("");
744 }
745 if (msg.empty())
746 msg = "undefined " + visibility() + "symbol: " + toString(sym);
747
748 const size_t maxUndefReferences = 3;
749 size_t i = 0;
750 for (UndefinedDiag::Loc l : undef.locs) {
751 if (i >= maxUndefReferences)
752 break;
753 InputSectionBase &sec = *l.sec;
754 uint64_t offset = l.offset;
755
756 msg += "\n>>> referenced by ";
757 // In the absence of line number information, utilize DW_TAG_variable (if
758 // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`).
759 Symbol *enclosing = sec.getEnclosingSymbol(offset);
760 std::string src = sec.getSrcMsg(sym: enclosing ? *enclosing : sym, offset);
761 if (!src.empty())
762 msg += src + "\n>>> ";
763 msg += sec.getObjMsg(offset);
764 i++;
765 }
766
767 if (i < undef.locs.size())
768 msg += ("\n>>> referenced " + Twine(undef.locs.size() - i) + " more times")
769 .str();
770
771 if (correctSpelling) {
772 std::string pre_hint = ": ", post_hint;
773 if (const Symbol *corrected =
774 getAlternativeSpelling(sym, pre_hint, post_hint)) {
775 msg += "\n>>> did you mean" + pre_hint + toString(*corrected) + post_hint;
776 if (corrected->file)
777 msg += "\n>>> defined in: " + toString(f: corrected->file);
778 }
779 }
780
781 if (sym.getName().starts_with(Prefix: "_ZTV"))
782 msg +=
783 "\n>>> the vtable symbol may be undefined because the class is missing "
784 "its key function (see https://lld.llvm.org/missingkeyfunction)";
785 if (config->gcSections && config->zStartStopGC &&
786 sym.getName().starts_with(Prefix: "__start_")) {
787 msg += "\n>>> the encapsulation symbol needs to be retained under "
788 "--gc-sections properly; consider -z nostart-stop-gc "
789 "(see https://lld.llvm.org/ELF/start-stop-gc)";
790 }
791
792 if (undef.isWarning)
793 warn(msg);
794 else
795 error(msg, tag: ErrorTag::SymbolNotFound, args: {sym.getName()});
796}
797
798void elf::reportUndefinedSymbols() {
  // Find the first "undefined symbol" diagnostic for each symbol, and
  // collect all "referenced by" lines at the first diagnostic.
801 DenseMap<Symbol *, UndefinedDiag *> firstRef;
802 for (UndefinedDiag &undef : undefs) {
803 assert(undef.locs.size() == 1);
804 if (UndefinedDiag *canon = firstRef.lookup(Val: undef.sym)) {
805 canon->locs.push_back(x: undef.locs[0]);
806 undef.locs.clear();
807 } else
808 firstRef[undef.sym] = &undef;
809 }
810
811 // Enable spell corrector for the first 2 diagnostics.
812 for (const auto &[i, undef] : llvm::enumerate(First&: undefs))
813 if (!undef.locs.empty())
814 reportUndefinedSymbol(undef, correctSpelling: i < 2);
815 undefs.clear();
816}
817
818// Report an undefined symbol if necessary.
819// Returns true if the undefined symbol will produce an error message.
820static bool maybeReportUndefined(Undefined &sym, InputSectionBase &sec,
821 uint64_t offset) {
822 std::lock_guard<std::mutex> lock(relocMutex);
823 // If versioned, issue an error (even if the symbol is weak) because we don't
824 // know the defining filename which is required to construct a Verneed entry.
825 if (sym.hasVersionSuffix) {
826 undefs.push_back(x: {.sym: &sym, .locs: {{.sec: &sec, .offset: offset}}, .isWarning: false});
827 return true;
828 }
829 if (sym.isWeak())
830 return false;
831
832 bool canBeExternal = !sym.isLocal() && sym.visibility() == STV_DEFAULT;
833 if (config->unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal)
834 return false;
835
836 // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc
837 // which references a switch table in a discarded .rodata/.text section. The
838 // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF
839 // spec says references from outside the group to a STB_LOCAL symbol are not
840 // allowed. Work around the bug.
841 //
842 // PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible
843 // because .LC0-.LTOC is not representable if the two labels are in different
844 // .got2
845 if (sym.discardedSecIdx != 0 && (sec.name == ".got2" || sec.name == ".toc"))
846 return false;
847
848 bool isWarning =
849 (config->unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) ||
850 config->noinhibitExec;
851 undefs.push_back(x: {.sym: &sym, .locs: {{.sec: &sec, .offset: offset}}, .isWarning: isWarning});
852 return !isWarning;
853}
854
// The MIPS N32 ABI treats a series of successive relocations with the same
// offset as a single relocation. The N64 ABI takes a similar approach, but
// packs all relocations into a single relocation record. Here we emulate
// this for the N32 ABI: iterate over relocations with the same offset and
// pack their types into a single bit-set.
860template <class RelTy>
861RelType RelocationScanner::getMipsN32RelType(RelTy *&rel) const {
862 RelType type = 0;
863 uint64_t offset = rel->r_offset;
864
865 int n = 0;
866 while (rel != static_cast<const RelTy *>(end) && rel->r_offset == offset)
867 type |= (rel++)->getType(config->isMips64EL) << (8 * n++);
868 return type;
869}
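
// For instance (hypothetical input), three successive N32 records at the same
// offset with types R_MIPS_GPREL16, R_MIPS_SUB and R_MIPS_HI16 would be
// packed here into a single RelType value
//   R_MIPS_GPREL16 | (R_MIPS_SUB << 8) | (R_MIPS_HI16 << 16),
// mirroring how the N64 ABI stores r_type/r_type2/r_type3 in one record.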
870
871template <bool shard = false>
872static void addRelativeReloc(InputSectionBase &isec, uint64_t offsetInSec,
873 Symbol &sym, int64_t addend, RelExpr expr,
874 RelType type) {
875 Partition &part = isec.getPartition();
876
877 if (sym.isTagged()) {
878 std::lock_guard<std::mutex> lock(relocMutex);
879 part.relaDyn->addRelativeReloc(dynType: target->relativeRel, isec, offsetInSec, sym,
880 addend, addendRelType: type, expr);
881 // With MTE globals, we always want to derive the address tag by `ldg`-ing
882 // the symbol. When we have a RELATIVE relocation though, we no longer have
883 // a reference to the symbol. Because of this, when we have an addend that
884 // puts the result of the RELATIVE relocation out-of-bounds of the symbol
885 // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI
886 // says we should store the offset to the start of the symbol in the target
887 // field. This is described in further detail in:
888 // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative
889 if (addend < 0 || static_cast<uint64_t>(addend) >= sym.getSize())
890 isec.relocations.push_back(Elt: {.expr: expr, .type: type, .offset: offsetInSec, .addend: addend, .sym: &sym});
891 return;
892 }
893
  // Add a relative relocation. If the relrDyn section is enabled, and the
  // relocation offset is guaranteed to be even, add the relocation to
  // the relrDyn section; otherwise add it to the relaDyn section.
  // relrDyn sections don't support odd offsets. Also, relrDyn sections
  // don't store addend values, so we must write the addend to the
  // relocated address.
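  // (For reference: in the DT_RELR encoding the low bit of each entry
  // distinguishes an address word from a bitmap word, which is why odd
  // offsets cannot be encoded.)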
900 if (part.relrDyn && isec.addralign >= 2 && offsetInSec % 2 == 0) {
901 isec.addReloc(r: {.expr: expr, .type: type, .offset: offsetInSec, .addend: addend, .sym: &sym});
902 if (shard)
903 part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back(
904 Elt: {.inputSec: &isec, .relocIdx: isec.relocs().size() - 1});
905 else
906 part.relrDyn->relocs.push_back(Elt: {.inputSec: &isec, .relocIdx: isec.relocs().size() - 1});
907 return;
908 }
909 part.relaDyn->addRelativeReloc<shard>(target->relativeRel, isec, offsetInSec,
910 sym, addend, type, expr);
911}
912
913template <class PltSection, class GotPltSection>
914static void addPltEntry(PltSection &plt, GotPltSection &gotPlt,
915 RelocationBaseSection &rel, RelType type, Symbol &sym) {
916 plt.addEntry(sym);
917 gotPlt.addEntry(sym);
918 rel.addReloc({type, &gotPlt, sym.getGotPltOffset(),
919 sym.isPreemptible ? DynamicReloc::AgainstSymbol
920 : DynamicReloc::AddendOnlyWithTargetVA,
921 sym, 0, R_ABS});
922}
923
924void elf::addGotEntry(Symbol &sym) {
925 in.got->addEntry(sym);
926 uint64_t off = sym.getGotOffset();
927
928 // If preemptible, emit a GLOB_DAT relocation.
929 if (sym.isPreemptible) {
930 mainPart->relaDyn->addReloc(reloc: {target->gotRel, in.got.get(), off,
931 DynamicReloc::AgainstSymbol, sym, 0, R_ABS});
932 return;
933 }
934
935 // Otherwise, the value is either a link-time constant or the load base
936 // plus a constant.
937 if (!config->isPic || isAbsolute(sym))
938 in.got->addConstant(r: {.expr: R_ABS, .type: target->symbolicRel, .offset: off, .addend: 0, .sym: &sym});
939 else
940 addRelativeReloc(isec&: *in.got, offsetInSec: off, sym, addend: 0, expr: R_ABS, type: target->symbolicRel);
941}
942
943static void addTpOffsetGotEntry(Symbol &sym) {
944 in.got->addEntry(sym);
945 uint64_t off = sym.getGotOffset();
946 if (!sym.isPreemptible && !config->shared) {
947 in.got->addConstant(r: {.expr: R_TPREL, .type: target->symbolicRel, .offset: off, .addend: 0, .sym: &sym});
948 return;
949 }
950 mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
951 dynType: target->tlsGotRel, sec&: *in.got, offsetInSec: off, sym, addendRelType: target->symbolicRel);
952}
953
954// Return true if we can define a symbol in the executable that
955// contains the value/function of a symbol defined in a shared
956// library.
957static bool canDefineSymbolInExecutable(Symbol &sym) {
958 // If the symbol has default visibility the symbol defined in the
959 // executable will preempt it.
960 // Note that we want the visibility of the shared symbol itself, not
961 // the visibility of the symbol in the output file we are producing.
962 if (!sym.dsoProtected)
963 return true;
964
  // If we are allowed to break address equality of functions, defining
  // a plt entry will allow the program to call the function in the
  // .so, but the .so and the executable will not agree on the address
  // of the function. Similar logic for objects.
969 return ((sym.isFunc() && config->ignoreFunctionAddressEquality) ||
970 (sym.isObject() && config->ignoreDataAddressEquality));
971}
972
973// Returns true if a given relocation can be computed at link-time.
974// This only handles relocation types expected in processAux.
975//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers to a
// non-interposable function in the same executable. This function
// will return true for such relocations.
980//
981// If this function returns false, that means we need to emit a
982// dynamic relocation so that the relocation will be fixed at load-time.
983bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
984 const Symbol &sym,
985 uint64_t relOff) const {
986 // These expressions always compute a constant
987 if (oneof<R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, R_MIPS_GOT_LOCAL_PAGE,
988 R_MIPS_GOTREL, R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC,
989 R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTONLY_PC, R_GOTPLTONLY_PC,
990 R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, R_GOTPLT_PC,
991 R_PPC32_PLTREL, R_PPC64_CALL_PLT, R_PPC64_RELAX_TOC, R_RISCV_ADD,
992 R_AARCH64_GOT_PAGE, R_LOONGARCH_PLT_PAGE_PC, R_LOONGARCH_GOT,
993 R_LOONGARCH_GOT_PAGE_PC>(expr: e))
994 return true;
995
996 // These never do, except if the entire file is position dependent or if
997 // only the low bits are used.
998 if (e == R_GOT || e == R_PLT)
999 return target->usesOnlyLowPageBits(type) || !config->isPic;
1000
1001 // R_AARCH64_AUTH_ABS64 requires a dynamic relocation.
1002 if (sym.isPreemptible || e == R_AARCH64_AUTH)
1003 return false;
1004 if (!config->isPic)
1005 return true;
1006
1007 // Constant when referencing a non-preemptible symbol.
1008 if (e == R_SIZE || e == R_RISCV_LEB128)
1009 return true;
1010
1011 // For the target and the relocation, we want to know if they are
1012 // absolute or relative.
1013 bool absVal = isAbsoluteValue(sym);
1014 bool relE = isRelExpr(expr: e);
1015 if (absVal && !relE)
1016 return true;
1017 if (!absVal && relE)
1018 return true;
1019 if (!absVal && !relE)
1020 return target->usesOnlyLowPageBits(type);
1021
1022 assert(absVal && relE);
1023
1024 // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
1025 // in PIC mode. This is a little strange, but it allows us to link function
1026 // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
1027 // Normally such a call will be guarded with a comparison, which will load a
1028 // zero from the GOT.
1029 if (sym.isUndefWeak())
1030 return true;
1031
  // We set the final values of linker-script-defined symbols later. They can
  // always be computed as a link-time constant.
1034 if (sym.scriptDefined)
1035 return true;
1036
1037 error(msg: "relocation " + toString(type) + " cannot refer to absolute symbol: " +
1038 toString(sym) + getLocation(s&: *sec, sym, off: relOff));
1039 return true;
1040}
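
// Two illustrative cases (not exhaustive): a PC-relative reference to a
// non-preemptible, non-absolute symbol (e.g. R_PC) is a link-time constant
// even under -pie, while an absolute word-sized reference (R_ABS) to such a
// symbol in a PIC link is not, and will instead get a relative dynamic
// relocation in processAux below.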
1041
// The reason we have to do this early scan is as follows:
// * To mmap the output file, we need to know the size.
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by writing the output file with write(2)
// instead of mmap:
1046// * Write the allocated output sections, computing addresses.
1047// * Apply relocations, recording which ones require a dynamic reloc.
1048// * Write the dynamic relocations.
1049// * Write the rest of the file.
1050// This would have some drawbacks. For example, we would only know if .rela.dyn
1051// is needed after applying relocations. If it is, it will go after rw and rx
1052// sections. Given that it is ro, we will need an extra PT_LOAD. This
1053// complicates things for the dynamic linker and means we would have to reserve
1054// space for the extra PT_LOAD even if we end up not using it.
1055void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
1056 Symbol &sym, int64_t addend) const {
1057 // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT
1058 // indirection.
1059 const bool isIfunc = sym.isGnuIFunc();
1060 if (!sym.isPreemptible && (!isIfunc || config->zIfuncNoplt)) {
1061 if (expr != R_GOT_PC) {
1062 // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call
1063 // stub type. It should be ignored if optimized to R_PC.
1064 if (config->emachine == EM_PPC && expr == R_PPC32_PLTREL)
1065 addend &= ~0x8000;
1066 // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into
1067 // call __tls_get_addr even if the symbol is non-preemptible.
1068 if (!(config->emachine == EM_HEXAGON &&
1069 (type == R_HEX_GD_PLT_B22_PCREL ||
1070 type == R_HEX_GD_PLT_B22_PCREL_X ||
1071 type == R_HEX_GD_PLT_B32_PCREL_X)))
1072 expr = fromPlt(expr);
1073 } else if (!isAbsoluteValue(sym)) {
1074 expr =
1075 target->adjustGotPcExpr(type, addend, loc: sec->content().data() + offset);
1076 // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
1077 // needing the GOT if we can't relax everything.
1078 if (expr == R_RELAX_GOT_PC)
1079 in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);
1080 }
1081 }
1082
1083 // We were asked not to generate PLT entries for ifuncs. Instead, pass the
1084 // direct relocation on through.
1085 if (LLVM_UNLIKELY(isIfunc) && config->zIfuncNoplt) {
1086 std::lock_guard<std::mutex> lock(relocMutex);
1087 sym.exportDynamic = true;
1088 mainPart->relaDyn->addSymbolReloc(dynType: type, isec&: *sec, offsetInSec: offset, sym, addend, addendRelType: type);
1089 return;
1090 }
1091
1092 if (needsGot(expr)) {
1093 if (config->emachine == EM_MIPS) {
1094 // MIPS ABI has special rules to process GOT entries and doesn't
1095 // require relocation entries for them. A special case is TLS
1096 // relocations. In that case dynamic loader applies dynamic
1097 // relocations to initialize TLS GOT entries.
1098 // See "Global Offset Table" in Chapter 5 in the following document
1099 // for detailed description:
1100 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
1101 in.mipsGot->addEntry(file&: *sec->file, sym, addend, expr);
1102 } else if (!sym.isTls() || config->emachine != EM_LOONGARCH) {
1103 // Many LoongArch TLS relocs reuse the R_LOONGARCH_GOT type, in which
1104 // case the NEEDS_GOT flag shouldn't get set.
1105 sym.setFlags(NEEDS_GOT);
1106 }
1107 } else if (needsPlt(expr)) {
1108 sym.setFlags(NEEDS_PLT);
1109 } else if (LLVM_UNLIKELY(isIfunc)) {
1110 sym.setFlags(HAS_DIRECT_RELOC);
1111 }
1112
  // If the relocation is known to be a link-time constant, we know no dynamic
  // relocation will be created; pass control to relocateAlloc() or
  // relocateNonAlloc() to resolve it.
1116 //
1117 // The behavior of an undefined weak reference is implementation defined. For
1118 // non-link-time constants, we resolve relocations statically (let
1119 // relocate{,Non}Alloc() resolve them) for -no-pie and try producing dynamic
1120 // relocations for -pie and -shared.
1121 //
1122 // The general expectation of -no-pie static linking is that there is no
1123 // dynamic relocation (except IRELATIVE). Emitting dynamic relocations for
1124 // -shared matches the spirit of its -z undefs default. -pie has freedom on
1125 // choices, and we choose dynamic relocations to be consistent with the
1126 // handling of GOT-generating relocations.
1127 if (isStaticLinkTimeConstant(e: expr, type, sym, relOff: offset) ||
1128 (!config->isPic && sym.isUndefWeak())) {
1129 sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1130 return;
1131 }
1132
1133 // Use a simple -z notext rule that treats all sections except .eh_frame as
1134 // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
1135 // SectionBase::getOffset would incorrectly adjust the offset).
1136 //
1137 // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
1138 // conversion. We still emit a dynamic relocation.
1139 bool canWrite = (sec->flags & SHF_WRITE) ||
1140 !(config->zText ||
1141 (isa<EhInputSection>(Val: sec) && config->emachine != EM_MIPS));
1142 if (canWrite) {
1143 RelType rel = target->getDynRel(type);
1144 if (oneof<R_GOT, R_LOONGARCH_GOT>(expr) ||
1145 (rel == target->symbolicRel && !sym.isPreemptible)) {
1146 addRelativeReloc<true>(isec&: *sec, offsetInSec: offset, sym, addend, expr, type);
1147 return;
1148 }
1149 if (rel != 0) {
1150 if (config->emachine == EM_MIPS && rel == target->symbolicRel)
1151 rel = target->relativeRel;
1152 std::lock_guard<std::mutex> lock(relocMutex);
1153 Partition &part = sec->getPartition();
1154 if (config->emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64) {
1155 // For a preemptible symbol, we can't use a relative relocation. For an
1156 // undefined symbol, we can't compute offset at link-time and use a
1157 // relative relocation. Use a symbolic relocation instead.
1158 if (sym.isPreemptible) {
1159 part.relaDyn->addSymbolReloc(dynType: type, isec&: *sec, offsetInSec: offset, sym, addend, addendRelType: type);
1160 } else if (part.relrAuthDyn && sec->addralign >= 2 && offset % 2 == 0) {
1161 // When symbol values are determined in
1162 // finalizeAddressDependentContent, some .relr.auth.dyn relocations
1163 // may be moved to .rela.dyn.
1164 sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1165 part.relrAuthDyn->relocs.push_back(Elt: {.inputSec: sec, .relocIdx: sec->relocs().size() - 1});
1166 } else {
1167 part.relaDyn->addReloc(reloc: {R_AARCH64_AUTH_RELATIVE, sec, offset,
1168 DynamicReloc::AddendOnlyWithTargetVA, sym,
1169 addend, R_ABS});
1170 }
1171 return;
1172 }
1173 part.relaDyn->addSymbolReloc(dynType: rel, isec&: *sec, offsetInSec: offset, sym, addend, addendRelType: type);
1174
      // The MIPS ABI turns the usual use of the GOT and dynamic relocations
      // inside out. While the regular ABI uses dynamic relocations to fill
      // up GOT entries, the MIPS ABI requires the dynamic linker to fill up
      // GOT entries using a specially sorted dynamic symbol table. This
      // affects even dynamic relocations against symbols which do not
      // explicitly require GOT entry creation, i.e. do not have any
      // GOT-relocations. So if a preemptible symbol has a dynamic relocation
      // we have to create a GOT entry for it anyway.
      // If a non-preemptible symbol has a dynamic relocation against it, the
      // dynamic linker takes its st_value, adds the offset, and writes down
      // the result of the dynamic relocation. In the case of a preemptible
      // symbol, the dynamic linker performs symbol resolution, writes the
      // symbol value to the GOT entry, and reads the GOT entry when it needs
      // to perform a dynamic relocation.
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
1190 if (config->emachine == EM_MIPS)
1191 in.mipsGot->addEntry(file&: *sec->file, sym, addend, expr);
1192 return;
1193 }
1194 }
1195
1196 // When producing an executable, we can perform copy relocations (for
1197 // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO.
1198 // Copy relocations/canonical PLT entries are unsupported for
1199 // R_AARCH64_AUTH_ABS64.
1200 if (!config->shared && sym.isShared() &&
1201 !(config->emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64)) {
1202 if (!canDefineSymbolInExecutable(sym)) {
1203 errorOrWarn(msg: "cannot preempt symbol: " + toString(sym) +
1204 getLocation(s&: *sec, sym, off: offset));
1205 return;
1206 }
1207
1208 if (sym.isObject()) {
1209 // Produce a copy relocation.
1210 if (auto *ss = dyn_cast<SharedSymbol>(Val: &sym)) {
1211 if (!config->zCopyreloc)
1212 error(msg: "unresolvable relocation " + toString(type) +
1213 " against symbol '" + toString(*ss) +
1214 "'; recompile with -fPIC or remove '-z nocopyreloc'" +
1215 getLocation(s&: *sec, sym, off: offset));
1216 sym.setFlags(NEEDS_COPY);
1217 }
1218 sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1219 return;
1220 }
1221
    // This handles a non-PIC program calling a function in a shared library.
    // In an ideal world, we could just report an error saying the relocation
    // can overflow at runtime. In the real world with glibc, crt1.o has an
    // R_X86_64_PC32 pointing to libc.so.
1226 //
1227 // The general idea on how to handle such cases is to create a PLT entry and
1228 // use that as the function value.
1229 //
1230 // For the static linking part, we just return a plt expr and everything
1231 // else will use the PLT entry as the address.
1232 //
1233 // The remaining problem is making sure pointer equality still works. We
1234 // need the help of the dynamic linker for that. We let it know that we have
    // a direct reference to a DSO symbol by creating an undefined symbol with a
    // non-zero st_value. Seeing that, the dynamic linker resolves the symbol to
1237 // the value of the symbol we created. This is true even for got entries, so
1238 // pointer equality is maintained. To avoid an infinite loop, the only entry
1239 // that points to the real function is a dedicated got entry used by the
1240 // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
1241 // R_386_JMP_SLOT, etc).
1242
    // For a position-independent executable on i386, the plt entry requires
    // ebx to be set. This causes two problems:
1245 // * If some code has a direct reference to a function, it was probably
1246 // compiled without -fPIE/-fPIC and doesn't maintain ebx.
1247 // * If a library definition gets preempted to the executable, it will have
1248 // the wrong ebx value.
1249 if (sym.isFunc()) {
1250 if (config->pie && config->emachine == EM_386)
1251 errorOrWarn(msg: "symbol '" + toString(sym) +
1252 "' cannot be preempted; recompile with -fPIE" +
1253 getLocation(s&: *sec, sym, off: offset));
1254 sym.setFlags(NEEDS_COPY | NEEDS_PLT);
1255 sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1256 return;
1257 }
1258 }
1259
1260 errorOrWarn(msg: "relocation " + toString(type) + " cannot be used against " +
1261 (sym.getName().empty() ? "local symbol"
1262 : "symbol '" + toString(sym) + "'") +
1263 "; recompile with -fPIC" + getLocation(s&: *sec, sym, off: offset));
1264}
1265
// This function is similar to handleTlsRelocation. MIPS does not support
// any relaxations for TLS relocations, so by factoring the MIPS handling out
// into a separate function we can simplify the code and avoid polluting
// handleTlsRelocation with MIPS-specific `if` statements.
// MIPS has a custom MipsGotSection that handles the writing of GOT entries
// without dynamic relocations.
1272static unsigned handleMipsTlsRelocation(RelType type, Symbol &sym,
1273 InputSectionBase &c, uint64_t offset,
1274 int64_t addend, RelExpr expr) {
1275 if (expr == R_MIPS_TLSLD) {
1276 in.mipsGot->addTlsIndex(file&: *c.file);
1277 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1278 return 1;
1279 }
1280 if (expr == R_MIPS_TLSGD) {
1281 in.mipsGot->addDynTlsEntry(file&: *c.file, sym);
1282 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1283 return 1;
1284 }
1285 return 0;
1286}
1287
// Notes about the General Dynamic and Local Dynamic TLS models below. They
// may require the generation of a pair of GOT entries that have associated
// dynamic relocations. The pair of GOT entries created is of the form:
//   GOT[e0]  module index (used to find the pointer to the TLS block at
//            run-time)
//   GOT[e1]  offset of the symbol in the TLS block
1293//
1294// Returns the number of relocations processed.
1295static unsigned handleTlsRelocation(RelType type, Symbol &sym,
1296 InputSectionBase &c, uint64_t offset,
1297 int64_t addend, RelExpr expr) {
1298 if (expr == R_TPREL || expr == R_TPREL_NEG) {
1299 if (config->shared) {
1300 errorOrWarn(msg: "relocation " + toString(type) + " against " + toString(sym) +
1301 " cannot be used with -shared" + getLocation(s&: c, sym, off: offset));
1302 return 1;
1303 }
1304 return 0;
1305 }
1306
1307 if (config->emachine == EM_MIPS)
1308 return handleMipsTlsRelocation(type, sym, c, offset, addend, expr);
1309
1310 // LoongArch does not yet implement transition from TLSDESC to LE/IE, so
1311 // generate TLSDESC dynamic relocation for the dynamic linker to handle.
1312 if (config->emachine == EM_LOONGARCH &&
1313 oneof<R_LOONGARCH_TLSDESC_PAGE_PC, R_TLSDESC, R_TLSDESC_PC,
1314 R_TLSDESC_CALL>(expr)) {
1315 if (expr != R_TLSDESC_CALL) {
1316 sym.setFlags(NEEDS_TLSDESC);
1317 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1318 }
1319 return 1;
1320 }
1321
1322 bool isRISCV = config->emachine == EM_RISCV;
1323
1324 if (oneof<R_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
1325 R_TLSDESC_GOTPLT>(expr) &&
1326 config->shared) {
1327 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not
1328 // set NEEDS_TLSDESC on the label.
1329 if (expr != R_TLSDESC_CALL) {
1330 if (!isRISCV || type == R_RISCV_TLSDESC_HI20)
1331 sym.setFlags(NEEDS_TLSDESC);
1332 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1333 }
1334 return 1;
1335 }
1336
1337 // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE
1338 // optimizations.
1339 // RISC-V supports TLSDESC to IE/LE optimizations.
1340 // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable
1341 // optimization as well.
1342 bool execOptimize =
1343 !config->shared && config->emachine != EM_ARM &&
1344 config->emachine != EM_HEXAGON && config->emachine != EM_LOONGARCH &&
1345 !(isRISCV && expr != R_TLSDESC_PC && expr != R_TLSDESC_CALL) &&
1346 !c.file->ppc64DisableTLSRelax;
1347
  // If we are producing an executable and the symbol is non-preemptible, it
  // must be defined and the code sequence can be optimized to use Local-Exec.
1350 //
  // ARM and RISC-V do not support any relaxations for TLS relocations;
  // however, we can omit the DTPMOD dynamic relocations and resolve them at
  // link time because they are always 1. This may be necessary for static
  // linking as DTPMOD may not be expected at load time.
1355 bool isLocalInExecutable = !sym.isPreemptible && !config->shared;
1356
1357 // Local Dynamic is for access to module local TLS variables, while still
1358 // being suitable for being dynamically loaded via dlopen. GOT[e0] is the
1359 // module index, with a special value of 0 for the current module. GOT[e1] is
1360 // unused. There only needs to be one module index entry.
1361 if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) {
1362 // Local-Dynamic relocs can be optimized to Local-Exec.
1363 if (execOptimize) {
1364 c.addReloc(r: {.expr: target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE), .type: type,
1365 .offset: offset, .addend: addend, .sym: &sym});
1366 return target->getTlsGdRelaxSkip(type);
1367 }
1368 if (expr == R_TLSLD_HINT)
1369 return 1;
1370 ctx.needsTlsLd.store(i: true, m: std::memory_order_relaxed);
1371 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1372 return 1;
1373 }
1374
1375 // Local-Dynamic relocs can be optimized to Local-Exec.
1376 if (expr == R_DTPREL) {
1377 if (execOptimize)
1378 expr = target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE);
1379 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1380 return 1;
1381 }
1382
  // Local-Dynamic sequence where the offset of the TLS variable relative to the
  // dynamic thread pointer is stored in the GOT. This cannot be optimized to
  // Local-Exec.
1386 if (expr == R_TLSLD_GOT_OFF) {
1387 sym.setFlags(NEEDS_GOT_DTPREL);
1388 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1389 return 1;
1390 }
1391
1392 if (oneof<R_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
1393 R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC,
1394 R_LOONGARCH_TLSGD_PAGE_PC>(expr)) {
1395 if (!execOptimize) {
1396 sym.setFlags(NEEDS_TLSGD);
1397 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1398 return 1;
1399 }
1400
    // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec
    // depending on whether the symbol is locally defined.
1403 //
1404 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible
1405 // label, so TLSDESC=>IE will be categorized as R_RELAX_TLS_GD_TO_LE. We fix
1406 // the categorization in RISCV::relocateAlloc.
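    //
    // As a rough illustration (padding prefixes omitted), the x86-64
    // General-Dynamic call sequence
    //   leaq x@tlsgd(%rip), %rdi
    //   call __tls_get_addr@plt
    // can be rewritten by the linker to Initial-Exec
    //   movq %fs:0, %rax
    //   addq x@gottpoff(%rip), %rax
    // or, for a locally defined symbol, to Local-Exec
    //   movq %fs:0, %rax
    //   leaq x@tpoff(%rax), %rax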
1407 if (sym.isPreemptible) {
1408 sym.setFlags(NEEDS_TLSGD_TO_IE);
1409 c.addReloc(r: {.expr: target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_IE), .type: type,
1410 .offset: offset, .addend: addend, .sym: &sym});
1411 } else {
1412 c.addReloc(r: {.expr: target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_LE), .type: type,
1413 .offset: offset, .addend: addend, .sym: &sym});
1414 }
1415 return target->getTlsGdRelaxSkip(type);
1416 }
1417
1418 if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, R_AARCH64_GOT_PAGE_PC,
1419 R_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) {
1420 ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed);
1421 // Initial-Exec relocs can be optimized to Local-Exec if the symbol is
1422 // locally defined. This is not supported on SystemZ.
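    // As an illustration (other targets use analogous rewrites), the x86-64
    // Initial-Exec load
    //   movq x@gottpoff(%rip), %rax
    // is rewritten to the Local-Exec form
    //   movq $x@tpoff, %rax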
1423 if (execOptimize && isLocalInExecutable && config->emachine != EM_S390) {
1424 c.addReloc(r: {.expr: R_RELAX_TLS_IE_TO_LE, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1425 } else if (expr != R_TLSIE_HINT) {
1426 sym.setFlags(NEEDS_TLSIE);
1427 // R_GOT needs a relative relocation for PIC on i386 and Hexagon.
1428 if (expr == R_GOT && config->isPic && !target->usesOnlyLowPageBits(type))
1429 addRelativeReloc<true>(isec&: c, offsetInSec: offset, sym, addend, expr, type);
1430 else
1431 c.addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
1432 }
1433 return 1;
1434 }
1435
1436 return 0;
1437}
1438
1439template <class ELFT, class RelTy>
1440void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) {
1441 const RelTy &rel = *i;
1442 uint32_t symIndex = rel.getSymbol(config->isMips64EL);
1443 Symbol &sym = sec->getFile<ELFT>()->getSymbol(symIndex);
1444 RelType type;
1445 if constexpr (ELFT::Is64Bits || RelTy::IsCrel) {
1446 type = rel.getType(config->isMips64EL);
1447 ++i;
1448 } else {
1449 // CREL is unsupported for MIPS N32.
1450 if (config->mipsN32Abi) {
1451 type = getMipsN32RelType(i);
1452 } else {
1453 type = rel.getType(config->isMips64EL);
1454 ++i;
1455 }
1456 }
  // Get the offset in the output section that this relocation is applied to.
1458 uint64_t offset = getter.get(off: rel.r_offset);
1459 if (offset == uint64_t(-1))
1460 return;
1461
1462 RelExpr expr = target->getRelExpr(type, s: sym, loc: sec->content().data() + offset);
1463 int64_t addend = RelTy::HasAddend
1464 ? getAddend<ELFT>(rel)
1465 : target->getImplicitAddend(
1466 buf: sec->content().data() + rel.r_offset, type);
1467 if (LLVM_UNLIKELY(config->emachine == EM_MIPS))
1468 addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal());
1469 else if (config->emachine == EM_PPC64 && config->isPic && type == R_PPC64_TOC)
1470 addend += getPPC64TocBase();
1471
1472 // Ignore R_*_NONE and other marker relocations.
1473 if (expr == R_NONE)
1474 return;
1475
1476 // Error if the target symbol is undefined. Symbol index 0 may be used by
1477 // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them.
1478 if (sym.isUndefined() && symIndex != 0 &&
1479 maybeReportUndefined(sym&: cast<Undefined>(Val&: sym), sec&: *sec, offset))
1480 return;
1481
1482 if (config->emachine == EM_PPC64) {
    // We can separate the small code model relocations into 2 categories:
    // 1) Those that access the compiler-generated .toc sections.
    // 2) Those that access the linker-allocated GOT entries.
    // lld allocates GOT entries to symbols on demand. Since we don't try to
    // sort the GOT entries in any way, we don't have to track which objects
    // have GOT-based small code model relocs. The .toc sections are placed
    // after the end of the linker-allocated .got section, and we do sort those,
    // so sections addressed with small code model relocations come first.
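    //
    // As a sketch (instruction selection varies by compiler), a small code
    // model access to a .toc entry is addressed directly off r2, the TOC
    // pointer:
    //   ld r4, x@toc(r2)           # R_PPC64_TOC16_DS
    // whereas a GOT-based access goes through a linker-allocated entry:
    //   addis r4, r2, x@got@ha     # R_PPC64_GOT16_HA
    //   ld    r4, x@got@l(r4)      # R_PPC64_GOT16_LO_DS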
1491 if (type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS)
1492 sec->file->ppc64SmallCodeModelTocRelocs = true;
1493
1494 // Record the TOC entry (.toc + addend) as not relaxable. See the comment in
1495 // InputSectionBase::relocateAlloc().
1496 if (type == R_PPC64_TOC16_LO && sym.isSection() && isa<Defined>(Val: sym) &&
1497 cast<Defined>(Val&: sym).section->name == ".toc")
1498 ppc64noTocRelax.insert(V: {&sym, addend});
1499
1500 if ((type == R_PPC64_TLSGD && expr == R_TLSDESC_CALL) ||
1501 (type == R_PPC64_TLSLD && expr == R_TLSLD_HINT)) {
1502 // Skip the error check for CREL, which does not set `end`.
1503 if constexpr (!RelTy::IsCrel) {
1504 if (i == end) {
1505 errorOrWarn(msg: "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last "
1506 "relocation" +
1507 getLocation(s&: *sec, sym, off: offset));
1508 return;
1509 }
1510 }
1511
      // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC
      // case, so we can later discern it from the TOC case.
1514 if (i->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC)
1515 ++offset;
1516 }
1517 }
1518
  // If the relocation does not emit a GOT or GOTPLT entry but its computation
  // uses their addresses, we need the GOT or GOTPLT to be created.
  //
  // The types computed relative to GOTPLT are all x86 and x86-64 specific.
1523 if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT,
1524 R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) {
1525 in.gotPlt->hasGotPltOffRel.store(i: true, m: std::memory_order_relaxed);
1526 } else if (oneof<R_GOTONLY_PC, R_GOTREL, R_PPC32_PLTREL, R_PPC64_TOCBASE,
1527 R_PPC64_RELAX_TOC>(expr)) {
1528 in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);
1529 }
1530
1531 // Process TLS relocations, including TLS optimizations. Note that
1532 // R_TPREL and R_TPREL_NEG relocations are resolved in processAux.
1533 //
  // Some RISC-V TLSDESC relocations reference a local NOTYPE symbol,
  // but we still need to process them in handleTlsRelocation.
1536 if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) {
1537 if (unsigned processed =
1538 handleTlsRelocation(type, sym, c&: *sec, offset, addend, expr)) {
1539 i += processed - 1;
1540 return;
1541 }
1542 }
1543
1544 processAux(expr, type, offset, sym, addend);
1545}
1546
1547// R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for
1548// General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is
1549// found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the
1550// instructions are generated by very old IBM XL compilers. Work around the
1551// issue by disabling GD/LD to IE/LE relaxation.
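//
// As a rough sketch, the expected General-Dynamic sequence looks like:
//   addis r3, r2, x@got@tlsgd@ha   # R_PPC64_GOT_TLSGD16_HA
//   addi  r3, r3, x@got@tlsgd@l    # R_PPC64_GOT_TLSGD16_LO
//   bl    __tls_get_addr(x@tlsgd)  # R_PPC64_TLSGD + R_PPC64_REL24
//   nop
// Without the R_PPC64_TLSGD marker on the bl, the call cannot be safely
// rewritten, so relaxation must be disabled for the whole file.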
1552template <class RelTy>
1553static void checkPPC64TLSRelax(InputSectionBase &sec, Relocs<RelTy> rels) {
1554 // Skip if sec is synthetic (sec.file is null) or if sec has been marked.
1555 if (!sec.file || sec.file->ppc64DisableTLSRelax)
1556 return;
1557 bool hasGDLD = false;
1558 for (const RelTy &rel : rels) {
1559 RelType type = rel.getType(false);
1560 switch (type) {
1561 case R_PPC64_TLSGD:
1562 case R_PPC64_TLSLD:
1563 return; // Found a marker
1564 case R_PPC64_GOT_TLSGD16:
1565 case R_PPC64_GOT_TLSGD16_HA:
1566 case R_PPC64_GOT_TLSGD16_HI:
1567 case R_PPC64_GOT_TLSGD16_LO:
1568 case R_PPC64_GOT_TLSLD16:
1569 case R_PPC64_GOT_TLSLD16_HA:
1570 case R_PPC64_GOT_TLSLD16_HI:
1571 case R_PPC64_GOT_TLSLD16_LO:
1572 hasGDLD = true;
1573 break;
1574 }
1575 }
1576 if (hasGDLD) {
1577 sec.file->ppc64DisableTLSRelax = true;
1578 warn(msg: toString(f: sec.file) +
1579 ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations without "
1580 "R_PPC64_TLSGD/R_PPC64_TLSLD relocations");
1581 }
1582}
1583
1584template <class ELFT, class RelTy>
1585void RelocationScanner::scan(Relocs<RelTy> rels) {
  // Not all relocations end up in sec->relocations, but a lot do.
1587 sec->relocations.reserve(N: rels.size());
1588
1589 if (config->emachine == EM_PPC64)
1590 checkPPC64TLSRelax<RelTy>(*sec, rels);
1591
1592 // For EhInputSection, OffsetGetter expects the relocations to be sorted by
1593 // r_offset. In rare cases (.eh_frame pieces are reordered by a linker
1594 // script), the relocations may be unordered.
1595 // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
1596 // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
1597 SmallVector<RelTy, 0> storage;
1598 if (isa<EhInputSection>(Val: sec) || config->emachine == EM_S390)
1599 rels = sortRels(rels, storage);
1600
1601 if constexpr (RelTy::IsCrel) {
1602 for (auto i = rels.begin(); i != rels.end();)
1603 scanOne<ELFT, RelTy>(i);
1604 } else {
    // The non-CREL code path has an additional check for PPC64 TLS.
1606 end = static_cast<const void *>(rels.end());
1607 for (auto i = rels.begin(); i != end;)
1608 scanOne<ELFT, RelTy>(i);
1609 }
1610
1611 // Sort relocations by offset for more efficient searching for
1612 // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64.
1613 if (config->emachine == EM_RISCV ||
1614 (config->emachine == EM_PPC64 && sec->name == ".toc"))
1615 llvm::stable_sort(sec->relocs(),
1616 [](const Relocation &lhs, const Relocation &rhs) {
1617 return lhs.offset < rhs.offset;
1618 });
1619}
1620
1621template <class ELFT>
1622void RelocationScanner::scanSection(InputSectionBase &s, bool isEH) {
1623 sec = &s;
1624 getter = OffsetGetter(s);
1625 const RelsOrRelas<ELFT> rels = s.template relsOrRelas<ELFT>(!isEH);
1626 if (rels.areRelocsCrel())
1627 scan<ELFT>(rels.crels);
1628 else if (rels.areRelocsRel())
1629 scan<ELFT>(rels.rels);
1630 else
1631 scan<ELFT>(rels.relas);
1632}
1633
1634template <class ELFT> void elf::scanRelocations() {
1635 // Scan all relocations. Each relocation goes through a series of tests to
1636 // determine if it needs special treatment, such as creating GOT, PLT,
1637 // copy relocations, etc. Note that relocations for non-alloc sections are
1638 // directly processed by InputSection::relocateNonAlloc.
1639
  // Deterministic parallelism requires sorting the relocations, which is
  // unsuitable for -z nocombreloc. MIPS and PPC64 use global state, which is
  // not suitable for parallelism either.
1643 bool serial = !config->zCombreloc || config->emachine == EM_MIPS ||
1644 config->emachine == EM_PPC64;
1645 parallel::TaskGroup tg;
1646 for (ELFFileBase *f : ctx.objectFiles) {
1647 auto fn = [f]() {
1648 RelocationScanner scanner;
1649 for (InputSectionBase *s : f->getSections()) {
1650 if (s && s->kind() == SectionBase::Regular && s->isLive() &&
1651 (s->flags & SHF_ALLOC) &&
1652 !(s->type == SHT_ARM_EXIDX && config->emachine == EM_ARM))
1653 scanner.template scanSection<ELFT>(*s);
1654 }
1655 };
1656 tg.spawn(f: fn, Sequential: serial);
1657 }
1658
1659 tg.spawn(f: [] {
1660 RelocationScanner scanner;
1661 for (Partition &part : partitions) {
1662 for (EhInputSection *sec : part.ehFrame->sections)
1663 scanner.template scanSection<ELFT>(*sec, /*isEH=*/true);
1664 if (part.armExidx && part.armExidx->isLive())
1665 for (InputSection *sec : part.armExidx->exidxSections)
1666 if (sec->isLive())
1667 scanner.template scanSection<ELFT>(*sec);
1668 }
1669 });
1670}
1671
1672static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) {
1673 // Handle a reference to a non-preemptible ifunc. These are special in a
1674 // few ways:
1675 //
1676 // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have
1677 // a fixed value. But assuming that all references to the ifunc are
1678 // GOT-generating or PLT-generating, the handling of an ifunc is
1679 // relatively straightforward. We create a PLT entry in Iplt, which is
1680 // usually at the end of .plt, which makes an indirect call using a
1681 // matching GOT entry in igotPlt, which is usually at the end of .got.plt.
1682 // The GOT entry is relocated using an IRELATIVE relocation in relaDyn,
1683 // which is usually at the end of .rela.dyn.
1684 //
1685 // - Despite the fact that an ifunc does not have a fixed value, compilers
1686 // that are not passed -fPIC will assume that they do, and will emit
1687 // direct (non-GOT-generating, non-PLT-generating) relocations to the
1688 // symbol. This means that if a direct relocation to the symbol is
1689 // seen, the linker must set a value for the symbol, and this value must
1690 // be consistent no matter what type of reference is made to the symbol.
1691 // This can be done by creating a PLT entry for the symbol in the way
1692 // described above and making it canonical, that is, making all references
1693 // point to the PLT entry instead of the resolver. In lld we also store
1694 // the address of the PLT entry in the dynamic symbol table, which means
1695 // that the symbol will also have the same value in other modules.
1696 // Because the value loaded from the GOT needs to be consistent with
1697 // the value computed using a direct relocation, a non-preemptible ifunc
1698 // may end up with two GOT entries, one in .got.plt that points to the
1699 // address returned by the resolver and is used only by the PLT entry,
1700 // and another in .got that points to the PLT entry and is used by
1701 // GOT-generating relocations.
1702 //
1703 // - The fact that these symbols do not have a fixed value makes them an
1704 // exception to the general rule that a statically linked executable does
1705 // not require any form of dynamic relocation. To handle these relocations
1706 // correctly, the IRELATIVE relocations are stored in an array which a
1707 // statically linked executable's startup code must enumerate using the
1708 // linker-defined symbols __rela?_iplt_{start,end}.
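  //
  // For illustration only (names are hypothetical), such an ifunc typically
  // originates from C code like:
  //   static int foo_generic(void) { return 1; }
  //   static int foo_avx2(void) { return 2; }
  //   static void *foo_resolver(void) {
  //     return cpu_supports_avx2() ? (void *)foo_avx2 : (void *)foo_generic;
  //   }
  //   int foo(void) __attribute__((ifunc("foo_resolver")));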
1709 if (!sym.isGnuIFunc() || sym.isPreemptible || config->zIfuncNoplt)
1710 return false;
1711 // Skip unreferenced non-preemptible ifunc.
1712 if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC)))
1713 return true;
1714
1715 sym.isInIplt = true;
1716
  // Create an Iplt and the associated IRELATIVE relocation pointing to the
  // original section/value pair. For the non-GOT, non-PLT relocation case
  // below, we may alter section/value, so create a copy of the symbol to keep
  // section/value fixed.
1721 //
1722 // Prior to Android V, there was a bug that caused RELR relocations to be
1723 // applied after packed relocations. This meant that resolvers referenced by
1724 // IRELATIVE relocations in the packed relocation section would read
1725 // unrelocated globals with RELR relocations when
1726 // --pack-relative-relocs=android+relr is enabled. Work around this by placing
1727 // IRELATIVE in .rela.plt.
1728 auto *directSym = makeDefined(args&: cast<Defined>(Val&: sym));
1729 directSym->allocateAux();
1730 auto &dyn = config->androidPackDynRelocs ? *in.relaPlt : *mainPart->relaDyn;
1731 addPltEntry(plt&: *in.iplt, gotPlt&: *in.igotPlt, rel&: dyn, type: target->iRelativeRel, sym&: *directSym);
1732 sym.allocateAux();
1733 symAux.back().pltIdx = symAux[directSym->auxIdx].pltIdx;
1734
1735 if (flags & HAS_DIRECT_RELOC) {
1736 // Change the value to the IPLT and redirect all references to it.
1737 auto &d = cast<Defined>(Val&: sym);
1738 d.section = in.iplt.get();
1739 d.value = d.getPltIdx() * target->ipltEntrySize;
1740 d.size = 0;
1741 // It's important to set the symbol type here so that dynamic loaders
1742 // don't try to call the PLT as if it were an ifunc resolver.
1743 d.type = STT_FUNC;
1744
1745 if (flags & NEEDS_GOT)
1746 addGotEntry(sym);
1747 } else if (flags & NEEDS_GOT) {
1748 // Redirect GOT accesses to point to the Igot.
1749 sym.gotInIgot = true;
1750 }
1751 return true;
1752}
1753
1754void elf::postScanRelocations() {
1755 auto fn = [](Symbol &sym) {
1756 auto flags = sym.flags.load(m: std::memory_order_relaxed);
1757 if (handleNonPreemptibleIfunc(sym, flags))
1758 return;
1759
1760 if (sym.isTagged() && sym.isDefined())
1761 mainPart->memtagGlobalDescriptors->addSymbol(sym);
1762
1763 if (!sym.needsDynReloc())
1764 return;
1765 sym.allocateAux();
1766
1767 if (flags & NEEDS_GOT)
1768 addGotEntry(sym);
1769 if (flags & NEEDS_PLT)
1770 addPltEntry(plt&: *in.plt, gotPlt&: *in.gotPlt, rel&: *in.relaPlt, type: target->pltRel, sym);
1771 if (flags & NEEDS_COPY) {
1772 if (sym.isObject()) {
1773 invokeELFT(addCopyRelSymbol, cast<SharedSymbol>(sym));
1774 // NEEDS_COPY is cleared for sym and its aliases so that in
1775 // later iterations aliases won't cause redundant copies.
1776 assert(!sym.hasFlag(NEEDS_COPY));
1777 } else {
1778 assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
1779 if (!sym.isDefined()) {
1780 replaceWithDefined(sym, sec&: *in.plt,
1781 value: target->pltHeaderSize +
1782 target->pltEntrySize * sym.getPltIdx(),
1783 size: 0);
1784 sym.setFlags(NEEDS_COPY);
1785 if (config->emachine == EM_PPC) {
1786 // PPC32 canonical PLT entries are at the beginning of .glink
1787 cast<Defined>(Val&: sym).value = in.plt->headerSize;
1788 in.plt->headerSize += 16;
1789 cast<PPC32GlinkSection>(Val&: *in.plt).canonical_plts.push_back(Elt: &sym);
1790 }
1791 }
1792 }
1793 }
1794
1795 if (!sym.isTls())
1796 return;
1797 bool isLocalInExecutable = !sym.isPreemptible && !config->shared;
1798 GotSection *got = in.got.get();
1799
1800 if (flags & NEEDS_TLSDESC) {
1801 got->addTlsDescEntry(sym);
1802 mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
1803 dynType: target->tlsDescRel, sec&: *got, offsetInSec: got->getTlsDescOffset(sym), sym,
1804 addendRelType: target->tlsDescRel);
1805 }
1806 if (flags & NEEDS_TLSGD) {
1807 got->addDynTlsEntry(sym);
1808 uint64_t off = got->getGlobalDynOffset(b: sym);
1809 if (isLocalInExecutable)
1810 // Write one to the GOT slot.
1811 got->addConstant(r: {.expr: R_ADDEND, .type: target->symbolicRel, .offset: off, .addend: 1, .sym: &sym});
1812 else
1813 mainPart->relaDyn->addSymbolReloc(dynType: target->tlsModuleIndexRel, isec&: *got, offsetInSec: off,
1814 sym);
1815
1816 // If the symbol is preemptible we need the dynamic linker to write
1817 // the offset too.
1818 uint64_t offsetOff = off + config->wordsize;
1819 if (sym.isPreemptible)
1820 mainPart->relaDyn->addSymbolReloc(dynType: target->tlsOffsetRel, isec&: *got, offsetInSec: offsetOff,
1821 sym);
1822 else
1823 got->addConstant(r: {.expr: R_ABS, .type: target->tlsOffsetRel, .offset: offsetOff, .addend: 0, .sym: &sym});
1824 }
1825 if (flags & NEEDS_TLSGD_TO_IE) {
1826 got->addEntry(sym);
1827 mainPart->relaDyn->addSymbolReloc(dynType: target->tlsGotRel, isec&: *got,
1828 offsetInSec: sym.getGotOffset(), sym);
1829 }
1830 if (flags & NEEDS_GOT_DTPREL) {
1831 got->addEntry(sym);
1832 got->addConstant(
1833 r: {.expr: R_ABS, .type: target->tlsOffsetRel, .offset: sym.getGotOffset(), .addend: 0, .sym: &sym});
1834 }
1835
1836 if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
1837 addTpOffsetGotEntry(sym);
1838 };
1839
1840 GotSection *got = in.got.get();
1841 if (ctx.needsTlsLd.load(m: std::memory_order_relaxed) && got->addTlsIndex()) {
1842 static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
1843 if (config->shared)
1844 mainPart->relaDyn->addReloc(
1845 reloc: {target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
1846 else
1847 got->addConstant(
1848 r: {.expr: R_ADDEND, .type: target->symbolicRel, .offset: got->getTlsIndexOff(), .addend: 1, .sym: &dummy});
1849 }
1850
1851 assert(symAux.size() == 1);
1852 for (Symbol *sym : symtab.getSymbols())
1853 fn(*sym);
1854
1855 // Local symbols may need the aforementioned non-preemptible ifunc and GOT
1856 // handling. They don't need regular PLT.
1857 for (ELFFileBase *file : ctx.objectFiles)
1858 for (Symbol *sym : file->getLocalSymbols())
1859 fn(*sym);
1860}
1861
1862static bool mergeCmp(const InputSection *a, const InputSection *b) {
1863 // std::merge requires a strict weak ordering.
1864 if (a->outSecOff < b->outSecOff)
1865 return true;
1866
1867 // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection.
1868 if (a->outSecOff == b->outSecOff && a != b) {
1869 auto *ta = dyn_cast<ThunkSection>(Val: a);
1870 auto *tb = dyn_cast<ThunkSection>(Val: b);
1871
    // Check if the Thunk is immediately before a specific target
    // InputSection, for example Mips LA25 Thunks.
1874 if (ta && ta->getTargetInputSection() == b)
1875 return true;
1876
1877 // Place Thunk Sections without specific targets before
1878 // non-Thunk Sections.
1879 if (ta && !tb && !ta->getTargetInputSection())
1880 return true;
1881 }
1882
1883 return false;
1884}
1885
1886// Call Fn on every executable InputSection accessed via the linker script
1887// InputSectionDescription::Sections.
1888static void forEachInputSectionDescription(
1889 ArrayRef<OutputSection *> outputSections,
1890 llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) {
1891 for (OutputSection *os : outputSections) {
1892 if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR))
1893 continue;
1894 for (SectionCommand *bc : os->commands)
1895 if (auto *isd = dyn_cast<InputSectionDescription>(Val: bc))
1896 fn(os, isd);
1897 }
1898}
1899
1900// Thunk Implementation
1901//
// Thunks (sometimes called stubs, veneers or branch islands) are small pieces
// of code that the linker inserts in between a caller and a callee. The thunks
// are added at link time rather than compile time because the decision on
// whether a thunk is needed, such as the caller and callee being out of range,
// can only be made at link time.
1907//
1908// It is straightforward to tell given the current state of the program when a
1909// thunk is needed for a particular call. The more difficult part is that
1910// the thunk needs to be placed in the program such that the caller can reach
1911// the thunk and the thunk can reach the callee; furthermore, adding thunks to
1912// the program alters addresses, which can mean more thunks etc.
1913//
1914// In lld we have a synthetic ThunkSection that can hold many Thunks.
1915// The decision to have a ThunkSection act as a container means that we can
1916// more easily handle the most common case of a single block of contiguous
1917// Thunks by inserting just a single ThunkSection.
1918//
// The implementation of Thunks in lld is split across these areas:
// Relocations.cpp : Framework for creating and placing thunks
// Thunks.cpp : The code generated for each supported thunk
// Target.cpp : Target-specific hooks that the framework uses to decide when
//              a thunk is used
// SyntheticSections.cpp : Implementation of ThunkSection
// Writer.cpp : Iteratively calls the framework until no more Thunks are added
1926//
// Thunk placement requirements:
// Mips LA25 thunks. These must be placed immediately before the callee section.
// We can assume that the caller is in range of the Thunk. These are modelled
// by Thunks that return the section they must precede with
// getTargetInputSection().
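//
// As a rough sketch, an LA25 thunk for a callee `func` looks like:
//   lui   $25, %hi(func)
//   j     func
//   addiu $25, $25, %lo(func)   # executed in the jump's delay slot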
1932//
1933// ARM interworking and range extension thunks. These thunks must be placed
1934// within range of the caller. All implemented ARM thunks can always reach the
1935// callee as they use an indirect jump via a register that has no range
1936// restrictions.
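//
// For example (one of several variants), an ARMv7 absolute long-branch thunk
// is roughly:
//   movw ip, :lower16:callee
//   movt ip, :upper16:callee
//   bx   ip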
1937//
1938// Thunk placement algorithm:
// For Mips LA25 ThunkSections the placement is explicit: they have to be
// placed immediately before the section returned by getTargetInputSection().
1941//
1942// For thunks that must be placed within range of the caller there are many
1943// possible choices given that the maximum range from the caller is usually
1944// much larger than the average InputSection size. Desirable properties include:
1945// - Maximize reuse of thunks by multiple callers
1946// - Minimize number of ThunkSections to simplify insertion
1947// - Handle impact of already added Thunks on addresses
1948// - Simple to understand and implement
1949//
1950// In lld for the first pass, we pre-create one or more ThunkSections per
1951// InputSectionDescription at Target specific intervals. A ThunkSection is
1952// placed so that the estimated end of the ThunkSection is within range of the
1953// start of the InputSectionDescription or the previous ThunkSection. For
1954// example:
1955// InputSectionDescription
1956// Section 0
1957// ...
1958// Section N
1959// ThunkSection 0
1960// Section N + 1
1961// ...
1962// Section N + K
1963// Thunk Section 1
1964//
1965// The intention is that we can add a Thunk to a ThunkSection that is well
1966// spaced enough to service a number of callers without having to do a lot
1967// of work. An important principle is that it is not an error if a Thunk cannot
1968// be placed in a pre-created ThunkSection; when this happens we create a new
1969// ThunkSection placed next to the caller. This allows us to handle the vast
1970// majority of thunks simply, but also handle rare cases where the branch range
1971// is smaller than the target specific spacing.
1972//
1973// The algorithm is expected to create all the thunks that are needed in a
1974// single pass, with a small number of programs needing a second pass due to
1975// the insertion of thunks in the first pass increasing the offset between
1976// callers and callees that were only just in range.
1977//
1978// A consequence of allowing new ThunkSections to be created outside of the
1979// pre-created ThunkSections is that in rare cases calls to Thunks that were in
1980// range in pass K, are out of range in some pass > K due to the insertion of
1981// more Thunks in between the caller and callee. When this happens we retarget
1982// the relocation back to the original target and create another Thunk.
1983
// Remove ThunkSections that are empty; these should only be the initial set
// precreated on pass 0.
1986
1987// Insert the Thunks for OutputSection OS into their designated place
1988// in the Sections vector, and recalculate the InputSection output section
1989// offsets.
// This may invalidate any output section offsets stored outside of InputSection.
1991void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) {
1992 forEachInputSectionDescription(
1993 outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
1994 if (isd->thunkSections.empty())
1995 return;
1996
        // Remove any zero-sized precreated ThunkSections.
1998 llvm::erase_if(C&: isd->thunkSections,
1999 P: [](const std::pair<ThunkSection *, uint32_t> &ts) {
2000 return ts.first->getSize() == 0;
2001 });
2002
2003 // ISD->ThunkSections contains all created ThunkSections, including
2004 // those inserted in previous passes. Extract the Thunks created this
2005 // pass and order them in ascending outSecOff.
2006 std::vector<ThunkSection *> newThunks;
2007 for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections)
2008 if (ts.second == pass)
2009 newThunks.push_back(x: ts.first);
2010 llvm::stable_sort(Range&: newThunks,
2011 C: [](const ThunkSection *a, const ThunkSection *b) {
2012 return a->outSecOff < b->outSecOff;
2013 });
2014
2015 // Merge sorted vectors of Thunks and InputSections by outSecOff
2016 SmallVector<InputSection *, 0> tmp;
2017 tmp.reserve(N: isd->sections.size() + newThunks.size());
2018
2019 std::merge(first1: isd->sections.begin(), last1: isd->sections.end(),
2020 first2: newThunks.begin(), last2: newThunks.end(), result: std::back_inserter(x&: tmp),
2021 comp: mergeCmp);
2022
2023 isd->sections = std::move(tmp);
2024 });
2025}
2026
2027static int64_t getPCBias(RelType type) {
2028 if (config->emachine != EM_ARM)
2029 return 0;
2030 switch (type) {
2031 case R_ARM_THM_JUMP19:
2032 case R_ARM_THM_JUMP24:
2033 case R_ARM_THM_CALL:
2034 return 4;
2035 default:
2036 return 8;
2037 }
2038}
2039
2040// Find or create a ThunkSection within the InputSectionDescription (ISD) that
2041// is in range of Src. An ISD maps to a range of InputSections described by a
2042// linker script section pattern such as { .text .text.* }.
2043ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os,
2044 InputSection *isec,
2045 InputSectionDescription *isd,
2046 const Relocation &rel,
2047 uint64_t src) {
2048 // See the comment in getThunk for -pcBias below.
2049 const int64_t pcBias = getPCBias(type: rel.type);
2050 for (std::pair<ThunkSection *, uint32_t> tp : isd->thunkSections) {
2051 ThunkSection *ts = tp.first;
2052 uint64_t tsBase = os->addr + ts->outSecOff - pcBias;
2053 uint64_t tsLimit = tsBase + ts->getSize();
2054 if (target->inBranchRange(type: rel.type, src,
2055 dst: (src > tsLimit) ? tsBase : tsLimit))
2056 return ts;
2057 }
2058
  // No suitable ThunkSection exists. This can happen when there is a branch
  // with a smaller range than the ThunkSection spacing or when there are too
  // many Thunks. Create a new ThunkSection as close to the InputSection as
  // possible. Error if the InputSection is so large that we cannot place a
  // ThunkSection anywhere in range.
2064 uint64_t thunkSecOff = isec->outSecOff;
2065 if (!target->inBranchRange(type: rel.type, src,
2066 dst: os->addr + thunkSecOff + rel.addend)) {
2067 thunkSecOff = isec->outSecOff + isec->getSize();
2068 if (!target->inBranchRange(type: rel.type, src,
2069 dst: os->addr + thunkSecOff + rel.addend))
2070 fatal(msg: "InputSection too large for range extension thunk " +
2071 isec->getObjMsg(offset: src - (os->addr + isec->outSecOff)));
2072 }
2073 return addThunkSection(os, isd, off: thunkSecOff);
2074}
2075
2076// Add a Thunk that needs to be placed in a ThunkSection that immediately
2077// precedes its Target.
2078ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) {
2079 ThunkSection *ts = thunkedSections.lookup(Val: isec);
2080 if (ts)
2081 return ts;
2082
  // Find the InputSectionDescription within the target OutputSection (TOS)
  // that contains the InputSection (IS) we need to precede.
2085 OutputSection *tos = isec->getParent();
2086 for (SectionCommand *bc : tos->commands) {
2087 auto *isd = dyn_cast<InputSectionDescription>(Val: bc);
2088 if (!isd || isd->sections.empty())
2089 continue;
2090
2091 InputSection *first = isd->sections.front();
2092 InputSection *last = isd->sections.back();
2093
2094 if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff)
2095 continue;
2096
2097 ts = addThunkSection(os: tos, isd, off: isec->outSecOff);
2098 thunkedSections[isec] = ts;
2099 return ts;
2100 }
2101
2102 return nullptr;
2103}
2104
2105// Create one or more ThunkSections per OS that can be used to place Thunks.
2106// We attempt to place the ThunkSections using the following desirable
2107// properties:
2108// - Within range of the maximum number of callers
// - Minimize the number of ThunkSections
2110//
2111// We follow a simple but conservative heuristic to place ThunkSections at
2112// offsets that are multiples of a Target specific branch range.
2113// For an InputSectionDescription that is smaller than the range, a single
2114// ThunkSection at the end of the range will do.
2115//
2116// For an InputSectionDescription that is more than twice the size of the range,
2117// we place the last ThunkSection at range bytes from the end of the
2118// InputSectionDescription in order to increase the likelihood that the
2119// distance from a thunk to its target will be sufficiently small to
2120// allow for the creation of a short thunk.
2121void ThunkCreator::createInitialThunkSections(
2122 ArrayRef<OutputSection *> outputSections) {
2123 uint32_t thunkSectionSpacing = target->getThunkSectionSpacing();
2124
2125 forEachInputSectionDescription(
2126 outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
2127 if (isd->sections.empty())
2128 return;
2129
2130 uint32_t isdBegin = isd->sections.front()->outSecOff;
2131 uint32_t isdEnd =
2132 isd->sections.back()->outSecOff + isd->sections.back()->getSize();
2133 uint32_t lastThunkLowerBound = -1;
2134 if (isdEnd - isdBegin > thunkSectionSpacing * 2)
2135 lastThunkLowerBound = isdEnd - thunkSectionSpacing;
2136
2137 uint32_t isecLimit;
2138 uint32_t prevIsecLimit = isdBegin;
2139 uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;
2140
2141 for (const InputSection *isec : isd->sections) {
2142 isecLimit = isec->outSecOff + isec->getSize();
2143 if (isecLimit > thunkUpperBound) {
2144 addThunkSection(os, isd, off: prevIsecLimit);
2145 thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
2146 }
2147 if (isecLimit > lastThunkLowerBound)
2148 break;
2149 prevIsecLimit = isecLimit;
2150 }
2151 addThunkSection(os, isd, off: isecLimit);
2152 });
2153}
2154
2155ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
2156 InputSectionDescription *isd,
2157 uint64_t off) {
2158 auto *ts = make<ThunkSection>(args&: os, args&: off);
2159 ts->partition = os->partition;
2160 if ((config->fixCortexA53Errata843419 || config->fixCortexA8) &&
2161 !isd->sections.empty()) {
    // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
    // thunks we disturb the base addresses of sections placed after the
    // thunks; this makes patches we have generated redundant, and may cause us
    // to generate more patches as different instructions are now in sensitive
    // locations. When we generate more patches we may force more branches to
    // go out of range, causing more thunks to be generated. In pathological
    // cases this can cause the address-dependent content pass not to converge.
    // We fix this by rounding up the size of the ThunkSection to 4 KiB; this
    // limits the effect of inserting a ThunkSection to the addresses modulo
    // 4 KiB, which means that adding Thunks to the section does not invalidate
    // errata patches for following code.
    // Rounding up the size to 4 KiB has consequences for code size and can
    // trip up linker-script-defined assertions. For example, the Linux kernel
    // has an assertion that what LLD represents as an InputSectionDescription
    // does not exceed 4 KiB even if the overall OutputSection is > 128 MiB.
2177 // We use the heuristic of rounding up the size when both of the following
2178 // conditions are true:
2179 // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
2180 // accounts for the case where no single InputSectionDescription is
2181 // larger than the OutputSection size. This is conservative but simple.
2182 // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
2183 // any assertion failures that an InputSectionDescription is < 4 KiB
2184 // in size.
2185 uint64_t isdSize = isd->sections.back()->outSecOff +
2186 isd->sections.back()->getSize() -
2187 isd->sections.front()->outSecOff;
2188 if (os->size > target->getThunkSectionSpacing() && isdSize > 4096)
2189 ts->roundUpSizeForErrata = true;
2190 }
2191 isd->thunkSections.push_back(Elt: {ts, pass});
2192 return ts;
2193}
2194
2195static bool isThunkSectionCompatible(InputSection *source,
2196 SectionBase *target) {
2197 // We can't reuse thunks in different loadable partitions because they might
2198 // not be loaded. But partition 1 (the main partition) will always be loaded.
2199 if (source->partition != target->partition)
2200 return target->partition == 1;
2201 return true;
2202}
2203
2204std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
2205 Relocation &rel, uint64_t src) {
2206 std::vector<Thunk *> *thunkVec = nullptr;
  // Arm and Thumb have a PC bias of 8 and 4 respectively; this is cancelled
  // out in the relocation addend. We compensate for the PC bias so that
  // Arm and Thumb relocations to the same destination get the same keyAddend,
  // which is usually 0.
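  //
  // As an illustration of the normalization (the call target is hypothetical):
  //   ARM:   bl callee    @ addend = -8, pcBias = 8 -> keyAddend = 0
  //   Thumb: bl callee    @ addend = -4, pcBias = 4 -> keyAddend = 0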
2211 const int64_t pcBias = getPCBias(type: rel.type);
2212 const int64_t keyAddend = rel.addend + pcBias;
2213
2214 // We use a ((section, offset), addend) pair to find the thunk position if
2215 // possible so that we create only one thunk for aliased symbols or ICFed
2216 // sections. There may be multiple relocations sharing the same (section,
2217 // offset + addend) pair. We may revert the relocation back to its original
2218 // non-Thunk target, so we cannot fold offset + addend.
2219 if (auto *d = dyn_cast<Defined>(Val: rel.sym))
2220 if (!d->isInPlt() && d->section)
2221 thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
2222 keyAddend}];
2223 if (!thunkVec)
2224 thunkVec = &thunkedSymbols[{rel.sym, keyAddend}];
2225
2226 // Check existing Thunks for Sym to see if they can be reused
2227 for (Thunk *t : *thunkVec)
2228 if (isThunkSectionCompatible(source: isec, target: t->getThunkTargetSym()->section) &&
2229 t->isCompatibleWith(*isec, rel) &&
2230 target->inBranchRange(type: rel.type, src,
2231 dst: t->getThunkTargetSym()->getVA(addend: -pcBias)))
2232 return std::make_pair(x&: t, y: false);
2233
2234 // No existing compatible Thunk in range, create a new one
2235 Thunk *t = addThunk(isec: *isec, rel);
2236 thunkVec->push_back(x: t);
2237 return std::make_pair(x&: t, y: true);
2238}
2239
2240// Return true if the relocation target is an in range Thunk.
2241// Return false if the relocation is not to a Thunk. If the relocation target
2242// was originally to a Thunk, but is no longer in range we revert the
2243// relocation back to its original non-Thunk target.
2244bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
2245 if (Thunk *t = thunks.lookup(Val: rel.sym)) {
2246 if (target->inBranchRange(type: rel.type, src, dst: rel.sym->getVA(addend: rel.addend)))
2247 return true;
2248 rel.sym = &t->destination;
2249 rel.addend = t->addend;
2250 if (rel.sym->isInPlt())
2251 rel.expr = toPlt(expr: rel.expr);
2252 }
2253 return false;
2254}
2255
2256// Process all relocations from the InputSections that have been assigned
2257// to InputSectionDescriptions and redirect through Thunks if needed. The
2258// function should be called iteratively until it returns false.
2259//
2260// PreConditions:
2261// All InputSections that may need a Thunk are reachable from
2262// OutputSectionCommands.
2263//
2264// All OutputSections have an address and all InputSections have an offset
2265// within the OutputSection.
2266//
2267// The offsets between caller (relocation place) and callee
2268// (relocation target) will not be modified outside of createThunks().
2269//
2270// PostConditions:
2271// If return value is true then ThunkSections have been inserted into
2272// OutputSections. All relocations that needed a Thunk based on the information
2273// available to createThunks() on entry have been redirected to a Thunk. Note
2274// that adding Thunks changes offsets between caller and callee so more Thunks
2275// may be required.
2276//
2277// If return value is false then no more Thunks are needed, and createThunks has
2278// made no changes. If the target requires range extension thunks, currently
2279// ARM, then any future change in offset between caller and callee risks a
2280// relocation out of range error.
2281bool ThunkCreator::createThunks(uint32_t pass,
2282 ArrayRef<OutputSection *> outputSections) {
2283 this->pass = pass;
2284 bool addressesChanged = false;
2285
2286 if (pass == 0 && target->getThunkSectionSpacing())
2287 createInitialThunkSections(outputSections);
2288
2289 // Create all the Thunks and insert them into synthetic ThunkSections. The
2290 // ThunkSections are later inserted back into InputSectionDescriptions.
2291 // We separate the creation of ThunkSections from the insertion of the
2292 // ThunkSections as ThunkSections are not always inserted into the same
2293 // InputSectionDescription as the caller.
2294 forEachInputSectionDescription(
2295 outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
2296 for (InputSection *isec : isd->sections)
2297 for (Relocation &rel : isec->relocs()) {
2298 uint64_t src = isec->getVA(offset: rel.offset);
2299
            // If this is a relocation to an existing Thunk, check if it is
            // still in range. If not, rel will be reverted to point to its
            // original target so another Thunk can be generated.
2303 if (pass > 0 && normalizeExistingThunk(rel, src))
2304 continue;
2305
2306 if (!target->needsThunk(expr: rel.expr, relocType: rel.type, file: isec->file, branchAddr: src,
2307 s: *rel.sym, a: rel.addend))
2308 continue;
2309
2310 Thunk *t;
2311 bool isNew;
2312 std::tie(args&: t, args&: isNew) = getThunk(isec, rel, src);
2313
2314 if (isNew) {
2315 // Find or create a ThunkSection for the new Thunk
2316 ThunkSection *ts;
2317 if (auto *tis = t->getTargetInputSection())
2318 ts = getISThunkSec(isec: tis);
2319 else
2320 ts = getISDThunkSec(os, isec, isd, rel, src);
2321 ts->addThunk(t);
2322 thunks[t->getThunkTargetSym()] = t;
2323 }
2324
            // Redirect the relocation to the Thunk; we never go via the PLT to
            // a Thunk.
2326 rel.sym = t->getThunkTargetSym();
2327 rel.expr = fromPlt(expr: rel.expr);
2328
            // On AArch64 and PPC, a jump/call relocation may be encoded as
            // STT_SECTION + non-zero addend; clear the addend after
            // redirection.
2332 if (config->emachine != EM_MIPS)
2333 rel.addend = -getPCBias(type: rel.type);
2334 }
2335
2336 for (auto &p : isd->thunkSections)
2337 addressesChanged |= p.first->assignOffsets();
2338 });
2339
2340 for (auto &p : thunkedSections)
2341 addressesChanged |= p.second->assignOffsets();
2342
2343 // Merge all created synthetic ThunkSections back into OutputSection
2344 mergeThunks(outputSections);
2345 return addressesChanged;
2346}
2347
// The following functions aid in the conversion of call x@GDPLT to
// call __tls_get_addr.
// hexagonNeedsTLSSymbol scans for relocations that would require a call to
// __tls_get_addr.
// hexagonTLSSymbolUpdate rebinds such relocations to __tls_get_addr.
2352bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) {
2353 bool needTlsSymbol = false;
2354 forEachInputSectionDescription(
2355 outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
2356 for (InputSection *isec : isd->sections)
2357 for (Relocation &rel : isec->relocs())
2358 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
2359 needTlsSymbol = true;
2360 return;
2361 }
2362 });
2363 return needTlsSymbol;
2364}
2365
2366void elf::hexagonTLSSymbolUpdate(ArrayRef<OutputSection *> outputSections) {
2367 Symbol *sym = symtab.find(name: "__tls_get_addr");
2368 if (!sym)
2369 return;
2370 bool needEntry = true;
2371 forEachInputSectionDescription(
2372 outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
2373 for (InputSection *isec : isd->sections)
2374 for (Relocation &rel : isec->relocs())
2375 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
2376 if (needEntry) {
2377 sym->allocateAux();
2378 addPltEntry(plt&: *in.plt, gotPlt&: *in.gotPlt, rel&: *in.relaPlt, type: target->pltRel,
2379 sym&: *sym);
2380 needEntry = false;
2381 }
2382 rel.sym = sym;
2383 }
2384 });
2385}
2386
2387static bool matchesRefTo(const NoCrossRefCommand &cmd, StringRef osec) {
2388 if (cmd.toFirst)
2389 return cmd.outputSections[0] == osec;
2390 return llvm::is_contained(Range: cmd.outputSections, Element: osec);
2391}
2392
2393template <class ELFT, class Rels>
2394static void scanCrossRefs(const NoCrossRefCommand &cmd, OutputSection *osec,
2395 InputSection *sec, Rels rels) {
2396 for (const auto &r : rels) {
2397 Symbol &sym = sec->file->getSymbol(symbolIndex: r.getSymbol(config->isMips64EL));
    // A legal cross-reference is when the destination output section is
    // nullptr, osec for a self-reference, or a section that is not described
    // by the NOCROSSREFS/NOCROSSREFS_TO command.
2401 auto *dstOsec = sym.getOutputSection();
2402 if (!dstOsec || dstOsec == osec || !matchesRefTo(cmd, osec: dstOsec->name))
2403 continue;
2404
2405 std::string toSymName;
2406 if (!sym.isSection())
2407 toSymName = toString(sym);
2408 else if (auto *d = dyn_cast<Defined>(Val: &sym))
2409 toSymName = d->section->name;
2410 errorOrWarn(sec->getLocation(offset: r.r_offset) +
2411 ": prohibited cross reference from '" + osec->name + "' to '" +
2412 toSymName + "' in '" + dstOsec->name + "'");
2413 }
2414}
2415
2416// For each output section described by at least one NOCROSSREFS(_TO) command,
2417// scan relocations from its input sections for prohibited cross references.
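//
// For illustration (section names are hypothetical), a linker script with
//   NOCROSSREFS(.text.a .text.b)
// causes any reference between input sections placed in .text.a and .text.b
// (in either direction) to be reported, while
//   NOCROSSREFS_TO(.text.a .text.b)
// only reports references from .text.b to .text.a.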
2418template <class ELFT> void elf::checkNoCrossRefs() {
2419 for (OutputSection *osec : outputSections) {
2420 for (const NoCrossRefCommand &noxref : script->noCrossRefs) {
2421 if (!llvm::is_contained(Range: noxref.outputSections, Element: osec->name) ||
2422 (noxref.toFirst && noxref.outputSections[0] == osec->name))
2423 continue;
2424 for (SectionCommand *cmd : osec->commands) {
2425 auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
2426 if (!isd)
2427 continue;
2428 parallelForEach(isd->sections, [&](InputSection *sec) {
2429 invokeOnRelocs(*sec, scanCrossRefs<ELFT>, noxref, osec, sec);
2430 });
2431 }
2432 }
2433 }
2434}
2435
2436template void elf::scanRelocations<ELF32LE>();
2437template void elf::scanRelocations<ELF32BE>();
2438template void elf::scanRelocations<ELF64LE>();
2439template void elf::scanRelocations<ELF64BE>();
2440
2441template void elf::checkNoCrossRefs<ELF32LE>();
2442template void elf::checkNoCrossRefs<ELF32BE>();
2443template void elf::checkNoCrossRefs<ELF64LE>();
2444template void elf::checkNoCrossRefs<ELF64BE>();
2445