| 1 | //===- Relocations.cpp ----------------------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements the core relocation processing logic. It analyzes |
| 10 | // relocations and determines what auxiliary data structures (GOT, PLT, copy |
| 11 | // relocations) need to be created during linking. |
| 12 | // |
| 13 | // The main entry point is scanRelocations<ELFT>(), which calls scanSection() |
| 14 | // to process all relocations within an input section. For each relocation, |
| 15 | // scan() analyzes the type and target, and determines whether a synthetic |
| 16 | // section entry or dynamic relocation is needed. |
| 17 | // |
| 18 | // Note: This file analyzes what needs to be done but doesn't apply the |
| 19 | // actual relocations - that happens later in InputSection::writeTo(). |
| 20 | // Instead, it populates Relocation objects in InputSectionBase::relocations |
| 21 | // and creates necessary synthetic sections (GOT, PLT, etc.). |
| 22 | // |
| 23 | // In addition, this file implements the core Thunk creation logic, called |
| 24 | // during finalizeAddressDependentContent(). |
| 25 | // |
| 26 | //===----------------------------------------------------------------------===// |
| 27 | |
| 28 | #include "Relocations.h" |
| 29 | #include "Config.h" |
| 30 | #include "InputFiles.h" |
| 31 | #include "LinkerScript.h" |
| 32 | #include "OutputSections.h" |
| 33 | #include "RelocScan.h" |
| 34 | #include "SymbolTable.h" |
| 35 | #include "Symbols.h" |
| 36 | #include "SyntheticSections.h" |
| 37 | #include "Target.h" |
| 38 | #include "Thunks.h" |
| 39 | #include "lld/Common/ErrorHandler.h" |
| 40 | #include "lld/Common/Memory.h" |
| 41 | #include "llvm/ADT/SmallSet.h" |
| 42 | #include "llvm/BinaryFormat/ELF.h" |
| 43 | #include "llvm/Demangle/Demangle.h" |
| 44 | #include <algorithm> |
| 45 | |
| 46 | using namespace llvm; |
| 47 | using namespace llvm::ELF; |
| 48 | using namespace llvm::object; |
| 49 | using namespace llvm::support::endian; |
| 50 | using namespace lld; |
| 51 | using namespace lld::elf; |
| 52 | |
| 53 | static void printDefinedLocation(ELFSyncStream &s, const Symbol &sym) { |
| 54 | s << "\n>>> defined in " << sym.file; |
| 55 | } |
| 56 | |
| 57 | // Construct a message in the following format. |
| 58 | // |
| 59 | // >>> defined in /home/alice/src/foo.o |
| 60 | // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12) |
| 61 | // >>> /home/alice/src/bar.o:(.text+0x1) |
| 62 | void elf::printLocation(ELFSyncStream &s, InputSectionBase &sec, |
| 63 | const Symbol &sym, uint64_t off) { |
| 64 | printDefinedLocation(s, sym); |
| 65 | s << "\n>>> referenced by " ; |
| 66 | auto tell = s.tell(); |
| 67 | s << sec.getSrcMsg(sym, offset: off); |
| 68 | if (tell != s.tell()) |
| 69 | s << "\n>>> " ; |
| 70 | s << sec.getObjMsg(offset: off); |
| 71 | } |
| 72 | |
| 73 | void elf::reportRangeError(Ctx &ctx, uint8_t *loc, const Relocation &rel, |
| 74 | const Twine &v, int64_t min, uint64_t max) { |
| 75 | ErrorPlace errPlace = getErrorPlace(ctx, loc); |
| 76 | auto diag = Err(ctx); |
| 77 | diag << errPlace.loc << "relocation " << rel.type |
| 78 | << " out of range: " << v.str() << " is not in [" << min << ", " << max |
| 79 | << ']'; |
| 80 | |
| 81 | if (rel.sym) { |
| 82 | if (!rel.sym->isSection()) |
| 83 | diag << "; references '" << rel.sym << '\''; |
| 84 | else if (auto *d = dyn_cast<Defined>(Val: rel.sym)) |
| 85 | diag << "; references section '" << d->section->name << "'" ; |
| 86 | |
| 87 | if (ctx.arg.emachine == EM_X86_64 && rel.type == R_X86_64_PC32 && |
| 88 | rel.sym->getOutputSection() && |
| 89 | (rel.sym->getOutputSection()->flags & SHF_X86_64_LARGE)) { |
| 90 | diag << "; R_X86_64_PC32 should not reference a section marked " |
| 91 | "SHF_X86_64_LARGE" ; |
| 92 | } |
| 93 | } |
| 94 | if (!errPlace.srcLoc.empty()) |
| 95 | diag << "\n>>> referenced by " << errPlace.srcLoc; |
| 96 | if (rel.sym && !rel.sym->isSection()) |
| 97 | printDefinedLocation(s&: diag, sym: *rel.sym); |
| 98 | |
| 99 | if (errPlace.isec && errPlace.isec->name.starts_with(Prefix: ".debug" )) |
| 100 | diag << "; consider recompiling with -fdebug-types-section to reduce size " |
| 101 | "of debug sections" ; |
| 102 | } |
| 103 | |
| 104 | void elf::reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n, |
| 105 | const Symbol &sym, const Twine &msg) { |
| 106 | auto diag = Err(ctx); |
| 107 | diag << getErrorPlace(ctx, loc).loc << msg << " is out of range: " << v |
| 108 | << " is not in [" << llvm::minIntN(N: n) << ", " << llvm::maxIntN(N: n) << "]" ; |
| 109 | if (!sym.getName().empty()) { |
| 110 | diag << "; references '" << &sym << '\''; |
| 111 | printDefinedLocation(s&: diag, sym); |
| 112 | } |
| 113 | } |
| 114 | |
| 115 | // True if non-preemptable symbol always has the same value regardless of where |
| 116 | // the DSO is loaded. |
| 117 | bool elf::isAbsolute(const Symbol &sym) { |
| 118 | if (sym.isUndefined()) |
| 119 | return true; |
| 120 | if (const auto *dr = dyn_cast<Defined>(Val: &sym)) |
| 121 | return dr->section == nullptr; // Absolute symbol. |
| 122 | return false; |
| 123 | } |
| 124 | |
| 125 | static bool isAbsoluteOrTls(const Symbol &sym) { |
| 126 | return isAbsolute(sym) || sym.isTls(); |
| 127 | } |
| 128 | |
| 129 | // Returns true if Expr refers a PLT entry. |
| 130 | static bool needsPlt(RelExpr expr) { |
| 131 | return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, |
| 132 | R_GOTPLT_PC, RE_LOONGARCH_PLT_PAGE_PC, RE_PPC32_PLTREL, |
| 133 | RE_PPC64_CALL_PLT>(expr); |
| 134 | } |
| 135 | |
| 136 | bool lld::elf::needsGot(RelExpr expr) { |
| 137 | return oneof<R_GOT, RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, R_GOT_OFF, |
| 138 | RE_MIPS_GOT_LOCAL_PAGE, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32, |
| 139 | RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC, |
| 140 | RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC, R_GOTPLT, |
| 141 | RE_AARCH64_GOT_PAGE, RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>( |
| 142 | expr); |
| 143 | } |
| 144 | |
| 145 | // True if this expression is of the form Sym - X, where X is a position in the |
| 146 | // file (PC, or GOT for example). |
| 147 | static bool isRelExpr(RelExpr expr) { |
| 148 | return oneof<R_PC, R_GOTREL, R_GOTPLTREL, RE_ARM_PCA, RE_MIPS_GOTREL, |
| 149 | RE_PPC64_CALL, RE_PPC64_RELAX_TOC, RE_AARCH64_PAGE_PC, |
| 150 | R_RELAX_GOT_PC, RE_RISCV_PC_INDIRECT, RE_PPC64_RELAX_GOT_PC, |
| 151 | RE_LOONGARCH_PAGE_PC, RE_LOONGARCH_PC_INDIRECT>(expr); |
| 152 | } |
| 153 | |
| 154 | static RelExpr toPlt(RelExpr expr) { |
| 155 | switch (expr) { |
| 156 | case RE_LOONGARCH_PAGE_PC: |
| 157 | return RE_LOONGARCH_PLT_PAGE_PC; |
| 158 | case RE_PPC64_CALL: |
| 159 | return RE_PPC64_CALL_PLT; |
| 160 | case R_PC: |
| 161 | return R_PLT_PC; |
| 162 | case R_ABS: |
| 163 | return R_PLT; |
| 164 | case R_GOTREL: |
| 165 | return R_PLT_GOTREL; |
| 166 | default: |
| 167 | return expr; |
| 168 | } |
| 169 | } |
| 170 | |
| 171 | static RelExpr fromPlt(RelExpr expr) { |
| 172 | // We decided not to use a plt. Optimize a reference to the plt to a |
| 173 | // reference to the symbol itself. |
| 174 | switch (expr) { |
| 175 | case R_PLT_PC: |
| 176 | case RE_PPC32_PLTREL: |
| 177 | return R_PC; |
| 178 | case RE_LOONGARCH_PLT_PAGE_PC: |
| 179 | return RE_LOONGARCH_PAGE_PC; |
| 180 | case RE_PPC64_CALL_PLT: |
| 181 | return RE_PPC64_CALL; |
| 182 | case R_PLT: |
| 183 | return R_ABS; |
| 184 | case R_PLT_GOTPLT: |
| 185 | return R_GOTPLTREL; |
| 186 | case R_PLT_GOTREL: |
| 187 | return R_GOTREL; |
| 188 | default: |
| 189 | return expr; |
| 190 | } |
| 191 | } |
| 192 | |
| 193 | // Returns true if a given shared symbol is in a read-only segment in a DSO. |
| 194 | template <class ELFT> static bool isReadOnly(SharedSymbol &ss) { |
| 195 | using Elf_Phdr = typename ELFT::Phdr; |
| 196 | |
| 197 | // Determine if the symbol is read-only by scanning the DSO's program headers. |
| 198 | const auto &file = cast<SharedFile>(Val&: *ss.file); |
| 199 | for (const Elf_Phdr &phdr : |
| 200 | check(file.template getObj<ELFT>().program_headers())) |
| 201 | if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) && |
| 202 | !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr && |
| 203 | ss.value < phdr.p_vaddr + phdr.p_memsz) |
| 204 | return true; |
| 205 | return false; |
| 206 | } |
| 207 | |
| 208 | // Returns symbols at the same offset as a given symbol, including SS itself. |
| 209 | // |
| 210 | // If two or more symbols are at the same offset, and at least one of |
| 211 | // them are copied by a copy relocation, all of them need to be copied. |
| 212 | // Otherwise, they would refer to different places at runtime. |
| 213 | template <class ELFT> |
| 214 | static SmallPtrSet<SharedSymbol *, 4> getSymbolsAt(Ctx &ctx, SharedSymbol &ss) { |
| 215 | using Elf_Sym = typename ELFT::Sym; |
| 216 | |
| 217 | const auto &file = cast<SharedFile>(Val&: *ss.file); |
| 218 | |
| 219 | SmallPtrSet<SharedSymbol *, 4> ret; |
| 220 | for (const Elf_Sym &s : file.template getGlobalELFSyms<ELFT>()) { |
| 221 | if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS || |
| 222 | s.getType() == STT_TLS || s.st_value != ss.value) |
| 223 | continue; |
| 224 | StringRef name = check(s.getName(file.getStringTable())); |
| 225 | Symbol *sym = ctx.symtab->find(name); |
| 226 | if (auto *alias = dyn_cast_or_null<SharedSymbol>(Val: sym)) |
| 227 | ret.insert(Ptr: alias); |
| 228 | } |
| 229 | |
| 230 | // The loop does not check SHT_GNU_verneed, so ret does not contain |
| 231 | // non-default version symbols. If ss has a non-default version, ret won't |
| 232 | // contain ss. Just add ss unconditionally. If a non-default version alias is |
| 233 | // separately copy relocated, it and ss will have different addresses. |
| 234 | // Fortunately this case is impractical and fails with GNU ld as well. |
| 235 | ret.insert(Ptr: &ss); |
| 236 | return ret; |
| 237 | } |
| 238 | |
| 239 | // When a symbol is copy relocated or we create a canonical plt entry, it is |
| 240 | // effectively a defined symbol. In the case of copy relocation the symbol is |
| 241 | // in .bss and in the case of a canonical plt entry it is in .plt. This function |
| 242 | // replaces the existing symbol with a Defined pointing to the appropriate |
| 243 | // location. |
| 244 | static void replaceWithDefined(Ctx &ctx, Symbol &sym, SectionBase &sec, |
| 245 | uint64_t value, uint64_t size) { |
| 246 | Symbol old = sym; |
| 247 | Defined(ctx, sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value, |
| 248 | size, &sec) |
| 249 | .overwrite(sym); |
| 250 | |
| 251 | sym.versionId = old.versionId; |
| 252 | sym.isUsedInRegularObj = true; |
| 253 | // A copy relocated alias may need a GOT entry. |
| 254 | sym.flags.store(i: old.flags.load(m: std::memory_order_relaxed) & NEEDS_GOT, |
| 255 | m: std::memory_order_relaxed); |
| 256 | } |
| 257 | |
| 258 | // Reserve space in .bss or .bss.rel.ro for copy relocation. |
| 259 | // |
| 260 | // The copy relocation is pretty much a hack. If you use a copy relocation |
| 261 | // in your program, not only the symbol name but the symbol's size, RW/RO |
| 262 | // bit and alignment become part of the ABI. In addition to that, if the |
| 263 | // symbol has aliases, the aliases become part of the ABI. That's subtle, |
| 264 | // but if you violate that implicit ABI, that can cause very counter- |
| 265 | // intuitive consequences. |
| 266 | // |
| 267 | // So, what is the copy relocation? It's for linking non-position |
| 268 | // independent code to DSOs. In an ideal world, all references to data |
| 269 | // exported by DSOs should go indirectly through GOT. But if object files |
| 270 | // are compiled as non-PIC, all data references are direct. There is no |
| 271 | // way for the linker to transform the code to use GOT, as machine |
| 272 | // instructions are already set in stone in object files. This is where |
| 273 | // the copy relocation takes a role. |
| 274 | // |
| 275 | // A copy relocation instructs the dynamic linker to copy data from a DSO |
| 276 | // to a specified address (which is usually in .bss) at load-time. If the |
| 277 | // static linker (that's us) finds a direct data reference to a DSO |
| 278 | // symbol, it creates a copy relocation, so that the symbol can be |
| 279 | // resolved as if it were in .bss rather than in a DSO. |
| 280 | // |
| 281 | // As you can see in this function, we create a copy relocation for the |
| 282 | // dynamic linker, and the relocation contains not only symbol name but |
| 283 | // various other information about the symbol. So, such attributes become a |
| 284 | // part of the ABI. |
| 285 | // |
| 286 | // Note for application developers: I can give you a piece of advice if |
| 287 | // you are writing a shared library. You probably should export only |
| 288 | // functions from your library. You shouldn't export variables. |
| 289 | // |
| 290 | // As an example what can happen when you export variables without knowing |
| 291 | // the semantics of copy relocations, assume that you have an exported |
| 292 | // variable of type T. It is an ABI-breaking change to add new members at |
| 293 | // end of T even though doing that doesn't change the layout of the |
| 294 | // existing members. That's because the space for the new members are not |
| 295 | // reserved in .bss unless you recompile the main program. That means they |
| 296 | // are likely to overlap with other data that happens to be laid out next |
| 297 | // to the variable in .bss. This kind of issue is sometimes very hard to |
| 298 | // debug. What's a solution? Instead of exporting a variable V from a DSO, |
| 299 | // define an accessor getV(). |
| 300 | template <class ELFT> static void addCopyRelSymbol(Ctx &ctx, SharedSymbol &ss) { |
| 301 | // Copy relocation against zero-sized symbol doesn't make sense. |
| 302 | uint64_t symSize = ss.getSize(); |
| 303 | if (symSize == 0 || ss.alignment == 0) |
| 304 | Err(ctx) << "cannot create a copy relocation for symbol " << &ss; |
| 305 | |
| 306 | // See if this symbol is in a read-only segment. If so, preserve the symbol's |
| 307 | // memory protection by reserving space in the .bss.rel.ro section. |
| 308 | bool isRO = isReadOnly<ELFT>(ss); |
| 309 | BssSection *sec = make<BssSection>(args&: ctx, args: isRO ? ".bss.rel.ro" : ".bss" , |
| 310 | args&: symSize, args&: ss.alignment); |
| 311 | OutputSection *osec = (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent(); |
| 312 | |
| 313 | // At this point, sectionBases has been migrated to sections. Append sec to |
| 314 | // sections. |
| 315 | if (osec->commands.empty() || |
| 316 | !isa<InputSectionDescription>(Val: osec->commands.back())) |
| 317 | osec->commands.push_back(Elt: make<InputSectionDescription>(args: "" )); |
| 318 | auto *isd = cast<InputSectionDescription>(Val: osec->commands.back()); |
| 319 | isd->sections.push_back(Elt: sec); |
| 320 | osec->commitSection(isec: sec); |
| 321 | |
| 322 | // Look through the DSO's dynamic symbol table for aliases and create a |
| 323 | // dynamic symbol for each one. This causes the copy relocation to correctly |
| 324 | // interpose any aliases. |
| 325 | for (SharedSymbol *sym : getSymbolsAt<ELFT>(ctx, ss)) |
| 326 | replaceWithDefined(ctx, sym&: *sym, sec&: *sec, value: 0, size: sym->size); |
| 327 | |
| 328 | ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->copyRel, isec&: *sec, offsetInSec: 0, sym&: ss); |
| 329 | } |
| 330 | |
| 331 | // .eh_frame sections are mergeable input sections, so their input |
| 332 | // offsets are not linearly mapped to output section. For each input |
| 333 | // offset, we need to find a section piece containing the offset and |
| 334 | // add the piece's base address to the input offset to compute the |
| 335 | // output offset. That isn't cheap. |
| 336 | // |
| 337 | // This class is to speed up the offset computation. When we process |
| 338 | // relocations, we access offsets in the monotonically increasing |
| 339 | // order. So we can optimize for that access pattern. |
| 340 | // |
| 341 | // For sections other than .eh_frame, this class doesn't do anything. |
| 342 | namespace { |
| 343 | class OffsetGetter { |
| 344 | public: |
| 345 | OffsetGetter() = default; |
| 346 | explicit OffsetGetter(EhInputSection &sec) { |
| 347 | cies = sec.cies; |
| 348 | fdes = sec.fdes; |
| 349 | i = cies.begin(); |
| 350 | j = fdes.begin(); |
| 351 | } |
| 352 | |
| 353 | // Translates offsets in input sections to offsets in output sections. |
| 354 | // Given offset must increase monotonically. We assume that Piece is |
| 355 | // sorted by inputOff. |
| 356 | uint64_t get(Ctx &ctx, uint64_t off) { |
| 357 | while (j != fdes.end() && j->inputOff <= off) |
| 358 | ++j; |
| 359 | auto it = j; |
| 360 | if (j == fdes.begin() || j[-1].inputOff + j[-1].size <= off) { |
| 361 | while (i != cies.end() && i->inputOff <= off) |
| 362 | ++i; |
| 363 | if (i == cies.begin() || i[-1].inputOff + i[-1].size <= off) { |
| 364 | Err(ctx) << ".eh_frame: relocation is not in any piece" ; |
| 365 | return 0; |
| 366 | } |
| 367 | it = i; |
| 368 | } |
| 369 | |
| 370 | // Offset -1 means that the piece is dead (i.e. garbage collected). |
| 371 | if (it[-1].outputOff == -1) |
| 372 | return -1; |
| 373 | return it[-1].outputOff + (off - it[-1].inputOff); |
| 374 | } |
| 375 | |
| 376 | private: |
| 377 | ArrayRef<EhSectionPiece> cies, fdes; |
| 378 | ArrayRef<EhSectionPiece>::iterator i, j; |
| 379 | }; |
| 380 | } // namespace |
| 381 | |
| 382 | // Custom error message if Sym is defined in a discarded section. |
| 383 | template <class ELFT> |
| 384 | static void maybeReportDiscarded(Ctx &ctx, ELFSyncStream &msg, Undefined &sym) { |
| 385 | auto *file = dyn_cast<ObjFile<ELFT>>(sym.file); |
| 386 | if (!file || !sym.discardedSecIdx) |
| 387 | return; |
| 388 | ArrayRef<typename ELFT::Shdr> objSections = |
| 389 | file->template getELFShdrs<ELFT>(); |
| 390 | |
| 391 | if (sym.type == ELF::STT_SECTION) { |
| 392 | msg << "relocation refers to a discarded section: " ; |
| 393 | msg << CHECK2( |
| 394 | file->getObj().getSectionName(objSections[sym.discardedSecIdx]), file); |
| 395 | } else { |
| 396 | msg << "relocation refers to a symbol in a discarded section: " << &sym; |
| 397 | } |
| 398 | msg << "\n>>> defined in " << file; |
| 399 | |
| 400 | Elf_Shdr_Impl<ELFT> elfSec = objSections[sym.discardedSecIdx - 1]; |
| 401 | if (elfSec.sh_type != SHT_GROUP) |
| 402 | return; |
| 403 | |
| 404 | // If the discarded section is a COMDAT. |
| 405 | StringRef signature = file->getShtGroupSignature(objSections, elfSec); |
| 406 | if (const InputFile *prevailing = |
| 407 | ctx.symtab->comdatGroups.lookup(Val: CachedHashStringRef(signature))) { |
| 408 | msg << "\n>>> section group signature: " << signature |
| 409 | << "\n>>> prevailing definition is in " << prevailing; |
| 410 | if (sym.nonPrevailing) { |
| 411 | msg << "\n>>> or the symbol in the prevailing group had STB_WEAK " |
| 412 | "binding and the symbol in a non-prevailing group had STB_GLOBAL " |
| 413 | "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding " |
| 414 | "signature is not supported" ; |
| 415 | } |
| 416 | } |
| 417 | } |
| 418 | |
| 419 | // Check whether the definition name def is a mangled function name that matches |
| 420 | // the reference name ref. |
| 421 | static bool canSuggestExternCForCXX(StringRef ref, StringRef def) { |
| 422 | llvm::ItaniumPartialDemangler d; |
| 423 | std::string name = def.str(); |
| 424 | if (d.partialDemangle(MangledName: name.c_str())) |
| 425 | return false; |
| 426 | char *buf = d.getFunctionName(Buf: nullptr, N: nullptr); |
| 427 | if (!buf) |
| 428 | return false; |
| 429 | bool ret = ref == buf; |
| 430 | free(ptr: buf); |
| 431 | return ret; |
| 432 | } |
| 433 | |
| 434 | // Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns |
| 435 | // the suggested symbol, which is either in the symbol table, or in the same |
| 436 | // file of sym. |
| 437 | static const Symbol *getAlternativeSpelling(Ctx &ctx, const Undefined &sym, |
| 438 | std::string &pre_hint, |
| 439 | std::string &post_hint) { |
| 440 | DenseMap<StringRef, const Symbol *> map; |
| 441 | if (sym.file->kind() == InputFile::ObjKind) { |
| 442 | auto *file = cast<ELFFileBase>(Val: sym.file); |
| 443 | // If sym is a symbol defined in a discarded section, maybeReportDiscarded() |
| 444 | // will give an error. Don't suggest an alternative spelling. |
| 445 | if (sym.discardedSecIdx != 0 && |
| 446 | file->getSections()[sym.discardedSecIdx] == &InputSection::discarded) |
| 447 | return nullptr; |
| 448 | |
| 449 | // Build a map of local defined symbols. |
| 450 | for (const Symbol *s : sym.file->getSymbols()) |
| 451 | if (s->isLocal() && s->isDefined() && !s->getName().empty()) |
| 452 | map.try_emplace(Key: s->getName(), Args&: s); |
| 453 | } |
| 454 | |
| 455 | auto suggest = [&](StringRef newName) -> const Symbol * { |
| 456 | // If defined locally. |
| 457 | if (const Symbol *s = map.lookup(Val: newName)) |
| 458 | return s; |
| 459 | |
| 460 | // If in the symbol table and not undefined. |
| 461 | if (const Symbol *s = ctx.symtab->find(name: newName)) |
| 462 | if (!s->isUndefined()) |
| 463 | return s; |
| 464 | |
| 465 | return nullptr; |
| 466 | }; |
| 467 | |
| 468 | // This loop enumerates all strings of Levenshtein distance 1 as typo |
| 469 | // correction candidates and suggests the one that exists as a non-undefined |
| 470 | // symbol. |
| 471 | StringRef name = sym.getName(); |
| 472 | for (size_t i = 0, e = name.size(); i != e + 1; ++i) { |
| 473 | // Insert a character before name[i]. |
| 474 | std::string newName = (name.substr(Start: 0, N: i) + "0" + name.substr(Start: i)).str(); |
| 475 | for (char c = '0'; c <= 'z'; ++c) { |
| 476 | newName[i] = c; |
| 477 | if (const Symbol *s = suggest(newName)) |
| 478 | return s; |
| 479 | } |
| 480 | if (i == e) |
| 481 | break; |
| 482 | |
| 483 | // Substitute name[i]. |
| 484 | newName = std::string(name); |
| 485 | for (char c = '0'; c <= 'z'; ++c) { |
| 486 | newName[i] = c; |
| 487 | if (const Symbol *s = suggest(newName)) |
| 488 | return s; |
| 489 | } |
| 490 | |
| 491 | // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is |
| 492 | // common. |
| 493 | if (i + 1 < e) { |
| 494 | newName[i] = name[i + 1]; |
| 495 | newName[i + 1] = name[i]; |
| 496 | if (const Symbol *s = suggest(newName)) |
| 497 | return s; |
| 498 | } |
| 499 | |
| 500 | // Delete name[i]. |
| 501 | newName = (name.substr(Start: 0, N: i) + name.substr(Start: i + 1)).str(); |
| 502 | if (const Symbol *s = suggest(newName)) |
| 503 | return s; |
| 504 | } |
| 505 | |
| 506 | // Case mismatch, e.g. Foo vs FOO. |
| 507 | for (auto &it : map) |
| 508 | if (name.equals_insensitive(RHS: it.first)) |
| 509 | return it.second; |
| 510 | for (Symbol *sym : ctx.symtab->getSymbols()) |
| 511 | if (!sym->isUndefined() && name.equals_insensitive(RHS: sym->getName())) |
| 512 | return sym; |
| 513 | |
| 514 | // The reference may be a mangled name while the definition is not. Suggest a |
| 515 | // missing extern "C". |
| 516 | if (name.starts_with(Prefix: "_Z" )) { |
| 517 | std::string buf = name.str(); |
| 518 | llvm::ItaniumPartialDemangler d; |
| 519 | if (!d.partialDemangle(MangledName: buf.c_str())) |
| 520 | if (char *buf = d.getFunctionName(Buf: nullptr, N: nullptr)) { |
| 521 | const Symbol *s = suggest(buf); |
| 522 | free(ptr: buf); |
| 523 | if (s) { |
| 524 | pre_hint = ": extern \"C\" " ; |
| 525 | return s; |
| 526 | } |
| 527 | } |
| 528 | } else { |
| 529 | const Symbol *s = nullptr; |
| 530 | for (auto &it : map) |
| 531 | if (canSuggestExternCForCXX(ref: name, def: it.first)) { |
| 532 | s = it.second; |
| 533 | break; |
| 534 | } |
| 535 | if (!s) |
| 536 | for (Symbol *sym : ctx.symtab->getSymbols()) |
| 537 | if (canSuggestExternCForCXX(ref: name, def: sym->getName())) { |
| 538 | s = sym; |
| 539 | break; |
| 540 | } |
| 541 | if (s) { |
| 542 | pre_hint = " to declare " ; |
| 543 | post_hint = " as extern \"C\"?" ; |
| 544 | return s; |
| 545 | } |
| 546 | } |
| 547 | |
| 548 | return nullptr; |
| 549 | } |
| 550 | |
| 551 | static void reportUndefinedSymbol(Ctx &ctx, const UndefinedDiag &undef, |
| 552 | bool correctSpelling) { |
| 553 | Undefined &sym = *undef.sym; |
| 554 | ELFSyncStream msg(ctx, DiagLevel::None); |
| 555 | |
| 556 | auto visibility = [&]() { |
| 557 | switch (sym.visibility()) { |
| 558 | case STV_INTERNAL: |
| 559 | return "internal " ; |
| 560 | case STV_HIDDEN: |
| 561 | return "hidden " ; |
| 562 | case STV_PROTECTED: |
| 563 | return "protected " ; |
| 564 | default: |
| 565 | return "" ; |
| 566 | } |
| 567 | }; |
| 568 | |
| 569 | switch (ctx.arg.ekind) { |
| 570 | case ELF32LEKind: |
| 571 | maybeReportDiscarded<ELF32LE>(ctx, msg, sym); |
| 572 | break; |
| 573 | case ELF32BEKind: |
| 574 | maybeReportDiscarded<ELF32BE>(ctx, msg, sym); |
| 575 | break; |
| 576 | case ELF64LEKind: |
| 577 | maybeReportDiscarded<ELF64LE>(ctx, msg, sym); |
| 578 | break; |
| 579 | case ELF64BEKind: |
| 580 | maybeReportDiscarded<ELF64BE>(ctx, msg, sym); |
| 581 | break; |
| 582 | default: |
| 583 | llvm_unreachable("" ); |
| 584 | } |
| 585 | if (msg.str().empty()) |
| 586 | msg << "undefined " << visibility() << "symbol: " << &sym; |
| 587 | |
| 588 | const size_t maxUndefReferences = 3; |
| 589 | for (UndefinedDiag::Loc l : |
| 590 | ArrayRef(undef.locs).take_front(N: maxUndefReferences)) { |
| 591 | InputSectionBase &sec = *l.sec; |
| 592 | uint64_t offset = l.offset; |
| 593 | |
| 594 | msg << "\n>>> referenced by " ; |
| 595 | // In the absence of line number information, utilize DW_TAG_variable (if |
| 596 | // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`). |
| 597 | Symbol *enclosing = sec.getEnclosingSymbol(offset); |
| 598 | |
| 599 | ELFSyncStream msg1(ctx, DiagLevel::None); |
| 600 | auto tell = msg.tell(); |
| 601 | msg << sec.getSrcMsg(sym: enclosing ? *enclosing : sym, offset); |
| 602 | if (tell != msg.tell()) |
| 603 | msg << "\n>>> " ; |
| 604 | msg << sec.getObjMsg(offset); |
| 605 | } |
| 606 | |
| 607 | if (maxUndefReferences < undef.locs.size()) |
| 608 | msg << "\n>>> referenced " << (undef.locs.size() - maxUndefReferences) |
| 609 | << " more times" ; |
| 610 | |
| 611 | if (correctSpelling) { |
| 612 | std::string pre_hint = ": " , post_hint; |
| 613 | if (const Symbol *corrected = |
| 614 | getAlternativeSpelling(ctx, sym, pre_hint, post_hint)) { |
| 615 | msg << "\n>>> did you mean" << pre_hint << corrected << post_hint |
| 616 | << "\n>>> defined in: " << corrected->file; |
| 617 | } |
| 618 | } |
| 619 | |
| 620 | if (sym.getName().starts_with(Prefix: "_ZTV" )) |
| 621 | msg << "\n>>> the vtable symbol may be undefined because the class is " |
| 622 | "missing its key function " |
| 623 | "(see https://lld.llvm.org/missingkeyfunction)" ; |
| 624 | if (ctx.arg.gcSections && ctx.arg.zStartStopGC && |
| 625 | sym.getName().starts_with(Prefix: "__start_" )) { |
| 626 | msg << "\n>>> the encapsulation symbol needs to be retained under " |
| 627 | "--gc-sections properly; consider -z nostart-stop-gc " |
| 628 | "(see https://lld.llvm.org/ELF/start-stop-gc)" ; |
| 629 | } |
| 630 | |
| 631 | if (undef.isWarning) |
| 632 | Warn(ctx) << msg.str(); |
| 633 | else |
| 634 | ctx.e.error(msg: msg.str(), tag: ErrorTag::SymbolNotFound, args: {sym.getName()}); |
| 635 | } |
| 636 | |
| 637 | void elf::reportUndefinedSymbols(Ctx &ctx) { |
| 638 | // Find the first "undefined symbol" diagnostic for each diagnostic, and |
| 639 | // collect all "referenced from" lines at the first diagnostic. |
| 640 | DenseMap<Symbol *, UndefinedDiag *> firstRef; |
| 641 | for (UndefinedDiag &undef : ctx.undefErrs) { |
| 642 | assert(undef.locs.size() == 1); |
| 643 | if (UndefinedDiag *canon = firstRef.lookup(Val: undef.sym)) { |
| 644 | canon->locs.push_back(Elt: undef.locs[0]); |
| 645 | undef.locs.clear(); |
| 646 | } else |
| 647 | firstRef[undef.sym] = &undef; |
| 648 | } |
| 649 | |
| 650 | // Enable spell corrector for the first 2 diagnostics. |
| 651 | for (auto [i, undef] : llvm::enumerate(First&: ctx.undefErrs)) |
| 652 | if (!undef.locs.empty()) |
| 653 | reportUndefinedSymbol(ctx, undef, correctSpelling: i < 2); |
| 654 | } |
| 655 | |
| 656 | // Report an undefined symbol if necessary. |
| 657 | // Returns true if the undefined symbol will produce an error message. |
| 658 | bool RelocScan::maybeReportUndefined(Undefined &sym, uint64_t offset) { |
| 659 | std::lock_guard<std::mutex> lock(ctx.relocMutex); |
| 660 | // If versioned, issue an error (even if the symbol is weak) because we don't |
| 661 | // know the defining filename which is required to construct a Verneed entry. |
| 662 | if (sym.hasVersionSuffix) { |
| 663 | ctx.undefErrs.push_back(Elt: {.sym: &sym, .locs: {{.sec: sec, .offset: offset}}, .isWarning: false}); |
| 664 | return true; |
| 665 | } |
| 666 | if (sym.isWeak()) |
| 667 | return false; |
| 668 | |
| 669 | bool canBeExternal = !sym.isLocal() && sym.visibility() == STV_DEFAULT; |
| 670 | if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal) |
| 671 | return false; |
| 672 | |
| 673 | // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc |
| 674 | // which references a switch table in a discarded .rodata/.text section. The |
| 675 | // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF |
| 676 | // spec says references from outside the group to a STB_LOCAL symbol are not |
| 677 | // allowed. Work around the bug. |
| 678 | // |
| 679 | // PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible |
| 680 | // because .LC0-.LTOC is not representable if the two labels are in different |
| 681 | // .got2 |
| 682 | if (sym.discardedSecIdx != 0 && (sec->name == ".got2" || sec->name == ".toc" )) |
| 683 | return false; |
| 684 | |
| 685 | bool isWarning = |
| 686 | (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) || |
| 687 | ctx.arg.noinhibitExec; |
| 688 | ctx.undefErrs.push_back(Elt: {.sym: &sym, .locs: {{.sec: sec, .offset: offset}}, .isWarning: isWarning}); |
| 689 | return !isWarning; |
| 690 | } |
| 691 | |
| 692 | bool RelocScan::checkTlsLe(uint64_t offset, Symbol &sym, RelType type) { |
| 693 | if (!ctx.arg.shared) |
| 694 | return false; |
| 695 | auto diag = Err(ctx); |
| 696 | diag << "relocation " << type << " against " << &sym |
| 697 | << " cannot be used with -shared" ; |
| 698 | printLocation(s&: diag, sec&: *sec, sym, off: offset); |
| 699 | return true; |
| 700 | } |
| 701 | |
// Record a relative (load base + constant) dynamic relocation for `sym` at
// isec+offsetInSec. Prefers the compact RELR encoding when possible and falls
// back to a regular R_*_RELATIVE entry in .rela.dyn otherwise. `shard`
// selects the lock-free, per-shard variant used during parallel scanning.
template <bool shard = false>
static void addRelativeReloc(Ctx &ctx, InputSectionBase &isec,
                             uint64_t offsetInSec, Symbol &sym, int64_t addend,
                             RelExpr expr, RelType type) {
  Partition &part = isec.getPartition(ctx);
  // AArch64 PAuth signed absolute relocations use dedicated RELATIVE/RELR
  // flavors so the dynamic loader knows to sign the resulting pointer.
  bool isAArch64Auth =
      ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64;

  // Add a relative relocation. If relrDyn section is enabled, and the
  // relocation offset is guaranteed to be even, add the relocation to
  // the relrDyn section, otherwise add it to the relaDyn section.
  // relrDyn sections don't support odd offsets. Also, relrDyn sections
  // don't store the addend values, so we must write it to the relocated
  // address.
  //
  // When symbol values are determined in finalizeAddressDependentContent,
  // some .relr.auth.dyn relocations may be moved to .rela.dyn.
  //
  // MTE globals may need to store the original addend as well so cannot use
  // relrDyn. TODO: It should be unambiguous when not using R_ADDEND_NEG below?
  RelrBaseSection *relrDyn = part.relrDyn.get();
  if (isAArch64Auth)
    relrDyn = part.relrAuthDyn.get();
  if (sym.isTagged())
    relrDyn = nullptr;
  // RELR requires an even r_offset; addralign >= 2 guarantees the section's
  // final address preserves the offset's parity.
  if (relrDyn && isec.addralign >= 2 && offsetInSec % 2 == 0) {
    relrDyn->addRelativeReloc<shard>(isec, offsetInSec, sym, addend, type,
                                     expr);
    return;
  }
  RelType relativeType = ctx.target->relativeRel;
  if (isAArch64Auth)
    relativeType = R_AARCH64_AUTH_RELATIVE;
  part.relaDyn->addRelativeReloc<shard>(relativeType, isec, offsetInSec, sym,
                                        addend, type, expr);
  // With MTE globals, we always want to derive the address tag by `ldg`-ing
  // the symbol. When we have a RELATIVE relocation though, we no longer have
  // a reference to the symbol. Because of this, when we have an addend that
  // puts the result of the RELATIVE relocation out-of-bounds of the symbol
  // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI
  // says we should store the offset to the start of the symbol in the target
  // field. This is described in further detail in:
  // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative
  if (sym.isTagged() &&
      (addend < 0 || static_cast<uint64_t>(addend) >= sym.getSize()))
    isec.addReloc(r: {.expr: R_ADDEND_NEG, .type: type, .offset: offsetInSec, .addend: addend, .sym: &sym});
}
| 749 | |
| 750 | template <class PltSection, class GotPltSection> |
| 751 | static void addPltEntry(Ctx &ctx, PltSection &plt, GotPltSection &gotPlt, |
| 752 | RelocationBaseSection &rel, RelType type, Symbol &sym) { |
| 753 | plt.addEntry(sym); |
| 754 | gotPlt.addEntry(sym); |
| 755 | if (sym.isPreemptible) |
| 756 | rel.addReloc( |
| 757 | {type, &gotPlt, sym.getGotPltOffset(ctx), true, sym, 0, R_ADDEND}); |
| 758 | else |
| 759 | rel.addReloc( |
| 760 | {type, &gotPlt, sym.getGotPltOffset(ctx), false, sym, 0, R_ABS}); |
| 761 | } |
| 762 | |
| 763 | void elf::addGotEntry(Ctx &ctx, Symbol &sym) { |
| 764 | ctx.in.got->addEntry(sym); |
| 765 | uint64_t off = sym.getGotOffset(ctx); |
| 766 | |
| 767 | // If preemptible, emit a GLOB_DAT relocation. |
| 768 | if (sym.isPreemptible) { |
| 769 | ctx.mainPart->relaDyn->addReloc( |
| 770 | reloc: {ctx.target->gotRel, ctx.in.got.get(), off, true, sym, 0, R_ADDEND}); |
| 771 | return; |
| 772 | } |
| 773 | |
| 774 | // Otherwise, the value is either a link-time constant or the load base |
| 775 | // plus a constant. |
| 776 | if (!ctx.arg.isPic || isAbsolute(sym)) |
| 777 | ctx.in.got->addConstant(r: {.expr: R_ABS, .type: ctx.target->symbolicRel, .offset: off, .addend: 0, .sym: &sym}); |
| 778 | else |
| 779 | addRelativeReloc(ctx, isec&: *ctx.in.got, offsetInSec: off, sym, addend: 0, expr: R_ABS, |
| 780 | type: ctx.target->symbolicRel); |
| 781 | } |
| 782 | |
// Reserve a signed (PAuth) GOT slot for `sym`. Unlike plain GOT entries, a
// signed entry always needs a dynamic relocation so the loader can sign the
// pointer: AUTH_GLOB_DAT if preemptible, AUTH_RELATIVE otherwise.
static void addGotAuthEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  ctx.in.got->addAuthEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);

  // If preemptible, emit a GLOB_DAT relocation.
  if (sym.isPreemptible) {
    ctx.mainPart->relaDyn->addReloc(reloc: {R_AARCH64_AUTH_GLOB_DAT, ctx.in.got.get(),
                                    off, true, sym, 0, R_ADDEND});
    return;
  }

  // Signed GOT requires dynamic relocation.
  ctx.in.got->getPartition(ctx).relaDyn->addReloc(
      reloc: {R_AARCH64_AUTH_RELATIVE, ctx.in.got.get(), off, false, sym, 0, R_ABS});
}
| 799 | |
// Reserve a GOT slot holding `sym`'s offset from the thread pointer (TPREL),
// used by Initial-Exec TLS. For a non-preemptible symbol in an executable the
// offset is a link-time constant; otherwise a TLS dynamic relocation is
// needed so the loader can compute it.
static void addTpOffsetGotEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);
  if (!sym.isPreemptible && !ctx.arg.shared) {
    ctx.in.got->addConstant(r: {.expr: R_TPREL, .type: ctx.target->symbolicRel, .offset: off, .addend: 0, .sym: &sym});
    return;
  }
  ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
      dynType: ctx.target->tlsGotRel, isec&: *ctx.in.got, offsetInSec: off, sym, addendRelType: ctx.target->symbolicRel);
}
| 810 | |
// Return true if we can define a symbol in the executable that
// contains the value/function of a symbol defined in a shared
// library (i.e. whether a copy relocation or canonical PLT entry is legal).
static bool canDefineSymbolInExecutable(Ctx &ctx, Symbol &sym) {
  // If the symbol has default visibility the symbol defined in the
  // executable will preempt it.
  // Note that we want the visibility of the shared symbol itself, not
  // the visibility of the symbol in the output file we are producing.
  if (!sym.dsoProtected)
    return true;

  // If we are allowed to break address equality of functions, defining
  // a plt entry will allow the program to call the function in the
  // .so, but the .so and the executable will not agree on the address
  // of the function. Similar logic for objects.
  return ((sym.isFunc() && ctx.arg.ignoreFunctionAddressEquality) ||
          (sym.isObject() && ctx.arg.ignoreDataAddressEquality));
}
| 829 | |
// Returns true if a given relocation can be computed at link-time.
// This only handles relocation types expected in process().
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers a
// non-interposable function in the same executable. This function
// will return true for such relocation.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
//
// `e` is the relocation expression (possibly already adjusted by the
// target), `type` the raw ELF relocation type, and `relOff` the offset
// within `sec` used only for diagnostics.
bool RelocScan::isStaticLinkTimeConstant(RelExpr e, RelType type,
                                         const Symbol &sym,
                                         uint64_t relOff) const {
  // These expressions always compute a constant: they reference a GOT/PLT
  // slot (whose position is fixed at link time) rather than the symbol value.
  if (oneof<
          R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, RE_MIPS_GOT_LOCAL_PAGE,
          RE_MIPS_GOTREL, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32, RE_MIPS_GOT_GP_PC,
          RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC,
          R_GOTONLY_PC, R_GOTPLTONLY_PC, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT,
          R_GOTPLT_GOTREL, R_GOTPLT_PC, RE_PPC32_PLTREL, RE_PPC64_CALL_PLT,
          RE_PPC64_RELAX_TOC, RE_RISCV_ADD, RE_AARCH64_GOT_PAGE,
          RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, RE_LOONGARCH_PLT_PAGE_PC,
          RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>(expr: e))
    return true;

  // These never do, except if the entire file is position dependent or if
  // only the low bits are used.
  if (e == R_GOT || e == R_PLT)
    return ctx.target->usesOnlyLowPageBits(type) || !ctx.arg.isPic;
  // R_AARCH64_AUTH_ABS64 and iRelSymbolicRel require a dynamic relocation.
  if (e == RE_AARCH64_AUTH || type == ctx.target->iRelSymbolicRel)
    return false;

  // The behavior of an undefined weak reference is implementation defined.
  // (We treat undefined non-weak the same as undefined weak.) For static
  // -no-pie linking, dynamic relocations are generally avoided (except
  // IRELATIVE). Emitting dynamic relocations for -shared aligns with its -z
  // undefs default. Dynamic -no-pie linking and -pie allow flexibility.
  if (sym.isPreemptible)
    return sym.isUndefined() && !ctx.arg.isPic;
  if (!ctx.arg.isPic)
    return true;

  // Constant when referencing a non-preemptible symbol.
  if (e == R_SIZE || e == RE_RISCV_LEB128)
    return true;

  // For the target and the relocation, we want to know if they are
  // absolute or relative.
  bool absVal = isAbsoluteOrTls(sym) && e != RE_PPC64_TOCBASE;
  bool relE = isRelExpr(expr: e);
  // abs value + abs reloc, or rel value + rel reloc: link-time constant.
  if (absVal && !relE)
    return true;
  if (!absVal && relE)
    return true;
  // rel value + abs reloc: constant only if just the low page bits are used.
  if (!absVal && !relE)
    return ctx.target->usesOnlyLowPageBits(type);

  assert(absVal && relE);

  // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
  // in PIC mode. This is a little strange, but it allows us to link function
  // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
  // Normally such a call will be guarded with a comparison, which will load a
  // zero from the GOT.
  if (sym.isUndefined())
    return true;

  // We set the final symbols values for linker script defined symbols later.
  // They always can be computed as a link time constant.
  if (sym.scriptDefined)
    return true;

  // PC-relative reference to an absolute symbol: report and treat as
  // constant so we do not cascade into bogus dynamic relocations.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot refer to absolute symbol: " << &sym;
  printLocation(s&: diag, sec&: *sec, sym, off: relOff);
  return true;
}
| 908 | |
// The reason we have to do this early scan is as follows
// * To mmap the output file, we need to know the size
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by outputting the file with write:
// * Write the allocated output sections, computing addresses.
// * Apply relocations, recording which ones require a dynamic reloc.
// * Write the dynamic relocations.
// * Write the rest of the file.
// This would have some drawbacks. For example, we would only know if .rela.dyn
// is needed after applying relocations. If it is, it will go after rw and rx
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
//
// Handle one non-TLS relocation: decide between resolving it at link time,
// creating GOT/PLT entries (via symbol flags), emitting a dynamic
// relocation, or producing a copy relocation / canonical PLT entry.
void RelocScan::process(RelExpr expr, RelType type, uint64_t offset,
                        Symbol &sym, int64_t addend) const {
  // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT
  // indirection.
  const bool isIfunc = sym.isGnuIFunc();
  if (!sym.isPreemptible && !isIfunc) {
    if (expr != R_GOT_PC) {
      // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call
      // stub type. It should be ignored if optimized to R_PC.
      if (ctx.arg.emachine == EM_PPC && expr == RE_PPC32_PLTREL)
        addend &= ~0x8000;
      // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into
      // call __tls_get_addr even if the symbol is non-preemptible.
      if (!(ctx.arg.emachine == EM_HEXAGON &&
            (type == R_HEX_GD_PLT_B22_PCREL ||
             type == R_HEX_GD_PLT_B22_PCREL_X ||
             type == R_HEX_GD_PLT_B32_PCREL_X)))
        expr = fromPlt(expr);
    } else if (!isAbsoluteOrTls(sym) ||
               (type == R_PPC64_PCREL_OPT && ctx.arg.emachine == EM_PPC64)) {
      expr = ctx.target->adjustGotPcExpr(type, addend,
                                         loc: sec->content().data() + offset);
      // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
      // needing the GOT if we can't relax everything.
      if (expr == R_RELAX_GOT_PC)
        ctx.in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);
    }
  }

  // We were asked not to generate PLT entries for ifuncs. Instead, pass the
  // direct relocation on through.
  if (LLVM_UNLIKELY(isIfunc) && ctx.arg.zIfuncNoplt) {
    std::lock_guard<std::mutex> lock(ctx.relocMutex);
    sym.isExported = true;
    ctx.mainPart->relaDyn->addSymbolReloc(dynType: type, isec&: *sec, offsetInSec: offset, sym, addend,
                                          addendRelType: type);
    return;
  }

  if (needsGot(expr)) {
    if (ctx.arg.emachine == EM_MIPS) {
      // MIPS ABI has special rules to process GOT entries and doesn't
      // require relocation entries for them. A special case is TLS
      // relocations. In that case dynamic loader applies dynamic
      // relocations to initialize TLS GOT entries.
      // See "Global Offset Table" in Chapter 5 in the following document
      // for detailed description:
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
      ctx.in.mipsGot->addEntry(file&: *sec->file, sym, addend, expr);
    } else if (!sym.isTls() || ctx.arg.emachine != EM_LOONGARCH) {
      // Many LoongArch TLS relocs reuse the RE_LOONGARCH_GOT type, in which
      // case the NEEDS_GOT flag shouldn't get set.
      if (expr == RE_AARCH64_AUTH_GOT || expr == RE_AARCH64_AUTH_GOT_PAGE_PC ||
          expr == RE_AARCH64_AUTH_GOT_PC)
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_AUTH);
      else
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_NONAUTH);
    }
  } else if (needsPlt(expr)) {
    sym.setFlags(NEEDS_PLT);
  } else if (LLVM_UNLIKELY(isIfunc)) {
    sym.setFlags(HAS_DIRECT_RELOC);
  }

  // If the relocation is known to be a link-time constant, we know no dynamic
  // relocation will be created, pass the control to relocateAlloc() or
  // relocateNonAlloc() to resolve it.
  if (isStaticLinkTimeConstant(e: expr, type, sym, relOff: offset)) {
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return;
  }

  // Use a simple -z notext rule that treats all sections except .eh_frame as
  // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
  // SectionBase::getOffset would incorrectly adjust the offset).
  //
  // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
  // conversion. We still emit a dynamic relocation.
  bool canWrite = (sec->flags & SHF_WRITE) ||
                  !(ctx.arg.zText ||
                    (isa<EhInputSection>(Val: sec) && ctx.arg.emachine != EM_MIPS));
  if (canWrite) {
    RelType rel = ctx.target->getDynRel(type);
    // Prefer a RELATIVE relocation when the symbol is non-preemptible and the
    // dynamic type is symbolic (or AArch64 PAuth absolute).
    if (oneof<R_GOT, RE_LOONGARCH_GOT>(expr) ||
        ((rel == ctx.target->symbolicRel ||
          (ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64)) &&
         !sym.isPreemptible)) {
      addRelativeReloc<true>(ctx, isec&: *sec, offsetInSec: offset, sym, addend, expr, type);
      return;
    }
    if (rel != 0) {
      if (ctx.arg.emachine == EM_MIPS && rel == ctx.target->symbolicRel)
        rel = ctx.target->relativeRel;
      std::lock_guard<std::mutex> lock(ctx.relocMutex);
      Partition &part = sec->getPartition(ctx);
      if (LLVM_UNLIKELY(type == ctx.target->iRelSymbolicRel)) {
        if (sym.isPreemptible) {
          auto diag = Err(ctx);
          diag << "relocation " << type
               << " cannot be used against preemptible symbol '" << &sym << "'" ;
          printLocation(s&: diag, sec&: *sec, sym, off: offset);
        } else if (isIfunc) {
          auto diag = Err(ctx);
          diag << "relocation " << type
               << " cannot be used against ifunc symbol '" << &sym << "'" ;
          printLocation(s&: diag, sec&: *sec, sym, off: offset);
        } else {
          part.relaDyn->addReloc(reloc: {ctx.target->iRelativeRel, sec, offset, false,
                                  sym, addend, R_ABS});
          return;
        }
      }
      part.relaDyn->addSymbolReloc(dynType: rel, isec&: *sec, offsetInSec: offset, sym, addend, addendRelType: type);

      // MIPS ABI turns using of GOT and dynamic relocations inside out.
      // While regular ABI uses dynamic relocations to fill up GOT entries
      // MIPS ABI requires dynamic linker to fill up GOT entries using
      // specially sorted dynamic symbol table. This affects even dynamic
      // relocations against symbols which do not require GOT entries
      // creation explicitly, i.e. do not have any GOT-relocations. So if
      // a preemptible symbol has a dynamic relocation we anyway have
      // to create a GOT entry for it.
      // If a non-preemptible symbol has a dynamic relocation against it,
      // dynamic linker takes its st_value, adds offset and writes down
      // result of the dynamic relocation. In case of preemptible symbol
      // dynamic linker performs symbol resolution, writes the symbol value
      // to the GOT entry and reads the GOT entry when it needs to perform
      // a dynamic relocation.
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
      if (ctx.arg.emachine == EM_MIPS)
        ctx.in.mipsGot->addEntry(file&: *sec->file, sym, addend, expr);
      return;
    }
  }

  // When producing an executable, we can perform copy relocations (for
  // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO.
  // Copy relocations/canonical PLT entries are unsupported for
  // R_AARCH64_AUTH_ABS64.
  if (!ctx.arg.shared && sym.isShared() &&
      !(ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64)) {
    if (!canDefineSymbolInExecutable(ctx, sym)) {
      auto diag = Err(ctx);
      diag << "cannot preempt symbol: " << &sym;
      printLocation(s&: diag, sec&: *sec, sym, off: offset);
      return;
    }

    if (sym.isObject()) {
      // Produce a copy relocation.
      if (auto *ss = dyn_cast<SharedSymbol>(Val: &sym)) {
        if (!ctx.arg.zCopyreloc) {
          auto diag = Err(ctx);
          diag << "unresolvable relocation " << type << " against symbol '"
               << ss << "'; recompile with -fPIC or remove '-z nocopyreloc'" ;
          printLocation(s&: diag, sec&: *sec, sym, off: offset);
        }
        sym.setFlags(NEEDS_COPY);
      }
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return;
    }

    // This handles a non PIC program call to function in a shared library. In
    // an ideal world, we could just report an error saying the relocation can
    // overflow at runtime. In the real world with glibc, crt1.o has a
    // R_X86_64_PC32 pointing to libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry and
    // use that as the function value.
    //
    // For the static linking part, we just return a plt expr and everything
    // else will use the PLT entry as the address.
    //
    // The remaining problem is making sure pointer equality still works. We
    // need the help of the dynamic linker for that. We let it know that we have
    // a direct reference to a so symbol by creating an undefined symbol with a
    // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
    // the value of the symbol we created. This is true even for got entries, so
    // pointer equality is maintained. To avoid an infinite loop, the only entry
    // that points to the real function is a dedicated got entry used by the
    // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).

    // For position independent executable on i386, the plt entry requires ebx
    // to be set. This causes two problems:
    // * If some code has a direct reference to a function, it was probably
    //   compiled without -fPIE/-fPIC and doesn't maintain ebx.
    // * If a library definition gets preempted to the executable, it will have
    //   the wrong ebx value.
    if (sym.isFunc()) {
      if (ctx.arg.pie && ctx.arg.emachine == EM_386) {
        auto diag = Err(ctx);
        diag << "symbol '" << &sym
             << "' cannot be preempted; recompile with -fPIE" ;
        printLocation(s&: diag, sec&: *sec, sym, off: offset);
      }
      sym.setFlags(NEEDS_COPY | NEEDS_PLT);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return;
    }
  }

  // No strategy worked: the relocation cannot be represented. Ask the user to
  // rebuild with -fPIC.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot be used against " ;
  if (sym.getName().empty())
    diag << "local symbol" ;
  else
    diag << "symbol '" << &sym << "'" ;
  diag << "; recompile with -fPIC" ;
  printLocation(s&: diag, sec&: *sec, sym, off: offset);
}
| 1134 | |
// Handle AArch64 PAuth-signed TLSDESC relocations. Returns the number of
// relocations consumed (1 if fully handled here, 0 to let the generic TLS
// handling continue).
static unsigned handleAArch64PAuthTlsRelocation(InputSectionBase *sec,
                                                RelExpr expr, RelType type,
                                                uint64_t offset, Symbol &sym,
                                                int64_t addend) {
  // Do not optimize signed TLSDESC to LE/IE (as described in pauthabielf64).
  // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#general-restrictions
  // > PAUTHELF64 only supports the descriptor based TLS (TLSDESC).
  if (oneof<RE_AARCH64_AUTH_TLSDESC_PAGE, RE_AARCH64_AUTH_TLSDESC>(expr)) {
    sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_AUTH);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // TLSDESC_CALL hint relocation should not be emitted by compiler with signed
  // TLSDESC enabled. Record it as non-auth so mixing can be diagnosed later.
  if (expr == R_TLSDESC_CALL)
    sym.setFlags(NEEDS_TLSDESC_NONAUTH);

  return 0;
}
| 1155 | |
// Notes about General Dynamic and Local Dynamic TLS models below. They may
// require the generation of a pair of GOT entries that have associated dynamic
// relocations. The pair of GOT entries created are of the form GOT[e0] Module
// Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of
// symbol in TLS block.
//
// Returns the number of relocations processed (0 means the relocation is not
// TLS-specific and should go through the generic process() path).
unsigned RelocScan::handleTlsRelocation(RelExpr expr, RelType type,
                                        uint64_t offset, Symbol &sym,
                                        int64_t addend) {
  bool isAArch64 = ctx.arg.emachine == EM_AARCH64;

  // PAuth-signed TLSDESC has its own rules and must be handled first.
  if (isAArch64)
    if (unsigned processed = handleAArch64PAuthTlsRelocation(
            sec, expr, type, offset, sym, addend))
      return processed;

  // Local-Exec: valid only in an executable; diagnose under -shared.
  if (expr == R_TPREL || expr == R_TPREL_NEG)
    return checkTlsLe(offset, sym, type) ? 1 : 0;

  bool isRISCV = ctx.arg.emachine == EM_RISCV;

  if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
            R_TLSDESC_GOTPLT, RE_LOONGARCH_TLSDESC_PAGE_PC>(expr) &&
      ctx.arg.shared) {
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not
    // set NEEDS_TLSDESC on the label.
    if (expr != R_TLSDESC_CALL) {
      if (isAArch64)
        sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_NONAUTH);
      else if (!isRISCV || type == R_RISCV_TLSDESC_HI20)
        sym.setFlags(NEEDS_TLSDESC);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return 1;
  }

  // LoongArch supports IE to LE, DESC GD/LD to IE/LE optimizations in
  // non-extreme code model.
  bool execOptimizeInLoongArch =
      ctx.arg.emachine == EM_LOONGARCH &&
      (type == R_LARCH_TLS_IE_PC_HI20 || type == R_LARCH_TLS_IE_PC_LO12 ||
       type == R_LARCH_TLS_DESC_PC_HI20 || type == R_LARCH_TLS_DESC_PC_LO12 ||
       type == R_LARCH_TLS_DESC_LD || type == R_LARCH_TLS_DESC_CALL ||
       type == R_LARCH_TLS_DESC_PCREL20_S2);

  // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE
  // optimizations.
  // RISC-V supports TLSDESC to IE/LE optimizations.
  // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable
  // optimization as well.
  bool execOptimize =
      !ctx.arg.shared && ctx.arg.emachine != EM_ARM &&
      ctx.arg.emachine != EM_HEXAGON &&
      (ctx.arg.emachine != EM_LOONGARCH || execOptimizeInLoongArch) &&
      !(isRISCV && expr != R_TLSDESC_PC && expr != R_TLSDESC_CALL) &&
      !sec->file->ppc64DisableTLSRelax;

  // If we are producing an executable and the symbol is non-preemptable, it
  // must be defined and the code sequence can be optimized to use
  // Local-Exec.
  //
  // ARM and RISC-V do not support any relaxations for TLS relocations, however,
  // we can omit the DTPMOD dynamic relocations and resolve them at link time
  // because they are always 1. This may be necessary for static linking as
  // DTPMOD may not be expected at load time.
  bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;

  // Local Dynamic is for access to module local TLS variables, while still
  // being suitable for being dynamically loaded via dlopen. GOT[e0] is the
  // module index, with a special value of 0 for the current module. GOT[e1] is
  // unused. There only needs to be one module index entry.
  if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) {
    // Local-Dynamic relocs can be optimized to Local-Exec.
    if (execOptimize) {
      sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE),
                     .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return ctx.target->getTlsGdRelaxSkip(type);
    }
    if (expr == R_TLSLD_HINT)
      return 1;
    ctx.needsTlsLd.store(i: true, m: std::memory_order_relaxed);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // Local-Dynamic relocs can be optimized to Local-Exec.
  if (expr == R_DTPREL) {
    if (execOptimize)
      expr = ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_LD_TO_LE);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // Local-Dynamic sequence where offset of tls variable relative to dynamic
  // thread pointer is stored in the got. This cannot be optimized to
  // Local-Exec.
  if (expr == R_TLSLD_GOT_OFF) {
    sym.setFlags(NEEDS_GOT_DTPREL);
    sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // LoongArch does not support transition from TLSDESC to LE/IE in the extreme
  // code model, in which NEEDS_TLSDESC should be set, rather than NEEDS_TLSGD.
  // So we check independently.
  if (ctx.arg.emachine == EM_LOONGARCH &&
      oneof<RE_LOONGARCH_TLSDESC_PAGE_PC, R_TLSDESC, R_TLSDESC_PC,
            R_TLSDESC_CALL>(expr) &&
      !execOptimize) {
    if (expr != R_TLSDESC_CALL) {
      sym.setFlags(NEEDS_TLSDESC);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return 1;
  }

  if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
            R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC,
            RE_LOONGARCH_TLSGD_PAGE_PC, RE_LOONGARCH_TLSDESC_PAGE_PC>(expr)) {
    if (!execOptimize) {
      sym.setFlags(NEEDS_TLSGD);
      sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      return 1;
    }

    // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec
    // depending on the symbol being locally defined or not.
    //
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible
    // label, so TLSDESC=>IE will be categorized as R_RELAX_TLS_GD_TO_LE. We fix
    // the categorization in RISCV::relocateAlloc.
    if (sym.isPreemptible) {
      sym.setFlags(NEEDS_TLSIE);
      sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_IE),
                     .type: type, .offset: offset, .addend: addend, .sym: &sym});
    } else {
      sec->addReloc(r: {.expr: ctx.target->adjustTlsExpr(type, expr: R_RELAX_TLS_GD_TO_LE),
                     .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return ctx.target->getTlsGdRelaxSkip(type);
  }

  if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, RE_AARCH64_GOT_PAGE_PC,
            RE_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) {
    ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed);
    // Initial-Exec relocs can be optimized to Local-Exec if the symbol is
    // locally defined. This is not supported on SystemZ.
    if (execOptimize && isLocalInExecutable && ctx.arg.emachine != EM_S390) {
      sec->addReloc(r: {.expr: R_RELAX_TLS_IE_TO_LE, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    } else if (expr != R_TLSIE_HINT) {
      sym.setFlags(NEEDS_TLSIE);
      // R_GOT needs a relative relocation for PIC on i386 and Hexagon.
      if (expr == R_GOT && ctx.arg.isPic &&
          !ctx.target->usesOnlyLowPageBits(type))
        addRelativeReloc<true>(ctx, isec&: *sec, offsetInSec: offset, sym, addend, expr, type);
      else
        sec->addReloc(r: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    }
    return 1;
  }

  // LoongArch TLS GD/LD relocs reuse the RE_LOONGARCH_GOT, in which
  // NEEDS_TLSIE shouldn't be set. So we check independently.
  if (ctx.arg.emachine == EM_LOONGARCH && expr == RE_LOONGARCH_GOT &&
      execOptimize && isLocalInExecutable) {
    ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed);
    sec->addReloc(r: {.expr: R_RELAX_TLS_IE_TO_LE, .type: type, .offset: offset, .addend: addend, .sym: &sym});
    return 1;
  }

  // Not a TLS-model-specific relocation; fall through to process().
  return 0;
}
| 1329 | |
// Scan all relocations of `sec` (in REL, RELA, or CREL form), feeding each to
// RelocScan::scan(). Performs target-specific ordering before and after the
// scan where later passes depend on r_offset order.
template <class ELFT, class RelTy>
void TargetInfo::scanSectionImpl(InputSectionBase &sec, Relocs<RelTy> rels) {
  RelocScan rs(ctx, &sec);
  // Many relocations end up in sec.relocations.
  sec.relocations.reserve(N: rels.size());

  // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
  // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
  SmallVector<RelTy, 0> storage;
  if (ctx.arg.emachine == EM_S390)
    rels = sortRels(rels, storage);

  for (auto it = rels.begin(); it != rels.end(); ++it) {
    auto type = it->getType(false);
    rs.scan<ELFT, RelTy>(it, type, rs.getAddend<ELFT>(*it, type));
  }

  // Sort relocations by offset for more efficient searching for
  // R_RISCV_PCREL_HI20, ALIGN relocations, R_PPC64_ADDR64 and the
  // branch-to-branch optimization.
  if (is_contained(Set: {EM_RISCV, EM_LOONGARCH}, Element: ctx.arg.emachine) ||
      (ctx.arg.emachine == EM_PPC64 && sec.name == ".toc" ) ||
      ctx.arg.branchToBranch)
    llvm::stable_sort(sec.relocs(),
                      [](const Relocation &lhs, const Relocation &rhs) {
                        return lhs.offset < rhs.offset;
                      });
}
| 1358 | |
| 1359 | template <class ELFT> void TargetInfo::scanSection1(InputSectionBase &sec) { |
| 1360 | const RelsOrRelas<ELFT> rels = sec.template relsOrRelas<ELFT>(); |
| 1361 | if (rels.areRelocsCrel()) |
| 1362 | scanSectionImpl<ELFT>(sec, rels.crels); |
| 1363 | else if (rels.areRelocsRel()) |
| 1364 | scanSectionImpl<ELFT>(sec, rels.rels); |
| 1365 | else |
| 1366 | scanSectionImpl<ELFT>(sec, rels.relas); |
| 1367 | } |
| 1368 | |
// Scan all relocations of a single section. invokeELFT expands to a call of
// scanSection1<ELFT> instantiated for the link's ELF format.
void TargetInfo::scanSection(InputSectionBase &sec) {
  invokeELFT(scanSection1, sec);
}
| 1372 | |
| 1373 | void RelocScan::scanEhSection(EhInputSection &s) { |
| 1374 | sec = &s; |
| 1375 | OffsetGetter getter(s); |
| 1376 | auto rels = s.rels; |
| 1377 | s.relocations.reserve(N: rels.size()); |
| 1378 | for (auto &r : rels) { |
| 1379 | // Ignore R_*_NONE and other marker relocations. |
| 1380 | if (r.expr == R_NONE) |
| 1381 | continue; |
| 1382 | uint64_t offset = getter.get(ctx, off: r.offset); |
| 1383 | // Skip if the relocation offset is within a dead piece. |
| 1384 | if (offset == uint64_t(-1)) |
| 1385 | continue; |
| 1386 | Symbol *sym = r.sym; |
| 1387 | if (sym->isUndefined() && |
| 1388 | maybeReportUndefined(sym&: cast<Undefined>(Val&: *sym), offset)) |
| 1389 | continue; |
| 1390 | process(expr: r.expr, type: r.type, offset, sym&: *sym, addend: r.addend); |
| 1391 | } |
| 1392 | } |
| 1393 | |
template <class ELFT> void elf::scanRelocations(Ctx &ctx) {
  // Scan all relocations. Each relocation goes through a series of tests to
  // determine if it needs special treatment, such as creating GOT, PLT,
  // copy relocations, etc. Note that relocations for non-alloc sections are
  // directly processed by InputSection::relocateNonAlloc.

  // Deterministic parallelism needs sorting relocations which is unsuitable
  // for -z nocombreloc. MIPS and PPC64 use global states which are not suitable
  // for parallelism.
  bool serial = !ctx.arg.zCombreloc || ctx.arg.emachine == EM_MIPS ||
                ctx.arg.emachine == EM_PPC64;
  parallel::TaskGroup tg;
  auto outerFn = [&]() {
    // One task per input file; within a file, sections are scanned serially.
    for (ELFFileBase *f : ctx.objectFiles) {
      auto fn = [f, &ctx]() {
        for (InputSectionBase *s : f->getSections()) {
          // Only live, SHF_ALLOC regular sections are scanned here.
          // .ARM.exidx sections are handled with their partition below.
          if (s && s->kind() == SectionBase::Regular && s->isLive() &&
              (s->flags & SHF_ALLOC) &&
              !(s->type == SHT_ARM_EXIDX && ctx.arg.emachine == EM_ARM))
            ctx.target->scanSection(sec&: *s);
        }
      };
      if (serial)
        fn();
      else
        tg.spawn(f: fn);
    }
    // .eh_frame and .ARM.exidx inputs are tracked per partition rather than
    // per file, so they get their own task.
    auto scanEH = [&] {
      RelocScan scanner(ctx);
      for (Partition &part : ctx.partitions) {
        for (EhInputSection *sec : part.ehFrame->sections)
          scanner.scanEhSection(s&: *sec);
        if (part.armExidx && part.armExidx->isLive())
          for (InputSection *sec : part.armExidx->exidxSections)
            if (sec->isLive())
              ctx.target->scanSection(sec&: *sec);
      }
    };
    if (serial)
      scanEH();
    else
      tg.spawn(f: scanEH);
  };
  // If `serial` is true, call `spawn` to ensure that `scanner` runs in a thread
  // with valid getThreadIndex().
  if (serial)
    tg.spawn(f: outerFn);
  else
    outerFn();
}
| 1444 | |
| 1445 | RelocationBaseSection &elf::getIRelativeSection(Ctx &ctx) { |
| 1446 | // Prior to Android V, there was a bug that caused RELR relocations to be |
| 1447 | // applied after packed relocations. This meant that resolvers referenced by |
| 1448 | // IRELATIVE relocations in the packed relocation section would read |
| 1449 | // unrelocated globals with RELR relocations when |
| 1450 | // --pack-relative-relocs=android+relr is enabled. Work around this by placing |
| 1451 | // IRELATIVE in .rela.plt. |
| 1452 | return ctx.arg.androidPackDynRelocs ? *ctx.in.relaPlt |
| 1453 | : *ctx.mainPart->relaDyn; |
| 1454 | } |
| 1455 | |
// Returns true if sym is a non-preemptible ifunc handled here (or skipped
// as unreferenced), false if regular postScanRelocations processing should
// continue.
static bool handleNonPreemptibleIfunc(Ctx &ctx, Symbol &sym, uint16_t flags) {
  // Non-preemptible ifuncs are called via a PLT entry that resolves the actual
  // address at runtime. We create an IPLT entry and an IGOTPLT slot. The
  // IGOTPLT slot is relocated by an IRELATIVE relocation, whose addend encodes
  // the resolver address. At startup, the runtime calls the resolver and
  // fills the IGOTPLT slot.
  //
  // For direct (non-GOT/PLT) relocations, the symbol must have a constant
  // address. We achieve this by redirecting the symbol to its IPLT entry
  // ("canonicalizing" it), so all references see the same address, and the
  // resolver is called exactly once. This may result in two GOT entries: one
  // in .got.plt for the IRELATIVE, and one in .got pointing to the canonical
  // IPLT entry (for GOT-generating relocations).
  //
  // We clone the symbol to preserve the original resolver address for the
  // IRELATIVE addend. The clone is tracked in ctx.irelativeSyms so that linker
  // relaxation can adjust its value when the resolver address changes.
  //
  // Note: IRELATIVE relocations are needed even in static executables; see
  // `addRelIpltSymbols`.
  if (!sym.isGnuIFunc() || sym.isPreemptible || ctx.arg.zIfuncNoplt)
    return false;
  // Skip unreferenced non-preemptible ifunc.
  if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC)))
    return true;

  sym.isInIplt = true;

  // Clone sym before any redirection so the clone still carries the original
  // resolver address, which becomes the IRELATIVE addend.
  auto *irelativeSym = makeDefined(args&: cast<Defined>(Val&: sym));
  irelativeSym->allocateAux(ctx);
  ctx.irelativeSyms.push_back(Elt: irelativeSym);
  auto &dyn = getIRelativeSection(ctx);
  addPltEntry(ctx, plt&: *ctx.in.iplt, gotPlt&: *ctx.in.igotPlt, rel&: dyn, type: ctx.target->iRelativeRel,
              sym&: *irelativeSym);
  sym.allocateAux(ctx);
  // sym shares the PLT index with its clone so both resolve to the same IPLT
  // entry.
  ctx.symAux.back().pltIdx = ctx.symAux[irelativeSym->auxIdx].pltIdx;

  if (flags & HAS_DIRECT_RELOC) {
    // Change the value to the IPLT and redirect all references to it.
    auto &d = cast<Defined>(Val&: sym);
    d.section = ctx.in.iplt.get();
    d.value = d.getPltIdx(ctx) * ctx.target->ipltEntrySize;
    d.size = 0;
    // It's important to set the symbol type here so that dynamic loaders
    // don't try to call the PLT as if it were an ifunc resolver.
    d.type = STT_FUNC;

    if (flags & NEEDS_GOT) {
      assert(!(flags & NEEDS_GOT_AUTH) &&
             "R_AARCH64_AUTH_IRELATIVE is not supported yet");
      addGotEntry(ctx, sym);
    }
  } else if (flags & NEEDS_GOT) {
    // Redirect GOT accesses to point to the Igot.
    sym.gotInIgot = true;
  }
  return true;
}
| 1514 | |
// After scanning, materialize whatever each symbol's NEEDS_* flags recorded:
// GOT/PLT entries, copy relocations / canonical PLTs, and TLS GOT entries.
void elf::postScanRelocations(Ctx &ctx) {
  auto fn = [&](Symbol &sym) {
    auto flags = sym.flags.load(m: std::memory_order_relaxed);
    // Non-preemptible ifuncs get special IPLT/IGOT treatment and are done.
    if (handleNonPreemptibleIfunc(ctx, sym, flags))
      return;

    if (sym.isTagged() && sym.isDefined())
      ctx.mainPart->memtagGlobalDescriptors->addSymbol(sym);

    if (!sym.needsDynReloc())
      return;
    sym.allocateAux(ctx);

    if (flags & NEEDS_GOT) {
      // A symbol cannot have both an AUTH and a non-AUTH GOT entry.
      if ((flags & NEEDS_GOT_AUTH) && (flags & NEEDS_GOT_NONAUTH)) {
        auto diag = Err(ctx);
        diag << "both AUTH and non-AUTH GOT entries for '" << sym.getName()
             << "' requested, but only one type of GOT entry per symbol is "
                "supported";
        return;
      }
      if (flags & NEEDS_GOT_AUTH)
        addGotAuthEntry(ctx, sym);
      else
        addGotEntry(ctx, sym);
    }
    if (flags & NEEDS_PLT)
      addPltEntry(ctx, plt&: *ctx.in.plt, gotPlt&: *ctx.in.gotPlt, rel&: *ctx.in.relaPlt,
                  type: ctx.target->pltRel, sym);
    if (flags & NEEDS_COPY) {
      if (sym.isObject()) {
        invokeELFT(addCopyRelSymbol, ctx, cast<SharedSymbol>(sym));
        // NEEDS_COPY is cleared for sym and its aliases so that in
        // later iterations aliases won't cause redundant copies.
        assert(!sym.hasFlag(NEEDS_COPY));
      } else {
        // NEEDS_COPY on a function means a canonical PLT entry: the symbol is
        // redefined to its PLT slot so address comparisons stay consistent.
        assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
        if (!sym.isDefined()) {
          replaceWithDefined(ctx, sym, sec&: *ctx.in.plt,
                             value: ctx.target->pltHeaderSize +
                                 ctx.target->pltEntrySize * sym.getPltIdx(ctx),
                             size: 0);
          sym.setFlags(NEEDS_COPY);
          if (ctx.arg.emachine == EM_PPC) {
            // PPC32 canonical PLT entries are at the beginning of .glink
            cast<Defined>(Val&: sym).value = ctx.in.plt->headerSize;
            ctx.in.plt->headerSize += 16;
            cast<PPC32GlinkSection>(Val&: *ctx.in.plt).canonical_plts.push_back(Elt: &sym);
          }
        }
      }
    }

    // Everything below handles TLS GOT entries only.
    if (!sym.isTls())
      return;
    bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;
    GotSection *got = ctx.in.got.get();

    if (flags & NEEDS_TLSDESC) {
      // A symbol cannot have both an AUTH and a non-AUTH TLSDESC entry.
      if ((flags & NEEDS_TLSDESC_AUTH) && (flags & NEEDS_TLSDESC_NONAUTH)) {
        Err(ctx)
            << "both AUTH and non-AUTH TLSDESC entries for '" << sym.getName()
            << "' requested, but only one type of TLSDESC entry per symbol is "
               "supported";
        return;
      }
      got->addTlsDescEntry(sym);
      RelType tlsDescRel = ctx.target->tlsDescRel;
      if (flags & NEEDS_TLSDESC_AUTH) {
        got->addTlsDescAuthEntry();
        tlsDescRel = ELF::R_AARCH64_AUTH_TLSDESC;
      }
      ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
          dynType: tlsDescRel, isec&: *got, offsetInSec: got->getTlsDescOffset(sym), sym, addendRelType: tlsDescRel);
    }
    if (flags & NEEDS_TLSGD) {
      // General-dynamic: a (module index, offset) pair of GOT slots.
      got->addDynTlsEntry(sym);
      uint64_t off = got->getGlobalDynOffset(b: sym);
      if (isLocalInExecutable)
        // Write one to the GOT slot.
        got->addConstant(r: {.expr: R_ADDEND, .type: ctx.target->symbolicRel, .offset: off, .addend: 1, .sym: &sym});
      else
        ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsModuleIndexRel,
                                              isec&: *got, offsetInSec: off, sym);

      // If the symbol is preemptible we need the dynamic linker to write
      // the offset too.
      uint64_t offsetOff = off + ctx.arg.wordsize;
      if (sym.isPreemptible)
        ctx.mainPart->relaDyn->addSymbolReloc(dynType: ctx.target->tlsOffsetRel, isec&: *got,
                                              offsetInSec: offsetOff, sym);
      else
        got->addConstant(r: {.expr: R_ABS, .type: ctx.target->tlsOffsetRel, .offset: offsetOff, .addend: 0, .sym: &sym});
    }
    if (flags & NEEDS_GOT_DTPREL) {
      got->addEntry(sym);
      got->addConstant(
          r: {.expr: R_ABS, .type: ctx.target->tlsOffsetRel, .offset: sym.getGotOffset(ctx), .addend: 0, .sym: &sym});
    }

    if (flags & NEEDS_TLSIE)
      addTpOffsetGotEntry(ctx, sym);
  };

  // The local-dynamic TLS module index slot: a dynamic relocation in a DSO,
  // a constant 1 (the executable's own module) otherwise.
  GotSection *got = ctx.in.got.get();
  if (ctx.needsTlsLd.load(m: std::memory_order_relaxed) && got->addTlsIndex()) {
    if (ctx.arg.shared)
      ctx.mainPart->relaDyn->addReloc(
          reloc: {ctx.target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
    else
      got->addConstant(r: {.expr: R_ADDEND, .type: ctx.target->symbolicRel,
                      .offset: got->getTlsIndexOff(), .addend: 1, .sym: ctx.dummySym});
  }

  assert(ctx.symAux.size() == 1);
  for (Symbol *sym : ctx.symtab->getSymbols())
    fn(*sym);

  // Local symbols may need the aforementioned non-preemptible ifunc and GOT
  // handling. They don't need regular PLT.
  for (ELFFileBase *file : ctx.objectFiles)
    for (Symbol *sym : file->getLocalSymbols())
      fn(*sym);

  if (ctx.arg.branchToBranch)
    ctx.target->applyBranchToBranchOpt();
}
| 1642 | |
| 1643 | static bool mergeCmp(const InputSection *a, const InputSection *b) { |
| 1644 | // std::merge requires a strict weak ordering. |
| 1645 | if (a->outSecOff < b->outSecOff) |
| 1646 | return true; |
| 1647 | |
| 1648 | // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection. |
| 1649 | if (a->outSecOff == b->outSecOff && a != b) { |
| 1650 | auto *ta = dyn_cast<ThunkSection>(Val: a); |
| 1651 | auto *tb = dyn_cast<ThunkSection>(Val: b); |
| 1652 | |
| 1653 | // Check if Thunk is immediately before any specific Target |
| 1654 | // InputSection for example Mips LA25 Thunks. |
| 1655 | if (ta && ta->getTargetInputSection() == b) |
| 1656 | return true; |
| 1657 | |
| 1658 | // Place Thunk Sections without specific targets before |
| 1659 | // non-Thunk Sections. |
| 1660 | if (ta && !tb && !ta->getTargetInputSection()) |
| 1661 | return true; |
| 1662 | } |
| 1663 | |
| 1664 | return false; |
| 1665 | } |
| 1666 | |
| 1667 | // Call Fn on every executable InputSection accessed via the linker script |
| 1668 | // InputSectionDescription::Sections. |
| 1669 | static void forEachInputSectionDescription( |
| 1670 | ArrayRef<OutputSection *> outputSections, |
| 1671 | llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) { |
| 1672 | for (OutputSection *os : outputSections) { |
| 1673 | if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR)) |
| 1674 | continue; |
| 1675 | for (SectionCommand *bc : os->commands) |
| 1676 | if (auto *isd = dyn_cast<InputSectionDescription>(Val: bc)) |
| 1677 | fn(os, isd); |
| 1678 | } |
| 1679 | } |
| 1680 | |
// Trivial constructor; only retains a reference to the link context.
ThunkCreator::ThunkCreator(Ctx &ctx) : ctx(ctx) {}
| 1682 | |
// Empty out-of-line destructor.
ThunkCreator::~ThunkCreator() {}
| 1684 | |
| 1685 | // Thunk Implementation |
| 1686 | // |
| 1687 | // Thunks (sometimes called stubs, veneers or branch islands) are small pieces |
// of code that the linker inserts in between a caller and a callee. The thunks
| 1689 | // are added at link time rather than compile time as the decision on whether |
| 1690 | // a thunk is needed, such as the caller and callee being out of range, can only |
| 1691 | // be made at link time. |
| 1692 | // |
| 1693 | // It is straightforward to tell given the current state of the program when a |
| 1694 | // thunk is needed for a particular call. The more difficult part is that |
| 1695 | // the thunk needs to be placed in the program such that the caller can reach |
| 1696 | // the thunk and the thunk can reach the callee; furthermore, adding thunks to |
| 1697 | // the program alters addresses, which can mean more thunks etc. |
| 1698 | // |
| 1699 | // In lld we have a synthetic ThunkSection that can hold many Thunks. |
| 1700 | // The decision to have a ThunkSection act as a container means that we can |
| 1701 | // more easily handle the most common case of a single block of contiguous |
| 1702 | // Thunks by inserting just a single ThunkSection. |
| 1703 | // |
| 1704 | // The implementation of Thunks in lld is split across these areas |
| 1705 | // Relocations.cpp : Framework for creating and placing thunks |
| 1706 | // Thunks.cpp : The code generated for each supported thunk |
| 1707 | // Target.cpp : Target specific hooks that the framework uses to decide when |
| 1708 | // a thunk is used |
| 1709 | // Synthetic.cpp : Implementation of ThunkSection |
| 1710 | // Writer.cpp : Iteratively call framework until no more Thunks added |
| 1711 | // |
| 1712 | // Thunk placement requirements: |
| 1713 | // Mips LA25 thunks. These must be placed immediately before the callee section |
| 1714 | // We can assume that the caller is in range of the Thunk. These are modelled |
| 1715 | // by Thunks that return the section they must precede with |
| 1716 | // getTargetInputSection(). |
| 1717 | // |
| 1718 | // ARM interworking and range extension thunks. These thunks must be placed |
| 1719 | // within range of the caller. All implemented ARM thunks can always reach the |
| 1720 | // callee as they use an indirect jump via a register that has no range |
| 1721 | // restrictions. |
| 1722 | // |
| 1723 | // Thunk placement algorithm: |
| 1724 | // For Mips LA25 ThunkSections; the placement is explicit, it has to be before |
| 1725 | // getTargetInputSection(). |
| 1726 | // |
| 1727 | // For thunks that must be placed within range of the caller there are many |
| 1728 | // possible choices given that the maximum range from the caller is usually |
| 1729 | // much larger than the average InputSection size. Desirable properties include: |
| 1730 | // - Maximize reuse of thunks by multiple callers |
| 1731 | // - Minimize number of ThunkSections to simplify insertion |
| 1732 | // - Handle impact of already added Thunks on addresses |
| 1733 | // - Simple to understand and implement |
| 1734 | // |
| 1735 | // In lld for the first pass, we pre-create one or more ThunkSections per |
| 1736 | // InputSectionDescription at Target specific intervals. A ThunkSection is |
| 1737 | // placed so that the estimated end of the ThunkSection is within range of the |
| 1738 | // start of the InputSectionDescription or the previous ThunkSection. For |
| 1739 | // example: |
| 1740 | // InputSectionDescription |
| 1741 | // Section 0 |
| 1742 | // ... |
| 1743 | // Section N |
| 1744 | // ThunkSection 0 |
| 1745 | // Section N + 1 |
| 1746 | // ... |
| 1747 | // Section N + K |
| 1748 | // Thunk Section 1 |
| 1749 | // |
| 1750 | // The intention is that we can add a Thunk to a ThunkSection that is well |
| 1751 | // spaced enough to service a number of callers without having to do a lot |
| 1752 | // of work. An important principle is that it is not an error if a Thunk cannot |
| 1753 | // be placed in a pre-created ThunkSection; when this happens we create a new |
| 1754 | // ThunkSection placed next to the caller. This allows us to handle the vast |
| 1755 | // majority of thunks simply, but also handle rare cases where the branch range |
| 1756 | // is smaller than the target specific spacing. |
| 1757 | // |
| 1758 | // The algorithm is expected to create all the thunks that are needed in a |
| 1759 | // single pass, with a small number of programs needing a second pass due to |
| 1760 | // the insertion of thunks in the first pass increasing the offset between |
| 1761 | // callers and callees that were only just in range. |
| 1762 | // |
| 1763 | // A consequence of allowing new ThunkSections to be created outside of the |
| 1764 | // pre-created ThunkSections is that in rare cases calls to Thunks that were in |
| 1765 | // range in pass K, are out of range in some pass > K due to the insertion of |
| 1766 | // more Thunks in between the caller and callee. When this happens we retarget |
| 1767 | // the relocation back to the original target and create another Thunk. |
| 1768 | |
// Remove ThunkSections that are empty; these should only be the initial set
// precreated on pass 0.
| 1771 | |
| 1772 | // Insert the Thunks for OutputSection OS into their designated place |
| 1773 | // in the Sections vector, and recalculate the InputSection output section |
| 1774 | // offsets. |
| 1775 | // This may invalidate any output section offsets stored outside of InputSection |
| 1776 | void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) { |
| 1777 | forEachInputSectionDescription( |
| 1778 | outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) { |
| 1779 | if (isd->thunkSections.empty()) |
| 1780 | return; |
| 1781 | |
| 1782 | // Remove any zero sized precreated Thunks. |
| 1783 | llvm::erase_if(C&: isd->thunkSections, |
| 1784 | P: [](const std::pair<ThunkSection *, uint32_t> &ts) { |
| 1785 | return ts.first->getSize() == 0; |
| 1786 | }); |
| 1787 | |
| 1788 | // ISD->ThunkSections contains all created ThunkSections, including |
| 1789 | // those inserted in previous passes. Extract the Thunks created this |
| 1790 | // pass and order them in ascending outSecOff. |
| 1791 | std::vector<ThunkSection *> newThunks; |
| 1792 | for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections) |
| 1793 | if (ts.second == pass) |
| 1794 | newThunks.push_back(x: ts.first); |
| 1795 | llvm::stable_sort(Range&: newThunks, |
| 1796 | C: [](const ThunkSection *a, const ThunkSection *b) { |
| 1797 | return a->outSecOff < b->outSecOff; |
| 1798 | }); |
| 1799 | |
| 1800 | // Merge sorted vectors of Thunks and InputSections by outSecOff |
| 1801 | SmallVector<InputSection *, 0> tmp; |
| 1802 | tmp.reserve(N: isd->sections.size() + newThunks.size()); |
| 1803 | |
| 1804 | std::merge(first1: isd->sections.begin(), last1: isd->sections.end(), |
| 1805 | first2: newThunks.begin(), last2: newThunks.end(), result: std::back_inserter(x&: tmp), |
| 1806 | comp: mergeCmp); |
| 1807 | |
| 1808 | isd->sections = std::move(tmp); |
| 1809 | }); |
| 1810 | } |
| 1811 | |
// Masks/values for the Hexagon parse field (bits 15:14) of an instruction
// word, used below to detect packet boundaries. 0b11 marks the last word of
// a packet; 0b00 presumably marks a duplex, which also ends a packet —
// see the Hexagon PRM for the authoritative encoding.
constexpr uint32_t HEXAGON_MASK_END_PACKET = 3 << 14;
constexpr uint32_t HEXAGON_END_OF_PACKET = 3 << 14;
constexpr uint32_t HEXAGON_END_OF_DUPLEX = 0 << 14;
| 1815 | |
| 1816 | // Return the distance between the packet start and the instruction in the |
| 1817 | // relocation. |
| 1818 | static int getHexagonPacketOffset(const InputSection &isec, |
| 1819 | const Relocation &rel) { |
| 1820 | const ArrayRef<uint8_t> data = isec.content(); |
| 1821 | |
| 1822 | // Search back as many as 3 instructions. |
| 1823 | for (unsigned i = 0;; i++) { |
| 1824 | if (i == 3 || rel.offset < (i + 1) * 4) |
| 1825 | return i * 4; |
| 1826 | uint32_t instWord = |
| 1827 | read32(ctx&: isec.getCtx(), p: data.data() + (rel.offset - (i + 1) * 4)); |
| 1828 | if (((instWord & HEXAGON_MASK_END_PACKET) == HEXAGON_END_OF_PACKET) || |
| 1829 | ((instWord & HEXAGON_MASK_END_PACKET) == HEXAGON_END_OF_DUPLEX)) |
| 1830 | return i * 4; |
| 1831 | } |
| 1832 | } |
| 1833 | |
| 1834 | static int64_t getPCBias(Ctx &ctx, const InputSection &isec, |
| 1835 | const Relocation &rel) { |
| 1836 | if (ctx.arg.emachine == EM_ARM) { |
| 1837 | switch (rel.type) { |
| 1838 | case R_ARM_THM_JUMP19: |
| 1839 | case R_ARM_THM_JUMP24: |
| 1840 | case R_ARM_THM_CALL: |
| 1841 | return 4; |
| 1842 | default: |
| 1843 | return 8; |
| 1844 | } |
| 1845 | } |
| 1846 | if (ctx.arg.emachine == EM_HEXAGON) |
| 1847 | return -getHexagonPacketOffset(isec, rel); |
| 1848 | return 0; |
| 1849 | } |
| 1850 | |
| 1851 | // Find or create a ThunkSection within the InputSectionDescription (ISD) that |
| 1852 | // is in range of Src. An ISD maps to a range of InputSections described by a |
| 1853 | // linker script section pattern such as { .text .text.* }. |
| 1854 | ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os, |
| 1855 | InputSection *isec, |
| 1856 | InputSectionDescription *isd, |
| 1857 | const Relocation &rel, |
| 1858 | uint64_t src) { |
| 1859 | // See the comment in getThunk for -pcBias below. |
| 1860 | const int64_t pcBias = getPCBias(ctx, isec: *isec, rel); |
| 1861 | for (std::pair<ThunkSection *, uint32_t> tp : isd->thunkSections) { |
| 1862 | ThunkSection *ts = tp.first; |
| 1863 | uint64_t tsBase = os->addr + ts->outSecOff - pcBias; |
| 1864 | uint64_t tsLimit = tsBase + ts->getSize(); |
| 1865 | if (ctx.target->inBranchRange(type: rel.type, src, |
| 1866 | dst: (src > tsLimit) ? tsBase : tsLimit)) |
| 1867 | return ts; |
| 1868 | } |
| 1869 | |
| 1870 | // No suitable ThunkSection exists. This can happen when there is a branch |
| 1871 | // with lower range than the ThunkSection spacing or when there are too |
| 1872 | // many Thunks. Create a new ThunkSection as close to the InputSection as |
| 1873 | // possible. Error if InputSection is so large we cannot place ThunkSection |
| 1874 | // anywhere in Range. |
| 1875 | uint64_t thunkSecOff = isec->outSecOff; |
| 1876 | if (!ctx.target->inBranchRange(type: rel.type, src, |
| 1877 | dst: os->addr + thunkSecOff + rel.addend)) { |
| 1878 | thunkSecOff = isec->outSecOff + isec->getSize(); |
| 1879 | if (!ctx.target->inBranchRange(type: rel.type, src, |
| 1880 | dst: os->addr + thunkSecOff + rel.addend)) |
| 1881 | Fatal(ctx) << "InputSection too large for range extension thunk " |
| 1882 | << isec->getObjMsg(offset: src - (os->addr << isec->outSecOff)); |
| 1883 | } |
| 1884 | return addThunkSection(os, isd, off: thunkSecOff); |
| 1885 | } |
| 1886 | |
| 1887 | // Add a Thunk that needs to be placed in a ThunkSection that immediately |
| 1888 | // precedes its Target. |
| 1889 | ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) { |
| 1890 | ThunkSection *ts = thunkedSections.lookup(Val: isec); |
| 1891 | if (ts) |
| 1892 | return ts; |
| 1893 | |
| 1894 | // Find InputSectionRange within Target Output Section (TOS) that the |
| 1895 | // InputSection (IS) that we need to precede is in. |
| 1896 | OutputSection *tos = isec->getParent(); |
| 1897 | for (SectionCommand *bc : tos->commands) { |
| 1898 | auto *isd = dyn_cast<InputSectionDescription>(Val: bc); |
| 1899 | if (!isd || isd->sections.empty()) |
| 1900 | continue; |
| 1901 | |
| 1902 | InputSection *first = isd->sections.front(); |
| 1903 | InputSection *last = isd->sections.back(); |
| 1904 | |
| 1905 | if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff) |
| 1906 | continue; |
| 1907 | |
| 1908 | ts = addThunkSection(os: tos, isd, off: isec->outSecOff, /*isPrefix=*/true); |
| 1909 | thunkedSections[isec] = ts; |
| 1910 | return ts; |
| 1911 | } |
| 1912 | |
| 1913 | return nullptr; |
| 1914 | } |
| 1915 | |
| 1916 | // Create one or more ThunkSections per OS that can be used to place Thunks. |
| 1917 | // We attempt to place the ThunkSections using the following desirable |
| 1918 | // properties: |
| 1919 | // - Within range of the maximum number of callers |
| 1920 | // - Minimise the number of ThunkSections |
| 1921 | // |
| 1922 | // We follow a simple but conservative heuristic to place ThunkSections at |
| 1923 | // offsets that are multiples of a Target specific branch range. |
| 1924 | // For an InputSectionDescription that is smaller than the range, a single |
| 1925 | // ThunkSection at the end of the range will do. |
| 1926 | // |
| 1927 | // For an InputSectionDescription that is more than twice the size of the range, |
| 1928 | // we place the last ThunkSection at range bytes from the end of the |
| 1929 | // InputSectionDescription in order to increase the likelihood that the |
| 1930 | // distance from a thunk to its target will be sufficiently small to |
| 1931 | // allow for the creation of a short thunk. |
void ThunkCreator::createInitialThunkSections(
    ArrayRef<OutputSection *> outputSections) {
  uint32_t thunkSectionSpacing = ctx.target->getThunkSectionSpacing();
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->sections.empty())
          return;

        uint32_t isdBegin = isd->sections.front()->outSecOff;
        uint32_t isdEnd =
            isd->sections.back()->outSecOff + isd->sections.back()->getSize();
        // Offset at which the final ThunkSection should be placed; -1 wraps
        // to UINT32_MAX, meaning "no early cut-off" for ISDs not large
        // enough for the last-thunk heuristic (see function comment).
        uint32_t lastThunkLowerBound = -1;
        if (isdEnd - isdBegin > thunkSectionSpacing * 2)
          lastThunkLowerBound = isdEnd - thunkSectionSpacing;

        // isecLimit is assigned on the first iteration; isd->sections is
        // non-empty, so it is always set before the trailing use below.
        uint32_t isecLimit;
        uint32_t prevIsecLimit = isdBegin;
        uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;

        for (const InputSection *isec : isd->sections) {
          isecLimit = isec->outSecOff + isec->getSize();
          // Crossed the spacing interval: place a ThunkSection at the end of
          // the previous section and start the next interval there.
          if (isecLimit > thunkUpperBound) {
            addThunkSection(os, isd, off: prevIsecLimit);
            thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
          }
          // Stop once we are within spacing range of the ISD end; the final
          // ThunkSection is added after the loop.
          if (isecLimit > lastThunkLowerBound)
            break;
          prevIsecLimit = isecLimit;
        }
        addThunkSection(os, isd, off: isecLimit);
      });
}
| 1964 | |
// Create a ThunkSection at offset off within os/isd and register it with the
// current pass number so mergeThunks can tell newly created sections apart.
ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
                                            InputSectionDescription *isd,
                                            uint64_t off, bool isPrefix) {
  auto *ts = make<ThunkSection>(args&: ctx, args&: os, args&: off);
  ts->partition = os->partition;
  if ((ctx.arg.fixCortexA53Errata843419 || ctx.arg.fixCortexA8) &&
      !isd->sections.empty() && !isPrefix) {
    // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
    // thunks we disturb the base addresses of sections placed after the thunks
    // this makes patches we have generated redundant, and may cause us to
    // generate more patches as different instructions are now in sensitive
    // locations. When we generate more patches we may force more branches to
    // go out of range, causing more thunks to be generated. In pathological
    // cases this can cause the address dependent content pass not to converge.
    // We fix this by rounding up the size of the ThunkSection to 4KiB, this
    // limits the insertion of a ThunkSection on the addresses modulo 4 KiB,
    // which means that adding Thunks to the section does not invalidate
    // errata patches for following code.
    // Rounding up the size to 4KiB has consequences for code-size and can
    // trip up linker script defined assertions. For example the linux kernel
    // has an assertion that what LLD represents as an InputSectionDescription
    // does not exceed 4 KiB even if the overall OutputSection is > 128 Mib.
    // We use the heuristic of rounding up the size when both of the following
    // conditions are true:
    // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
    //     accounts for the case where no single InputSectionDescription is
    //     larger than the OutputSection size. This is conservative but simple.
    // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
    //     any assertion failures that an InputSectionDescription is < 4 KiB
    //     in size.
    //
    // isPrefix is a ThunkSection explicitly inserted before its target
    // section. We suppress the rounding up of the size of these ThunkSections
    // as unlike normal ThunkSections, they are small in size, but when BTI is
    // enabled very frequent. This can bloat code-size and push the errata
    // patches out of branch range.
    uint64_t isdSize = isd->sections.back()->outSecOff +
                       isd->sections.back()->getSize() -
                       isd->sections.front()->outSecOff;
    if (os->size > ctx.target->getThunkSectionSpacing() && isdSize > 4096)
      ts->roundUpSizeForErrata = true;
  }
  isd->thunkSections.push_back(Elt: {ts, pass});
  return ts;
}
| 2010 | |
| 2011 | static bool isThunkSectionCompatible(InputSection *source, |
| 2012 | SectionBase *target) { |
| 2013 | // We can't reuse thunks in different loadable partitions because they might |
| 2014 | // not be loaded. But partition 1 (the main partition) will always be loaded. |
| 2015 | if (source->partition != target->partition) |
| 2016 | return target->partition == 1; |
| 2017 | return true; |
| 2018 | } |
| 2019 | |
// Find a reusable Thunk for (isec, rel) or create a new one. Returns the
// Thunk plus a bool that is true when the Thunk was newly created (the
// caller must then place it into a ThunkSection).
std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
                                                Relocation &rel, uint64_t src) {
  SmallVector<std::unique_ptr<Thunk>, 0> *thunkVec = nullptr;
  // Arm and Thumb have a PC Bias of 8 and 4 respectively, this is cancelled
  // out in the relocation addend. We compensate for the PC bias so that
  // an Arm and Thumb relocation to the same destination get the same keyAddend,
  // which is usually 0.
  const int64_t pcBias = getPCBias(ctx, isec: *isec, rel);
  const int64_t keyAddend = rel.addend + pcBias;

  // We use a ((section, offset), addend) pair to find the thunk position if
  // possible so that we create only one thunk for aliased symbols or ICFed
  // sections. There may be multiple relocations sharing the same (section,
  // offset + addend) pair. We may revert the relocation back to its original
  // non-Thunk target, so we cannot fold offset + addend.
  if (auto *d = dyn_cast<Defined>(Val: rel.sym))
    if (!d->isInPlt(ctx) && d->section)
      thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
                                                    keyAddend}];
  // Fall back to keying by the symbol itself when there is no usable
  // (section, value) pair, e.g. for undefined or PLT-resolved symbols.
  if (!thunkVec)
    thunkVec = &thunkedSymbols[{rel.sym, keyAddend}];

  // Check existing Thunks for Sym to see if they can be reused
  for (auto &t : *thunkVec)
    if (isThunkSectionCompatible(source: isec, target: t->getThunkTargetSym()->section) &&
        t->isCompatibleWith(*isec, rel) &&
        ctx.target->inBranchRange(type: rel.type, src,
                                  dst: t->getThunkTargetSym()->getVA(ctx, addend: -pcBias)))
      return std::make_pair(x: t.get(), y: false);

  // No existing compatible Thunk in range, create a new one
  thunkVec->push_back(Elt: addThunk(ctx, isec: *isec, rel));
  return std::make_pair(x: thunkVec->back().get(), y: true);
}
| 2054 | |
| 2055 | std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d, |
| 2056 | int64_t a) { |
| 2057 | auto [it, isNew] = landingPadsBySectionAndAddend.try_emplace( |
| 2058 | Key: {{d.section, d.value}, a}, Args: nullptr); |
| 2059 | if (isNew) |
| 2060 | it->second = addLandingPadThunk(ctx, s&: d, a); |
| 2061 | return {it->second.get(), isNew}; |
| 2062 | } |
| 2063 | |
| 2064 | // Return true if the relocation target is an in range Thunk. |
| 2065 | // Return false if the relocation is not to a Thunk. If the relocation target |
| 2066 | // was originally to a Thunk, but is no longer in range we revert the |
| 2067 | // relocation back to its original non-Thunk target. |
| 2068 | bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) { |
| 2069 | if (Thunk *t = thunks.lookup(Val: rel.sym)) { |
| 2070 | if (ctx.target->inBranchRange(type: rel.type, src, |
| 2071 | dst: rel.sym->getVA(ctx, addend: rel.addend))) |
| 2072 | return true; |
| 2073 | rel.sym = &t->destination; |
| 2074 | rel.addend = t->addend; |
| 2075 | if (rel.sym->isInPlt(ctx)) |
| 2076 | rel.expr = toPlt(expr: rel.expr); |
| 2077 | } |
| 2078 | return false; |
| 2079 | } |
| 2080 | |
| 2081 | // When indirect branches are restricted, such as AArch64 BTI Thunks may need |
| 2082 | // to target a linker generated landing pad instead of the target. This needs |
| 2083 | // to be done once per pass as the need for a BTI thunk is dependent whether |
| 2084 | // a thunk is short or long. We iterate over all the thunks to make sure we |
| 2085 | // catch thunks that have been created but are no longer live. Non-live thunks |
| 2086 | // are not reachable via normalizeExistingThunk() but are still written. |
| 2087 | bool ThunkCreator::addSyntheticLandingPads() { |
| 2088 | bool addressesChanged = false; |
| 2089 | for (Thunk *t : allThunks) { |
| 2090 | if (!t->needsSyntheticLandingPad()) |
| 2091 | continue; |
| 2092 | Thunk *lpt; |
| 2093 | bool isNew; |
| 2094 | auto &dr = cast<Defined>(Val&: t->destination); |
| 2095 | std::tie(args&: lpt, args&: isNew) = getSyntheticLandingPad(d&: dr, a: t->addend); |
| 2096 | if (isNew) { |
| 2097 | addressesChanged = true; |
| 2098 | getISThunkSec(isec: cast<InputSection>(Val: dr.section))->addThunk(t: lpt); |
| 2099 | } |
| 2100 | t->landingPad = lpt->getThunkTargetSym(); |
| 2101 | } |
| 2102 | return addressesChanged; |
| 2103 | } |
| 2104 | |
// Process all relocations from the InputSections that have been assigned
// to InputSectionDescriptions and redirect through Thunks if needed. The
// function should be called iteratively until it returns false.
//
// PreConditions:
// All InputSections that may need a Thunk are reachable from
// OutputSectionCommands.
//
// All OutputSections have an address and all InputSections have an offset
// within the OutputSection.
//
// The offsets between caller (relocation place) and callee
// (relocation target) will not be modified outside of createThunks().
//
// PostConditions:
// If return value is true then ThunkSections have been inserted into
// OutputSections. All relocations that needed a Thunk based on the information
// available to createThunks() on entry have been redirected to a Thunk. Note
// that adding Thunks changes offsets between caller and callee so more Thunks
// may be required.
//
// If return value is false then no more Thunks are needed, and createThunks has
// made no changes. If the target requires range extension thunks, currently
// ARM, then any future change in offset between caller and callee risks a
// relocation out of range error.
bool ThunkCreator::createThunks(uint32_t pass,
                                ArrayRef<OutputSection *> outputSections) {
  this->pass = pass;
  bool addressesChanged = false;

  // On the first pass, pre-create evenly spaced ThunkSections for targets
  // that define a thunk section spacing.
  if (pass == 0 && ctx.target->getThunkSectionSpacing())
    createInitialThunkSections(outputSections);

  // BTI landing pads are recomputed every pass; see addSyntheticLandingPads().
  if (ctx.arg.emachine == EM_AARCH64)
    addressesChanged = addSyntheticLandingPads();

  // Create all the Thunks and insert them into synthetic ThunkSections. The
  // ThunkSections are later inserted back into InputSectionDescriptions.
  // We separate the creation of ThunkSections from the insertion of the
  // ThunkSections as ThunkSections are not always inserted into the same
  // InputSectionDescription as the caller.
  forEachInputSectionDescription(
      outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs()) {
            uint64_t src = isec->getVA(offset: rel.offset);

            // If we are a relocation to an existing Thunk, check if it is
            // still in range. If not then Rel will be altered to point to its
            // original target so another Thunk can be generated.
            if (pass > 0 && normalizeExistingThunk(rel, src))
              continue;

            // Skip relocations the target says do not need a Thunk.
            if (!ctx.target->needsThunk(expr: rel.expr, relocType: rel.type, file: isec->file, branchAddr: src,
                                        s: *rel.sym, a: rel.addend))
              continue;

            Thunk *t;
            bool isNew;
            std::tie(args&: t, args&: isNew) = getThunk(isec, rel, src);

            if (isNew) {
              // Find or create a ThunkSection for the new Thunk
              ThunkSection *ts;
              if (auto *tis = t->getTargetInputSection())
                ts = getISThunkSec(isec: tis);
              else
                ts = getISDThunkSec(os, isec, isd, rel, src);
              ts->addThunk(t);
              thunks[t->getThunkTargetSym()] = t;
              allThunks.push_back(x: t);
            }

            // Redirect relocation to Thunk, we never go via the PLT to a Thunk
            rel.sym = t->getThunkTargetSym();
            rel.expr = fromPlt(expr: rel.expr);

            // On AArch64 and PPC, a jump/call relocation may be encoded as
            // STT_SECTION + non-zero addend, clear the addend after
            // redirection.
            if (ctx.arg.emachine != EM_MIPS)
              rel.addend = -getPCBias(ctx, isec: *isec, rel);
          }

        // Reassign offsets within each ThunkSection; a size change means
        // addresses moved and another pass will be needed.
        for (auto &p : isd->thunkSections)
          addressesChanged |= p.first->assignOffsets();
      });

  for (auto &p : thunkedSections)
    addressesChanged |= p.second->assignOffsets();

  // Merge all created synthetic ThunkSections back into OutputSection
  mergeThunks(outputSections);
  return addressesChanged;
}
| 2200 | |
| 2201 | // The following aid in the conversion of call x@GDPLT to call __tls_get_addr |
| 2202 | // hexagonNeedsTLSSymbol scans for relocations that would require a call to
| 2203 | // __tls_get_addr. |
| 2204 | // hexagonTLSSymbolUpdate rebinds the relocation to __tls_get_addr. |
| 2205 | bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) { |
| 2206 | bool needTlsSymbol = false; |
| 2207 | forEachInputSectionDescription( |
| 2208 | outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) { |
| 2209 | for (InputSection *isec : isd->sections) |
| 2210 | for (Relocation &rel : isec->relocs()) |
| 2211 | if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { |
| 2212 | needTlsSymbol = true; |
| 2213 | return; |
| 2214 | } |
| 2215 | }); |
| 2216 | return needTlsSymbol; |
| 2217 | } |
| 2218 | |
| 2219 | void elf::hexagonTLSSymbolUpdate(Ctx &ctx) { |
| 2220 | Symbol *sym = ctx.symtab->find(name: "__tls_get_addr" ); |
| 2221 | if (!sym) |
| 2222 | return; |
| 2223 | bool needEntry = true; |
| 2224 | forEachInputSectionDescription( |
| 2225 | outputSections: ctx.outputSections, fn: [&](OutputSection *os, InputSectionDescription *isd) { |
| 2226 | for (InputSection *isec : isd->sections) |
| 2227 | for (Relocation &rel : isec->relocs()) |
| 2228 | if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { |
| 2229 | if (needEntry) { |
| 2230 | if (sym->auxIdx == 0) |
| 2231 | sym->allocateAux(ctx); |
| 2232 | addPltEntry(ctx, plt&: *ctx.in.plt, gotPlt&: *ctx.in.gotPlt, rel&: *ctx.in.relaPlt, |
| 2233 | type: ctx.target->pltRel, sym&: *sym); |
| 2234 | needEntry = false; |
| 2235 | } |
| 2236 | rel.sym = sym; |
| 2237 | } |
| 2238 | }); |
| 2239 | } |
| 2240 | |
| 2241 | static bool matchesRefTo(const NoCrossRefCommand &cmd, StringRef osec) { |
| 2242 | if (cmd.toFirst) |
| 2243 | return cmd.outputSections[0] == osec; |
| 2244 | return llvm::is_contained(Range: cmd.outputSections, Element: osec); |
| 2245 | } |
| 2246 | |
| 2247 | template <class ELFT, class Rels> |
| 2248 | static void scanCrossRefs(Ctx &ctx, const NoCrossRefCommand &cmd, |
| 2249 | OutputSection *osec, InputSection *sec, Rels rels) { |
| 2250 | for (const auto &r : rels) { |
| 2251 | Symbol &sym = sec->file->getSymbol(symbolIndex: r.getSymbol(ctx.arg.isMips64EL)); |
| 2252 | // A legal cross-reference is when the destination output section is |
| 2253 | // nullptr, osec for a self-reference, or a section that is described by the |
| 2254 | // NOCROSSREFS/NOCROSSREFS_TO command. |
| 2255 | auto *dstOsec = sym.getOutputSection(); |
| 2256 | if (!dstOsec || dstOsec == osec || !matchesRefTo(cmd, osec: dstOsec->name)) |
| 2257 | continue; |
| 2258 | |
| 2259 | std::string toSymName; |
| 2260 | if (!sym.isSection()) |
| 2261 | toSymName = toStr(ctx, sym); |
| 2262 | else if (auto *d = dyn_cast<Defined>(Val: &sym)) |
| 2263 | toSymName = d->section->name; |
| 2264 | Err(ctx) << sec->getLocation(offset: r.r_offset) |
| 2265 | << ": prohibited cross reference from '" << osec->name << "' to '" |
| 2266 | << toSymName << "' in '" << dstOsec->name << "'" ; |
| 2267 | } |
| 2268 | } |
| 2269 | |
// For each output section described by at least one NOCROSSREFS(_TO) command,
// scan relocations from its input sections for prohibited cross references.
template <class ELFT> void elf::checkNoCrossRefs(Ctx &ctx) {
  for (OutputSection *osec : ctx.outputSections) {
    for (const NoCrossRefCommand &noxref : ctx.script->noCrossRefs) {
      // Only scan sections listed by the command. With NOCROSSREFS_TO
      // (toFirst), the first listed section is the reference target, so
      // relocations originating from it are not checked.
      if (!llvm::is_contained(Range: noxref.outputSections, Element: osec->name) ||
          (noxref.toFirst && noxref.outputSections[0] == osec->name))
        continue;
      for (SectionCommand *cmd : osec->commands) {
        auto *isd = dyn_cast<InputSectionDescription>(Val: cmd);
        if (!isd)
          continue;
        // Input sections are independent; scan their relocations in parallel.
        parallelForEach(isd->sections, [&](InputSection *sec) {
          invokeOnRelocs(*sec, scanCrossRefs<ELFT>, ctx, noxref, osec, sec);
        });
      }
    }
  }
}
| 2289 | |
// Explicit instantiations for all four supported ELF variants.
template void elf::scanRelocations<ELF32LE>(Ctx &);
template void elf::scanRelocations<ELF32BE>(Ctx &);
template void elf::scanRelocations<ELF64LE>(Ctx &);
template void elf::scanRelocations<ELF64BE>(Ctx &);

template void elf::checkNoCrossRefs<ELF32LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF32BE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64BE>(Ctx &);
| 2299 | |