1//===- InputSection.cpp ---------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "InputSection.h"
10#include "Config.h"
11#include "InputFiles.h"
12#include "OutputSections.h"
13#include "Relocations.h"
14#include "SymbolTable.h"
15#include "Symbols.h"
16#include "SyntheticSections.h"
17#include "Target.h"
18#include "lld/Common/DWARF.h"
19#include "llvm/Support/Compiler.h"
20#include "llvm/Support/Compression.h"
21#include "llvm/Support/Endian.h"
22#include "llvm/Support/LEB128.h"
23#include "llvm/Support/xxhash.h"
24#include <algorithm>
25#include <optional>
26#include <vector>
27
28using namespace llvm;
29using namespace llvm::ELF;
30using namespace llvm::object;
31using namespace llvm::support;
32using namespace llvm::support::endian;
33using namespace llvm::sys;
34using namespace lld;
35using namespace lld::elf;
36
37// Returns a string to construct an error message.
38std::string elf::toStr(Ctx &ctx, const InputSectionBase *sec) {
39 return (toStr(ctx, f: sec->file) + ":(" + sec->name + ")").str();
40}
41
42const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
43 const InputSectionBase *sec) {
44 return s << toStr(ctx&: s.ctx, sec);
45}
46
47template <class ELFT>
48static ArrayRef<uint8_t> getSectionContents(ObjFile<ELFT> &file,
49 const typename ELFT::Shdr &hdr) {
50 if (hdr.sh_type == SHT_NOBITS)
51 return ArrayRef<uint8_t>(nullptr, hdr.sh_size);
52 return check(file.getObj().getSectionContents(hdr));
53}
54
// Primary constructor: records the section header fields via SectionBase,
// points content_ at the raw bytes, validates sh_addralign, and — for
// SHF_COMPRESSED sections — parses the compression header so that `size`
// reflects the uncompressed size from here on.
InputSectionBase::InputSectionBase(InputFile *file, StringRef name,
                                   uint32_t type, uint64_t flags, uint32_t link,
                                   uint32_t info, uint32_t addralign,
                                   uint32_t entsize, ArrayRef<uint8_t> data,
                                   Kind sectionKind)
    : SectionBase(sectionKind, file, name, type, flags, link, info, addralign,
                  entsize),
      bss(0), decodedCrel(0), keepUnique(0), nopFiller(0),
      content_(data.data()), size(data.size()) {
  // In order to reduce memory allocation, we assume that mergeable
  // sections are smaller than 4 GiB, which is not an unreasonable
  // assumption as of 2017.
  if (sectionKind == SectionBase::Merge && content().size() > UINT32_MAX)
    ErrAlways(ctx&: getCtx()) << this << ": section too large";

  // The ELF spec states that a value of 0 means the section has
  // no alignment constraints.
  uint32_t v = std::max<uint32_t>(a: addralign, b: 1);
  if (!isPowerOf2_64(Value: v)) {
    // Recoverable: diagnose and fall back to no alignment constraint.
    Err(ctx&: getCtx()) << this << ": sh_addralign is not a power of 2";
    v = 1;
  }
  this->addralign = v;

  // If SHF_COMPRESSED is set, parse the header. The legacy .zdebug format is no
  // longer supported.
  if (flags & SHF_COMPRESSED) {
    Ctx &ctx = file->ctx;
    invokeELFT(parseCompressedHeader, ctx);
  }
}
86
87// SHF_INFO_LINK and SHF_GROUP are normally resolved and not copied to the
88// output section. However, for relocatable linking without
89// --force-group-allocation, the SHF_GROUP flag and section groups are retained.
90static uint64_t getFlags(Ctx &ctx, uint64_t flags) {
91 flags &= ~(uint64_t)SHF_INFO_LINK;
92 if (ctx.arg.resolveGroups)
93 flags &= ~(uint64_t)SHF_GROUP;
94 return flags;
95}
96
// Construct an InputSectionBase directly from an ELF section header,
// delegating field extraction to the primary constructor above.
template <class ELFT>
InputSectionBase::InputSectionBase(ObjFile<ELFT> &file,
                                   const typename ELFT::Shdr &hdr,
                                   StringRef name, Kind sectionKind)
    : InputSectionBase(&file, name, hdr.sh_type,
                       getFlags(file.ctx, hdr.sh_flags), hdr.sh_link,
                       hdr.sh_info, hdr.sh_addralign, hdr.sh_entsize,
                       getSectionContents(file, hdr), sectionKind) {
  // We reject object files having insanely large alignments even though
  // they are allowed by the spec. I think 4GB is a reasonable limitation.
  // We might want to relax this in the future.
  if (hdr.sh_addralign > UINT32_MAX) {
    Err(ctx&: getCtx()) << &file << ": section sh_addralign is too large";
    addralign = 1;
  }
}
113
114size_t InputSectionBase::getSize() const {
115 if (auto *s = dyn_cast<SyntheticSection>(Val: this))
116 return s->getSize();
117 return size - bytesDropped;
118}
119
120template <class ELFT>
121static void decompressAux(Ctx &ctx, const InputSectionBase &sec, uint8_t *out,
122 size_t size) {
123 auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(sec.content_);
124 auto compressed = ArrayRef<uint8_t>(sec.content_, sec.compressedSize)
125 .slice(N: sizeof(typename ELFT::Chdr));
126 if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
127 ? compression::zlib::decompress(Input: compressed, Output: out, UncompressedSize&: size)
128 : compression::zstd::decompress(Input: compressed, Output: out, UncompressedSize&: size))
129 Err(ctx) << &sec << ": decompress failed: " << std::move(e);
130}
131
// Decompress the section into a thread-local buffer and repoint content_ at
// the uncompressed bytes. Although this is a const member function, it
// updates content_/compressed, which therefore act as mutable cached state.
void InputSectionBase::decompress() const {
  Ctx &ctx = getCtx();
  uint8_t *buf = makeThreadLocalN<uint8_t>(n: size);
  invokeELFT(decompressAux, ctx, *this, buf, size);
  content_ = buf;
  compressed = false;
}
139
// Return this section's relocations as REL, RELA, or CREL records.
// For SHT_CREL inputs, callers that support it get a lazy iterator over the
// compact encoding; otherwise the CREL data is decoded once into a cached
// thread-local RELA buffer and served as RELA from then on.
template <class ELFT>
RelsOrRelas<ELFT> InputSectionBase::relsOrRelas(bool supportsCrel) const {
  // relSecIdx == 0 means the section has no associated relocation section.
  if (relSecIdx == 0)
    return {};
  RelsOrRelas<ELFT> ret;
  auto *f = cast<ObjFile<ELFT>>(file);
  typename ELFT::Shdr shdr = f->template getELFShdrs<ELFT>()[relSecIdx];
  if (shdr.sh_type == SHT_CREL) {
    // Return an iterator if supported by caller.
    if (supportsCrel) {
      ret.crels = Relocs<typename ELFT::Crel>(
          (const uint8_t *)f->mb.getBufferStart() + shdr.sh_offset);
      return ret;
    }
    // Reference into the file's section table: cacheDecodedCrel below
    // updates the slot this refers to.
    InputSectionBase *const &relSec = f->getSections()[relSecIdx];
    // Otherwise, allocate a buffer to hold the decoded RELA relocations. When
    // called for the first time, relSec is null (without --emit-relocs) or an
    // InputSection with false decodedCrel.
    if (!relSec || !cast<InputSection>(Val: relSec)->decodedCrel) {
      auto *sec = makeThreadLocal<InputSection>(*f, shdr, name);
      f->cacheDecodedCrel(relSecIdx, sec);
      sec->type = SHT_RELA;
      sec->decodedCrel = true;

      // Decode the compact CREL records into an equivalent RELA array and
      // substitute it for the section's contents.
      RelocsCrel<ELFT::Is64Bits> entries(sec->content_);
      sec->size = entries.size() * sizeof(typename ELFT::Rela);
      auto *relas = makeThreadLocalN<typename ELFT::Rela>(entries.size());
      sec->content_ = reinterpret_cast<uint8_t *>(relas);
      for (auto [i, r] : llvm::enumerate(entries)) {
        relas[i].r_offset = r.r_offset;
        relas[i].setSymbolAndType(r.r_symidx, r.r_type, false);
        relas[i].r_addend = r.r_addend;
      }
    }
    ret.relas = {ArrayRef(
        reinterpret_cast<const typename ELFT::Rela *>(relSec->content_),
        relSec->size / sizeof(typename ELFT::Rela))};
    return ret;
  }

  // Plain SHT_REL / SHT_RELA: serve views directly over the file buffer.
  const void *content = f->mb.getBufferStart() + shdr.sh_offset;
  size_t size = shdr.sh_size;
  if (shdr.sh_type == SHT_REL) {
    ret.rels = {ArrayRef(reinterpret_cast<const typename ELFT::Rel *>(content),
                         size / sizeof(typename ELFT::Rel))};
  } else {
    assert(shdr.sh_type == SHT_RELA);
    ret.relas = {
        ArrayRef(reinterpret_cast<const typename ELFT::Rela *>(content),
                 size / sizeof(typename ELFT::Rela))};
  }
  return ret;
}
193
// The link context is owned by the file every section belongs to.
Ctx &SectionBase::getCtx() const { return file->ctx; }
195
// Map an offset within this section to the corresponding offset within the
// output section that contains it.
uint64_t SectionBase::getOffset(uint64_t offset) const {
  switch (kind()) {
  case Output: {
    auto *os = cast<OutputSection>(Val: this);
    // For output sections we treat offset -1 as the end of the section.
    return offset == uint64_t(-1) ? os->size : offset;
  }
  case Class:
    llvm_unreachable("section classes do not have offsets");
  case Regular:
  case Synthetic:
  case Spill:
    // Plain input sections: just add the section's position in the output.
    return cast<InputSection>(Val: this)->outSecOff + offset;
  case EHFrame: {
    // Two code paths may reach here. First, clang_rt.crtbegin.o and GCC
    // crtbeginT.o may reference the start of an empty .eh_frame to identify the
    // start of the output .eh_frame. Just return offset.
    //
    // Second, InputSection::copyRelocations on .eh_frame. Some pieces may be
    // discarded due to GC/ICF. We should compute the output section offset.
    const EhInputSection *es = cast<EhInputSection>(Val: this);
    if (!es->content().empty())
      if (InputSection *isec = es->getParent())
        return isec->outSecOff + es->getParentOffset(offset);
    return offset;
  }
  case Merge:
    // Merged strings/constants: translate through the piece table; without a
    // parent (not yet assigned), the piece-relative offset is the best answer.
    const MergeInputSection *ms = cast<MergeInputSection>(Val: this);
    if (InputSection *isec = ms->getParent())
      return isec->outSecOff + ms->getParentOffset(offset);
    return ms->getParentOffset(offset);
  }
  llvm_unreachable("invalid section kind");
}
230
231uint64_t SectionBase::getVA(uint64_t offset) const {
232 const OutputSection *out = getOutputSection();
233 return (out ? out->addr : 0) + getOffset(offset);
234}
235
236OutputSection *SectionBase::getOutputSection() {
237 InputSection *sec;
238 if (auto *isec = dyn_cast<InputSection>(Val: this))
239 sec = isec;
240 else if (auto *ms = dyn_cast<MergeInputSection>(Val: this))
241 sec = ms->getParent();
242 else if (auto *eh = dyn_cast<EhInputSection>(Val: this))
243 sec = eh->getParent();
244 else
245 return cast<OutputSection>(Val: this);
246 return sec ? sec->getParent() : nullptr;
247}
248
// When a section is compressed, its contents consist of an Elf_Chdr header
// followed by zlib- or zstd-compressed data. This function parses the header
// to record the compressed/uncompressed sizes and alignment; the header is
// skipped later, when decompressAux actually decompresses the payload.
template <typename ELFT>
void InputSectionBase::parseCompressedHeader(Ctx &ctx) {
  // The flag is cleared: from the linker's point of view the section is its
  // uncompressed contents.
  flags &= ~(uint64_t)SHF_COMPRESSED;

  // New-style header
  if (content().size() < sizeof(typename ELFT::Chdr)) {
    ErrAlways(ctx) << this << ": corrupted compressed section";
    return;
  }

  // Validate the compression type and that the required library is linked in.
  auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content().data());
  if (hdr->ch_type == ELFCOMPRESS_ZLIB) {
    if (!compression::zlib::isAvailable())
      ErrAlways(ctx) << this
                     << " is compressed with ELFCOMPRESS_ZLIB, but lld is "
                        "not built with zlib support";
  } else if (hdr->ch_type == ELFCOMPRESS_ZSTD) {
    if (!compression::zstd::isAvailable())
      ErrAlways(ctx) << this
                     << " is compressed with ELFCOMPRESS_ZSTD, but lld is "
                        "not built with zstd support";
  } else {
    ErrAlways(ctx) << this << ": unsupported compression type ("
                   << uint32_t(hdr->ch_type) << ")";
    return;
  }

  // From here on, `size` is the uncompressed size; the original on-disk size
  // is kept in compressedSize for the eventual decompress() call.
  compressed = true;
  compressedSize = size;
  size = hdr->ch_size;
  addralign = std::max<uint32_t>(hdr->ch_addralign, 1);
}
284
285InputSection *InputSectionBase::getLinkOrderDep() const {
286 assert(flags & SHF_LINK_ORDER);
287 if (!link)
288 return nullptr;
289 return cast<InputSection>(Val: file->getSections()[link]);
290}
291
292// Find a symbol that encloses a given location.
293Defined *InputSectionBase::getEnclosingSymbol(uint64_t offset,
294 uint8_t type) const {
295 if (file->isInternal())
296 return nullptr;
297 for (Symbol *b : file->getSymbols())
298 if (Defined *d = dyn_cast<Defined>(Val: b))
299 if (d->section == this && d->value <= offset &&
300 offset < d->value + d->size && (type == 0 || type == d->type))
301 return d;
302 return nullptr;
303}
304
// Returns an object file location string. Used to construct an error message.
// The result looks like "file.o:(.text+0x123" or, when the offset falls
// inside a known function symbol, "file.o:(function foo: .text+0x123".
// Note: only the closing paren is included here; openers come from callers.
std::string InputSectionBase::getLocation(uint64_t offset) const {
  std::string secAndOffset =
      (name + "+0x" + Twine::utohexstr(Val: offset) + ")").str();

  std::string filename = toStr(getCtx(), f: file);
  if (Defined *d = getEnclosingFunction(offset))
    return filename + ":(function " + toStr(getCtx(), *d) + ": " + secAndOffset;

  return filename + ":(" + secAndOffset;
}
316
317static void printFileLine(const ELFSyncStream &s, StringRef path,
318 unsigned line) {
319 StringRef filename = path::filename(path);
320 s << filename << ':' << line;
321 if (filename != path)
322 s << " (" << path << ':' << line << ')';
323}
324
// Print a source location that looks like this:
//
//   foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42)
//
// Resolution order: DWARF line table, then DWARF variable location, then the
// object file's STT_FILE symbol as a last resort.
const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
                                     InputSectionBase::SrcMsg &&msg) {
  auto &sec = msg.sec;
  // Only ELF object files carry DWARF we can query.
  if (sec.file->kind() != InputFile::ObjKind)
    return s;
  auto &file = cast<ELFFileBase>(Val&: *sec.file);

  // First, look up the DWARF line table.
  ArrayRef<InputSectionBase *> sections = file.getSections();
  auto it = llvm::find(Range&: sections, Val: &sec);
  uint64_t sectionIndex = it != sections.end()
                              ? it - sections.begin()
                              : object::SectionedAddress::UndefSection;
  DWARFCache *dwarf = file.getDwarf();
  if (auto info = dwarf->getDILineInfo(offset: msg.offset, sectionIndex))
    printFileLine(s, path: info->FileName, line: info->Line);
  else if (auto fileLine = dwarf->getVariableLoc(name: msg.sym.getName()))
    // If it failed, look up again as a variable.
    printFileLine(s, path: fileLine->first, line: fileLine->second);
  else
    // File.sourceFile contains STT_FILE symbol, and that is a last resort.
    s << file.sourceFile;
  return s;
}
352
// Prints a filename string along with an optional section name. This
// function is intended to be used for constructing an error
// message. The printed message looks like this:
//
//   path/to/foo.o:(function bar)
//
// or
//
//   path/to/foo.o:(function bar) in archive path/to/bar.a
const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
                                     InputSectionBase::ObjMsg &&msg) {
  auto *sec = msg.sec;
  s << sec->file->getName() << ":(";

  // Find a symbol that encloses a given location. getObjMsg may be called
  // before ObjFile::initSectionsAndLocalSyms where local symbols are
  // initialized.
  if (Defined *d = sec->getEnclosingSymbol(offset: msg.offset))
    s << d;
  else
    // No enclosing symbol: fall back to a raw section+offset description.
    s << sec->name << "+0x" << Twine::utohexstr(Val: msg.offset);
  s << ')';
  if (!sec->file->archiveName.empty())
    s << (" in archive " + sec->file->archiveName).str();
  return s;
}
379
// A placeholder section cloned from `source` that may later be materialized
// in the input section description `isd` (linker-script section spilling).
// It carries no contents ({}), only the source's identifying attributes.
// NOTE(review): `source.addralign` is passed for both the addralign AND the
// entsize constructor parameters; confirm whether `source.entsize` was
// intended for the second. Entsize matters for mergeable sections, so this
// is likely benign for a spill placeholder, but it reads like a slip.
PotentialSpillSection::PotentialSpillSection(const InputSectionBase &source,
                                             InputSectionDescription &isd)
    : InputSection(source.file, source.name, source.type, source.flags,
                   source.addralign, source.addralign, {}, SectionBase::Spill),
      isd(&isd) {}
385
// Sentinel object representing a discarded section; code identifies it by
// address (see the assert in the InputSection constructor below). It is the
// only InputSection allowed to have a null file.
InputSection InputSection::discarded(nullptr, "", 0, 0, 0, 0,
                                     ArrayRef<uint8_t>());
388
// Constructor used when no ELF section header is available; sh_link and
// sh_info default to 0. A null file is permitted only for the `discarded`
// sentinel defined above.
InputSection::InputSection(InputFile *f, StringRef name, uint32_t type,
                           uint64_t flags, uint32_t addralign, uint32_t entsize,
                           ArrayRef<uint8_t> data, Kind k)
    : InputSectionBase(f, name, type, flags,
                       /*link=*/0, /*info=*/0, addralign, /*entsize=*/entsize,
                       data, k) {
  assert(f || this == &InputSection::discarded);
}
397
// Construct a regular input section directly from an ELF section header.
template <class ELFT>
InputSection::InputSection(ObjFile<ELFT> &f, const typename ELFT::Shdr &header,
                           StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Regular) {}
402
403// Copy SHT_GROUP section contents. Used only for the -r option.
404template <class ELFT> void InputSection::copyShtGroup(uint8_t *buf) {
405 // ELFT::Word is the 32-bit integral type in the target endianness.
406 using u32 = typename ELFT::Word;
407 ArrayRef<u32> from = getDataAs<u32>();
408 auto *to = reinterpret_cast<u32 *>(buf);
409
410 // The first entry is not a section number but a flag.
411 *to++ = from[0];
412
413 // Adjust section numbers because section numbers in an input object files are
414 // different in the output. We also need to handle combined or discarded
415 // members.
416 ArrayRef<InputSectionBase *> sections = file->getSections();
417 DenseSet<uint32_t> seen;
418 for (uint32_t idx : from.slice(1)) {
419 OutputSection *osec = sections[idx]->getOutputSection();
420 if (osec && seen.insert(V: osec->sectionIndex).second)
421 *to++ = osec->sectionIndex;
422 }
423}
424
425InputSectionBase *InputSection::getRelocatedSection() const {
426 if (file->isInternal() || !isStaticRelSecType(type))
427 return nullptr;
428 ArrayRef<InputSectionBase *> sections = file->getSections();
429 return sections[info];
430}
431
// Dispatch for -r/--emit-relocs copying: choose between the relaxation-
// updated in-memory Relocation list and a lazily-mapped view over the raw
// REL/RELA records, then hand off to the worker overload below.
template <class ELFT, class RelTy>
void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf) {
  bool linkerRelax =
      ctx.arg.relax && is_contained(Set: {EM_RISCV, EM_LOONGARCH}, Element: ctx.arg.emachine);
  if (!ctx.arg.relocatable && (linkerRelax || ctx.arg.branchToBranch)) {
    // On LoongArch and RISC-V, relaxation might change relocations: copy
    // from internal ones that are updated by relaxation.
    InputSectionBase *sec = getRelocatedSection();
    copyRelocations<ELFT, RelTy>(
        ctx, buf,
        llvm::make_range(x: sec->relocations.begin(), y: sec->relocations.end()));
  } else {
    // Convert the raw relocations in the input section into Relocation objects
    // suitable to be used by copyRelocations below.
    struct MapRel {
      Ctx &ctx;
      const ObjFile<ELFT> &file;
      Relocation operator()(const RelTy &rel) const {
        // RelExpr is not used so set to a dummy value.
        return Relocation{R_NONE, rel.getType(ctx.arg.isMips64EL), rel.r_offset,
                          getAddend<ELFT>(rel), &file.getRelocTargetSym(rel)};
      }
    };

    // Wrap the raw records in a mapped_iterator so the worker sees a range of
    // Relocation objects without materializing a temporary vector.
    using RawRels = ArrayRef<RelTy>;
    using MapRelIter =
        llvm::mapped_iterator<typename RawRels::iterator, MapRel>;
    auto mapRel = MapRel{ctx, *getFile<ELFT>()};
    RawRels rawRels = getDataAs<RelTy>();
    auto rels = llvm::make_range(MapRelIter(rawRels.begin(), mapRel),
                                 MapRelIter(rawRels.end(), mapRel));
    copyRelocations<ELFT, RelTy>(ctx, buf, rels);
  }
}
466
// This is used for -r and --emit-relocs. We can't use memcpy to copy
// relocations because we need to update symbol table offset and section index
// for each relocation. So we copy relocations one by one.
template <class ELFT, class RelTy, class RelIt>
void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf,
                                   llvm::iterator_range<RelIt> rels) {
  const TargetInfo &target = *ctx.target;
  InputSectionBase *sec = getRelocatedSection();
  (void)sec->contentMaybeDecompress(); // uncompress if needed

  for (const Relocation &rel : rels) {
    RelType type = rel.type;
    const ObjFile<ELFT> *file = getFile<ELFT>();
    Symbol &sym = *rel.sym;

    // `p` is viewed as a Rela record, but `buf` advances by sizeof(RelTy), so
    // only the fields RelTy shares are written unless RelTy::HasAddend.
    auto *p = reinterpret_cast<typename ELFT::Rela *>(buf);
    buf += sizeof(RelTy);

    if (RelTy::HasAddend)
      p->r_addend = rel.addend;

    // Output section VA is zero for -r, so r_offset is an offset within the
    // section, but for --emit-relocs it is a virtual address.
    p->r_offset = sec->getVA(offset: rel.offset);
    p->setSymbolAndType(ctx.in.symTab->getSymbolIndex(sym), type,
                        ctx.arg.isMips64EL);

    if (sym.type == STT_SECTION) {
      // We combine multiple section symbols into only one per
      // section. This means we have to update the addend. That is
      // trivial for Elf_Rela, but for Elf_Rel we have to write to the
      // section data. We do that by adding to the Relocation vector.

      // .eh_frame is horribly special and can reference discarded sections. To
      // avoid having to parse and recreate .eh_frame, we just replace any
      // relocation in it pointing to discarded sections with R_*_NONE, which
      // hopefully creates a frame that is ignored at runtime. Also, don't warn
      // on .gcc_except_table and debug sections.
      //
      // See the comment in maybeReportUndefined for PPC32 .got2 and PPC64 .toc
      auto *d = dyn_cast<Defined>(Val: &sym);
      if (!d) {
        // The section symbol's section was discarded: degrade the relocation
        // to R_*_NONE (symbol 0, type 0), warning unless the section is one
        // of the known-benign cases listed above.
        if (!isDebugSection(sec: *sec) && sec->name != ".eh_frame" &&
            sec->name != ".gcc_except_table" && sec->name != ".got2" &&
            sec->name != ".toc") {
          uint32_t secIdx = cast<Undefined>(Val&: sym).discardedSecIdx;
          Elf_Shdr_Impl<ELFT> sec = file->template getELFShdrs<ELFT>()[secIdx];
          Warn(ctx) << "relocation refers to a discarded section: "
                    << CHECK2(file->getObj().getSectionName(sec), file)
                    << "\n>>> referenced by " << getObjMsg(offset: p->r_offset);
        }
        p->setSymbolAndType(0, 0, false);
        continue;
      }
      SectionBase *section = d->section;
      assert(section->isLive());

      // REL stores the addend in the relocated bytes themselves.
      int64_t addend = rel.addend;
      const uint8_t *bufLoc = sec->content().begin() + rel.offset;
      if (!RelTy::HasAddend)
        addend = target.getImplicitAddend(buf: bufLoc, type);

      if (ctx.arg.emachine == EM_MIPS &&
          target.getRelExpr(type, s: sym, loc: bufLoc) == RE_MIPS_GOTREL) {
        // Some MIPS relocations depend on "gp" value. By default,
        // this value has 0x7ff0 offset from a .got section. But
        // relocatable files produced by a compiler or a linker
        // might redefine this default value and we must use it
        // for a calculation of the relocation result. When we
        // generate EXE or DSO it's trivial. Generating a relocatable
        // output is more difficult case because the linker does
        // not calculate relocations in this mode and loses
        // individual "gp" values used by each input object file.
        // As a workaround we add the "gp" value to the relocation
        // addend and save it back to the file.
        addend += sec->getFile<ELFT>()->mipsGp0;
      }

      if (RelTy::HasAddend)
        p->r_addend =
            sym.getVA(ctx, addend) - section->getOutputSection()->addr;
      // For SHF_ALLOC sections relocated by REL, append a relocation to
      // sec->relocations so that relocateAlloc transitively called by
      // writeSections will update the implicit addend. Non-SHF_ALLOC sections
      // utilize relocateNonAlloc to process raw relocations and do not need
      // this sec->relocations change.
      else if (ctx.arg.relocatable && (sec->flags & SHF_ALLOC) &&
               type != target.noneRel)
        sec->addReloc(r: {.expr: R_ABS, .type: type, .offset: rel.offset, .addend: addend, .sym: &sym});
    } else if (ctx.arg.emachine == EM_PPC && type == R_PPC_PLTREL24 &&
               p->r_addend >= 0x8000 && sec->file->ppc32Got2) {
      // Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24
      // indicates that r30 is relative to the input section .got2
      // (r_addend>=0x8000), after linking, r30 should be relative to the output
      // section .got2 . To compensate for the shift, adjust r_addend by
      // ppc32Got->outSecOff.
      p->r_addend += sec->file->ppc32Got2->outSecOff;
    }
  }
}
567
// The ARM and AArch64 ABI handle pc-relative relocations to undefined weak
// references specially. The general rule is that the value of the symbol in
// this context is the address of the place P. A further special case is that
// branch relocations to an undefined weak reference resolve to the next
// instruction.
static uint32_t getARMUndefinedRelativeWeakVA(RelType type, uint32_t a,
                                              uint32_t p) {
  switch (type) {
  // Unresolved branch relocations to weak references resolve to next
  // instruction, this will be either 2 or 4 bytes on from P.
  case R_ARM_THM_JUMP8:
  case R_ARM_THM_JUMP11:
    return p + 2 + a;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    return p + 4 + a;
  case R_ARM_THM_CALL:
    // We don't want an interworking BLX to ARM: p + 4 is the next
    // instruction, and the extra 1 keeps bit 0 set so the target stays in
    // Thumb state.
    return p + 5 + a;
  // Unresolved non branch pc-relative relocations
  // R_ARM_TARGET2 which can be resolved relatively is not present as it never
  // targets a weak-reference.
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_ALU_PREL_11_0:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_PC12:
    return p + a;
  // p + a is unrepresentable as negative immediates can't be encoded.
  case R_ARM_THM_PC8:
    return p;
  }
  llvm_unreachable("ARM pc-relative relocation expected\n");
}
609
610// The comment above getARMUndefinedRelativeWeakVA applies to this function.
611static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
612 switch (type) {
613 // Unresolved branch relocations to weak references resolve to next
614 // instruction, this is 4 bytes on from P.
615 case R_AARCH64_CALL26:
616 case R_AARCH64_CONDBR19:
617 case R_AARCH64_JUMP26:
618 case R_AARCH64_TSTBR14:
619 return p + 4;
620 // Unresolved non branch pc-relative relocations
621 case R_AARCH64_PREL16:
622 case R_AARCH64_PREL32:
623 case R_AARCH64_PREL64:
624 case R_AARCH64_ADR_PREL_LO21:
625 case R_AARCH64_LD_PREL_LO19:
626 case R_AARCH64_PLT32:
627 return p;
628 }
629 llvm_unreachable("AArch64 pc-relative relocation expected\n");
630}
631
632static uint64_t getRISCVUndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
633 switch (type) {
634 case R_RISCV_BRANCH:
635 case R_RISCV_JAL:
636 case R_RISCV_CALL:
637 case R_RISCV_CALL_PLT:
638 case R_RISCV_RVC_BRANCH:
639 case R_RISCV_RVC_JUMP:
640 case R_RISCV_PLT32:
641 return p;
642 default:
643 return 0;
644 }
645}
646
647// ARM SBREL relocations are of the form S + A - B where B is the static base
648// The ARM ABI defines base to be "addressing origin of the output segment
649// defining the symbol S". We defined the "addressing origin"/static base to be
650// the base of the PT_LOAD segment containing the Sym.
651// The procedure call standard only defines a Read Write Position Independent
652// RWPI variant so in practice we should expect the static base to be the base
653// of the RW segment.
654static uint64_t getARMStaticBase(const Symbol &sym) {
655 OutputSection *os = sym.getOutputSection();
656 if (!os || !os->ptLoad || !os->ptLoad->firstSec) {
657 Err(ctx&: os->ctx) << "SBREL relocation to " << sym.getName()
658 << " without static base";
659 return 0;
660 }
661 return os->ptLoad->firstSec->addr;
662}
663
// Traits describing the RISC-V PC-relative HI20/LO12 relocation pairing.
// Used by getPCRelHi20 below for diagnostics (the reloc-name strings) and
// for recognizing which relocation types can be the HI half of a pair.
struct RISCVPCRel {
  static constexpr const char *loReloc = "R_RISCV_PCREL_LO12";
  static constexpr const char *hiReloc = "R_RISCV_PCREL_HI20";

  // True for any relocation type that may serve as the HI20 half.
  static bool isHiReloc(uint32_t type) {
    return is_contained(Set: {R_RISCV_PCREL_HI20, R_RISCV_GOT_HI20,
                         R_RISCV_TLS_GD_HI20, R_RISCV_TLS_GOT_HI20},
                        Element: type);
  }
};
674
// LoongArch counterpart of RISCVPCRel: traits for the PCADD HI20/LO12
// relocation pairing, consumed by getPCRelHi20 below.
struct LoongArchPCAdd {
  static constexpr const char *loReloc = "R_LARCH_*PCADD_LO12";
  static constexpr const char *hiReloc = "R_LARCH_*PCADD_HI20";

  // True for any relocation type that may serve as the HI20 half.
  static bool isHiReloc(uint32_t type) {
    return is_contained(Set: {R_LARCH_PCADD_HI20, R_LARCH_GOT_PCADD_HI20,
                         R_LARCH_TLS_IE_PCADD_HI20, R_LARCH_TLS_LD_PCADD_HI20,
                         R_LARCH_TLS_GD_PCADD_HI20,
                         R_LARCH_TLS_DESC_PCADD_HI20},
                        Element: type);
  }
};
687
// For PC-relative indirect relocations (e.g. R_RISCV_PCREL_LO12_* and
// R_LARCH_*PCADD_LO12), the symbol referenced by the LO12 relocation does not
// directly represent the final target address. Instead, it points to the
// corresponding HI20 relocation, and the target VA is computed using the
// symbol associated with that HI20 relocation.
//
// This helper locates and returns the matching HI20 relocation corresponding
// to a given LO12 relocation, or null (after emitting a diagnostic) when the
// pairing is malformed.
template <typename PCRel>
static Relocation *getPCRelHi20(Ctx &ctx, const InputSectionBase *loSec,
                                const Relocation &loReloc) {
  int64_t addend = loReloc.addend;
  Symbol *sym = loReloc.sym;

  // The LO12 symbol must be a defined symbol in the same section as the LO12
  // relocation itself; reject undefined, absolute, or cross-section symbols.
  const Defined *d = dyn_cast<Defined>(Val: sym);
  if (!d) {
    Err(ctx) << loSec->getLocation(offset: loReloc.offset)
             << " points to undefined symbol";
    return nullptr;
  }
  if (!d->section) {
    Err(ctx) << loSec->getLocation(offset: loReloc.offset) << ": " << PCRel::loReloc
             << " relocation points to an absolute symbol: " << sym->getName();
    return nullptr;
  }
  InputSection *hiSec = cast<InputSection>(Val: d->section);

  if (hiSec != loSec) {
    Err(ctx) << loSec->getLocation(offset: loReloc.offset) << ": " << PCRel::loReloc
             << " relocation points to a symbol '" << sym->getName()
             << "' in a different section '" << hiSec->name << "'";
    return nullptr;
  }

  if (addend != 0)
    Warn(ctx) << loSec->getLocation(offset: loReloc.offset) << ": non-zero addend in "
              << PCRel::loReloc << " relocation to "
              << hiSec->getObjMsg(offset: d->value) << " is ignored";

  // Relocations are sorted by offset, so we can use std::equal_range to do
  // binary search.
  Relocation hiReloc;
  hiReloc.offset = d->value + addend;
  auto range =
      std::equal_range(hiSec->relocs().begin(), hiSec->relocs().end(), hiReloc,
                       [](const Relocation &lhs, const Relocation &rhs) {
                         return lhs.offset < rhs.offset;
                       });

  // Several relocations may share the offset; pick the one that is a HI20.
  for (auto it = range.first; it != range.second; ++it)
    if (PCRel::isHiReloc(it->type))
      return &*it;

  Err(ctx) << loSec->getLocation(offset: loReloc.offset) << ": " << PCRel::loReloc
           << " relocation points to " << hiSec->getObjMsg(offset: d->value)
           << " without an associated " << PCRel::hiReloc << " relocation";
  return nullptr;
}
746
// A TLS symbol's virtual address is relative to the TLS segment. Add a
// target-specific adjustment to produce a thread-pointer-relative offset.
static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) {
  // On targets that support TLSDESC, _TLS_MODULE_BASE_@tpoff = 0.
  if (&s == ctx.sym.tlsModuleBase)
    return 0;

  // There are 2 TLS layouts. Among targets we support, x86 uses TLS Variant 2
  // while most others use Variant 1. At run time TP will be aligned to p_align.

  // Variant 1. TP will be followed by an optional gap (which is the size of 2
  // pointers on ARM/AArch64, 0 on other targets), followed by alignment
  // padding, then the static TLS blocks. The alignment padding is added so that
  // (TP + gap + padding) is congruent to p_vaddr modulo p_align.
  //
  // Variant 2. Static TLS blocks, followed by alignment padding are placed
  // before TP. The alignment padding is added so that (TP - padding -
  // p_memsz) is congruent to p_vaddr modulo p_align.
  PhdrEntry *tls = ctx.tlsPhdr;
  if (!tls) // Reported an error in getSymVA
    return 0;
  switch (ctx.arg.emachine) {
  // Variant 1.
  case EM_ARM:
  case EM_AARCH64:
    // (x & (p_align - 1)) below computes the alignment padding described
    // above; the 2-pointer gap precedes it on these targets.
    return s.getVA(ctx, addend: 0) + ctx.arg.wordsize * 2 +
           ((tls->p_vaddr - ctx.arg.wordsize * 2) & (tls->p_align - 1));
  case EM_MIPS:
  case EM_PPC:
  case EM_PPC64:
    // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is
    // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library
    // data and 0xf000 of the program's TLS segment.
    return s.getVA(ctx, addend: 0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
  case EM_LOONGARCH:
  case EM_RISCV:
    // See the comment in handleTlsRelocation. For TLSDESC=>IE,
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} also reach here. While
    // `tls` may be null, the return value is ignored.
    if (s.type != STT_TLS)
      return 0;
    return s.getVA(ctx, addend: 0) + (tls->p_vaddr & (tls->p_align - 1));

  // Variant 2.
  case EM_HEXAGON:
  case EM_S390:
  case EM_SPARCV9:
  case EM_386:
  case EM_X86_64:
    return s.getVA(ctx, addend: 0) - tls->p_memsz -
           ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1));
  default:
    llvm_unreachable("unhandled ctx.arg.emachine");
  }
}
802
// Compute the final value of relocation `r` when applied at virtual address
// `p` (the "place"). The RelExpr computed earlier by getRelExpr selects which
// formula to use; `a` is the (possibly implicit) addend. The returned value is
// what the target's relocate() routine will encode into the instruction or
// data word.
uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
                                            uint64_t p) const {
  int64_t a = r.addend;
  switch (r.expr) {
  case R_ABS:
  case R_DTPREL:
  case R_RELAX_TLS_LD_TO_LE_ABS:
  case R_RELAX_GOT_PC_NOPIC:
  case RE_AARCH64_AUTH:
  case RE_RISCV_ADD:
  case RE_RISCV_LEB128:
    return r.sym->getVA(ctx, addend: a);
  case R_ADDEND:
    return a;
  case R_ADDEND_NEG:
    return -static_cast<uint64_t>(a);
  case R_RELAX_HINT:
    return 0;
  case RE_ARM_SBREL:
    return r.sym->getVA(ctx, addend: a) - getARMStaticBase(sym: *r.sym);
  case R_GOT:
  case RE_AARCH64_AUTH_GOT:
  case R_RELAX_TLS_GD_TO_IE_ABS:
    return r.sym->getGotVA(ctx) + a;
  case RE_LOONGARCH_GOT:
    // The LoongArch TLS GD relocs reuse the R_LARCH_GOT_PC_LO12 reloc r.type
    // for their page offsets. The arithmetics are different in the TLS case
    // so we have to duplicate some logic here.
    if (r.sym->hasFlag(bit: NEEDS_TLSGD) && r.type != R_LARCH_TLS_IE_PC_LO12)
      // Like RE_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value.
      return ctx.in.got->getGlobalDynAddr(b: *r.sym) + a;
    return r.sym->getGotVA(ctx) + a;
  case R_GOTONLY_PC:
    return ctx.in.got->getVA() + a - p;
  case R_GOTPLTONLY_PC:
    return ctx.in.gotPlt->getVA() + a - p;
  case R_GOTREL:
  case RE_PPC64_RELAX_TOC:
    return r.sym->getVA(ctx, addend: a) - ctx.in.got->getVA();
  case R_GOTPLTREL:
    return r.sym->getVA(ctx, addend: a) - ctx.in.gotPlt->getVA();
  case R_GOTPLT:
  case R_RELAX_TLS_GD_TO_IE_GOTPLT:
    return r.sym->getGotVA(ctx) + a - ctx.in.gotPlt->getVA();
  case R_TLSLD_GOT_OFF:
  case R_GOT_OFF:
  case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
    return r.sym->getGotOffset(ctx) + a;
  case RE_AARCH64_GOT_PAGE_PC:
  case RE_AARCH64_AUTH_GOT_PAGE_PC:
  case RE_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
    return getAArch64Page(expr: r.sym->getGotVA(ctx) + a) - getAArch64Page(expr: p);
  case RE_AARCH64_GOT_PAGE:
    return r.sym->getGotVA(ctx) + a - getAArch64Page(expr: ctx.in.got->getVA());
  case R_GOT_PC:
  case RE_AARCH64_AUTH_GOT_PC:
  case R_RELAX_TLS_GD_TO_IE:
    return r.sym->getGotVA(ctx) + a - p;
  case R_GOTPLT_GOTREL:
    return r.sym->getGotPltVA(ctx) + a - ctx.in.got->getVA();
  case R_GOTPLT_PC:
    return r.sym->getGotPltVA(ctx) + a - p;
  case RE_LOONGARCH_GOT_PAGE_PC:
  case RE_LOONGARCH_RELAX_TLS_GD_TO_IE_PAGE_PC:
    if (r.sym->hasFlag(bit: NEEDS_TLSGD))
      return getLoongArchPageDelta(dest: ctx.in.got->getGlobalDynAddr(b: *r.sym) + a, pc: p,
                                   type: r.type);
    return getLoongArchPageDelta(dest: r.sym->getGotVA(ctx) + a, pc: p, type: r.type);
  case RE_MIPS_GOTREL:
    return r.sym->getVA(ctx, addend: a) - ctx.in.mipsGot->getGp(f: file);
  case RE_MIPS_GOT_GP:
    return ctx.in.mipsGot->getGp(f: file) + a;
  case RE_MIPS_GOT_GP_PC: {
    // R_MIPS_LO16 expression has RE_MIPS_GOT_GP_PC r.type iif the target
    // is _gp_disp symbol. In that case we should use the following
    // formula for calculation "AHL + GP - P + 4". For details see p. 4-19 at
    // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
    // microMIPS variants of these relocations use slightly different
    // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
    // to correctly handle less-significant bit of the microMIPS symbol.
    uint64_t v = ctx.in.mipsGot->getGp(f: file) + a - p;
    if (r.type == R_MIPS_LO16 || r.type == R_MICROMIPS_LO16)
      v += 4;
    if (r.type == R_MICROMIPS_LO16 || r.type == R_MICROMIPS_HI16)
      v -= 1;
    return v;
  }
  case RE_MIPS_GOT_LOCAL_PAGE:
    // If relocation against MIPS local symbol requires GOT entry, this entry
    // should be initialized by 'page address'. This address is high 16-bits
    // of sum the symbol's value and the addend.
    return ctx.in.mipsGot->getVA() +
           ctx.in.mipsGot->getPageEntryOffset(f: file, s: *r.sym, addend: a) -
           ctx.in.mipsGot->getGp(f: file);
  case RE_MIPS_OSEC_LOCAL_PAGE:
    // This is used by the MIPS multi-GOT implementation. It relocates
    // addresses of 64kb pages that lie inside the output section that sym is
    // a representative for.
    return getMipsPageAddr(addr: r.sym->getOutputSection()->addr) + a;
  case RE_MIPS_GOT_OFF:
  case RE_MIPS_GOT_OFF32:
    // In case of MIPS if a GOT relocation has non-zero addend this addend
    // should be applied to the GOT entry content not to the GOT entry offset.
    // That is why we use separate expression r.type.
    return ctx.in.mipsGot->getVA() +
           ctx.in.mipsGot->getSymEntryOffset(f: file, s: *r.sym, addend: a) -
           ctx.in.mipsGot->getGp(f: file);
  case RE_MIPS_TLSGD:
    return ctx.in.mipsGot->getVA() +
           ctx.in.mipsGot->getGlobalDynOffset(f: file, s: *r.sym) -
           ctx.in.mipsGot->getGp(f: file);
  case RE_MIPS_TLSLD:
    return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(f: file) -
           ctx.in.mipsGot->getGp(f: file);
  case RE_AARCH64_PAGE_PC: {
    // An undefined weak resolves to the place itself, so that ADRP produces
    // a value on the same page as the instruction.
    uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(ctx, addend: a);
    return getAArch64Page(expr: val) - getAArch64Page(expr: p);
  }
  case RE_RISCV_PC_INDIRECT: {
    // A %pcrel_lo12 references the %pcrel_hi20 relocation's place; recurse
    // to evaluate the paired HI20 relocation at its own location.
    if (const Relocation *hiRel = getPCRelHi20<RISCVPCRel>(ctx, loSec: this, loReloc: r))
      return getRelocTargetVA(ctx, r: *hiRel, p: r.sym->getVA(ctx));
    return 0;
  }
  case RE_LOONGARCH_PC_INDIRECT: {
    if (const Relocation *hiRel = getPCRelHi20<LoongArchPCAdd>(ctx, loSec: this, loReloc: r))
      return getRelocTargetVA(ctx, r: *hiRel, p: r.sym->getVA(ctx, addend: a));
    return 0;
  }
  case RE_LOONGARCH_PAGE_PC:
    return getLoongArchPageDelta(dest: r.sym->getVA(ctx, addend: a), pc: p, type: r.type);
  case R_PC:
  case RE_ARM_PCA: {
    uint64_t dest;
    if (r.expr == RE_ARM_PCA)
      // Some PC relative ARM (Thumb) relocations align down the place.
      p = p & 0xfffffffc;
    if (r.sym->isUndefined()) {
      // On ARM and AArch64 a branch to an undefined weak resolves to the next
      // instruction, otherwise the place. On RISC-V, resolve an undefined weak
      // to the same instruction to cause an infinite loop (making the user
      // aware of the issue) while ensuring no overflow.
      // Note: if the symbol is hidden, its binding has been converted to local,
      // so we just check isUndefined() here.
      if (ctx.arg.emachine == EM_ARM)
        dest = getARMUndefinedRelativeWeakVA(type: r.type, a, p);
      else if (ctx.arg.emachine == EM_AARCH64)
        dest = getAArch64UndefinedRelativeWeakVA(type: r.type, p) + a;
      else if (ctx.arg.emachine == EM_PPC)
        dest = p;
      else if (ctx.arg.emachine == EM_RISCV)
        dest = getRISCVUndefinedRelativeWeakVA(type: r.type, p) + a;
      else
        dest = r.sym->getVA(ctx, addend: a);
    } else {
      dest = r.sym->getVA(ctx, addend: a);
    }
    return dest - p;
  }
  case R_PLT:
    return r.sym->getPltVA(ctx) + a;
  case R_PLT_PC:
  case RE_PPC64_CALL_PLT:
    return r.sym->getPltVA(ctx) + a - p;
  case RE_LOONGARCH_PLT_PAGE_PC:
    return getLoongArchPageDelta(dest: r.sym->getPltVA(ctx) + a, pc: p, type: r.type);
  case R_PLT_GOTPLT:
    return r.sym->getPltVA(ctx) + a - ctx.in.gotPlt->getVA();
  case R_PLT_GOTREL:
    return r.sym->getPltVA(ctx) + a - ctx.in.got->getVA();
  case RE_PPC32_PLTREL:
    // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
    // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
    // target VA computation.
    return r.sym->getPltVA(ctx) - p;
  case RE_PPC64_CALL: {
    uint64_t symVA = r.sym->getVA(ctx, addend: a);
    // If we have an undefined weak symbol, we might get here with a symbol
    // address of zero. That could overflow, but the code must be unreachable,
    // so don't bother doing anything at all.
    if (!symVA)
      return 0;

    // PPC64 V2 ABI describes two entry points to a function. The global entry
    // point is used for calls where the caller and callee (may) have different
    // TOC base pointers and r2 needs to be modified to hold the TOC base for
    // the callee. For local calls the caller and callee share the same
    // TOC base and so the TOC pointer initialization code should be skipped by
    // branching to the local entry point.
    return symVA - p +
           getPPC64GlobalEntryToLocalEntryOffset(ctx, stOther: r.sym->stOther);
  }
  case RE_PPC64_TOCBASE:
    return getPPC64TocBase(ctx) + a;
  case R_RELAX_GOT_PC:
  case RE_PPC64_RELAX_GOT_PC:
    return r.sym->getVA(ctx, addend: a) - p;
  case R_RELAX_TLS_GD_TO_LE:
  case R_RELAX_TLS_IE_TO_LE:
  case R_RELAX_TLS_LD_TO_LE:
  case R_TPREL:
    // It is not very clear what to return if the symbol is undefined. With
    // --noinhibit-exec, even a non-weak undefined reference may reach here.
    // Just return A, which matches R_ABS, and the behavior of some dynamic
    // loaders.
    if (r.sym->isUndefined())
      return a;
    return getTlsTpOffset(ctx, s: *r.sym) + a;
  case R_RELAX_TLS_GD_TO_LE_NEG:
  case R_TPREL_NEG:
    if (r.sym->isUndefined())
      return a;
    return -getTlsTpOffset(ctx, s: *r.sym) + a;
  case R_SIZE:
    return r.sym->getSize() + a;
  case R_TLSDESC:
  case RE_AARCH64_AUTH_TLSDESC:
    return ctx.in.got->getTlsDescAddr(sym: *r.sym) + a;
  case R_TLSDESC_PC:
    return ctx.in.got->getTlsDescAddr(sym: *r.sym) + a - p;
  case R_TLSDESC_GOTPLT:
    return ctx.in.got->getTlsDescAddr(sym: *r.sym) + a - ctx.in.gotPlt->getVA();
  case RE_AARCH64_TLSDESC_PAGE:
  case RE_AARCH64_AUTH_TLSDESC_PAGE:
    return getAArch64Page(expr: ctx.in.got->getTlsDescAddr(sym: *r.sym) + a) -
           getAArch64Page(expr: p);
  case RE_LOONGARCH_TLSDESC_PAGE_PC:
    return getLoongArchPageDelta(dest: ctx.in.got->getTlsDescAddr(sym: *r.sym) + a, pc: p,
                                 type: r.type);
  case R_TLSGD_GOT:
    return ctx.in.got->getGlobalDynOffset(b: *r.sym) + a;
  case R_TLSGD_GOTPLT:
    return ctx.in.got->getGlobalDynAddr(b: *r.sym) + a - ctx.in.gotPlt->getVA();
  case R_TLSGD_PC:
    return ctx.in.got->getGlobalDynAddr(b: *r.sym) + a - p;
  case RE_LOONGARCH_TLSGD_PAGE_PC:
    return getLoongArchPageDelta(dest: ctx.in.got->getGlobalDynAddr(b: *r.sym) + a, pc: p,
                                 type: r.type);
  case R_TLSLD_GOTPLT:
    return ctx.in.got->getVA() + ctx.in.got->getTlsIndexOff() + a -
           ctx.in.gotPlt->getVA();
  case R_TLSLD_GOT:
    return ctx.in.got->getTlsIndexOff() + a;
  case R_TLSLD_PC:
    return ctx.in.got->getTlsIndexVA() + a - p;
  default:
    llvm_unreachable("invalid expression");
  }
}
1051
1052// This function applies relocations to sections without SHF_ALLOC bit.
1053// Such sections are never mapped to memory at runtime. Debug sections are
1054// an example. Relocations in non-alloc sections are much easier to
1055// handle than in allocated sections because it will never need complex
1056// treatment such as GOT or PLT (because at runtime no one refers them).
1057// So, we handle relocations for non-alloc sections directly in this
1058// function as a performance optimization.
1059template <class ELFT, class RelTy>
1060void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf,
1061 Relocs<RelTy> rels) {
1062 const unsigned bits = sizeof(typename ELFT::uint) * 8;
1063 const TargetInfo &target = *ctx.target;
1064 const auto emachine = ctx.arg.emachine;
1065 const bool isDebug = isDebugSection(sec: *this);
1066 const bool isDebugLine = isDebug && name == ".debug_line";
1067 std::optional<uint64_t> tombstone;
1068 if (isDebug) {
1069 if (name == ".debug_loc" || name == ".debug_ranges")
1070 tombstone = 1;
1071 else if (name == ".debug_names")
1072 tombstone = UINT64_MAX; // tombstone value
1073 else
1074 tombstone = 0;
1075 }
1076 for (const auto &patAndValue : llvm::reverse(C&: ctx.arg.deadRelocInNonAlloc))
1077 if (patAndValue.first.match(S: this->name)) {
1078 tombstone = patAndValue.second;
1079 break;
1080 }
1081
1082 const InputFile *f = this->file;
1083 for (auto it = rels.begin(), end = rels.end(); it != end; ++it) {
1084 const RelTy &rel = *it;
1085 const RelType type = rel.getType(ctx.arg.isMips64EL);
1086 const uint64_t offset = rel.r_offset;
1087 uint8_t *bufLoc = buf + offset;
1088 int64_t addend = getAddend<ELFT>(rel);
1089 if (!RelTy::HasAddend)
1090 addend += target.getImplicitAddend(buf: bufLoc, type);
1091
1092 Symbol &sym = f->getRelocTargetSym(rel);
1093 RelExpr expr = target.getRelExpr(type, s: sym, loc: bufLoc);
1094 if (expr == R_NONE)
1095 continue;
1096 auto *ds = dyn_cast<Defined>(Val: &sym);
1097
1098 if (emachine == EM_RISCV && type == R_RISCV_SET_ULEB128) {
1099 if (++it != end &&
1100 it->getType(/*isMips64EL=*/false) == R_RISCV_SUB_ULEB128 &&
1101 it->r_offset == offset) {
1102 uint64_t val;
1103 if (!ds && tombstone) {
1104 val = *tombstone;
1105 } else {
1106 val = sym.getVA(ctx, addend) -
1107 (f->getRelocTargetSym(*it).getVA(ctx) + getAddend<ELFT>(*it));
1108 }
1109 if (overwriteULEB128(bufLoc, val) >= 0x80)
1110 Err(ctx) << getLocation(offset) << ": ULEB128 value " << val
1111 << " exceeds available space; references '" << &sym << "'";
1112 continue;
1113 }
1114 Err(ctx) << getLocation(offset)
1115 << ": R_RISCV_SET_ULEB128 not paired with R_RISCV_SUB_SET128";
1116 return;
1117 }
1118
1119 if (tombstone && (expr == R_ABS || expr == R_DTPREL)) {
1120 // Resolve relocations in .debug_* referencing (discarded symbols or ICF
1121 // folded section symbols) to a tombstone value. Resolving to addend is
1122 // unsatisfactory because the result address range may collide with a
1123 // valid range of low address, or leave multiple CUs claiming ownership of
1124 // the same range of code, which may confuse consumers.
1125 //
1126 // To address the problems, we use -1 as a tombstone value for most
1127 // .debug_* sections. We have to ignore the addend because we don't want
1128 // to resolve an address attribute (which may have a non-zero addend) to
1129 // -1+addend (wrap around to a low address).
1130 //
1131 // R_DTPREL type relocations represent an offset into the dynamic thread
1132 // vector. The computed value is st_value plus a non-negative offset.
1133 // Negative values are invalid, so -1 can be used as the tombstone value.
1134 //
1135 // If the referenced symbol is relative to a discarded section (due to
1136 // --gc-sections, COMDAT, etc), it has been converted to a Undefined.
1137 // `ds->folded` catches the ICF folded case. However, resolving a
1138 // relocation in .debug_line to -1 would stop debugger users from setting
1139 // breakpoints on the folded-in function, so exclude .debug_line.
1140 //
1141 // For pre-DWARF-v5 .debug_loc and .debug_ranges, -1 is a reserved value
1142 // (base address selection entry), use 1 (which is used by GNU ld for
1143 // .debug_ranges).
1144 //
1145 // TODO To reduce disruption, we use 0 instead of -1 as the tombstone
1146 // value. Enable -1 in a future release.
1147 if (!ds || (ds->folded && !isDebugLine)) {
1148 // If -z dead-reloc-in-nonalloc= is specified, respect it.
1149 uint64_t value = SignExtend64<bits>(*tombstone);
1150 // For a 32-bit local TU reference in .debug_names, X86_64::relocate
1151 // requires that the unsigned value for R_X86_64_32 is truncated to
1152 // 32-bit. Other 64-bit targets's don't discern signed/unsigned 32-bit
1153 // absolute relocations and do not need this change.
1154 if (emachine == EM_X86_64 && type == R_X86_64_32)
1155 value = static_cast<uint32_t>(value);
1156 target.relocateNoSym(loc: bufLoc, type, val: value);
1157 continue;
1158 }
1159 }
1160
1161 // For a relocatable link, content relocated by relocation types with an
1162 // explicit addend, such as RELA, remain unchanged and we can stop here.
1163 // While content relocated by relocation types with an implicit addend, such
1164 // as REL, needs the implicit addend updated.
1165 if (ctx.arg.relocatable && (RelTy::HasAddend || sym.type != STT_SECTION))
1166 continue;
1167
1168 // R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
1169 // sections.
1170 if (LLVM_LIKELY(expr == R_ABS) || expr == R_DTPREL || expr == R_GOTPLTREL ||
1171 expr == RE_RISCV_ADD || expr == RE_ARM_SBREL) {
1172 target.relocateNoSym(loc: bufLoc, type,
1173 val: SignExtend64<bits>(sym.getVA(ctx, addend)));
1174 continue;
1175 }
1176
1177 if (expr == R_SIZE) {
1178 target.relocateNoSym(loc: bufLoc, type,
1179 val: SignExtend64<bits>(sym.getSize() + addend));
1180 continue;
1181 }
1182
1183 // If the control reaches here, we found a PC-relative relocation in a
1184 // non-ALLOC section. Since non-ALLOC section is not loaded into memory
1185 // at runtime, the notion of PC-relative doesn't make sense here. So,
1186 // this is a usage error. However, GNU linkers historically accept such
1187 // relocations without any errors and relocate them as if they were at
1188 // address 0. For bug-compatibility, we accept them with warnings. We
1189 // know Steel Bank Common Lisp as of 2018 have this bug.
1190 //
1191 // GCC 8.0 or earlier have a bug that they emit R_386_GOTPC relocations
1192 // against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug has been fixed in
1193 // 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we need to
1194 // keep this bug-compatible code for a while.
1195 bool isErr = expr != R_PC && !(emachine == EM_386 && type == R_386_GOTPC);
1196 {
1197 ELFSyncStream diag(ctx, isErr && !ctx.arg.noinhibitExec
1198 ? DiagLevel::Err
1199 : DiagLevel::Warn);
1200 diag << getLocation(offset) << ": has non-ABS relocation " << type
1201 << " against symbol '" << &sym << "'";
1202 }
1203 if (!isErr)
1204 target.relocateNoSym(
1205 loc: bufLoc, type,
1206 val: SignExtend64<bits>(sym.getVA(ctx, addend: addend - offset - outSecOff)));
1207 }
1208}
1209
// Apply all scanned relocations of this section to the output buffer `buf`.
// `bufEnd` bounds the writable region for prologue adjustment.
template <class ELFT>
void InputSection::relocate(Ctx &ctx, uint8_t *buf, uint8_t *bufEnd) {
  // Split-stack prologue rewriting is only meaningful for executable code
  // coming from an object compiled with -fsplit-stack.
  if ((flags & SHF_EXECINSTR) && LLVM_UNLIKELY(getFile<ELFT>()->splitStack))
    adjustSplitStackFunctionPrologues<ELFT>(ctx, buf, bufEnd);

  // SHF_ALLOC sections go through the target's full relocation engine.
  if (flags & SHF_ALLOC) {
    ctx.target->relocateAlloc(sec&: *this, buf);
    return;
  }

  // Non-SHF_ALLOC sections (e.g. .debug_*) take the simplified path.
  auto *sec = cast<InputSection>(Val: this);
  // For a relocatable link, also call relocateNonAlloc() to rewrite applicable
  // locations with tombstone values.
  invokeOnRelocs(*sec, sec->relocateNonAlloc<ELFT>, ctx, buf);
}
1225
1226// For each function-defining prologue, find any calls to __morestack,
1227// and replace them with calls to __morestack_non_split.
1228static void switchMorestackCallsToMorestackNonSplit(
1229 Ctx &ctx, DenseSet<Defined *> &prologues,
1230 SmallVector<Relocation *, 0> &morestackCalls) {
1231
1232 // If the target adjusted a function's prologue, all calls to
1233 // __morestack inside that function should be switched to
1234 // __morestack_non_split.
1235 Symbol *moreStackNonSplit = ctx.symtab->find(name: "__morestack_non_split");
1236 if (!moreStackNonSplit) {
1237 ErrAlways(ctx) << "mixing split-stack objects requires a definition of "
1238 "__morestack_non_split";
1239 return;
1240 }
1241
1242 // Sort both collections to compare addresses efficiently.
1243 llvm::sort(C&: morestackCalls, Comp: [](const Relocation *l, const Relocation *r) {
1244 return l->offset < r->offset;
1245 });
1246 std::vector<Defined *> functions(prologues.begin(), prologues.end());
1247 llvm::sort(C&: functions, Comp: [](const Defined *l, const Defined *r) {
1248 return l->value < r->value;
1249 });
1250
1251 auto it = morestackCalls.begin();
1252 for (Defined *f : functions) {
1253 // Find the first call to __morestack within the function.
1254 while (it != morestackCalls.end() && (*it)->offset < f->value)
1255 ++it;
1256 // Adjust all calls inside the function.
1257 while (it != morestackCalls.end() && (*it)->offset < f->value + f->size) {
1258 (*it)->sym = moreStackNonSplit;
1259 ++it;
1260 }
1261 }
1262}
1263
1264static bool enclosingPrologueAttempted(uint64_t offset,
1265 const DenseSet<Defined *> &prologues) {
1266 for (Defined *f : prologues)
1267 if (f->value <= offset && offset < f->value + f->size)
1268 return true;
1269 return false;
1270}
1271
1272// If a function compiled for split stack calls a function not
1273// compiled for split stack, then the caller needs its prologue
1274// adjusted to ensure that the called function will have enough stack
1275// available. Find those functions, and adjust their prologues.
template <class ELFT>
void InputSectionBase::adjustSplitStackFunctionPrologues(Ctx &ctx, uint8_t *buf,
                                                         uint8_t *end) {
  // Functions whose prologues we attempted to adjust, and the __morestack
  // call sites found while scanning.
  DenseSet<Defined *> prologues;
  SmallVector<Relocation *, 0> morestackCalls;

  for (Relocation &rel : relocs()) {
    // Ignore calls into the split-stack api.
    if (rel.sym->getName().starts_with(Prefix: "__morestack")) {
      if (rel.sym->getName() == "__morestack")
        morestackCalls.push_back(Elt: &rel);
      continue;
    }

    // A relocation to non-function isn't relevant. Sometimes
    // __morestack is not marked as a function, so this check comes
    // after the name check.
    if (rel.sym->type != STT_FUNC)
      continue;

    // If the callee's-file was compiled with split stack, nothing to do. In
    // this context, a "Defined" symbol is one "defined by the binary currently
    // being produced". So an "undefined" symbol might be provided by a shared
    // library. It is not possible to tell how such symbols were compiled, so be
    // conservative.
    if (Defined *d = dyn_cast<Defined>(Val: rel.sym))
      if (InputSection *isec = cast_or_null<InputSection>(Val: d->section))
        if (!isec || !isec->getFile<ELFT>() || isec->getFile<ELFT>()->splitStack)
          continue;

    // Adjust each enclosing function at most once.
    if (enclosingPrologueAttempted(offset: rel.offset, prologues))
      continue;

    if (Defined *f = getEnclosingFunction(offset: rel.offset)) {
      prologues.insert(V: f);
      // Let the target try to rewrite the prologue in place; on failure,
      // report unless the file opted out via .note.GNU-no-split-stack.
      if (ctx.target->adjustPrologueForCrossSplitStack(loc: buf + f->value, end,
                                                       stOther: f->stOther))
        continue;
      if (!getFile<ELFT>()->someNoSplitStack)
        Err(ctx)
            << this << ": " << f->getName() << " (with -fsplit-stack) calls "
            << rel.sym->getName()
            << " (without -fsplit-stack), but couldn't adjust its prologue";
    }
  }

  // Calls to __morestack within adjusted functions must use the non-split
  // variant on targets that distinguish the two.
  if (ctx.target->needsMoreStackNonSplit)
    switchMorestackCallsToMorestackNonSplit(ctx, prologues, morestackCalls);
}
1325
1326template <class ELFT> void InputSection::writeTo(Ctx &ctx, uint8_t *buf) {
1327 if (LLVM_UNLIKELY(type == SHT_NOBITS))
1328 return;
1329 // If -r or --emit-relocs is given, then an InputSection
1330 // may be a relocation section.
1331 if (LLVM_UNLIKELY(type == SHT_RELA)) {
1332 copyRelocations<ELFT, typename ELFT::Rela>(ctx, buf);
1333 return;
1334 }
1335 if (LLVM_UNLIKELY(type == SHT_REL)) {
1336 copyRelocations<ELFT, typename ELFT::Rel>(ctx, buf);
1337 return;
1338 }
1339
1340 // If -r is given, we may have a SHT_GROUP section.
1341 if (LLVM_UNLIKELY(type == SHT_GROUP)) {
1342 copyShtGroup<ELFT>(buf);
1343 return;
1344 }
1345
1346 // If this is a compressed section, uncompress section contents directly
1347 // to the buffer.
1348 if (compressed) {
1349 auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content_);
1350 auto compressed = ArrayRef<uint8_t>(content_, compressedSize)
1351 .slice(N: sizeof(typename ELFT::Chdr));
1352 size_t size = this->size;
1353 if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
1354 ? compression::zlib::decompress(Input: compressed, Output: buf, UncompressedSize&: size)
1355 : compression::zstd::decompress(Input: compressed, Output: buf, UncompressedSize&: size))
1356 Err(ctx) << this << ": decompress failed: " << std::move(e);
1357 uint8_t *bufEnd = buf + size;
1358 relocate<ELFT>(ctx, buf, bufEnd);
1359 return;
1360 }
1361
1362 // Copy section contents from source object file to output file
1363 // and then apply relocations.
1364 memcpy(dest: buf, src: content().data(), n: content().size());
1365 relocate<ELFT>(ctx, buf, buf + content().size());
1366}
1367
1368void InputSection::replace(InputSection *other) {
1369 addralign = std::max(a: addralign, b: other->addralign);
1370
1371 // When a section is replaced with another section that was allocated to
1372 // another partition, the replacement section (and its associated sections)
1373 // need to be placed in the main partition so that both partitions will be
1374 // able to access it.
1375 if (partition != other->partition) {
1376 partition = 1;
1377 for (InputSection *isec : dependentSections)
1378 isec->partition = 1;
1379 }
1380
1381 other->repl = repl;
1382 other->markDead();
1383}
1384
// .eh_frame section constructor: delegates to InputSectionBase with the
// EHFrame kind so the section is routed through EhFrame handling.
template <class ELFT>
EhInputSection::EhInputSection(ObjFile<ELFT> &f,
                               const typename ELFT::Shdr &header,
                               StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::EHFrame) {}
1390
1391SyntheticSection *EhInputSection::getParent() const {
1392 return cast_or_null<SyntheticSection>(Val: parent);
1393}
1394
1395// .eh_frame is a sequence of CIE or FDE records.
1396// This function splits an input section into records and returns them.
1397// In rare cases (.eh_frame pieces are reordered by a linker script), the
1398// relocations may be unordered.
template <class ELFT> void EhInputSection::split() {
  // Convert the raw ELF relocations into internal Relocation records first.
  const RelsOrRelas<ELFT> elfRels = relsOrRelas<ELFT>();
  if (elfRels.areRelocsCrel())
    preprocessRelocs<ELFT>(elfRels.crels);
  else if (elfRels.areRelocsRel())
    preprocessRelocs<ELFT>(elfRels.rels);
  else
    preprocessRelocs<ELFT>(elfRels.relas);

  // The loop below expects the relocations to be sorted by offset.
  auto cmp = [](const Relocation &a, const Relocation &b) {
    return a.offset < b.offset;
  };
  if (!llvm::is_sorted(rels, cmp))
    llvm::stable_sort(rels, cmp);

  ArrayRef<uint8_t> d = content();
  const char *msg = nullptr;
  unsigned relI = 0;
  while (!d.empty()) {
    // Each record starts with a 4-byte length field.
    if (d.size() < 4) {
      msg = "CIE/FDE too small";
      break;
    }
    uint64_t size = endian::read32<ELFT::Endianness>(d.data());
    if (size == 0) // ZERO terminator
      break;
    // The next 4 bytes: 0 for a CIE, otherwise a CIE pointer (an FDE).
    uint32_t id = endian::read32<ELFT::Endianness>(d.data() + 4);
    // The length field does not count itself.
    size += 4;
    if (LLVM_UNLIKELY(size > d.size())) {
      // If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
      // but we do not support that format yet.
      msg = size == UINT32_MAX + uint64_t(4)
                ? "CIE/FDE too large"
                : "CIE/FDE ends past the end of the section";
      break;
    }

    // Find the first relocation that points to [off,off+size). Relocations
    // have been sorted by r_offset.
    const uint64_t off = d.data() - content().data();
    while (relI != rels.size() && rels[relI].offset < off)
      ++relI;
    unsigned firstRel = -1;
    if (relI != rels.size() && rels[relI].offset < off + size)
      firstRel = relI;
    (id == 0 ? cies : fdes).emplace_back(Args: off, Args: this, Args&: size, Args&: firstRel);
    d = d.slice(N: size);
  }
  if (msg)
    Err(ctx&: file->ctx) << "corrupted .eh_frame: " << msg << "\n>>> defined in "
                     << getObjMsg(offset: d.data() - content().data());
}
1452
// Convert raw ELF relocations (REL/RELA/CREL) of .eh_frame into the internal
// Relocation representation, resolving the target symbol, expression, and
// (implicit or explicit) addend for each.
template <class ELFT, class RelTy>
void EhInputSection::preprocessRelocs(Relocs<RelTy> elfRels) {
  Ctx &ctx = file->ctx;
  rels.reserve(N: elfRels.size());
  for (auto rel : elfRels) {
    uint64_t offset = rel.r_offset;
    Symbol &sym = file->getSymbol(symbolIndex: rel.getSymbol(ctx.arg.isMips64EL));
    RelType type = rel.getType(ctx.arg.isMips64EL);
    RelExpr expr = ctx.target->getRelExpr(type, s: sym, loc: content().data() + offset);
    // REL relocations keep the addend in the section contents.
    int64_t addend =
        RelTy::HasAddend
            ? getAddend<ELFT>(rel)
            : ctx.target->getImplicitAddend(buf: content().data() + offset, type);
    rels.push_back(Elt: {.expr: expr, .type: type, .offset: offset, .addend: addend, .sym: &sym});
  }
}
1469
1470// Return the offset in an output section for a given input offset.
uint64_t EhInputSection::getParentOffset(uint64_t offset) const {
  // Both `fdes` and `cies` are sorted by inputOff; partition_point finds the
  // first piece starting after `offset`, so it[-1] is the candidate piece.
  auto it = partition_point(
      Range: fdes, P: [=](EhSectionPiece p) { return p.inputOff <= offset; });
  // If the offset is not inside any FDE, fall back to searching the CIEs.
  if (it == fdes.begin() || it[-1].inputOff + it[-1].size <= offset) {
    it = partition_point(
        Range: cies, P: [=](EhSectionPiece p) { return p.inputOff <= offset; });
    if (it == cies.begin()) // invalid piece
      return offset;
  }
  if (it[-1].outputOff == -1) // invalid piece
    return offset - it[-1].inputOff;
  // Rebase the offset from the piece's input position to its output position.
  return it[-1].outputOff + (offset - it[-1].inputOff);
}
1484
1485static size_t findNull(StringRef s, size_t entSize) {
1486 for (unsigned i = 0, n = s.size(); i != n; i += entSize) {
1487 const char *b = s.begin() + i;
1488 if (std::all_of(first: b, last: b + entSize, pred: [](char c) { return c == 0; }))
1489 return i;
1490 }
1491 llvm_unreachable("");
1492}
1493
1494// Split SHF_STRINGS section. Such section is a sequence of
1495// null-terminated strings.
void MergeInputSection::splitStrings(StringRef s, size_t entSize) {
  // Non-alloc sections and links without --gc-sections keep every piece live.
  const bool live = !(flags & SHF_ALLOC) || !getCtx().arg.gcSections;
  const char *p = s.data(), *end = s.data() + s.size();
  // The final entry must be a null terminator; otherwise the section is
  // malformed and we bail out with a single dummy piece.
  if (!std::all_of(first: end - entSize, last: end, pred: [](char c) { return c == 0; })) {
    Err(ctx&: getCtx()) << this << ": string is not null terminated";
    pieces.emplace_back(Args&: entSize, Args: 0, Args: false);
    return;
  }
  if (entSize == 1) {
    // Optimize the common case.
    do {
      size_t size = strlen(s: p);
      // Each piece records its input offset and a content hash for dedup.
      pieces.emplace_back(Args: p - s.begin(), Args: xxh3_64bits(data: StringRef(p, size)), Args: live);
      p += size + 1;
    } while (p != end);
  } else {
    do {
      size_t size = findNull(s: StringRef(p, end - p), entSize);
      pieces.emplace_back(Args: p - s.begin(), Args: xxh3_64bits(data: StringRef(p, size)), Args: live);
      p += size + entSize;
    } while (p != end);
  }
}
1519
1520// Split non-SHF_STRINGS section. Such section is a sequence of
1521// fixed size records.
1522void MergeInputSection::splitNonStrings(ArrayRef<uint8_t> data,
1523 size_t entSize) {
1524 size_t size = data.size();
1525 assert((size % entSize) == 0);
1526 const bool live = !(flags & SHF_ALLOC) || !getCtx().arg.gcSections;
1527
1528 pieces.resize_for_overwrite(N: size / entSize);
1529 for (size_t i = 0, j = 0; i != size; i += entSize, j++)
1530 pieces[j] = {i, (uint32_t)xxh3_64bits(data: data.slice(N: i, M: entSize)), live};
1531}
1532
// Mergeable section constructor for sections read from an object file:
// delegates to InputSectionBase with the Merge kind.
template <class ELFT>
MergeInputSection::MergeInputSection(ObjFile<ELFT> &f,
                                     const typename ELFT::Shdr &header,
                                     StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Merge) {}
1538
// Synthetic mergeable section constructor (no backing object file; owned by
// ctx.internalFile). Alignment defaults to the entry size.
MergeInputSection::MergeInputSection(Ctx &ctx, StringRef name, uint32_t type,
                                     uint64_t flags, uint64_t entsize,
                                     ArrayRef<uint8_t> data)
    : InputSectionBase(ctx.internalFile, name, type, flags, /*link=*/0,
                       /*info=*/0,
                       /*addralign=*/entsize, entsize, data,
                       SectionBase::Merge) {}
1546
1547// This function is called after we obtain a complete list of input sections
1548// that need to be linked. This is responsible to split section contents
1549// into small chunks for further processing.
1550//
1551// Note that this function is called from parallelForEach. This must be
1552// thread-safe (i.e. no memory allocation from the pools).
void MergeInputSection::splitIntoPieces() {
  assert(pieces.empty());

  // SHF_STRINGS sections contain null-terminated strings of `entsize`-wide
  // characters; everything else is a sequence of fixed-size records.
  if (flags & SHF_STRINGS)
    splitStrings(s: toStringRef(Input: contentMaybeDecompress()), entSize: entsize);
  else
    splitNonStrings(data: contentMaybeDecompress(), entSize: entsize);
}
1561
1562SectionPiece &MergeInputSection::getSectionPiece(uint64_t offset) {
1563 if (content().size() <= offset) {
1564 Err(ctx&: getCtx()) << this << ": offset is outside the section";
1565 return pieces[0];
1566 }
1567 return partition_point(
1568 Range&: pieces, P: [=](SectionPiece p) { return p.inputOff <= offset; })[-1];
1569}
1570
1571// Return the offset in an output section for a given input offset.
1572uint64_t MergeInputSection::getParentOffset(uint64_t offset) const {
1573 const SectionPiece &piece = getSectionPiece(offset);
1574 return piece.outputOff + (offset - piece.inputOff);
1575}
1576
// Explicit template instantiations for the four supported ELF flavors
// (32/64-bit, little/big endian), so the definitions in this file are
// emitted exactly once.
template InputSection::InputSection(ObjFile<ELF32LE> &, const ELF32LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF32BE> &, const ELF32BE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64LE> &, const ELF64LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64BE> &, const ELF64BE::Shdr &,
                                    StringRef);

template void InputSection::writeTo<ELF32LE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF32BE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF64LE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF64BE>(Ctx &, uint8_t *);

template RelsOrRelas<ELF32LE>
InputSectionBase::relsOrRelas<ELF32LE>(bool) const;
template RelsOrRelas<ELF32BE>
InputSectionBase::relsOrRelas<ELF32BE>(bool) const;
template RelsOrRelas<ELF64LE>
InputSectionBase::relsOrRelas<ELF64LE>(bool) const;
template RelsOrRelas<ELF64BE>
InputSectionBase::relsOrRelas<ELF64BE>(bool) const;

template MergeInputSection::MergeInputSection(ObjFile<ELF32LE> &,
                                              const ELF32LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF32BE> &,
                                              const ELF32BE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64LE> &,
                                              const ELF64LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64BE> &,
                                              const ELF64BE::Shdr &, StringRef);

template EhInputSection::EhInputSection(ObjFile<ELF32LE> &,
                                        const ELF32LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF32BE> &,
                                        const ELF32BE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64LE> &,
                                        const ELF64LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64BE> &,
                                        const ELF64BE::Shdr &, StringRef);

template void EhInputSection::split<ELF32LE>();
template void EhInputSection::split<ELF32BE>();
template void EhInputSection::split<ELF64LE>();
template void EhInputSection::split<ELF64BE>();
1622