1//===- Target.h -------------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLD_ELF_TARGET_H
10#define LLD_ELF_TARGET_H
11
12#include "Config.h"
13#include "InputSection.h"
14#include "lld/Common/ErrorHandler.h"
15#include "llvm/ADT/StringExtras.h"
16#include "llvm/Object/ELF.h"
17#include "llvm/Object/ELFTypes.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MathExtras.h"
20#include <array>
21
22namespace lld {
23namespace elf {
24class Defined;
25class InputFile;
26class Symbol;
27template <class RelTy> struct Relocs;
28
29std::string toStr(Ctx &, RelType type);
30
31class TargetInfo {
32public:
33 TargetInfo(Ctx &ctx) : ctx(ctx) {}
34 virtual uint32_t calcEFlags() const { return 0; }
35 // Create target-specific synthetic sections, defined in Arch/ files.
36 virtual void initTargetSpecificSections() {}
37 virtual RelExpr getRelExpr(RelType type, const Symbol &s,
38 const uint8_t *loc) const = 0;
39 virtual RelType getDynRel(RelType type) const { return 0; }
40 virtual void writeGotPltHeader(uint8_t *buf) const {}
41 virtual void writeGotHeader(uint8_t *buf) const {}
42 virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {}
43 virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
44 virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
45 virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }
46
47 // If lazy binding is supported, the first entry of the PLT has code
48 // to call the dynamic linker to resolve PLT entries the first time
49 // they are called. This function writes that code.
50 virtual void writePltHeader(uint8_t *buf) const {}
51
52 virtual void writePlt(uint8_t *buf, const Symbol &sym,
53 uint64_t pltEntryAddr) const {}
54 virtual void writeIplt(uint8_t *buf, const Symbol &sym,
55 uint64_t pltEntryAddr) const {
56 // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
57 writePlt(buf, sym, pltEntryAddr);
58 }
59 virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
60 virtual void addPltHeaderSymbols(InputSection &isec) const {}
61 virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}
62
63 // Returns true if a relocation only uses the low bits of a value such that
64 // all those bits are in the same page. For example, if the relocation
65 // only uses the low 12 bits in a system with 4k pages. If this is true, the
66 // bits will always have the same value at runtime and we don't have to emit
67 // a dynamic relocation.
68 virtual bool usesOnlyLowPageBits(RelType type) const;
69
70 // Decide whether a Thunk is needed for the relocation from File
71 // targeting S.
72 virtual bool needsThunk(RelExpr expr, RelType relocType,
73 const InputFile *file, uint64_t branchAddr,
74 const Symbol &s, int64_t a) const;
75
76 // On systems with range extensions we place collections of Thunks at
77 // regular spacings that enable the majority of branches reach the Thunks.
78 // a value of 0 means range extension thunks are not supported.
79 virtual uint32_t getThunkSectionSpacing() const { return 0; }
80
81 // The function with a prologue starting at Loc was compiled with
82 // -fsplit-stack and it calls a function compiled without. Adjust the prologue
83 // to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
84 // The symbols st_other flags are needed on PowerPC64 for determining the
85 // offset to the split-stack prologue.
86 virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
87 uint8_t stOther) const;
88
89 // Return true if we can reach dst from src with RelType type.
90 virtual bool inBranchRange(RelType type, uint64_t src,
91 uint64_t dst) const;
92
93 // Function for scanning relocation. Typically overridden by targets that
94 // require special type or addend adjustment.
95 virtual void scanSection(InputSectionBase &);
96 // Called by scanSection as a default implementation for specific ELF
97 // relocation types.
98 template <class ELFT> void scanSection1(InputSectionBase &);
99 template <class ELFT, class RelTy>
100 void scanSectionImpl(InputSectionBase &, Relocs<RelTy>);
101
102 // Called after parallel relocation scanning is complete but before
103 // postScanRelocations processes symbol flags. Targets may override this to
104 // perform single-threaded fixups that cannot run during parallel scanning
105 // (e.g. symbol table modifications).
106 virtual void finalizeRelocScan() {}
107
108 virtual void relocate(uint8_t *loc, const Relocation &rel,
109 uint64_t val) const = 0;
110 void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
111 relocate(loc, rel: Relocation{.expr: R_NONE, .type: type, .offset: 0, .addend: 0, .sym: nullptr}, val);
112 }
113 virtual void relocateAlloc(InputSection &sec, uint8_t *buf) const;
114 void relocateEh(EhInputSection &sec, uint8_t *buf) const;
115
116 // Do a linker relaxation pass and return true if we changed something.
117 virtual bool relaxOnce(int pass) const { return false; }
118 virtual bool synthesizeAlign(uint64_t &dot, InputSection *sec) {
119 return false;
120 }
121 // Do finalize relaxation after collecting relaxation infos.
122 virtual void finalizeRelax(int passes) const {}
123
124 virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
125 JumpModType val) const {}
126 virtual void applyBranchToBranchOpt() const {}
127
128 virtual ~TargetInfo();
129
130 // This deletes a jump insn at the end of the section if it is a fall thru to
131 // the next section. Further, if there is a conditional jump and a direct
132 // jump consecutively, it tries to flip the conditional jump to convert the
133 // direct jump into a fall thru and delete it. Returns true if a jump
134 // instruction can be deleted.
135 virtual bool deleteFallThruJmpInsn(InputSection &is,
136 InputSection *nextIS) const {
137 return false;
138 }
139
140 Ctx &ctx;
141 unsigned defaultCommonPageSize = 4096;
142 unsigned defaultMaxPageSize = 4096;
143
144 uint64_t getImageBase() const;
145
146 // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
147 bool gotBaseSymInGotPlt = false;
148
149 static constexpr RelType noneRel = 0;
150 RelType copyRel = 0;
151 RelType gotRel = 0;
152 RelType pltRel = 0;
153 RelType relativeRel = 0;
154 RelType iRelativeRel = 0;
155 RelType symbolicRel = 0;
156 RelType iRelSymbolicRel = 0;
157 RelType tlsDescRel = 0;
158 RelType tlsGotRel = 0;
159 RelType tlsModuleIndexRel = 0;
160 RelType tlsOffsetRel = 0;
161 unsigned gotEntrySize = ctx.arg.wordsize;
162 unsigned pltEntrySize = 0;
163 unsigned pltHeaderSize = 0;
164 unsigned ipltEntrySize = 0;
165
166 // At least on x86_64 positions 1 and 2 are used by the first plt entry
167 // to support lazy loading.
168 unsigned gotPltHeaderEntriesNum = 3;
169
170 // On PPC ELF V2 abi, the first entry in the .got is the .TOC.
171 unsigned gotHeaderEntriesNum = 0;
172
173 // On PPC ELF V2 abi, the dynamic section needs DT_PPC64_OPT (DT_LOPROC + 3)
174 // to be set to 0x2 if there can be multiple TOC's. Although we do not emit
175 // multiple TOC's, there can be a mix of TOC and NOTOC addressing which
176 // is functionally equivalent.
177 int ppc64DynamicSectionOpt = 0;
178
179 bool needsThunks = false;
180
181 // A 4-byte field corresponding to one or more trap instructions, used to pad
182 // executable OutputSections.
183 std::array<uint8_t, 4> trapInstr = {};
184
185 // Stores the NOP instructions of different sizes for the target and is used
186 // to pad sections that are relaxed.
187 std::optional<std::vector<std::vector<uint8_t>>> nopInstrs;
188
189 // If a target needs to rewrite calls to __morestack to instead call
190 // __morestack_non_split when a split-stack enabled caller calls a
191 // non-split-stack callee this will return true. Otherwise returns false.
192 bool needsMoreStackNonSplit = true;
193
194 virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
195 virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
196 const uint8_t *loc) const;
197
198protected:
199 // On FreeBSD x86_64 the first page cannot be mmaped.
200 // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
201 // installs this is set to 65536, so the first 15 pages cannot be used.
202 // Given that, the smallest value that can be used in here is 0x10000.
203 uint64_t defaultImageBase = 0x10000;
204};
205
206void setAArch64TargetInfo(Ctx &);
207void setAMDGPUTargetInfo(Ctx &);
208void setARMTargetInfo(Ctx &);
209void setAVRTargetInfo(Ctx &);
210void setHexagonTargetInfo(Ctx &);
211void setLoongArchTargetInfo(Ctx &);
212void setMSP430TargetInfo(Ctx &);
213void setMipsTargetInfo(Ctx &);
214void setPPC64TargetInfo(Ctx &);
215void setPPCTargetInfo(Ctx &);
216void setRISCVTargetInfo(Ctx &);
217void setSPARCV9TargetInfo(Ctx &);
218void setSystemZTargetInfo(Ctx &);
219void setX86TargetInfo(Ctx &);
220void setX86_64TargetInfo(Ctx &);
221
// Location information used when reporting relocation diagnostics; produced
// by getErrorPlace below for an address inside the output image.
struct ErrorPlace {
  InputSectionBase *isec; // input section containing the location
  std::string loc;        // printable location string for diagnostics
  std::string srcLoc;     // corresponding source location string, if known
};
227
228// Returns input section and corresponding source string for the given location.
229ErrorPlace getErrorPlace(Ctx &ctx, const uint8_t *loc);
230
231static inline std::string getErrorLoc(Ctx &ctx, const uint8_t *loc) {
232 return getErrorPlace(ctx, loc).loc;
233}
234
235void processArmCmseSymbols(Ctx &);
236
237template <class ELFT> uint32_t calcMipsEFlags(Ctx &);
238uint8_t getMipsFpAbiFlag(Ctx &, InputFile *file, uint8_t oldFlag,
239 uint8_t newFlag);
240uint64_t getMipsPageAddr(uint64_t addr);
241bool isMipsN32Abi(Ctx &, const InputFile &f);
242bool isMicroMips(Ctx &);
243bool isMipsR6(Ctx &);
244
245void writePPC32GlinkSection(Ctx &, uint8_t *buf, size_t numEntries);
246
247unsigned getPPCDFormOp(unsigned secondaryOp);
248unsigned getPPCDSFormOp(unsigned secondaryOp);
249
250// In the PowerPC64 Elf V2 abi a function can have 2 entry points. The first
251// is a global entry point (GEP) which typically is used to initialize the TOC
252// pointer in general purpose register 2. The second is a local entry
253// point (LEP) which bypasses the TOC pointer initialization code. The
254// offset between GEP and LEP is encoded in a function's st_other flags.
255// This function will return the offset (in bytes) from the global entry-point
256// to the local entry-point.
257unsigned getPPC64GlobalEntryToLocalEntryOffset(Ctx &, uint8_t stOther);
258
259// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
260// instruction (regardless of endianness). Therefore, the prefix is always in
261// lower memory than the instruction.
262void writePrefixedInst(Ctx &, uint8_t *loc, uint64_t insn);
263
264void addPPC64SaveRestore(Ctx &);
265uint64_t getPPC64TocBase(Ctx &ctx);
266uint64_t getAArch64Page(uint64_t expr);
267bool isAArch64BTILandingPad(Ctx &, Symbol &s, int64_t a);
268template <typename ELFT> void writeARMCmseImportLib(Ctx &);
269uint64_t getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type);
270void riscvFinalizeRelax(int passes);
271void mergeRISCVAttributesSections(Ctx &);
272void mergeHexagonAttributesSections(Ctx &);
273void addArmInputSectionMappingSymbols(Ctx &);
274void addArmSyntheticSectionMappingSymbol(Defined *);
275void sortArmMappingSymbols(Ctx &);
276void convertArmInstructionstoBE8(Ctx &, InputSection *sec, uint8_t *buf);
277void createTaggedSymbols(Ctx &);
278void initSymbolAnchors(Ctx &);
279
280void setTarget(Ctx &);
281
282template <class ELFT> bool isMipsPIC(const Defined *sym);
283
284const ELFSyncStream &operator<<(const ELFSyncStream &, RelType);
285
286void reportRangeError(Ctx &, uint8_t *loc, const Relocation &rel,
287 const Twine &v, int64_t min, uint64_t max);
288void reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n,
289 const Symbol &sym, const Twine &msg);
290
291// Make sure that V can be represented as an N bit signed integer.
292inline void checkInt(Ctx &ctx, uint8_t *loc, int64_t v, int n,
293 const Relocation &rel) {
294 if (v != llvm::SignExtend64(X: v, B: n))
295 reportRangeError(ctx, loc, rel, v: Twine(v), min: llvm::minIntN(N: n),
296 max: llvm::maxIntN(N: n));
297}
298
299// Make sure that V can be represented as an N bit unsigned integer.
300inline void checkUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
301 const Relocation &rel) {
302 if ((v >> n) != 0)
303 reportRangeError(ctx, loc, rel, v: Twine(v), min: 0, max: llvm::maxUIntN(N: n));
304}
305
306// Make sure that V can be represented as an N bit signed or unsigned integer.
307inline void checkIntUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
308 const Relocation &rel) {
309 // For the error message we should cast V to a signed integer so that error
310 // messages show a small negative value rather than an extremely large one
311 if (v != (uint64_t)llvm::SignExtend64(X: v, B: n) && (v >> n) != 0)
312 reportRangeError(ctx, loc, rel, v: Twine((int64_t)v), min: llvm::minIntN(N: n),
313 max: llvm::maxUIntN(N: n));
314}
315
316inline void checkAlignment(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
317 const Relocation &rel) {
318 if ((v & (n - 1)) != 0)
319 Err(ctx) << getErrorLoc(ctx, loc) << "improper alignment for relocation "
320 << rel.type << ": 0x" << llvm::utohexstr(X: v)
321 << " is not aligned to " << n << " bytes";
322}
323
324// Endianness-aware read/write.
325inline uint16_t read16(Ctx &ctx, const void *p) {
326 return llvm::support::endian::read16(P: p, E: ctx.arg.endianness);
327}
328
329inline uint32_t read32(Ctx &ctx, const void *p) {
330 return llvm::support::endian::read32(P: p, E: ctx.arg.endianness);
331}
332
333inline uint64_t read64(Ctx &ctx, const void *p) {
334 return llvm::support::endian::read64(P: p, E: ctx.arg.endianness);
335}
336
337inline void write16(Ctx &ctx, void *p, uint16_t v) {
338 llvm::support::endian::write16(P: p, V: v, E: ctx.arg.endianness);
339}
340
341inline void write32(Ctx &ctx, void *p, uint32_t v) {
342 llvm::support::endian::write32(P: p, V: v, E: ctx.arg.endianness);
343}
344
345inline void write64(Ctx &ctx, void *p, uint64_t v) {
346 llvm::support::endian::write64(P: p, V: v, E: ctx.arg.endianness);
347}
348
349} // namespace elf
350} // namespace lld
351
#ifdef __clang__
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#endif
// Instantiate and call f<ELFT>(...) with the concrete ELF type matching the
// kind recorded in ctx.arg.ekind (32/64-bit, little/big-endian). Requires a
// variable named `ctx` to be in scope at the point of use.
#define invokeELFT(f, ...)                                                     \
  do {                                                                         \
    switch (ctx.arg.ekind) {                                                   \
    case lld::elf::ELF32LEKind:                                                \
      f<llvm::object::ELF32LE>(__VA_ARGS__);                                   \
      break;                                                                   \
    case lld::elf::ELF32BEKind:                                                \
      f<llvm::object::ELF32BE>(__VA_ARGS__);                                   \
      break;                                                                   \
    case lld::elf::ELF64LEKind:                                                \
      f<llvm::object::ELF64LE>(__VA_ARGS__);                                   \
      break;                                                                   \
    case lld::elf::ELF64BEKind:                                                \
      f<llvm::object::ELF64BE>(__VA_ARGS__);                                   \
      break;                                                                   \
    default:                                                                   \
      llvm_unreachable("unknown ctx.arg.ekind");                               \
    }                                                                          \
  } while (0)
374
375#endif
376