//===- Target.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

9#ifndef LLD_ELF_TARGET_H
10#define LLD_ELF_TARGET_H
11
12#include "Config.h"
13#include "InputSection.h"
14#include "lld/Common/ErrorHandler.h"
15#include "llvm/ADT/StringExtras.h"
16#include "llvm/Object/ELF.h"
17#include "llvm/Object/ELFTypes.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MathExtras.h"
20#include <array>
21
22namespace lld {
23namespace elf {
24class Defined;
25class InputFile;
26class Symbol;
27template <class RelTy> struct Relocs;
28
29std::string toStr(Ctx &, RelType type);
30
31class TargetInfo {
32public:
33 TargetInfo(Ctx &ctx) : ctx(ctx) {}
34 virtual uint32_t calcEFlags() const { return 0; }
35 // Create target-specific synthetic sections, defined in Arch/ files.
36 virtual void initTargetSpecificSections() {}
37 virtual RelExpr getRelExpr(RelType type, const Symbol &s,
38 const uint8_t *loc) const = 0;
39 virtual RelType getDynRel(RelType type) const { return 0; }
40 virtual void writeGotPltHeader(uint8_t *buf) const {}
41 virtual void writeGotHeader(uint8_t *buf) const {}
42 virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {}
43 virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
44 virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
45 virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }
46
47 // If lazy binding is supported, the first entry of the PLT has code
48 // to call the dynamic linker to resolve PLT entries the first time
49 // they are called. This function writes that code.
50 virtual void writePltHeader(uint8_t *buf) const {}
51
52 virtual void writePlt(uint8_t *buf, const Symbol &sym,
53 uint64_t pltEntryAddr) const {}
54 virtual void writeIplt(uint8_t *buf, const Symbol &sym,
55 uint64_t pltEntryAddr) const {
56 // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
57 writePlt(buf, sym, pltEntryAddr);
58 }
59 virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
60 virtual void addPltHeaderSymbols(InputSection &isec) const {}
61 virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}
62
63 // Returns true if a relocation only uses the low bits of a value such that
64 // all those bits are in the same page. For example, if the relocation
65 // only uses the low 12 bits in a system with 4k pages. If this is true, the
66 // bits will always have the same value at runtime and we don't have to emit
67 // a dynamic relocation.
68 virtual bool usesOnlyLowPageBits(RelType type) const;
69
70 // Decide whether a Thunk is needed for the relocation from File
71 // targeting S.
72 virtual bool needsThunk(RelExpr expr, RelType relocType,
73 const InputFile *file, uint64_t branchAddr,
74 const Symbol &s, int64_t a) const;
75
76 // On systems with range extensions we place collections of Thunks at
77 // regular spacings that enable the majority of branches reach the Thunks.
78 // a value of 0 means range extension thunks are not supported.
79 virtual uint32_t getThunkSectionSpacing() const { return 0; }
80
81 // The function with a prologue starting at Loc was compiled with
82 // -fsplit-stack and it calls a function compiled without. Adjust the prologue
83 // to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
84 // The symbols st_other flags are needed on PowerPC64 for determining the
85 // offset to the split-stack prologue.
86 virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
87 uint8_t stOther) const;
88
89 // Return true if we can reach dst from src with RelType type.
90 virtual bool inBranchRange(RelType type, uint64_t src,
91 uint64_t dst) const;
92
93 // Function for scanning relocation. Typically overridden by targets that
94 // require special type or addend adjustment.
95 virtual void scanSection(InputSectionBase &);
96 // Called by scanSection as a default implementation for specific ELF
97 // relocation types.
98 template <class ELFT> void scanSection1(InputSectionBase &);
99 template <class ELFT, class RelTy>
100 void scanSectionImpl(InputSectionBase &, Relocs<RelTy>);
101
102 virtual void relocate(uint8_t *loc, const Relocation &rel,
103 uint64_t val) const = 0;
104 void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
105 relocate(loc, rel: Relocation{.expr: R_NONE, .type: type, .offset: 0, .addend: 0, .sym: nullptr}, val);
106 }
107 virtual void relocateAlloc(InputSection &sec, uint8_t *buf) const;
108 void relocateEh(EhInputSection &sec, uint8_t *buf) const;
109
110 // Do a linker relaxation pass and return true if we changed something.
111 virtual bool relaxOnce(int pass) const { return false; }
112 virtual bool synthesizeAlign(uint64_t &dot, InputSection *sec) {
113 return false;
114 }
115 // Do finalize relaxation after collecting relaxation infos.
116 virtual void finalizeRelax(int passes) const {}
117
118 virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
119 JumpModType val) const {}
120 virtual void applyBranchToBranchOpt() const {}
121
122 virtual ~TargetInfo();
123
124 // This deletes a jump insn at the end of the section if it is a fall thru to
125 // the next section. Further, if there is a conditional jump and a direct
126 // jump consecutively, it tries to flip the conditional jump to convert the
127 // direct jump into a fall thru and delete it. Returns true if a jump
128 // instruction can be deleted.
129 virtual bool deleteFallThruJmpInsn(InputSection &is,
130 InputSection *nextIS) const {
131 return false;
132 }
133
134 Ctx &ctx;
135 unsigned defaultCommonPageSize = 4096;
136 unsigned defaultMaxPageSize = 4096;
137
138 uint64_t getImageBase() const;
139
140 // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
141 bool gotBaseSymInGotPlt = false;
142
143 static constexpr RelType noneRel = 0;
144 RelType copyRel = 0;
145 RelType gotRel = 0;
146 RelType pltRel = 0;
147 RelType relativeRel = 0;
148 RelType iRelativeRel = 0;
149 RelType symbolicRel = 0;
150 RelType iRelSymbolicRel = 0;
151 RelType tlsDescRel = 0;
152 RelType tlsGotRel = 0;
153 RelType tlsModuleIndexRel = 0;
154 RelType tlsOffsetRel = 0;
155 unsigned gotEntrySize = ctx.arg.wordsize;
156 unsigned pltEntrySize = 0;
157 unsigned pltHeaderSize = 0;
158 unsigned ipltEntrySize = 0;
159
160 // At least on x86_64 positions 1 and 2 are used by the first plt entry
161 // to support lazy loading.
162 unsigned gotPltHeaderEntriesNum = 3;
163
164 // On PPC ELF V2 abi, the first entry in the .got is the .TOC.
165 unsigned gotHeaderEntriesNum = 0;
166
167 // On PPC ELF V2 abi, the dynamic section needs DT_PPC64_OPT (DT_LOPROC + 3)
168 // to be set to 0x2 if there can be multiple TOC's. Although we do not emit
169 // multiple TOC's, there can be a mix of TOC and NOTOC addressing which
170 // is functionally equivalent.
171 int ppc64DynamicSectionOpt = 0;
172
173 bool needsThunks = false;
174
175 // A 4-byte field corresponding to one or more trap instructions, used to pad
176 // executable OutputSections.
177 std::array<uint8_t, 4> trapInstr = {};
178
179 // Stores the NOP instructions of different sizes for the target and is used
180 // to pad sections that are relaxed.
181 std::optional<std::vector<std::vector<uint8_t>>> nopInstrs;
182
183 // If a target needs to rewrite calls to __morestack to instead call
184 // __morestack_non_split when a split-stack enabled caller calls a
185 // non-split-stack callee this will return true. Otherwise returns false.
186 bool needsMoreStackNonSplit = true;
187
188 virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
189 virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
190 const uint8_t *loc) const;
191
192protected:
193 // On FreeBSD x86_64 the first page cannot be mmaped.
194 // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
195 // installs this is set to 65536, so the first 15 pages cannot be used.
196 // Given that, the smallest value that can be used in here is 0x10000.
197 uint64_t defaultImageBase = 0x10000;
198};
199
200void setAArch64TargetInfo(Ctx &);
201void setAMDGPUTargetInfo(Ctx &);
202void setARMTargetInfo(Ctx &);
203void setAVRTargetInfo(Ctx &);
204void setHexagonTargetInfo(Ctx &);
205void setLoongArchTargetInfo(Ctx &);
206void setMSP430TargetInfo(Ctx &);
207void setMipsTargetInfo(Ctx &);
208void setPPC64TargetInfo(Ctx &);
209void setPPCTargetInfo(Ctx &);
210void setRISCVTargetInfo(Ctx &);
211void setSPARCV9TargetInfo(Ctx &);
212void setSystemZTargetInfo(Ctx &);
213void setX86TargetInfo(Ctx &);
214void setX86_64TargetInfo(Ctx &);
215
216struct ErrorPlace {
217 InputSectionBase *isec;
218 std::string loc;
219 std::string srcLoc;
220};
221
222// Returns input section and corresponding source string for the given location.
223ErrorPlace getErrorPlace(Ctx &ctx, const uint8_t *loc);
224
225static inline std::string getErrorLoc(Ctx &ctx, const uint8_t *loc) {
226 return getErrorPlace(ctx, loc).loc;
227}
228
229void processArmCmseSymbols(Ctx &);
230
231template <class ELFT> uint32_t calcMipsEFlags(Ctx &);
232uint8_t getMipsFpAbiFlag(Ctx &, InputFile *file, uint8_t oldFlag,
233 uint8_t newFlag);
234uint64_t getMipsPageAddr(uint64_t addr);
235bool isMipsN32Abi(Ctx &, const InputFile &f);
236bool isMicroMips(Ctx &);
237bool isMipsR6(Ctx &);
238
239void writePPC32GlinkSection(Ctx &, uint8_t *buf, size_t numEntries);
240
241unsigned getPPCDFormOp(unsigned secondaryOp);
242unsigned getPPCDSFormOp(unsigned secondaryOp);
243
244// In the PowerPC64 Elf V2 abi a function can have 2 entry points. The first
245// is a global entry point (GEP) which typically is used to initialize the TOC
246// pointer in general purpose register 2. The second is a local entry
247// point (LEP) which bypasses the TOC pointer initialization code. The
248// offset between GEP and LEP is encoded in a function's st_other flags.
249// This function will return the offset (in bytes) from the global entry-point
250// to the local entry-point.
251unsigned getPPC64GlobalEntryToLocalEntryOffset(Ctx &, uint8_t stOther);
252
253// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
254// instruction (regardless of endianness). Therefore, the prefix is always in
255// lower memory than the instruction.
256void writePrefixedInst(Ctx &, uint8_t *loc, uint64_t insn);
257
258void addPPC64SaveRestore(Ctx &);
259uint64_t getPPC64TocBase(Ctx &ctx);
260uint64_t getAArch64Page(uint64_t expr);
261bool isAArch64BTILandingPad(Ctx &, Symbol &s, int64_t a);
262template <typename ELFT> void writeARMCmseImportLib(Ctx &);
263uint64_t getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type);
264void riscvFinalizeRelax(int passes);
265void mergeRISCVAttributesSections(Ctx &);
266void mergeHexagonAttributesSections(Ctx &);
267void addArmInputSectionMappingSymbols(Ctx &);
268void addArmSyntheticSectionMappingSymbol(Defined *);
269void sortArmMappingSymbols(Ctx &);
270void convertArmInstructionstoBE8(Ctx &, InputSection *sec, uint8_t *buf);
271void createTaggedSymbols(Ctx &);
272void initSymbolAnchors(Ctx &);
273
274void setTarget(Ctx &);
275
276template <class ELFT> bool isMipsPIC(const Defined *sym);
277
278const ELFSyncStream &operator<<(const ELFSyncStream &, RelType);
279
280void reportRangeError(Ctx &, uint8_t *loc, const Relocation &rel,
281 const Twine &v, int64_t min, uint64_t max);
282void reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n,
283 const Symbol &sym, const Twine &msg);
284
285// Make sure that V can be represented as an N bit signed integer.
286inline void checkInt(Ctx &ctx, uint8_t *loc, int64_t v, int n,
287 const Relocation &rel) {
288 if (v != llvm::SignExtend64(X: v, B: n))
289 reportRangeError(ctx, loc, rel, v: Twine(v), min: llvm::minIntN(N: n),
290 max: llvm::maxIntN(N: n));
291}
292
293// Make sure that V can be represented as an N bit unsigned integer.
294inline void checkUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
295 const Relocation &rel) {
296 if ((v >> n) != 0)
297 reportRangeError(ctx, loc, rel, v: Twine(v), min: 0, max: llvm::maxUIntN(N: n));
298}
299
300// Make sure that V can be represented as an N bit signed or unsigned integer.
301inline void checkIntUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
302 const Relocation &rel) {
303 // For the error message we should cast V to a signed integer so that error
304 // messages show a small negative value rather than an extremely large one
305 if (v != (uint64_t)llvm::SignExtend64(X: v, B: n) && (v >> n) != 0)
306 reportRangeError(ctx, loc, rel, v: Twine((int64_t)v), min: llvm::minIntN(N: n),
307 max: llvm::maxUIntN(N: n));
308}
309
310inline void checkAlignment(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
311 const Relocation &rel) {
312 if ((v & (n - 1)) != 0)
313 Err(ctx) << getErrorLoc(ctx, loc) << "improper alignment for relocation "
314 << rel.type << ": 0x" << llvm::utohexstr(X: v)
315 << " is not aligned to " << n << " bytes";
316}
317
318// Endianness-aware read/write.
319inline uint16_t read16(Ctx &ctx, const void *p) {
320 return llvm::support::endian::read16(P: p, E: ctx.arg.endianness);
321}
322
323inline uint32_t read32(Ctx &ctx, const void *p) {
324 return llvm::support::endian::read32(P: p, E: ctx.arg.endianness);
325}
326
327inline uint64_t read64(Ctx &ctx, const void *p) {
328 return llvm::support::endian::read64(P: p, E: ctx.arg.endianness);
329}
330
331inline void write16(Ctx &ctx, void *p, uint16_t v) {
332 llvm::support::endian::write16(P: p, V: v, E: ctx.arg.endianness);
333}
334
335inline void write32(Ctx &ctx, void *p, uint32_t v) {
336 llvm::support::endian::write32(P: p, V: v, E: ctx.arg.endianness);
337}
338
339inline void write64(Ctx &ctx, void *p, uint64_t v) {
340 llvm::support::endian::write64(P: p, V: v, E: ctx.arg.endianness);
341}
342
343} // namespace elf
344} // namespace lld
345
346#ifdef __clang__
347#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
348#endif
349#define invokeELFT(f, ...) \
350 do { \
351 switch (ctx.arg.ekind) { \
352 case lld::elf::ELF32LEKind: \
353 f<llvm::object::ELF32LE>(__VA_ARGS__); \
354 break; \
355 case lld::elf::ELF32BEKind: \
356 f<llvm::object::ELF32BE>(__VA_ARGS__); \
357 break; \
358 case lld::elf::ELF64LEKind: \
359 f<llvm::object::ELF64LE>(__VA_ARGS__); \
360 break; \
361 case lld::elf::ELF64BEKind: \
362 f<llvm::object::ELF64BE>(__VA_ARGS__); \
363 break; \
364 default: \
365 llvm_unreachable("unknown ctx.arg.ekind"); \
366 } \
367 } while (0)
368
369#endif
370