//===- Target.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLD_ELF_TARGET_H
#define LLD_ELF_TARGET_H

#include "Config.h"
#include "InputSection.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <array>

namespace lld {
namespace elf {
class Defined;
class InputFile;
class Symbol;

std::string toStr(Ctx &, RelType type);

30class TargetInfo {
31public:
32 TargetInfo(Ctx &ctx) : ctx(ctx) {}
33 virtual uint32_t calcEFlags() const { return 0; }
34 virtual RelExpr getRelExpr(RelType type, const Symbol &s,
35 const uint8_t *loc) const = 0;
36 virtual RelType getDynRel(RelType type) const { return 0; }
37 virtual void writeGotPltHeader(uint8_t *buf) const {}
38 virtual void writeGotHeader(uint8_t *buf) const {}
39 virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {};
40 virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
41 virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
42 virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }
43
44 // If lazy binding is supported, the first entry of the PLT has code
45 // to call the dynamic linker to resolve PLT entries the first time
46 // they are called. This function writes that code.
47 virtual void writePltHeader(uint8_t *buf) const {}
48
49 virtual void writePlt(uint8_t *buf, const Symbol &sym,
50 uint64_t pltEntryAddr) const {}
51 virtual void writeIplt(uint8_t *buf, const Symbol &sym,
52 uint64_t pltEntryAddr) const {
53 // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
54 writePlt(buf, sym, pltEntryAddr);
55 }
56 virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
57 virtual void addPltHeaderSymbols(InputSection &isec) const {}
58 virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}
59
60 // Returns true if a relocation only uses the low bits of a value such that
61 // all those bits are in the same page. For example, if the relocation
62 // only uses the low 12 bits in a system with 4k pages. If this is true, the
63 // bits will always have the same value at runtime and we don't have to emit
64 // a dynamic relocation.
65 virtual bool usesOnlyLowPageBits(RelType type) const;
66
67 // Decide whether a Thunk is needed for the relocation from File
68 // targeting S.
69 virtual bool needsThunk(RelExpr expr, RelType relocType,
70 const InputFile *file, uint64_t branchAddr,
71 const Symbol &s, int64_t a) const;
72
73 // On systems with range extensions we place collections of Thunks at
74 // regular spacings that enable the majority of branches reach the Thunks.
75 // a value of 0 means range extension thunks are not supported.
76 virtual uint32_t getThunkSectionSpacing() const { return 0; }
77
78 // The function with a prologue starting at Loc was compiled with
79 // -fsplit-stack and it calls a function compiled without. Adjust the prologue
80 // to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
81 // The symbols st_other flags are needed on PowerPC64 for determining the
82 // offset to the split-stack prologue.
83 virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
84 uint8_t stOther) const;
85
86 // Return true if we can reach dst from src with RelType type.
87 virtual bool inBranchRange(RelType type, uint64_t src,
88 uint64_t dst) const;
89
90 virtual void relocate(uint8_t *loc, const Relocation &rel,
91 uint64_t val) const = 0;
92 void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
93 relocate(loc, rel: Relocation{.expr: R_NONE, .type: type, .offset: 0, .addend: 0, .sym: nullptr}, val);
94 }
95 virtual void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const;
96
97 // Do a linker relaxation pass and return true if we changed something.
98 virtual bool relaxOnce(int pass) const { return false; }
99 // Do finalize relaxation after collecting relaxation infos.
100 virtual void finalizeRelax(int passes) const {}
101
102 virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
103 JumpModType val) const {}
104 virtual void applyBranchToBranchOpt() const {}
105
106 virtual ~TargetInfo();
107
108 // This deletes a jump insn at the end of the section if it is a fall thru to
109 // the next section. Further, if there is a conditional jump and a direct
110 // jump consecutively, it tries to flip the conditional jump to convert the
111 // direct jump into a fall thru and delete it. Returns true if a jump
112 // instruction can be deleted.
113 virtual bool deleteFallThruJmpInsn(InputSection &is, InputFile *file,
114 InputSection *nextIS) const {
115 return false;
116 }
117
118 Ctx &ctx;
119 unsigned defaultCommonPageSize = 4096;
120 unsigned defaultMaxPageSize = 4096;
121
122 uint64_t getImageBase() const;
123
124 // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
125 bool gotBaseSymInGotPlt = false;
126
127 static constexpr RelType noneRel = 0;
128 RelType copyRel = 0;
129 RelType gotRel = 0;
130 RelType pltRel = 0;
131 RelType relativeRel = 0;
132 RelType iRelativeRel = 0;
133 RelType symbolicRel = 0;
134 RelType tlsDescRel = 0;
135 RelType tlsGotRel = 0;
136 RelType tlsModuleIndexRel = 0;
137 RelType tlsOffsetRel = 0;
138 unsigned gotEntrySize = ctx.arg.wordsize;
139 unsigned pltEntrySize = 0;
140 unsigned pltHeaderSize = 0;
141 unsigned ipltEntrySize = 0;
142
143 // At least on x86_64 positions 1 and 2 are used by the first plt entry
144 // to support lazy loading.
145 unsigned gotPltHeaderEntriesNum = 3;
146
147 // On PPC ELF V2 abi, the first entry in the .got is the .TOC.
148 unsigned gotHeaderEntriesNum = 0;
149
150 // On PPC ELF V2 abi, the dynamic section needs DT_PPC64_OPT (DT_LOPROC + 3)
151 // to be set to 0x2 if there can be multiple TOC's. Although we do not emit
152 // multiple TOC's, there can be a mix of TOC and NOTOC addressing which
153 // is functionally equivalent.
154 int ppc64DynamicSectionOpt = 0;
155
156 bool needsThunks = false;
157
158 // A 4-byte field corresponding to one or more trap instructions, used to pad
159 // executable OutputSections.
160 std::array<uint8_t, 4> trapInstr = {};
161
162 // Stores the NOP instructions of different sizes for the target and is used
163 // to pad sections that are relaxed.
164 std::optional<std::vector<std::vector<uint8_t>>> nopInstrs;
165
166 // If a target needs to rewrite calls to __morestack to instead call
167 // __morestack_non_split when a split-stack enabled caller calls a
168 // non-split-stack callee this will return true. Otherwise returns false.
169 bool needsMoreStackNonSplit = true;
170
171 virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
172 virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
173 const uint8_t *loc) const;
174
175protected:
176 // On FreeBSD x86_64 the first page cannot be mmaped.
177 // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
178 // installs this is set to 65536, so the first 15 pages cannot be used.
179 // Given that, the smallest value that can be used in here is 0x10000.
180 uint64_t defaultImageBase = 0x10000;
181};

void setAArch64TargetInfo(Ctx &);
void setAMDGPUTargetInfo(Ctx &);
void setARMTargetInfo(Ctx &);
void setAVRTargetInfo(Ctx &);
void setHexagonTargetInfo(Ctx &);
void setLoongArchTargetInfo(Ctx &);
void setMSP430TargetInfo(Ctx &);
void setMipsTargetInfo(Ctx &);
void setPPC64TargetInfo(Ctx &);
void setPPCTargetInfo(Ctx &);
void setRISCVTargetInfo(Ctx &);
void setSPARCV9TargetInfo(Ctx &);
void setSystemZTargetInfo(Ctx &);
void setX86TargetInfo(Ctx &);
void setX86_64TargetInfo(Ctx &);

199struct ErrorPlace {
200 InputSectionBase *isec;
201 std::string loc;
202 std::string srcLoc;
203};

// Returns input section and corresponding source string for the given location.
ErrorPlace getErrorPlace(Ctx &ctx, const uint8_t *loc);

208static inline std::string getErrorLoc(Ctx &ctx, const uint8_t *loc) {
209 return getErrorPlace(ctx, loc).loc;
210}

void processArmCmseSymbols(Ctx &);

template <class ELFT> uint32_t calcMipsEFlags(Ctx &);
uint8_t getMipsFpAbiFlag(Ctx &, InputFile *file, uint8_t oldFlag,
                         uint8_t newFlag);
bool isMipsN32Abi(Ctx &, const InputFile &f);
bool isMicroMips(Ctx &);
bool isMipsR6(Ctx &);

void writePPC32GlinkSection(Ctx &, uint8_t *buf, size_t numEntries);

unsigned getPPCDFormOp(unsigned secondaryOp);
unsigned getPPCDSFormOp(unsigned secondaryOp);

// In the PowerPC64 Elf V2 abi a function can have 2 entry points. The first
// is a global entry point (GEP) which typically is used to initialize the TOC
// pointer in general purpose register 2. The second is a local entry
// point (LEP) which bypasses the TOC pointer initialization code. The
// offset between GEP and LEP is encoded in a function's st_other flags.
// This function will return the offset (in bytes) from the global entry-point
// to the local entry-point.
unsigned getPPC64GlobalEntryToLocalEntryOffset(Ctx &, uint8_t stOther);

// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
// instruction (regardless of endianness). Therefore, the prefix is always in
// lower memory than the instruction.
void writePrefixedInst(Ctx &, uint8_t *loc, uint64_t insn);

void addPPC64SaveRestore(Ctx &);
uint64_t getPPC64TocBase(Ctx &ctx);
uint64_t getAArch64Page(uint64_t expr);
bool isAArch64BTILandingPad(Ctx &, Symbol &s, int64_t a);
template <typename ELFT> void writeARMCmseImportLib(Ctx &);
uint64_t getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type);
void riscvFinalizeRelax(int passes);
void mergeRISCVAttributesSections(Ctx &);
void addArmInputSectionMappingSymbols(Ctx &);
void addArmSyntheticSectionMappingSymbol(Defined *);
void sortArmMappingSymbols(Ctx &);
void convertArmInstructionstoBE8(Ctx &, InputSection *sec, uint8_t *buf);
void createTaggedSymbols(Ctx &);
void initSymbolAnchors(Ctx &);

void setTarget(Ctx &);

template <class ELFT> bool isMipsPIC(const Defined *sym);

const ELFSyncStream &operator<<(const ELFSyncStream &, RelType);

void reportRangeError(Ctx &, uint8_t *loc, const Relocation &rel,
                      const Twine &v, int64_t min, uint64_t max);
void reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n,
                      const Symbol &sym, const Twine &msg);

266// Make sure that V can be represented as an N bit signed integer.
267inline void checkInt(Ctx &ctx, uint8_t *loc, int64_t v, int n,
268 const Relocation &rel) {
269 if (v != llvm::SignExtend64(X: v, B: n))
270 reportRangeError(ctx, loc, rel, v: Twine(v), min: llvm::minIntN(N: n),
271 max: llvm::maxIntN(N: n));
272}
273
274// Make sure that V can be represented as an N bit unsigned integer.
275inline void checkUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
276 const Relocation &rel) {
277 if ((v >> n) != 0)
278 reportRangeError(ctx, loc, rel, v: Twine(v), min: 0, max: llvm::maxUIntN(N: n));
279}
280
281// Make sure that V can be represented as an N bit signed or unsigned integer.
282inline void checkIntUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
283 const Relocation &rel) {
284 // For the error message we should cast V to a signed integer so that error
285 // messages show a small negative value rather than an extremely large one
286 if (v != (uint64_t)llvm::SignExtend64(X: v, B: n) && (v >> n) != 0)
287 reportRangeError(ctx, loc, rel, v: Twine((int64_t)v), min: llvm::minIntN(N: n),
288 max: llvm::maxUIntN(N: n));
289}
290
291inline void checkAlignment(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
292 const Relocation &rel) {
293 if ((v & (n - 1)) != 0)
294 Err(ctx) << getErrorLoc(ctx, loc) << "improper alignment for relocation "
295 << rel.type << ": 0x" << llvm::utohexstr(X: v)
296 << " is not aligned to " << n << " bytes";
297}
298
299// Endianness-aware read/write.
300inline uint16_t read16(Ctx &ctx, const void *p) {
301 return llvm::support::endian::read16(P: p, E: ctx.arg.endianness);
302}
303
304inline uint32_t read32(Ctx &ctx, const void *p) {
305 return llvm::support::endian::read32(P: p, E: ctx.arg.endianness);
306}
307
308inline uint64_t read64(Ctx &ctx, const void *p) {
309 return llvm::support::endian::read64(P: p, E: ctx.arg.endianness);
310}
311
312inline void write16(Ctx &ctx, void *p, uint16_t v) {
313 llvm::support::endian::write16(P: p, V: v, E: ctx.arg.endianness);
314}
315
316inline void write32(Ctx &ctx, void *p, uint32_t v) {
317 llvm::support::endian::write32(P: p, V: v, E: ctx.arg.endianness);
318}
319
320inline void write64(Ctx &ctx, void *p, uint64_t v) {
321 llvm::support::endian::write64(P: p, V: v, E: ctx.arg.endianness);
322}
323
// Overwrite a ULEB128 value in place, keeping the original encoded length.
// Continuation bytes (high bit set) in the existing encoding are rewritten
// with successive 7-bit groups of val; the terminating byte receives whatever
// remains. Returns the value left for the final byte, so a result >= 0x80
// indicates val did not fit in the original length.
inline uint64_t overwriteULEB128(uint8_t *bufLoc, uint64_t val) {
  while (*bufLoc & 0x80) {
    *bufLoc++ = 0x80 | (val & 0x7f);
    val >>= 7;
  }
  *bufLoc = val;
  return val;
}
} // namespace elf
} // namespace lld

#ifdef __clang__
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#endif
// Instantiate and call the function template f with the ELFT type matching
// the link's ELF kind. A variable named `ctx` must be in scope at the
// expansion site; the default case is unreachable because the driver
// validates ctx.arg.ekind.
#define invokeELFT(f, ...)                                                     \
  switch (ctx.arg.ekind) {                                                     \
  case lld::elf::ELF32LEKind:                                                  \
    f<llvm::object::ELF32LE>(__VA_ARGS__);                                     \
    break;                                                                     \
  case lld::elf::ELF32BEKind:                                                  \
    f<llvm::object::ELF32BE>(__VA_ARGS__);                                     \
    break;                                                                     \
  case lld::elf::ELF64LEKind:                                                  \
    f<llvm::object::ELF64LE>(__VA_ARGS__);                                     \
    break;                                                                     \
  case lld::elf::ELF64BEKind:                                                  \
    f<llvm::object::ELF64BE>(__VA_ARGS__);                                     \
    break;                                                                     \
  default:                                                                     \
    llvm_unreachable("unknown ctx.arg.ekind");                                 \
  }

#endif