1//===- X86_64.cpp ---------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "InputFiles.h"
10#include "Symbols.h"
11#include "SyntheticSections.h"
12#include "Target.h"
13
14#include "lld/Common/ErrorHandler.h"
15#include "mach-o/compact_unwind_encoding.h"
16#include "llvm/BinaryFormat/MachO.h"
17#include "llvm/Support/Endian.h"
18
19using namespace llvm::MachO;
20using namespace llvm::support::endian;
21using namespace lld;
22using namespace lld::macho;
23
namespace {

// TargetInfo implementation for x86-64 Mach-O output. Provides the
// architecture-specific pieces the linker needs: embedded-addend extraction,
// relocation application, and the various stub/trampoline byte sequences.
struct X86_64 : TargetInfo {
  X86_64();

  // Reads the addend stored inline at the relocated location in the input
  // file, including the SIGNED_{1,2,4} trailing-byte correction.
  int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
                            const relocation_info) const override;
  // Applies a single relocation: writes `va` (PC-relative-adjusted if
  // r.pcrel) into `loc`, which lives at virtual address `relocVA`.
  void relocateOne(uint8_t *loc, const Relocation &, uint64_t va,
                   uint64_t relocVA) const override;

  // Emits one lazy-binding stub that jumps through the pointer at pointerVA.
  void writeStub(uint8_t *buf, const Symbol &,
                 uint64_t pointerVA) const override;
  // Emits the shared __stub_helper header that tail-calls dyld_stub_binder.
  void writeStubHelperHeader(uint8_t *buf) const override;
  // Emits one per-symbol __stub_helper entry (push bind offset; jmp header).
  void writeStubHelperEntry(uint8_t *buf, const Symbol &,
                            uint64_t entryAddr) const override;

  // Emits one objc_msgSend "fast" stub: load the selector, then tail-call
  // _objc_msgSend through the GOT.
  void writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
                            uint64_t &stubOffset, uint64_t selrefVA,
                            Symbol *objcMsgSend) const override;

  // Rewrites a GOT_LOAD (MOVQ from the GOT) into a direct LEAQ when the
  // target does not need a GOT entry.
  void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
  uint64_t getPageSize() const override { return 4 * 1024; }

  // Patches dtrace probe call sites (see handleDtraceReloc definition).
  void handleDtraceReloc(const Symbol *sym, const Relocation &r,
                         uint8_t *loc) const override;
};
} // namespace
51
52static constexpr std::array<RelocAttrs, 10> relocAttrsArray{._M_elems: {
53#define B(x) RelocAttrBits::x
54 {.name: "UNSIGNED", B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE1) |
55 B(BYTE4) | B(BYTE8)},
56 {.name: "SIGNED", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
57 {.name: "BRANCH",
58 B(PCREL) | B(EXTERN) | B(LOCAL) | B(BRANCH) | B(BYTE1) | B(BYTE4)},
59 {.name: "GOT_LOAD", B(PCREL) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
60 {.name: "GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
61 {.name: "SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
62 {.name: "SIGNED_1", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
63 {.name: "SIGNED_2", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
64 {.name: "SIGNED_4", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
65 {.name: "TLV", B(PCREL) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
66#undef B
67}};
68
69static int pcrelOffset(uint8_t type) {
70 switch (type) {
71 case X86_64_RELOC_SIGNED_1:
72 return 1;
73 case X86_64_RELOC_SIGNED_2:
74 return 2;
75 case X86_64_RELOC_SIGNED_4:
76 return 4;
77 default:
78 return 0;
79 }
80}
81
82int64_t X86_64::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
83 relocation_info rel) const {
84 auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
85 const uint8_t *loc = buf + offset + rel.r_address;
86 int64_t addend;
87
88 switch (rel.r_length) {
89 case 0:
90 addend = static_cast<int8_t>(*loc);
91 break;
92 case 2:
93 addend = static_cast<int32_t>(read32le(P: loc));
94 break;
95 case 3:
96 addend = read64le(P: loc);
97 break;
98 default:
99 llvm_unreachable("invalid r_length");
100 }
101
102 return addend + pcrelOffset(type: rel.r_type);
103}
104
105void X86_64::relocateOne(uint8_t *loc, const Relocation &r, uint64_t value,
106 uint64_t relocVA) const {
107 if (r.pcrel) {
108 uint64_t pc = relocVA + (1ull << r.length) + pcrelOffset(type: r.type);
109 value -= pc;
110 }
111
112 switch (r.length) {
113 case 0:
114 if (r.type == X86_64_RELOC_UNSIGNED)
115 checkUInt(loc, d: r, v: value, bits: 8);
116 else
117 checkInt(loc, d: r, v: value, bits: 8);
118 *loc = value;
119 break;
120 case 2:
121 if (r.type == X86_64_RELOC_UNSIGNED)
122 checkUInt(loc, d: r, v: value, bits: 32);
123 else
124 checkInt(loc, d: r, v: value, bits: 32);
125 write32le(P: loc, V: value);
126 break;
127 case 3:
128 write64le(P: loc, V: value);
129 break;
130 default:
131 llvm_unreachable("invalid r_length");
132 }
133}
134
135// The following methods emit a number of assembly sequences with RIP-relative
136// addressing. Note that RIP-relative addressing on X86-64 has the RIP pointing
137// to the next instruction, not the current instruction, so we always have to
138// account for the current instruction's size when calculating offsets.
139// writeRipRelative helps with that.
140//
141// bufAddr: The virtual address corresponding to buf[0].
142// bufOff: The offset within buf of the next instruction.
143// destAddr: The destination address that the current instruction references.
144static void writeRipRelative(SymbolDiagnostic d, uint8_t *buf, uint64_t bufAddr,
145 uint64_t bufOff, uint64_t destAddr) {
146 uint64_t rip = bufAddr + bufOff;
147 checkInt(loc: buf, d, v: destAddr - rip, bits: 32);
148 // For the instructions we care about, the RIP-relative address is always
149 // stored in the last 4 bytes of the instruction.
150 write32le(P: buf + bufOff - 4, V: destAddr - rip);
151}
152
153static constexpr uint8_t stub[] = {
154 0xff, 0x25, 0, 0, 0, 0, // jmpq *__la_symbol_ptr(%rip)
155};
156
157void X86_64::writeStub(uint8_t *buf, const Symbol &sym,
158 uint64_t pointerVA) const {
159 memcpy(dest: buf, src: stub, n: 2); // just copy the two nonzero bytes
160 uint64_t stubAddr = in.stubs->addr + sym.stubsIndex * sizeof(stub);
161 writeRipRelative(d: {.symbol: &sym, .reason: "stub"}, buf, bufAddr: stubAddr, bufOff: sizeof(stub), destAddr: pointerVA);
162}
163
164static constexpr uint8_t stubHelperHeader[] = {
165 0x4c, 0x8d, 0x1d, 0, 0, 0, 0, // 0x0: leaq ImageLoaderCache(%rip), %r11
166 0x41, 0x53, // 0x7: pushq %r11
167 0xff, 0x25, 0, 0, 0, 0, // 0x9: jmpq *dyld_stub_binder@GOT(%rip)
168 0x90, // 0xf: nop
169};
170
171void X86_64::writeStubHelperHeader(uint8_t *buf) const {
172 memcpy(dest: buf, src: stubHelperHeader, n: sizeof(stubHelperHeader));
173 SymbolDiagnostic d = {.symbol: nullptr, .reason: "stub helper header"};
174 writeRipRelative(d, buf, bufAddr: in.stubHelper->addr, bufOff: 7,
175 destAddr: in.imageLoaderCache->getVA());
176 writeRipRelative(d, buf, bufAddr: in.stubHelper->addr, bufOff: 0xf,
177 destAddr: in.got->addr +
178 in.stubHelper->stubBinder->gotIndex * LP64::wordSize);
179}
180
181static constexpr uint8_t stubHelperEntry[] = {
182 0x68, 0, 0, 0, 0, // 0x0: pushq <bind offset>
183 0xe9, 0, 0, 0, 0, // 0x5: jmp <__stub_helper>
184};
185
186void X86_64::writeStubHelperEntry(uint8_t *buf, const Symbol &sym,
187 uint64_t entryAddr) const {
188 memcpy(dest: buf, src: stubHelperEntry, n: sizeof(stubHelperEntry));
189 write32le(P: buf + 1, V: sym.lazyBindOffset);
190 writeRipRelative(d: {.symbol: &sym, .reason: "stub helper"}, buf, bufAddr: entryAddr,
191 bufOff: sizeof(stubHelperEntry), destAddr: in.stubHelper->addr);
192}
193
194static constexpr uint8_t objcStubsFastCode[] = {
195 0x48, 0x8b, 0x35, 0, 0, 0, 0, // 0x0: movq selrefs@selector(%rip), %rsi
196 0xff, 0x25, 0, 0, 0, 0, // 0x7: jmpq *_objc_msgSend@GOT(%rip)
197};
198
199void X86_64::writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
200 uint64_t &stubOffset, uint64_t selrefVA,
201 Symbol *objcMsgSend) const {
202 uint64_t objcMsgSendAddr = in.got->addr;
203 uint64_t objcMsgSendIndex = objcMsgSend->gotIndex;
204
205 memcpy(dest: buf, src: objcStubsFastCode, n: sizeof(objcStubsFastCode));
206 SymbolDiagnostic d = {.symbol: sym, .reason: sym->getName()};
207 uint64_t stubAddr = stubsAddr + stubOffset;
208 writeRipRelative(d, buf, bufAddr: stubAddr, bufOff: 7, destAddr: selrefVA);
209 writeRipRelative(d, buf, bufAddr: stubAddr, bufOff: 0xd,
210 destAddr: objcMsgSendAddr + objcMsgSendIndex * LP64::wordSize);
211 stubOffset += target->objcStubsFastSize;
212}
213
214void X86_64::relaxGotLoad(uint8_t *loc, uint8_t type) const {
215 // Convert MOVQ to LEAQ
216 if (loc[-2] != 0x8b)
217 error(msg: getRelocAttrs(type).name + " reloc requires MOVQ instruction");
218 loc[-2] = 0x8d;
219}
220
X86_64::X86_64() : TargetInfo(LP64()) {
  // Mach-O header CPU identification for x86-64.
  cpuType = CPU_TYPE_X86_64;
  cpuSubtype = CPU_SUBTYPE_X86_64_ALL;

  // Architecture-specific compact-unwind / relocation-type constants.
  modeDwarfEncoding = UNWIND_X86_MODE_DWARF;
  subtractorRelocType = X86_64_RELOC_SUBTRACTOR;
  unsignedRelocType = X86_64_RELOC_UNSIGNED;

  // Sizes of the code sequences this file emits; used by the synthetic
  // sections to lay out stubs.
  stubSize = sizeof(stub);
  stubHelperHeaderSize = sizeof(stubHelperHeader);
  stubHelperEntrySize = sizeof(stubHelperEntry);

  objcStubsFastSize = sizeof(objcStubsFastCode);
  objcStubsFastAlignment = 1;

  // Expose the per-relocation-type attribute table (indexed by r_type).
  relocAttrs = {relocAttrsArray.data(), relocAttrsArray.size()};
}
238
239TargetInfo *macho::createX86_64TargetInfo() {
240 static X86_64 t;
241 return &t;
242}
243
244void X86_64::handleDtraceReloc(const Symbol *sym, const Relocation &r,
245 uint8_t *loc) const {
246 assert(r.type == X86_64_RELOC_BRANCH);
247
248 if (config->outputType == MH_OBJECT)
249 return;
250
251 if (sym->getName().starts_with(Prefix: "___dtrace_probe")) {
252 // change call site to a NOP
253 loc[-1] = 0x90;
254 write32le(P: loc, V: 0x00401F0F);
255 } else if (sym->getName().starts_with(Prefix: "___dtrace_isenabled")) {
256 // change call site to a clear eax
257 loc[-1] = 0x33;
258 write32le(P: loc, V: 0x909090C0);
259 } else {
260 error(msg: "Unrecognized dtrace symbol prefix: " + toString(*sym));
261 }
262}
263