//===- X86_64.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "mach-o/compact_unwind_encoding.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"

using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

namespace {

struct X86_64 : TargetInfo {
  X86_64();

  int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
                            const relocation_info) const override;
  void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
                   uint64_t relocVA) const override;

  void writeStub(uint8_t *buf, const Symbol &,
                 uint64_t pointerVA) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const Symbol &,
                            uint64_t entryAddr) const override;

  void writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
                            uint64_t &stubOffset, uint64_t selrefVA,
                            Symbol *objcMsgSend) const override;

  void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
  uint64_t getPageSize() const override { return 4 * 1024; }

  void handleDtraceReloc(const Symbol *sym, const Reloc &r,
                         uint8_t *loc) const override;
};
} // namespace

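// Relocation attributes, indexed by the X86_64_RELOC_* r_type values
// (UNSIGNED = 0 through TLV = 9).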
static constexpr std::array<RelocAttrs, 10> relocAttrsArray{{
#define B(x) RelocAttrBits::x
    {"UNSIGNED",
     B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
    {"SIGNED", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"BRANCH", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
    {"GOT_LOAD", B(PCREL) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
    {"GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
    {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
    {"SIGNED_1", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"SIGNED_2", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"SIGNED_4", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"TLV", B(PCREL) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
#undef B
}};

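// X86_64_RELOC_SIGNED_{1,2,4} are used for RIP-relative references made by
// instructions that end 1, 2, or 4 bytes after the 32-bit displacement field
// (e.g. a store with an immediate operand). This returns that extra distance
// so addend and PC computations can account for it.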
static int pcrelOffset(uint8_t type) {
  switch (type) {
  case X86_64_RELOC_SIGNED_1:
    return 1;
  case X86_64_RELOC_SIGNED_2:
    return 2;
  case X86_64_RELOC_SIGNED_4:
    return 4;
  default:
    return 0;
  }
}

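// The addend is embedded in the instruction stream. For the SIGNED_{1,2,4}
// variants the stored value is biased by the trailing instruction bytes, so
// pcrelOffset() is added back here to recover the plain addend; relocateOne()
// re-applies the bias through its PC computation.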
int64_t X86_64::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
                                  relocation_info rel) const {
  auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
  const uint8_t *loc = buf + offset + rel.r_address;

  switch (rel.r_length) {
  case 2:
    return static_cast<int32_t>(read32le(loc)) + pcrelOffset(rel.r_type);
  case 3:
    return read64le(loc) + pcrelOffset(rel.r_type);
  default:
    llvm_unreachable("invalid r_length");
  }
}

void X86_64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
                         uint64_t relocVA) const {
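  // relocVA is the address of the 4-byte fixup field. An x86-64 RIP-relative
  // displacement is relative to the end of the instruction, i.e. 4 bytes past
  // the field plus any trailing immediate bytes (SIGNED_1/2/4).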
  if (r.pcrel) {
    uint64_t pc = relocVA + 4 + pcrelOffset(r.type);
    value -= pc;
  }

  switch (r.length) {
  case 2:
    if (r.type == X86_64_RELOC_UNSIGNED)
      checkUInt(loc, r, value, 32);
    else
      checkInt(loc, r, value, 32);
    write32le(loc, value);
    break;
  case 3:
    write64le(loc, value);
    break;
  default:
    llvm_unreachable("invalid r_length");
  }
}

// The following methods emit a number of assembly sequences with RIP-relative
// addressing. Note that RIP-relative addressing on X86-64 has the RIP pointing
// to the next instruction, not the current instruction, so we always have to
// account for the current instruction's size when calculating offsets.
// writeRipRelative helps with that.
//
// bufAddr: The virtual address corresponding to buf[0].
// bufOff: The offset within buf of the next instruction.
// destAddr: The destination address that the current instruction references.
static void writeRipRelative(SymbolDiagnostic d, uint8_t *buf, uint64_t bufAddr,
                             uint64_t bufOff, uint64_t destAddr) {
  uint64_t rip = bufAddr + bufOff;
  checkInt(buf, d, destAddr - rip, 32);
  // For the instructions we care about, the RIP-relative address is always
  // stored in the last 4 bytes of the instruction.
  write32le(buf + bufOff - 4, destAddr - rip);
}

static constexpr uint8_t stub[] = {
    0xff, 0x25, 0, 0, 0, 0, // jmpq *__la_symbol_ptr(%rip)
};

void X86_64::writeStub(uint8_t *buf, const Symbol &sym,
                       uint64_t pointerVA) const {
  memcpy(buf, stub, 2); // just copy the two nonzero bytes
  uint64_t stubAddr = in.stubs->addr + sym.stubsIndex * sizeof(stub);
  writeRipRelative({&sym, "stub"}, buf, stubAddr, sizeof(stub), pointerVA);
}

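// Lazy binding machinery: each entry in __stubs jumps through a lazy pointer
// that initially targets the symbol's stub helper entry. That entry pushes the
// symbol's lazy-bind offset and jumps to this shared header, which pushes a
// pointer to the ImageLoaderCache and tail-calls dyld_stub_binder to bind the
// symbol and update the lazy pointer.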
static constexpr uint8_t stubHelperHeader[] = {
    0x4c, 0x8d, 0x1d, 0, 0, 0, 0, // 0x0: leaq ImageLoaderCache(%rip), %r11
    0x41, 0x53,                   // 0x7: pushq %r11
    0xff, 0x25, 0, 0, 0, 0,       // 0x9: jmpq *dyld_stub_binder@GOT(%rip)
    0x90,                         // 0xf: nop
};

void X86_64::writeStubHelperHeader(uint8_t *buf) const {
  memcpy(buf, stubHelperHeader, sizeof(stubHelperHeader));
  SymbolDiagnostic d = {nullptr, "stub helper header"};
  writeRipRelative(d, buf, in.stubHelper->addr, 7,
                   in.imageLoaderCache->getVA());
  writeRipRelative(d, buf, in.stubHelper->addr, 0xf,
                   in.got->addr +
                       in.stubHelper->stubBinder->gotIndex * LP64::wordSize);
}

static constexpr uint8_t stubHelperEntry[] = {
    0x68, 0, 0, 0, 0, // 0x0: pushq <bind offset>
    0xe9, 0, 0, 0, 0, // 0x5: jmp <__stub_helper>
};

void X86_64::writeStubHelperEntry(uint8_t *buf, const Symbol &sym,
                                  uint64_t entryAddr) const {
  memcpy(buf, stubHelperEntry, sizeof(stubHelperEntry));
  write32le(buf + 1, sym.lazyBindOffset);
  writeRipRelative({&sym, "stub helper"}, buf, entryAddr,
                   sizeof(stubHelperEntry), in.stubHelper->addr);
}

static constexpr uint8_t objcStubsFastCode[] = {
    0x48, 0x8b, 0x35, 0, 0, 0, 0, // 0x0: movq selrefs@selector(%rip), %rsi
    0xff, 0x25, 0, 0, 0, 0,       // 0x7: jmpq *_objc_msgSend@GOT(%rip)
};

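// Write one __objc_stubs fast-path entry: load the method's selector from
// __objc_selrefs into %rsi, then tail-call _objc_msgSend through its GOT slot.
// Offsets 0x7 and 0xd are the ends of the two instructions; each one's last 4
// bytes hold the RIP-relative displacement filled in below.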
void X86_64::writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
                                  uint64_t &stubOffset, uint64_t selrefVA,
                                  Symbol *objcMsgSend) const {
  uint64_t objcMsgSendAddr = in.got->addr;
  uint64_t objcMsgSendIndex = objcMsgSend->gotIndex;

  memcpy(buf, objcStubsFastCode, sizeof(objcStubsFastCode));
  SymbolDiagnostic d = {sym, sym->getName()};
  uint64_t stubAddr = stubsAddr + stubOffset;
  writeRipRelative(d, buf, stubAddr, 7, selrefVA);
  writeRipRelative(d, buf, stubAddr, 0xd,
                   objcMsgSendAddr + objcMsgSendIndex * LP64::wordSize);
  stubOffset += target->objcStubsFastSize;
}

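// GOT_LOAD relaxation: when the referent turns out not to need GOT
// indirection, the load of the GOT slot, e.g.
//   movq _foo@GOTPCREL(%rip), %rax    (48 8b 05 <disp32>)
// is rewritten into a direct address computation:
//   leaq _foo(%rip), %rax             (48 8d 05 <disp32>)
// Only the opcode byte at loc[-2] changes; loc points at the displacement.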
void X86_64::relaxGotLoad(uint8_t *loc, uint8_t type) const {
  // Convert MOVQ to LEAQ
  if (loc[-2] != 0x8b)
    error(getRelocAttrs(type).name + " reloc requires MOVQ instruction");
  loc[-2] = 0x8d;
}

X86_64::X86_64() : TargetInfo(LP64()) {
  cpuType = CPU_TYPE_X86_64;
  cpuSubtype = CPU_SUBTYPE_X86_64_ALL;

  modeDwarfEncoding = UNWIND_X86_MODE_DWARF;
  subtractorRelocType = X86_64_RELOC_SUBTRACTOR;
  unsignedRelocType = X86_64_RELOC_UNSIGNED;

  stubSize = sizeof(stub);
  stubHelperHeaderSize = sizeof(stubHelperHeader);
  stubHelperEntrySize = sizeof(stubHelperEntry);

  objcStubsFastSize = sizeof(objcStubsFastCode);
  objcStubsFastAlignment = 1;

  relocAttrs = {relocAttrsArray.data(), relocAttrsArray.size()};
}

TargetInfo *macho::createX86_64TargetInfo() {
  static X86_64 t;
  return &t;
}

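// Calls to ___dtrace_probe$... and ___dtrace_isenabled$... symbols are not
// real calls; in a final image the 5-byte call site (0xe8 opcode at loc[-1],
// rel32 at loc) is patched in place. Probe sites become five bytes of no-ops
// (0x90 followed by the 4-byte `nopl 0(%rax)`); is-enabled sites become
// `xorl %eax, %eax` followed by three no-ops. For relocatable output the
// relocation is kept as-is.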
void X86_64::handleDtraceReloc(const Symbol *sym, const Reloc &r,
                               uint8_t *loc) const {
  assert(r.type == X86_64_RELOC_BRANCH);

  if (config->outputType == MH_OBJECT)
    return;

  if (sym->getName().starts_with("___dtrace_probe")) {
    // change call site to a NOP
    loc[-1] = 0x90;
    write32le(loc, 0x00401F0F);
  } else if (sym->getName().starts_with("___dtrace_isenabled")) {
    // change call site to a clear eax
    loc[-1] = 0x33;
    write32le(loc, 0x909090C0);
  } else {
    error("Unrecognized dtrace symbol prefix: " + toString(*sym));
  }
}