1//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Implementation of ELF support for the MC-JIT runtime dynamic linker.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RuntimeDyldELF.h"
14#include "Targets/RuntimeDyldELFMips.h"
15#include "llvm/ADT/StringRef.h"
16#include "llvm/BinaryFormat/ELF.h"
17#include "llvm/ExecutionEngine/Orc/SymbolStringPool.h"
18#include "llvm/Object/ELFObjectFile.h"
19#include "llvm/Object/ObjectFile.h"
20#include "llvm/Support/Endian.h"
21#include "llvm/Support/MemoryBuffer.h"
22#include "llvm/TargetParser/Triple.h"
23
24using namespace llvm;
25using namespace llvm::object;
26using namespace llvm::support::endian;
27
28#define DEBUG_TYPE "dyld"
29
30static void or32le(void *P, int32_t V) { write32le(P, V: read32le(P) | V); }
31
32static void or32AArch64Imm(void *L, uint64_t Imm) {
33 or32le(P: L, V: (Imm & 0xFFF) << 10);
34}
35
36template <class T> static void write(bool isBE, void *P, T V) {
37 isBE ? write<T, llvm::endianness::big>(P, V)
38 : write<T, llvm::endianness::little>(P, V);
39}
40
41static void write32AArch64Addr(void *L, uint64_t Imm) {
42 uint32_t ImmLo = (Imm & 0x3) << 29;
43 uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
44 uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
45 write32le(P: L, V: (read32le(P: L) & ~Mask) | ImmLo | ImmHi);
46}
47
48// Return the bits [Start, End] from Val shifted Start bits.
49// For instance, getBits(0xF0, 4, 8) returns 0xF.
50static uint64_t getBits(uint64_t Val, int Start, int End) {
51 uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
52 return (Val >> Start) & Mask;
53}
54
55namespace {
56
// ELF object file subclass used by the dynamic loader. It allows the section
// headers and symbol table of an in-memory object to be patched with their
// final load addresses after sections are placed in target memory.
template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  // Unsigned integer type wide enough to hold a target address.
  typedef typename ELFT::uint addr_type;

  DyldELFObject(ELFObjectFile<ELFT> &&Obj);

public:
  // Wrap Wrapper in a DyldELFObject; fails if the buffer is not valid ELF.
  static Expected<std::unique_ptr<DyldELFObject>>
  create(MemoryBufferRef Wrapper);

  // Overwrite the sh_addr of Sec's section header with Addr.
  void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);

  // Overwrite the st_value of SymRef's symbol table entry with Addr.
  void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);

  // Methods for type inquiry through isa, cast and dyn_cast
  static bool classof(const Binary *v) {
    return (isa<ELFObjectFile<ELFT>>(v) &&
            classof(cast<ELFObjectFile<ELFT>>(v)));
  }
  static bool classof(const ELFObjectFile<ELFT> *v) {
    // Distinguished from a plain ELFObjectFile by the flag set in the ctor.
    return v->isDyldType();
  }
};
81
82
83
// The MemoryBuffer passed into this constructor is just a wrapper around the
// actual memory. Ultimately, the Binary parent class will take ownership of
// this MemoryBuffer object but not the underlying memory.
template <class ELFT>
DyldELFObject<ELFT>::DyldELFObject(ELFObjectFile<ELFT> &&Obj)
    : ELFObjectFile<ELFT>(std::move(Obj)) {
  // Mark the object so classof()/isDyldType() can identify it later.
  this->isDyldELFObject = true;
}
92
93template <class ELFT>
94Expected<std::unique_ptr<DyldELFObject<ELFT>>>
95DyldELFObject<ELFT>::create(MemoryBufferRef Wrapper) {
96 auto Obj = ELFObjectFile<ELFT>::create(Wrapper);
97 if (auto E = Obj.takeError())
98 return std::move(E);
99 std::unique_ptr<DyldELFObject<ELFT>> Ret(
100 new DyldELFObject<ELFT>(std::move(*Obj)));
101 return std::move(Ret);
102}
103
104template <class ELFT>
105void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
106 uint64_t Addr) {
107 DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
108 Elf_Shdr *shdr =
109 const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
110
111 // This assumes the address passed in matches the target address bitness
112 // The template-based type cast handles everything else.
113 shdr->sh_addr = static_cast<addr_type>(Addr);
114}
115
116template <class ELFT>
117void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
118 uint64_t Addr) {
119
120 Elf_Sym *sym = const_cast<Elf_Sym *>(
121 ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
122
123 // This assumes the address passed in matches the target address bitness
124 // The template-based type cast handles everything else.
125 sym->st_value = static_cast<addr_type>(Addr);
126}
127
// LoadedObjectInfo implementation for ELF: carries the section-to-ID map of
// a loaded object and can produce an address-patched copy for debuggers.
class LoadedELFObjectInfo final
    : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
                                    RuntimeDyld::LoadedObjectInfo> {
public:
  LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}

  // Return a copy of Obj whose section headers carry final load addresses.
  OwningBinary<ObjectFile>
  getObjectForDebug(const ObjectFile &Obj) const override;
};
138
139template <typename ELFT>
140static Expected<std::unique_ptr<DyldELFObject<ELFT>>>
141createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
142 const LoadedELFObjectInfo &L) {
143 typedef typename ELFT::Shdr Elf_Shdr;
144 typedef typename ELFT::uint addr_type;
145
146 Expected<std::unique_ptr<DyldELFObject<ELFT>>> ObjOrErr =
147 DyldELFObject<ELFT>::create(Buffer);
148 if (Error E = ObjOrErr.takeError())
149 return std::move(E);
150
151 std::unique_ptr<DyldELFObject<ELFT>> Obj = std::move(*ObjOrErr);
152
153 // Iterate over all sections in the object.
154 auto SI = SourceObject.section_begin();
155 for (const auto &Sec : Obj->sections()) {
156 Expected<StringRef> NameOrErr = Sec.getName();
157 if (!NameOrErr) {
158 consumeError(Err: NameOrErr.takeError());
159 continue;
160 }
161
162 if (*NameOrErr != "") {
163 DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
164 Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
165 reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
166
167 if (uint64_t SecLoadAddr = L.getSectionLoadAddress(Sec: *SI)) {
168 // This assumes that the address passed in matches the target address
169 // bitness. The template-based type cast handles everything else.
170 shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
171 }
172 }
173 ++SI;
174 }
175
176 return std::move(Obj);
177}
178
// Make a patched copy of Obj — section headers rewritten with the final load
// addresses recorded in L — suitable for handing to a debugger.
static OwningBinary<ObjectFile>
createELFDebugObject(const ObjectFile &Obj, const LoadedELFObjectInfo &L) {
  assert(Obj.isELF() && "Not an ELF object file.");

  // Copy the underlying bytes so the debug object can be patched without
  // touching the original image.
  std::unique_ptr<MemoryBuffer> Buffer =
      MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());

  // Start with a success-state Expected and consume its (empty) error so it
  // is marked checked before being assigned below.
  Expected<std::unique_ptr<ObjectFile>> DebugObj(nullptr);
  handleAllErrors(DebugObj.takeError());
  // Dispatch on address width and endianness to the matching ELFT.
  if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian())
    DebugObj =
        createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L);
  else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian())
    DebugObj =
        createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L);
  else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian())
    DebugObj =
        createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L);
  else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian())
    DebugObj =
        createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L);
  else
    llvm_unreachable("Unexpected ELF format");

  // NOTE(review): handleAllErrors with no handlers aborts on a real error —
  // presumably creation failure is considered fatal here; confirm.
  handleAllErrors(DebugObj.takeError());
  return OwningBinary<ObjectFile>(std::move(*DebugObj), std::move(Buffer));
}
206
OwningBinary<ObjectFile>
LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
  // Delegate to the file-local helper, supplying ourselves as the source of
  // section load addresses.
  return createELFDebugObject(Obj, *this);
}
211
212} // anonymous namespace
213
214namespace llvm {
215
// GOTSectionID/CurrentGOTIndex start at 0: no GOT entries emitted yet.
RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
                               JITSymbolResolver &Resolver)
    : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
RuntimeDyldELF::~RuntimeDyldELF() = default;
220
221void RuntimeDyldELF::registerEHFrames() {
222 for (SID EHFrameSID : UnregisteredEHFrameSections) {
223 uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
224 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
225 size_t EHFrameSize = Sections[EHFrameSID].getSize();
226 MemMgr.registerEHFrames(Addr: EHFrameAddr, LoadAddr: EHFrameLoadAddr, Size: EHFrameSize);
227 }
228 UnregisteredEHFrameSections.clear();
229}
230
231std::unique_ptr<RuntimeDyldELF>
232llvm::RuntimeDyldELF::create(Triple::ArchType Arch,
233 RuntimeDyld::MemoryManager &MemMgr,
234 JITSymbolResolver &Resolver) {
235 switch (Arch) {
236 default:
237 return std::make_unique<RuntimeDyldELF>(args&: MemMgr, args&: Resolver);
238 case Triple::mips:
239 case Triple::mipsel:
240 case Triple::mips64:
241 case Triple::mips64el:
242 return std::make_unique<RuntimeDyldELFMips>(args&: MemMgr, args&: Resolver);
243 }
244}
245
246std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
247RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
248 if (auto ObjSectionToIDOrErr = loadObjectImpl(Obj: O))
249 return std::make_unique<LoadedELFObjectInfo>(args&: *this, args&: *ObjSectionToIDOrErr);
250 else {
251 HasError = true;
252 raw_string_ostream ErrStream(ErrorStr);
253 logAllUnhandledErrors(E: ObjSectionToIDOrErr.takeError(), OS&: ErrStream);
254 return nullptr;
255 }
256}
257
// Apply an x86-64 relocation of the given Type at Section+Offset.
// Value is the resolved target address (S) and Addend the addend (A);
// SymOffset is unused by the relocation types handled here.
void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
                                             uint64_t Offset, uint64_t Value,
                                             uint32_t Type, int64_t Addend,
                                             uint64_t SymOffset) {
  switch (Type) {
  default:
    report_fatal_error("Relocation type not implemented yet!");
    break;
  case ELF::R_X86_64_NONE:
    break;
  case ELF::R_X86_64_8: {
    // 8-bit absolute: S + A, must fit in a signed byte.
    Value += Addend;
    assert((int64_t)Value <= INT8_MAX && (int64_t)Value >= INT8_MIN);
    uint8_t TruncatedAddr = (Value & 0xFF);
    *Section.getAddressWithOffset(Offset) = TruncatedAddr;
    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
                      << format("%p\n", Section.getAddressWithOffset(Offset)));
    break;
  }
  case ELF::R_X86_64_16: {
    // 16-bit absolute: S + A, must fit in a signed halfword.
    Value += Addend;
    assert((int64_t)Value <= INT16_MAX && (int64_t)Value >= INT16_MIN);
    uint16_t TruncatedAddr = (Value & 0xFFFF);
    support::ulittle16_t::ref(Section.getAddressWithOffset(Offset)) =
        TruncatedAddr;
    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
                      << format("%p\n", Section.getAddressWithOffset(Offset)));
    break;
  }
  case ELF::R_X86_64_64: {
    // 64-bit absolute: S + A.
    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
        Value + Addend;
    LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
                      << format("%p\n", Section.getAddressWithOffset(Offset)));
    break;
  }
  case ELF::R_X86_64_32:
  case ELF::R_X86_64_32S: {
    // 32-bit absolute: S + A. _32 is zero-extended, _32S sign-extended; the
    // assert checks the corresponding range for each.
    Value += Addend;
    assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
           (Type == ELF::R_X86_64_32S &&
            ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
    uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
        TruncatedAddr;
    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
                      << format("%p\n", Section.getAddressWithOffset(Offset)));
    break;
  }
  case ELF::R_X86_64_PC8: {
    // 8-bit PC-relative: S + A - P.
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    int64_t RealOffset = Value + Addend - FinalAddress;
    assert(isInt<8>(RealOffset));
    int8_t TruncOffset = (RealOffset & 0xFF);
    Section.getAddress()[Offset] = TruncOffset;
    break;
  }
  case ELF::R_X86_64_PC32: {
    // 32-bit PC-relative: S + A - P.
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    int64_t RealOffset = Value + Addend - FinalAddress;
    assert(isInt<32>(RealOffset));
    int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
        TruncOffset;
    break;
  }
  case ELF::R_X86_64_PC64: {
    // 64-bit PC-relative: S + A - P.
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    int64_t RealOffset = Value + Addend - FinalAddress;
    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
        RealOffset;
    LLVM_DEBUG(dbgs() << "Writing " << format("%p", RealOffset) << " at "
                      << format("%p\n", FinalAddress));
    break;
  }
  case ELF::R_X86_64_GOTOFF64: {
    // Compute Value - GOTBase.
    uint64_t GOTBase = 0;
    for (const auto &Section : Sections) {
      if (Section.getName() == ".got") {
        GOTBase = Section.getLoadAddressWithOffset(0);
        break;
      }
    }
    assert(GOTBase != 0 && "missing GOT");
    int64_t GOTOffset = Value - GOTBase + Addend;
    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = GOTOffset;
    break;
  }
  case ELF::R_X86_64_DTPMOD64: {
    // We only have one DSO, so the module id is always 1.
    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 1;
    break;
  }
  case ELF::R_X86_64_DTPOFF64:
  case ELF::R_X86_64_TPOFF64: {
    // DTPOFF64 should resolve to the offset in the TLS block, TPOFF64 to the
    // offset in the *initial* TLS block. Since we are statically linking, all
    // TLS blocks already exist in the initial block, so resolve both
    // relocations equally.
    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
        Value + Addend;
    break;
  }
  case ELF::R_X86_64_DTPOFF32:
  case ELF::R_X86_64_TPOFF32: {
    // As for the (D)TPOFF64 relocations above, both DTPOFF32 and TPOFF32 can
    // be resolved equally.
    int64_t RealValue = Value + Addend;
    assert(RealValue >= INT32_MIN && RealValue <= INT32_MAX);
    int32_t TruncValue = RealValue;
    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
        TruncValue;
    break;
  }
  }
}
375
376void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
377 uint64_t Offset, uint32_t Value,
378 uint32_t Type, int32_t Addend) {
379 switch (Type) {
380 case ELF::R_386_32: {
381 support::ulittle32_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset)) =
382 Value + Addend;
383 break;
384 }
385 // Handle R_386_PLT32 like R_386_PC32 since it should be able to
386 // reach any 32 bit address.
387 case ELF::R_386_PLT32:
388 case ELF::R_386_PC32: {
389 uint32_t FinalAddress =
390 Section.getLoadAddressWithOffset(OffsetBytes: Offset) & 0xFFFFFFFF;
391 uint32_t RealOffset = Value + Addend - FinalAddress;
392 support::ulittle32_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset)) =
393 RealOffset;
394 break;
395 }
396 default:
397 // There are other relocation types, but it appears these are the
398 // only ones currently used by the LLVM ELF object writer
399 report_fatal_error(reason: "Relocation type not implemented yet!");
400 break;
401 }
402}
403
// Apply an AArch64 relocation of the given Type at Section+Offset.
// Value is the resolved target (S), Addend the addend (A); P below denotes
// the place being relocated (FinalAddress).
void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint32_t *TargetPtr =
      reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
  // Data should use target endian. Code should always use little endian.
  bool isBE = Arch == Triple::aarch64_be;

  LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
                    << format("%llx", Section.getAddressWithOffset(Offset))
                    << " FinalAddress: 0x" << format("%llx", FinalAddress)
                    << " Value: 0x" << format("%llx", Value) << " Type: 0x"
                    << format("%x", Type) << " Addend: 0x"
                    << format("%llx", Addend) << "\n");

  switch (Type) {
  default:
    report_fatal_error("Relocation type not implemented yet!");
    break;
  case ELF::R_AARCH64_NONE:
    break;
  case ELF::R_AARCH64_ABS16: {
    // 16-bit absolute: S + A, must fit in 16 bits (signed or unsigned).
    uint64_t Result = Value + Addend;
    assert(Result == static_cast<uint64_t>(llvm::SignExtend64(Result, 16)) ||
           (Result >> 16) == 0);
    write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
    break;
  }
  case ELF::R_AARCH64_ABS32: {
    // 32-bit absolute: S + A, must fit in 32 bits (signed or unsigned).
    uint64_t Result = Value + Addend;
    assert(Result == static_cast<uint64_t>(llvm::SignExtend64(Result, 32)) ||
           (Result >> 32) == 0);
    write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
    break;
  }
  case ELF::R_AARCH64_ABS64:
    // 64-bit absolute: S + A.
    write(isBE, TargetPtr, Value + Addend);
    break;
  case ELF::R_AARCH64_PLT32: {
    // 32-bit PC-relative (to PLT entry or symbol): S + A - P.
    uint64_t Result = Value + Addend - FinalAddress;
    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
           static_cast<int64_t>(Result) <= INT32_MAX);
    write(isBE, TargetPtr, static_cast<uint32_t>(Result));
    break;
  }
  case ELF::R_AARCH64_PREL16: {
    // 16-bit PC-relative: S + A - P.
    uint64_t Result = Value + Addend - FinalAddress;
    assert(static_cast<int64_t>(Result) >= INT16_MIN &&
           static_cast<int64_t>(Result) <= UINT16_MAX);
    write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
    break;
  }
  case ELF::R_AARCH64_PREL32: {
    // 32-bit PC-relative: S + A - P.
    uint64_t Result = Value + Addend - FinalAddress;
    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
           static_cast<int64_t>(Result) <= UINT32_MAX);
    write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
    break;
  }
  case ELF::R_AARCH64_PREL64:
    // 64-bit PC-relative: S + A - P.
    write(isBE, TargetPtr, Value + Addend - FinalAddress);
    break;
  case ELF::R_AARCH64_CONDBR19: {
    uint64_t BranchImm = Value + Addend - FinalAddress;

    assert(isInt<21>(BranchImm));
    *TargetPtr &= 0xff00001fU;
    // Immediate:20:2 goes in bits 23:5 of Bcc, CBZ, CBNZ
    or32le(TargetPtr, (BranchImm & 0x001FFFFC) << 3);
    break;
  }
  case ELF::R_AARCH64_TSTBR14: {
    uint64_t BranchImm = Value + Addend - FinalAddress;

    assert(isInt<16>(BranchImm));

    // Clear the previous immediate field before OR-ing in the new one.
    uint32_t RawInstr = *(support::little32_t *)TargetPtr;
    *(support::little32_t *)TargetPtr = RawInstr & 0xfff8001fU;

    // Immediate:15:2 goes in bits 18:5 of TBZ, TBNZ
    or32le(TargetPtr, (BranchImm & 0x0000FFFC) << 3);
    break;
  }
  case ELF::R_AARCH64_CALL26: // fallthrough
  case ELF::R_AARCH64_JUMP26: {
    // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
    // calculation.
    uint64_t BranchImm = Value + Addend - FinalAddress;

    // "Check that -2^27 <= result < 2^27".
    assert(isInt<28>(BranchImm));
    or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
    break;
  }
  // MOVW_UABS_Gn: select 16-bit group n of S+A and place it in the MOVZ/MOVK
  // immediate field at bit 5 (hence shift = group offset - 5).
  case ELF::R_AARCH64_MOVW_UABS_G3:
    or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
    break;
  case ELF::R_AARCH64_MOVW_UABS_G2_NC:
    or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
    break;
  case ELF::R_AARCH64_MOVW_UABS_G1_NC:
    or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
    break;
  case ELF::R_AARCH64_MOVW_UABS_G0_NC:
    or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
    break;
  case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
    // Operation: Page(S+A) - Page(P)
    uint64_t Result =
        ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);

    // Check that -2^32 <= X < 2^32
    assert(isInt<33>(Result) && "overflow check failed for relocation");

    // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
    // from bits 32:12 of X.
    write32AArch64Addr(TargetPtr, Result >> 12);
    break;
  }
  case ELF::R_AARCH64_ADD_ABS_LO12_NC:
    // Operation: S + A
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:0 of X
    or32AArch64Imm(TargetPtr, Value + Addend);
    break;
  case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
    // Operation: S + A
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:0 of X
    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 0, 11));
    break;
  case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
    // Operation: S + A
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:1 of X
    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 1, 11));
    break;
  case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
    // Operation: S + A
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:2 of X
    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
    break;
  case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
    // Operation: S + A
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:3 of X
    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
    break;
  case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
    // Operation: S + A
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:4 of X
    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 4, 11));
    break;
  case ELF::R_AARCH64_LD_PREL_LO19: {
    // Operation: S + A - P
    uint64_t Result = Value + Addend - FinalAddress;

    // "Check that -2^20 <= result < 2^20".
    assert(isInt<21>(Result));

    *TargetPtr &= 0xff00001fU;
    // Immediate goes in bits 23:5 of LD imm instruction, taken
    // from bits 20:2 of X
    *TargetPtr |= ((Result & 0xffc) << (5 - 2));
    break;
  }
  case ELF::R_AARCH64_ADR_PREL_LO21: {
    // Operation: S + A - P
    uint64_t Result = Value + Addend - FinalAddress;

    // "Check that -2^20 <= result < 2^20".
    assert(isInt<21>(Result));

    *TargetPtr &= 0x9f00001fU;
    // Immediate goes in bits 23:5, 30:29 of ADR imm instruction, taken
    // from bits 20:0 of X
    *TargetPtr |= ((Result & 0xffc) << (5 - 2));
    *TargetPtr |= (Result & 0x3) << 29;
    break;
  }
  }
}
589
// Apply an ARM (AArch32) relocation at Section+Offset. Value is the resolved
// target (S); Addend (A) is folded into Value up front.
void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  // TODO: Add Thumb relocations.
  uint32_t *TargetPtr =
      reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
  uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
  Value += Addend;

  LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
                    << Section.getAddressWithOffset(Offset)
                    << " FinalAddress: " << format("%p", FinalAddress)
                    << " Value: " << format("%x", Value)
                    << " Type: " << format("%x", Type)
                    << " Addend: " << format("%x", Addend) << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");

  case ELF::R_ARM_NONE:
    break;
  // Write a 31bit signed offset
  case ELF::R_ARM_PREL31:
    // Preserve bit 31 of the instruction word; store (S + A - P) in the low
    // 31 bits.
    support::ulittle32_t::ref{TargetPtr} =
        (support::ulittle32_t::ref{TargetPtr} & 0x80000000) |
        ((Value - FinalAddress) & ~0x80000000);
    break;
  case ELF::R_ARM_TARGET1:
  case ELF::R_ARM_ABS32:
    // Absolute 32-bit: S + A (R_ARM_TARGET1 is treated as ABS32 here).
    support::ulittle32_t::ref{TargetPtr} = Value;
    break;
  // Write first 16 bit of 32 bit value to the mov instruction.
  // Last 4 bit should be shifted.
  case ELF::R_ARM_MOVW_ABS_NC:
  case ELF::R_ARM_MOVT_ABS:
    if (Type == ELF::R_ARM_MOVW_ABS_NC)
      Value = Value & 0xFFFF;
    else if (Type == ELF::R_ARM_MOVT_ABS)
      Value = (Value >> 16) & 0xFFFF;
    // The 16-bit immediate is split: low 12 bits in imm12, top nibble in
    // bits 19:16.
    support::ulittle32_t::ref{TargetPtr} =
        (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) |
        (((Value >> 12) & 0xF) << 16);
    break;
  // Write 24 bit relative value to the branch instruction.
  case ELF::R_ARM_PC24: // Fall through.
  case ELF::R_ARM_CALL: // Fall through.
  case ELF::R_ARM_JUMP24:
    // Branch offset is relative to PC+8 on ARM; encoded as (offset >> 2) in
    // the low 24 bits of the instruction.
    int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
    RelValue = (RelValue & 0x03FFFFFC) >> 2;
    assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE);
    support::ulittle32_t::ref{TargetPtr} =
        (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue;
    break;
  }
}
646
647bool RuntimeDyldELF::resolveLoongArch64ShortBranch(
648 unsigned SectionID, relocation_iterator RelI,
649 const RelocationValueRef &Value) {
650 uint64_t Address;
651 if (Value.SymbolName) {
652 auto Loc = GlobalSymbolTable.find(Key: Value.SymbolName);
653 // Don't create direct branch for external symbols.
654 if (Loc == GlobalSymbolTable.end())
655 return false;
656 const auto &SymInfo = Loc->second;
657 Address =
658 uint64_t(Sections[SymInfo.getSectionID()].getLoadAddressWithOffset(
659 OffsetBytes: SymInfo.getOffset()));
660 } else {
661 Address = uint64_t(Sections[Value.SectionID].getLoadAddress());
662 }
663 uint64_t Offset = RelI->getOffset();
664 uint64_t SourceAddress = Sections[SectionID].getLoadAddressWithOffset(OffsetBytes: Offset);
665 uint64_t Delta = Address + Value.Addend - SourceAddress;
666 // Normal call
667 if (RelI->getType() == ELF::R_LARCH_B26) {
668 if (!isInt<28>(x: Delta))
669 return false;
670 resolveRelocation(Section: Sections[SectionID], Offset, Value: Address, Type: RelI->getType(),
671 Addend: Value.Addend);
672 return true;
673 }
674 // Medium call: R_LARCH_CALL36
675 // Range: [-128G - 0x20000, +128G - 0x20000)
676 if (((int64_t)Delta + 0x20000) != llvm::SignExtend64(X: Delta + 0x20000, B: 38))
677 return false;
678 resolveRelocation(Section: Sections[SectionID], Offset, Value: Address, Type: RelI->getType(),
679 Addend: Value.Addend);
680 return true;
681}
682
// Resolve a LoongArch64 branch relocation, routing out-of-range targets
// through a stub that materializes the full 64-bit address.
void RuntimeDyldELF::resolveLoongArch64Branch(unsigned SectionID,
                                              const RelocationValueRef &Value,
                                              relocation_iterator RelI,
                                              StubMap &Stubs) {
  LLVM_DEBUG(dbgs() << "\t\tThis is an LoongArch64 branch relocation.\n");

  // If the target is close enough, patch the branch directly and skip the
  // stub machinery entirely.
  if (resolveLoongArch64ShortBranch(SectionID, RelI, Value))
    return;

  SectionEntry &Section = Sections[SectionID];
  uint64_t Offset = RelI->getOffset();
  unsigned RelType = RelI->getType();
  // Look for an existing stub.
  auto [It, Inserted] = Stubs.try_emplace(Value);
  if (!Inserted) {
    // Reuse the stub already emitted for this target.
    resolveRelocation(Section, Offset,
                      (uint64_t)Section.getAddressWithOffset(It->second),
                      RelType, 0);
    LLVM_DEBUG(dbgs() << " Stub function found\n");
    return;
  }
  // Create a new stub function.
  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
  It->second = Section.getStubOffset();
  uint8_t *StubTargetAddr =
      createStubFunction(Section.getAddressWithOffset(Section.getStubOffset()));
  // The stub's four instructions get their immediates filled in by these
  // ABS relocations (lu12i.w / ori / lu32i.d / lu52i.d pattern, judging by
  // the relocation types — see the stub emitter for the exact sequence).
  RelocationEntry LU12I_W(SectionID, StubTargetAddr - Section.getAddress(),
                          ELF::R_LARCH_ABS_HI20, Value.Addend);
  RelocationEntry ORI(SectionID, StubTargetAddr - Section.getAddress() + 4,
                      ELF::R_LARCH_ABS_LO12, Value.Addend);
  RelocationEntry LU32I_D(SectionID, StubTargetAddr - Section.getAddress() + 8,
                          ELF::R_LARCH_ABS64_LO20, Value.Addend);
  RelocationEntry LU52I_D(SectionID, StubTargetAddr - Section.getAddress() + 12,
                          ELF::R_LARCH_ABS64_HI12, Value.Addend);
  // Defer the stub's own relocations until the target address is known.
  if (Value.SymbolName) {
    addRelocationForSymbol(LU12I_W, Value.SymbolName);
    addRelocationForSymbol(ORI, Value.SymbolName);
    addRelocationForSymbol(LU32I_D, Value.SymbolName);
    addRelocationForSymbol(LU52I_D, Value.SymbolName);
  } else {
    addRelocationForSection(LU12I_W, Value.SectionID);
    addRelocationForSection(ORI, Value.SectionID);
    addRelocationForSection(LU32I_D, Value.SectionID);
    addRelocationForSection(LU52I_D, Value.SectionID);
  }
  // Point the original branch at the stub.
  resolveRelocation(Section, Offset,
                    reinterpret_cast<uint64_t>(
                        Section.getAddressWithOffset(Section.getStubOffset())),
                    RelType, 0);
  Section.advanceStubOffset(getMaxStubSize());
}
735
736// Returns extract bits Val[Hi:Lo].
// Returns extract bits Val[Hi:Lo]. Hi == 63 is special-cased to avoid the
// undefined shift (1ULL << 64) when building the mask.
static inline uint32_t extractBits(uint64_t Val, uint32_t Hi, uint32_t Lo) {
  if (Hi == 63)
    return uint32_t(Val >> Lo);
  uint64_t Masked = Val & ((1ULL << (Hi + 1)) - 1);
  return uint32_t(Masked >> Lo);
}
740
// Apply a LoongArch64 relocation of the given Type at Section+Offset.
// Value is the resolved target (S), Addend the addend (A); FinalAddress is
// the place (P) being patched.
void RuntimeDyldELF::resolveLoongArch64Relocation(const SectionEntry &Section,
                                                  uint64_t Offset,
                                                  uint64_t Value, uint32_t Type,
                                                  int64_t Addend) {
  auto *TargetPtr = Section.getAddressWithOffset(Offset);
  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);

  LLVM_DEBUG(dbgs() << "resolveLoongArch64Relocation, LocalAddress: 0x"
                    << format("%llx", Section.getAddressWithOffset(Offset))
                    << " FinalAddress: 0x" << format("%llx", FinalAddress)
                    << " Value: 0x" << format("%llx", Value) << " Type: 0x"
                    << format("%x", Type) << " Addend: 0x"
                    << format("%llx", Addend) << "\n");

  switch (Type) {
  default:
    report_fatal_error("Relocation type not implemented yet!");
    break;
  case ELF::R_LARCH_32:
    // 32-bit absolute: S + A.
    support::ulittle32_t::ref{TargetPtr} =
        static_cast<uint32_t>(Value + Addend);
    break;
  case ELF::R_LARCH_64:
    // 64-bit absolute: S + A.
    support::ulittle64_t::ref{TargetPtr} = Value + Addend;
    break;
  case ELF::R_LARCH_32_PCREL:
    // 32-bit PC-relative: S + A - P.
    support::ulittle32_t::ref{TargetPtr} =
        static_cast<uint32_t>(Value + Addend - FinalAddress);
    break;
  case ELF::R_LARCH_B26: {
    // Direct branch: (S + A - P) >> 2, split across the b/bl instruction's
    // two immediate fields (bits 25:10 hold offs15:0, bits 9:0 hold
    // offs25:16).
    uint64_t B26 = (Value + Addend - FinalAddress) >> 2;
    auto Instr = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm15_0 = extractBits(B26, /*Hi=*/15, /*Lo=*/0) << 10;
    uint32_t Imm25_16 = extractBits(B26, /*Hi=*/25, /*Lo=*/16);
    Instr = (Instr & 0xfc000000) | Imm15_0 | Imm25_16;
    break;
  }
  case ELF::R_LARCH_CALL36: {
    // Medium call: patch a pcaddu18i + jirl pair. The +(1<<15) rounds so the
    // jirl's sign-extended low 16 bits compose correctly.
    uint64_t Call36 = (Value + Addend - FinalAddress) >> 2;
    auto Pcaddu18i = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm35_16 =
        extractBits((Call36 + (1UL << 15)), /*Hi=*/35, /*Lo=*/16) << 5;
    Pcaddu18i = (Pcaddu18i & 0xfe00001f) | Imm35_16;
    auto Jirl = support::ulittle32_t::ref(TargetPtr + 4);
    uint32_t Imm15_0 = extractBits(Call36, /*Hi=*/15, /*Lo=*/0) << 10;
    Jirl = (Jirl & 0xfc0003ff) | Imm15_0;
    break;
  }
  case ELF::R_LARCH_GOT_PC_HI20:
  case ELF::R_LARCH_PCALA_HI20: {
    // Page delta for pcalau12i: rounding by (Target & 0x800) compensates for
    // the sign-extension of the paired LO12 addend.
    uint64_t Target = Value + Addend;
    uint64_t TargetPage =
        (Target + (Target & 0x800)) & ~static_cast<uint64_t>(0xfff);
    uint64_t PCPage = FinalAddress & ~static_cast<uint64_t>(0xfff);
    int64_t PageDelta = TargetPage - PCPage;
    auto Instr = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm31_12 = extractBits(PageDelta, /*Hi=*/31, /*Lo=*/12) << 5;
    Instr = (Instr & 0xfe00001f) | Imm31_12;
    break;
  }
  case ELF::R_LARCH_GOT_PC_LO12:
  case ELF::R_LARCH_PCALA_LO12: {
    // Low 12 bits of the target go in the instruction's 12-bit immediate.
    uint64_t TargetOffset = (Value + Addend) & 0xfff;
    auto Instr = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm11_0 = TargetOffset << 10;
    Instr = (Instr & 0xffc003ff) | Imm11_0;
    break;
  }
  case ELF::R_LARCH_ABS_HI20: {
    // Bits 31:12 of the absolute address, for lu12i.w.
    uint64_t Target = Value + Addend;
    auto Instr = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm31_12 = extractBits(Target, /*Hi=*/31, /*Lo=*/12) << 5;
    Instr = (Instr & 0xfe00001f) | Imm31_12;
    break;
  }
  case ELF::R_LARCH_ABS_LO12: {
    // Bits 11:0 of the absolute address, for ori.
    uint64_t Target = Value + Addend;
    auto Instr = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm11_0 = extractBits(Target, /*Hi=*/11, /*Lo=*/0) << 10;
    Instr = (Instr & 0xffc003ff) | Imm11_0;
    break;
  }
  case ELF::R_LARCH_ABS64_LO20: {
    // Bits 51:32 of the absolute address, for lu32i.d.
    uint64_t Target = Value + Addend;
    auto Instr = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm51_32 = extractBits(Target, /*Hi=*/51, /*Lo=*/32) << 5;
    Instr = (Instr & 0xfe00001f) | Imm51_32;
    break;
  }
  case ELF::R_LARCH_ABS64_HI12: {
    // Bits 63:52 of the absolute address, for lu52i.d.
    uint64_t Target = Value + Addend;
    auto Instr = support::ulittle32_t::ref(TargetPtr);
    uint32_t Imm63_52 = extractBits(Target, /*Hi=*/63, /*Lo=*/52) << 10;
    Instr = (Instr & 0xffc003ff) | Imm63_52;
    break;
  }
  // ADD/SUB: in-place arithmetic on the existing word (used for label
  // differences).
  case ELF::R_LARCH_ADD32:
    support::ulittle32_t::ref{TargetPtr} =
        (support::ulittle32_t::ref{TargetPtr} +
         static_cast<uint32_t>(Value + Addend));
    break;
  case ELF::R_LARCH_SUB32:
    support::ulittle32_t::ref{TargetPtr} =
        (support::ulittle32_t::ref{TargetPtr} -
         static_cast<uint32_t>(Value + Addend));
    break;
  case ELF::R_LARCH_ADD64:
    support::ulittle64_t::ref{TargetPtr} =
        (support::ulittle64_t::ref{TargetPtr} + Value + Addend);
    break;
  case ELF::R_LARCH_SUB64:
    support::ulittle64_t::ref{TargetPtr} =
        (support::ulittle64_t::ref{TargetPtr} - Value - Addend);
    break;
  }
}
857
858void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
859 if (Arch == Triple::UnknownArch ||
860 Triple::getArchTypePrefix(Kind: Arch) != "mips") {
861 IsMipsO32ABI = false;
862 IsMipsN32ABI = false;
863 IsMipsN64ABI = false;
864 return;
865 }
866 if (auto *E = dyn_cast<ELFObjectFileBase>(Val: &Obj)) {
867 unsigned AbiVariant = E->getPlatformFlags();
868 IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
869 IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2;
870 }
871 IsMipsN64ABI = Obj.getFileFormatName() == "elf64-mips";
872}
873
874// Return the .TOC. section and offset.
875Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
876 ObjSectionToIDMap &LocalSections,
877 RelocationValueRef &Rel) {
878 // Set a default SectionID in case we do not find a TOC section below.
879 // This may happen for references to TOC base base (sym@toc, .odp
880 // relocation) without a .toc directive. In this case just use the
881 // first section (which is usually the .odp) since the code won't
882 // reference the .toc base directly.
883 Rel.SymbolName = nullptr;
884 Rel.SectionID = 0;
885
886 // The TOC consists of sections .got, .toc, .tocbss, .plt in that
887 // order. The TOC starts where the first of these sections starts.
888 for (auto &Section : Obj.sections()) {
889 Expected<StringRef> NameOrErr = Section.getName();
890 if (!NameOrErr)
891 return NameOrErr.takeError();
892 StringRef SectionName = *NameOrErr;
893
894 if (SectionName == ".got"
895 || SectionName == ".toc"
896 || SectionName == ".tocbss"
897 || SectionName == ".plt") {
898 if (auto SectionIDOrErr =
899 findOrEmitSection(Obj, Section, IsCode: false, LocalSections))
900 Rel.SectionID = *SectionIDOrErr;
901 else
902 return SectionIDOrErr.takeError();
903 break;
904 }
905 }
906
907 // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
908 // thus permitting a full 64 Kbytes segment.
909 Rel.Addend = 0x8000;
910
911 return Error::success();
912}
913
// Returns the section and offset associated with the OPD entry referenced
// by Symbol.
Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
                                          ObjSectionToIDMap &LocalSections,
                                          RelocationValueRef &Rel) {
  // Get the ELF symbol value (st_value) to compare with Relocation offset in
  // .opd entries
  //
  // On entry Rel.Addend holds the symbol's st_value, which on ppc64 ELFv1 is
  // an offset into the .opd (procedure-descriptor) section. Scan the
  // relocations applied to .opd to find the entry at that offset; its
  // R_PPC64_ADDR64 relocation names the section and offset of the actual
  // function entry point, which are written back into Rel.
  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
       si != se; ++si) {

    // Walk relocation sections and look at the section they apply to;
    // skip any that do not relocate .opd.
    Expected<section_iterator> RelSecOrErr = si->getRelocatedSection();
    if (!RelSecOrErr)
      report_fatal_error(reason: Twine(toString(E: RelSecOrErr.takeError())));

    section_iterator RelSecI = *RelSecOrErr;
    if (RelSecI == Obj.section_end())
      continue;

    Expected<StringRef> NameOrErr = RelSecI->getName();
    if (!NameOrErr)
      return NameOrErr.takeError();
    StringRef RelSectionName = *NameOrErr;

    if (RelSectionName != ".opd")
      continue;

    for (elf_relocation_iterator i = si->relocation_begin(),
                                 e = si->relocation_end();
         i != e;) {
      // The R_PPC64_ADDR64 relocation indicates the first field
      // of a .opd entry
      uint64_t TypeFunc = i->getType();
      if (TypeFunc != ELF::R_PPC64_ADDR64) {
        ++i;
        continue;
      }

      uint64_t TargetSymbolOffset = i->getOffset();
      symbol_iterator TargetSymbol = i->getSymbol();
      int64_t Addend;
      if (auto AddendOrErr = i->getAddend())
        Addend = *AddendOrErr;
      else
        return AddendOrErr.takeError();

      // A well-formed .opd entry pairs the ADDR64 with a following
      // R_PPC64_TOC relocation for its TOC-pointer field.
      ++i;
      if (i == e)
        break;

      // Just check if following relocation is a R_PPC64_TOC
      uint64_t TypeTOC = i->getType();
      if (TypeTOC != ELF::R_PPC64_TOC)
        continue;

      // Finally compares the Symbol value and the target symbol offset
      // to check if this .opd entry refers to the symbol the relocation
      // points to.
      if (Rel.Addend != (int64_t)TargetSymbolOffset)
        continue;

      section_iterator TSI = Obj.section_end();
      if (auto TSIOrErr = TargetSymbol->getSection())
        TSI = *TSIOrErr;
      else
        return TSIOrErr.takeError();
      assert(TSI != Obj.section_end() && "TSI should refer to a valid section");

      // Emit (or reuse) the section holding the function code and rewrite
      // Rel to point at it with the entry's addend.
      bool IsCode = TSI->isText();
      if (auto SectionIDOrErr = findOrEmitSection(Obj, Section: *TSI, IsCode,
                                                  LocalSections))
        Rel.SectionID = *SectionIDOrErr;
      else
        return SectionIDOrErr.takeError();
      Rel.Addend = (intptr_t)Addend;
      return Error::success();
    }
  }
  llvm_unreachable("Attempting to get address of ODP entry!");
}
993
994// Relocation masks following the #lo(value), #hi(value), #ha(value),
995// #higher(value), #highera(value), #highest(value), and #highesta(value)
996// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
997// document.
998
/// Extract #lo(value): the least-significant halfword.
static inline uint16_t applyPPClo(uint64_t value) {
  return static_cast<uint16_t>(value);
}
1000
/// Extract #hi(value): bits [16, 31] of the value.
static inline uint16_t applyPPChi(uint64_t value) {
  return static_cast<uint16_t>(value >> 16);
}
1004
/// Extract #ha(value): #hi adjusted for the sign of the low halfword, so
/// that (ha << 16) plus the sign-extended #lo reconstructs the value.
static inline uint16_t applyPPCha(uint64_t value) {
  return static_cast<uint16_t>((value + 0x8000) >> 16);
}
1008
/// Extract #higher(value): bits [32, 47] of the value.
static inline uint16_t applyPPChigher(uint64_t value) {
  return static_cast<uint16_t>(value >> 32);
}
1012
/// Extract #highera(value): bits [32, 47] adjusted for the carry produced
/// by sign-extending the low halfword.
static inline uint16_t applyPPChighera(uint64_t value) {
  return static_cast<uint16_t>((value + 0x8000) >> 32);
}
1016
/// Extract #highest(value): bits [48, 63] of the value.
static inline uint16_t applyPPChighest(uint64_t value) {
  return static_cast<uint16_t>(value >> 48);
}
1020
/// Extract #highesta(value): bits [48, 63] adjusted for the carry produced
/// by sign-extending the low halfword.
static inline uint16_t applyPPChighesta(uint64_t value) {
  return static_cast<uint16_t>((value + 0x8000) >> 48);
}
1024
1025void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
1026 uint64_t Offset, uint64_t Value,
1027 uint32_t Type, int64_t Addend) {
1028 uint8_t *LocalAddress = Section.getAddressWithOffset(OffsetBytes: Offset);
1029 switch (Type) {
1030 default:
1031 report_fatal_error(reason: "Relocation type not implemented yet!");
1032 break;
1033 case ELF::R_PPC_ADDR16_LO:
1034 writeInt16BE(Addr: LocalAddress, Value: applyPPClo(value: Value + Addend));
1035 break;
1036 case ELF::R_PPC_ADDR16_HI:
1037 writeInt16BE(Addr: LocalAddress, Value: applyPPChi(value: Value + Addend));
1038 break;
1039 case ELF::R_PPC_ADDR16_HA:
1040 writeInt16BE(Addr: LocalAddress, Value: applyPPCha(value: Value + Addend));
1041 break;
1042 }
1043}
1044
// Apply a PPC64 relocation of the given Type at Section+Offset. Value is the
// resolved target address and Addend the relocation addend. Halfword fields
// are stored big-endian using the #lo/#hi/#ha/... selectors defined above.
void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.getAddressWithOffset(OffsetBytes: Offset);
  switch (Type) {
  default:
    report_fatal_error(reason: "Relocation type not implemented yet!");
    break;
  case ELF::R_PPC64_ADDR16:
    writeInt16BE(Addr: LocalAddress, Value: applyPPClo(value: Value + Addend));
    break;
  // The _DS (doubleword-aligned) forms keep the field's low two bits clear.
  case ELF::R_PPC64_ADDR16_DS:
    writeInt16BE(Addr: LocalAddress, Value: applyPPClo(value: Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_LO:
    writeInt16BE(Addr: LocalAddress, Value: applyPPClo(value: Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_LO_DS:
    writeInt16BE(Addr: LocalAddress, Value: applyPPClo(value: Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_HI:
  case ELF::R_PPC64_ADDR16_HIGH:
    writeInt16BE(Addr: LocalAddress, Value: applyPPChi(value: Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HA:
  case ELF::R_PPC64_ADDR16_HIGHA:
    writeInt16BE(Addr: LocalAddress, Value: applyPPCha(value: Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHER:
    writeInt16BE(Addr: LocalAddress, Value: applyPPChigher(value: Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHERA:
    writeInt16BE(Addr: LocalAddress, Value: applyPPChighera(value: Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHEST:
    writeInt16BE(Addr: LocalAddress, Value: applyPPChighest(value: Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHESTA:
    writeInt16BE(Addr: LocalAddress, Value: applyPPChighesta(value: Value + Addend));
    break;
  // 14-bit conditional branch displacement, written into bits [2, 15].
  case ELF::R_PPC64_ADDR14: {
    assert(((Value + Addend) & 3) == 0);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t aalk = *(LocalAddress + 3);
    writeInt16BE(Addr: LocalAddress + 2, Value: (aalk & 3) | ((Value + Addend) & 0xfffc));
  } break;
  // The REL16 forms store a halfword selected from the PC-relative delta.
  case ELF::R_PPC64_REL16_LO: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(Addr: LocalAddress, Value: applyPPClo(value: Delta));
  } break;
  case ELF::R_PPC64_REL16_HI: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(Addr: LocalAddress, Value: applyPPChi(value: Delta));
  } break;
  case ELF::R_PPC64_REL16_HA: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(Addr: LocalAddress, Value: applyPPCha(value: Delta));
  } break;
  case ELF::R_PPC64_ADDR32: {
    int64_t Result = static_cast<int64_t>(Value + Addend);
    if (SignExtend64<32>(x: Result) != Result)
      llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
    writeInt32BE(Addr: LocalAddress, Value: Result);
  } break;
  // 24-bit branch displacement; must fit in a signed 26-bit byte offset.
  case ELF::R_PPC64_REL24: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
    if (SignExtend64<26>(x: delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL24 overflow");
    // We preserve bits other than LI field, i.e. PO and AA/LK fields.
    uint32_t Inst = readBytesUnaligned(Src: LocalAddress, Size: 4);
    writeInt32BE(Addr: LocalAddress, Value: (Inst & 0xFC000003) | (delta & 0x03FFFFFC));
  } break;
  case ELF::R_PPC64_REL32: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
    if (SignExtend64<32>(x: delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL32 overflow");
    writeInt32BE(Addr: LocalAddress, Value: delta);
  } break;
  case ELF::R_PPC64_REL64: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt64BE(Addr: LocalAddress, Value: Delta);
  } break;
  case ELF::R_PPC64_ADDR64:
    writeInt64BE(Addr: LocalAddress, Value: Value + Addend);
    break;
  }
}
1138
// Apply a SystemZ (s390x) relocation at Section+Offset. The *DBL types are
// halfword-scaled PC-relative displacements (the stored field is Delta / 2);
// the plain PC types store the byte delta; the absolute types store the
// value directly. All fields are big-endian.
// NOTE(review): the PLT variants are resolved straight to the target with no
// PLT indirection -- confirm this is intended for all JIT clients.
void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.getAddressWithOffset(OffsetBytes: Offset);
  switch (Type) {
  default:
    report_fatal_error(reason: "Relocation type not implemented yet!");
    break;
  case ELF::R_390_PC16DBL:
  case ELF::R_390_PLT16DBL: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
    writeInt16BE(Addr: LocalAddress, Value: Delta / 2);
    break;
  }
  case ELF::R_390_PC32DBL:
  case ELF::R_390_PLT32DBL: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
    writeInt32BE(Addr: LocalAddress, Value: Delta / 2);
    break;
  }
  case ELF::R_390_PC16: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    assert(int16_t(Delta) == Delta && "R_390_PC16 overflow");
    writeInt16BE(Addr: LocalAddress, Value: Delta);
    break;
  }
  case ELF::R_390_PC32: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
    writeInt32BE(Addr: LocalAddress, Value: Delta);
    break;
  }
  case ELF::R_390_PC64: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    writeInt64BE(Addr: LocalAddress, Value: Delta);
    break;
  }
  // Absolute relocations of 8/16/32/64 bits.
  case ELF::R_390_8:
    *LocalAddress = (uint8_t)(Value + Addend);
    break;
  case ELF::R_390_16:
    writeInt16BE(Addr: LocalAddress, Value: Value + Addend);
    break;
  case ELF::R_390_32:
    writeInt32BE(Addr: LocalAddress, Value: Value + Addend);
    break;
  case ELF::R_390_64:
    writeInt64BE(Addr: LocalAddress, Value: Value + Addend);
    break;
  }
}
1192
// Apply a BPF relocation at Section+Offset. The byte order of the written
// field follows the target: big-endian for bpfeb, little-endian otherwise.
void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
                                          uint64_t Offset, uint64_t Value,
                                          uint32_t Type, int64_t Addend) {
  bool isBE = Arch == Triple::bpfeb;

  switch (Type) {
  default:
    report_fatal_error(reason: "Relocation type not implemented yet!");
    break;
  // These types are deliberately no-ops here.
  // NOTE(review): presumably they are fixed up by the BPF consumer or not
  // needed at JIT time -- confirm before adding handling.
  case ELF::R_BPF_NONE:
  case ELF::R_BPF_64_64:
  case ELF::R_BPF_64_32:
  case ELF::R_BPF_64_NODYLD32:
    break;
  case ELF::R_BPF_64_ABS64: {
    // 64-bit absolute address.
    write(isBE, P: Section.getAddressWithOffset(OffsetBytes: Offset), V: Value + Addend);
    LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
                      << format("%p\n", Section.getAddressWithOffset(Offset)));
    break;
  }
  case ELF::R_BPF_64_ABS32: {
    // 32-bit absolute address; the full value must fit in 32 bits.
    Value += Addend;
    assert(Value <= UINT32_MAX);
    write(isBE, P: Section.getAddressWithOffset(OffsetBytes: Offset), V: static_cast<uint32_t>(Value));
    LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
                      << format("%p\n", Section.getAddressWithOffset(Offset)));
    break;
  }
  }
}
1223
1224static void applyUTypeImmRISCV(uint8_t *InstrAddr, uint32_t Imm) {
1225 uint32_t UpperImm = (Imm + 0x800) & 0xfffff000;
1226 auto Instr = support::ulittle32_t::ref(InstrAddr);
1227 Instr = (Instr & 0xfff) | UpperImm;
1228}
1229
1230static void applyITypeImmRISCV(uint8_t *InstrAddr, uint32_t Imm) {
1231 uint32_t LowerImm = Imm & 0xfff;
1232 auto Instr = support::ulittle32_t::ref(InstrAddr);
1233 Instr = (Instr & 0xfffff) | (LowerImm << 20);
1234}
1235
// Apply a RISC-V relocation at Section+Offset. SectionID identifies the
// section being patched; Value is the resolved symbol address and Addend
// the relocation addend.
void RuntimeDyldELF::resolveRISCVRelocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend,
                                            SID SectionID) {
  switch (Type) {
  default: {
    std::string Err = "Unimplemented reloc type: " + std::to_string(val: Type);
    llvm::report_fatal_error(reason: Err.c_str());
  }
  // 32-bit PC-relative function call, macros call, tail (PIC)
  // Write first 20 bits of 32 bit value to the auipc instruction
  // Last 12 bits to the jalr instruction
  case ELF::R_RISCV_CALL:
  case ELF::R_RISCV_CALL_PLT: {
    uint64_t P = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    uint64_t PCOffset = Value + Addend - P;
    applyUTypeImmRISCV(InstrAddr: Section.getAddressWithOffset(OffsetBytes: Offset), Imm: PCOffset);
    applyITypeImmRISCV(InstrAddr: Section.getAddressWithOffset(OffsetBytes: Offset + 4), Imm: PCOffset);
    break;
  }
  // High 20 bits of 32-bit absolute address, %hi(symbol)
  case ELF::R_RISCV_HI20: {
    uint64_t PCOffset = Value + Addend;
    applyUTypeImmRISCV(InstrAddr: Section.getAddressWithOffset(OffsetBytes: Offset), Imm: PCOffset);
    break;
  }
  // Low 12 bits of 32-bit absolute address, %lo(symbol)
  case ELF::R_RISCV_LO12_I: {
    uint64_t PCOffset = Value + Addend;
    applyITypeImmRISCV(InstrAddr: Section.getAddressWithOffset(OffsetBytes: Offset), Imm: PCOffset);
    break;
  }
  // High 20 bits of 32-bit PC-relative reference, %pcrel_hi(symbol)
  // NOTE(review): R_RISCV_GOT_HI20 is resolved exactly like PCREL_HI20,
  // which assumes Value already addresses the GOT entry -- confirm with
  // the GOT-allocation path before relying on this.
  case ELF::R_RISCV_GOT_HI20:
  case ELF::R_RISCV_PCREL_HI20: {
    uint64_t P = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    uint64_t PCOffset = Value + Addend - P;
    applyUTypeImmRISCV(InstrAddr: Section.getAddressWithOffset(OffsetBytes: Offset), Imm: PCOffset);
    break;
  }

  // label:
  //   auipc a0, %pcrel_hi(symbol) // R_RISCV_PCREL_HI20
  //   addi a0, a0, %pcrel_lo(label) // R_RISCV_PCREL_LO12_I
  //
  // The low 12 bits of relative address between pc and symbol.
  // The symbol is related to the high part instruction which is marked by
  // label.
  case ELF::R_RISCV_PCREL_LO12_I: {
    // Value + Addend is the address of the paired HI20's auipc; search the
    // pending relocations for that site to recover the real target address.
    for (auto &&PendingReloc : PendingRelocs) {
      const RelocationValueRef &MatchingValue = PendingReloc.first;
      RelocationEntry &Reloc = PendingReloc.second;
      uint64_t HIRelocPC =
          getSectionLoadAddress(SectionID: Reloc.SectionID) + Reloc.Offset;
      if (Value + Addend == HIRelocPC) {
        uint64_t Symbol = getSectionLoadAddress(SectionID: MatchingValue.SectionID) +
                          MatchingValue.Addend;
        auto PCOffset = Symbol - HIRelocPC;
        applyITypeImmRISCV(InstrAddr: Section.getAddressWithOffset(OffsetBytes: Offset), Imm: PCOffset);
        return;
      }
    }

    llvm::report_fatal_error(
        reason: "R_RISCV_PCREL_LO12_I without matching R_RISCV_PCREL_HI20");
  }
  // 32-bit PC-relative data reference; stored truncated to 32 bits.
  case ELF::R_RISCV_32_PCREL: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(OffsetBytes: Offset);
    int64_t RealOffset = Value + Addend - FinalAddress;
    int32_t TruncOffset = Lo_32(Value: RealOffset);
    support::ulittle32_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset)) =
        TruncOffset;
    break;
  }
  // Absolute word/doubleword.
  case ELF::R_RISCV_32: {
    auto Ref = support::ulittle32_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Value + Addend;
    break;
  }
  case ELF::R_RISCV_64: {
    auto Ref = support::ulittle64_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Value + Addend;
    break;
  }
  // ADD*/SUB* adjust the existing in-place value; SET* overwrite it.
  case ELF::R_RISCV_ADD8: {
    auto Ref = support::ulittle8_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref + Value + Addend;
    break;
  }
  case ELF::R_RISCV_ADD16: {
    auto Ref = support::ulittle16_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref + Value + Addend;
    break;
  }
  case ELF::R_RISCV_ADD32: {
    auto Ref = support::ulittle32_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref + Value + Addend;
    break;
  }
  case ELF::R_RISCV_ADD64: {
    auto Ref = support::ulittle64_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref + Value + Addend;
    break;
  }
  case ELF::R_RISCV_SUB8: {
    auto Ref = support::ulittle8_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref - Value - Addend;
    break;
  }
  case ELF::R_RISCV_SUB16: {
    auto Ref = support::ulittle16_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref - Value - Addend;
    break;
  }
  case ELF::R_RISCV_SUB32: {
    auto Ref = support::ulittle32_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref - Value - Addend;
    break;
  }
  case ELF::R_RISCV_SUB64: {
    auto Ref = support::ulittle64_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Ref - Value - Addend;
    break;
  }
  case ELF::R_RISCV_SET8: {
    auto Ref = support::ulittle8_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Value + Addend;
    break;
  }
  case ELF::R_RISCV_SET16: {
    auto Ref = support::ulittle16_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Value + Addend;
    break;
  }
  case ELF::R_RISCV_SET32: {
    auto Ref = support::ulittle32_t::ref(Section.getAddressWithOffset(OffsetBytes: Offset));
    Ref = Value + Addend;
    break;
  }
  }
}
1377
1378// The target location for the relocation is described by RE.SectionID and
1379// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
1380// SectionEntry has three members describing its location.
1381// SectionEntry::Address is the address at which the section has been loaded
1382// into memory in the current (host) process. SectionEntry::LoadAddress is the
1383// address that the section will have in the target process.
1384// SectionEntry::ObjAddress is the address of the bits for this section in the
1385// original emitted object image (also in the current address space).
1386//
1387// Relocations will be applied as if the section were loaded at
1388// SectionEntry::LoadAddress, but they will be applied at an address based
1389// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
1390// Target memory contents if they are required for value calculations.
1391//
1392// The Value parameter here is the load address of the symbol for the
1393// relocation to be applied. For relocations which refer to symbols in the
1394// current object Value will be the LoadAddress of the section in which
1395// the symbol resides (RE.Addend provides additional information about the
1396// symbol location). For external symbols, Value will be the address of the
1397// symbol in the target address space.
1398void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
1399 uint64_t Value) {
1400 const SectionEntry &Section = Sections[RE.SectionID];
1401 return resolveRelocation(Section, Offset: RE.Offset, Value, Type: RE.RelType, Addend: RE.Addend,
1402 SymOffset: RE.SymOffset, SectionID: RE.SectionID);
1403}
1404
// Dispatch a relocation to the architecture-specific resolver selected by
// Arch. For 32-bit targets (x86, ARM/Thumb) Value and Addend are truncated
// to their low 32 bits. SymOffset is consumed only by x86-64 and SectionID
// only by RISC-V; the other resolvers ignore them.
void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
                                       uint64_t Offset, uint64_t Value,
                                       uint32_t Type, int64_t Addend,
                                       uint64_t SymOffset, SID SectionID) {
  switch (Arch) {
  case Triple::x86_64:
    resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
    break;
  case Triple::x86:
    resolveX86Relocation(Section, Offset, Value: (uint32_t)(Value & 0xffffffffL), Type,
                         Addend: (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::arm: // Fall through.
  case Triple::armeb:
  case Triple::thumb:
  case Triple::thumbeb:
    resolveARMRelocation(Section, Offset, Value: (uint32_t)(Value & 0xffffffffL), Type,
                         Addend: (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::loongarch64:
    resolveLoongArch64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::ppc: // Fall through.
  case Triple::ppcle:
    resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::ppc64: // Fall through.
  case Triple::ppc64le:
    resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::systemz:
    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::bpfel:
  case Triple::bpfeb:
    resolveBPFRelocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::riscv32: // Fall through.
  case Triple::riscv64:
    resolveRISCVRelocation(Section, Offset, Value, Type, Addend, SectionID);
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
}
1454
1455void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID,
1456 uint64_t Offset) const {
1457 return (void *)(Sections[SectionID].getObjAddress() + Offset);
1458}
1459
1460void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) {
1461 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
1462 if (Value.SymbolName)
1463 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
1464 else
1465 addRelocationForSection(RE, SectionID: Value.SectionID);
1466}
1467
1468uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
1469 bool IsLocal) const {
1470 switch (RelType) {
1471 case ELF::R_MICROMIPS_GOT16:
1472 if (IsLocal)
1473 return ELF::R_MICROMIPS_LO16;
1474 break;
1475 case ELF::R_MICROMIPS_HI16:
1476 return ELF::R_MICROMIPS_LO16;
1477 case ELF::R_MIPS_GOT16:
1478 if (IsLocal)
1479 return ELF::R_MIPS_LO16;
1480 break;
1481 case ELF::R_MIPS_HI16:
1482 return ELF::R_MIPS_LO16;
1483 case ELF::R_MIPS_PCHI16:
1484 return ELF::R_MIPS_PCLO16;
1485 default:
1486 break;
1487 }
1488 return ELF::R_MIPS_NONE;
1489}
1490
// Sometimes we don't need to create a thunk for a branch. This typically
// happens when the branch target is located in the same object file. In
// such a case the target is either a weak symbol or a symbol in a
// different executable section. This function checks whether the branch
// target is located in the same object file and whether the distance
// between source and target fits the R_AARCH64_CALL26 relocation. If both
// conditions are met, it emits a direct jump to the target and returns
// true. Otherwise false is returned and a thunk is created.
bool RuntimeDyldELF::resolveAArch64ShortBranch(
    unsigned SectionID, relocation_iterator RelI,
    const RelocationValueRef &Value) {
  uint64_t TargetOffset;
  unsigned TargetSectionID;
  if (Value.SymbolName) {
    auto Loc = GlobalSymbolTable.find(Key: Value.SymbolName);

    // Don't create direct branch for external symbols.
    if (Loc == GlobalSymbolTable.end())
      return false;

    const auto &SymInfo = Loc->second;

    TargetSectionID = SymInfo.getSectionID();
    TargetOffset = SymInfo.getOffset();
  } else {
    // Section-relative target: the offset within the section travels in
    // Value.Addend (see the range check below), so the base offset is zero.
    TargetSectionID = Value.SectionID;
    TargetOffset = 0;
  }

  // We don't actually know the load addresses at this point, so if the
  // branch is cross-section, we don't know exactly how far away it is.
  if (TargetSectionID != SectionID)
    return false;

  uint64_t SourceOffset = RelI->getOffset();

  // R_AARCH64_CALL26 requires immediate to be in range -2^27 <= imm < 2^27
  // If distance between source and target is out of range then we should
  // create thunk.
  if (!isInt<28>(x: TargetOffset + Value.Addend - SourceOffset))
    return false;

  // In range: record a direct relocation at the branch site itself; no
  // stub is emitted.
  RelocationEntry RE(SectionID, SourceOffset, RelI->getType(), Value.Addend);
  if (Value.SymbolName)
    addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
  else
    addRelocationForSection(RE, SectionID: Value.SectionID);

  return true;
}
1542
// Resolve an AArch64 branch relocation, emitting (or reusing) a far-branch
// stub when the target cannot be reached by a direct branch.
void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
                                          const RelocationValueRef &Value,
                                          relocation_iterator RelI,
                                          StubMap &Stubs) {

  LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
  SectionEntry &Section = Sections[SectionID];

  uint64_t Offset = RelI->getOffset();
  unsigned RelType = RelI->getType();
  // Look for an existing stub.
  StubMap::const_iterator i = Stubs.find(x: Value);
  if (i != Stubs.end()) {
    // Reuse the stub already created for this target: just point the
    // branch at it.
    resolveRelocation(Section, Offset,
                      Value: Section.getLoadAddressWithOffset(OffsetBytes: i->second), Type: RelType, Addend: 0);
    LLVM_DEBUG(dbgs() << " Stub function found\n");
  } else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
    // Create a new stub function.
    LLVM_DEBUG(dbgs() << " Create a new stub function\n");
    Stubs[Value] = Section.getStubOffset();
    uint8_t *StubTargetAddr = createStubFunction(
        Addr: Section.getAddressWithOffset(OffsetBytes: Section.getStubOffset()));

    // The stub materializes the full 64-bit target address with a
    // movz/movk x4 sequence: one relocation per 16-bit chunk, at
    // instruction offsets +0, +4, +8 and +12.
    RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.getAddress(),
                              ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
    RelocationEntry REmovk_g2(SectionID,
                              StubTargetAddr - Section.getAddress() + 4,
                              ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
    RelocationEntry REmovk_g1(SectionID,
                              StubTargetAddr - Section.getAddress() + 8,
                              ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
    RelocationEntry REmovk_g0(SectionID,
                              StubTargetAddr - Section.getAddress() + 12,
                              ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);

    if (Value.SymbolName) {
      addRelocationForSymbol(RE: REmovz_g3, SymbolName: Value.SymbolName);
      addRelocationForSymbol(RE: REmovk_g2, SymbolName: Value.SymbolName);
      addRelocationForSymbol(RE: REmovk_g1, SymbolName: Value.SymbolName);
      addRelocationForSymbol(RE: REmovk_g0, SymbolName: Value.SymbolName);
    } else {
      addRelocationForSection(RE: REmovz_g3, SectionID: Value.SectionID);
      addRelocationForSection(RE: REmovk_g2, SectionID: Value.SectionID);
      addRelocationForSection(RE: REmovk_g1, SectionID: Value.SectionID);
      addRelocationForSection(RE: REmovk_g0, SectionID: Value.SectionID);
    }
    // Point the original branch at the freshly emitted stub and reserve
    // the stub slot.
    resolveRelocation(Section, Offset,
                      Value: Section.getLoadAddressWithOffset(OffsetBytes: Section.getStubOffset()),
                      Type: RelType, Addend: 0);
    Section.advanceStubOffset(StubSize: getMaxStubSize());
  }
}
1595
1596Expected<relocation_iterator>
1597RuntimeDyldELF::processRelocationRef(
1598 unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
1599 ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
1600 const auto &Obj = cast<ELFObjectFileBase>(Val: O);
1601 uint64_t RelType = RelI->getType();
1602 int64_t Addend = 0;
1603 if (Expected<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend())
1604 Addend = *AddendOrErr;
1605 else
1606 consumeError(Err: AddendOrErr.takeError());
1607 elf_symbol_iterator Symbol = RelI->getSymbol();
1608
1609 // Obtain the symbol name which is referenced in the relocation
1610 StringRef TargetName;
1611 if (Symbol != Obj.symbol_end()) {
1612 if (auto TargetNameOrErr = Symbol->getName())
1613 TargetName = *TargetNameOrErr;
1614 else
1615 return TargetNameOrErr.takeError();
1616 }
1617 LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
1618 << " TargetName: " << TargetName << "\n");
1619 RelocationValueRef Value;
1620 // First search for the symbol in the local symbol table
1621 SymbolRef::Type SymType = SymbolRef::ST_Unknown;
1622
1623 // Search for the symbol in the global symbol table
1624 RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
1625 if (Symbol != Obj.symbol_end()) {
1626 gsi = GlobalSymbolTable.find(Key: TargetName.data());
1627 Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
1628 if (!SymTypeOrErr) {
1629 std::string Buf;
1630 raw_string_ostream OS(Buf);
1631 logAllUnhandledErrors(E: SymTypeOrErr.takeError(), OS);
1632 report_fatal_error(reason: Twine(Buf));
1633 }
1634 SymType = *SymTypeOrErr;
1635 }
1636 if (gsi != GlobalSymbolTable.end()) {
1637 const auto &SymInfo = gsi->second;
1638 Value.SectionID = SymInfo.getSectionID();
1639 Value.Offset = SymInfo.getOffset();
1640 Value.Addend = SymInfo.getOffset() + Addend;
1641 } else {
1642 switch (SymType) {
1643 case SymbolRef::ST_Debug: {
1644 // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
1645 // and can be changed by another developers. Maybe best way is add
1646 // a new symbol type ST_Section to SymbolRef and use it.
1647 auto SectionOrErr = Symbol->getSection();
1648 if (!SectionOrErr) {
1649 std::string Buf;
1650 raw_string_ostream OS(Buf);
1651 logAllUnhandledErrors(E: SectionOrErr.takeError(), OS);
1652 report_fatal_error(reason: Twine(Buf));
1653 }
1654 section_iterator si = *SectionOrErr;
1655 if (si == Obj.section_end())
1656 llvm_unreachable("Symbol section not found, bad object file format!");
1657 LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
1658 bool isCode = si->isText();
1659 if (auto SectionIDOrErr = findOrEmitSection(Obj, Section: (*si), IsCode: isCode,
1660 LocalSections&: ObjSectionToID))
1661 Value.SectionID = *SectionIDOrErr;
1662 else
1663 return SectionIDOrErr.takeError();
1664 Value.Addend = Addend;
1665 break;
1666 }
1667 case SymbolRef::ST_Data:
1668 case SymbolRef::ST_Function:
1669 case SymbolRef::ST_Other:
1670 case SymbolRef::ST_Unknown: {
1671 Value.SymbolName = TargetName.data();
1672 Value.Addend = Addend;
1673
1674 // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
1675 // will manifest here as a NULL symbol name.
1676 // We can set this as a valid (but empty) symbol name, and rely
1677 // on addRelocationForSymbol to handle this.
1678 if (!Value.SymbolName)
1679 Value.SymbolName = "";
1680 break;
1681 }
1682 default:
1683 llvm_unreachable("Unresolved symbol type!");
1684 break;
1685 }
1686 }
1687
1688 uint64_t Offset = RelI->getOffset();
1689
1690 LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
1691 << "\n");
1692 if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be)) {
1693 if ((RelType == ELF::R_AARCH64_CALL26 ||
1694 RelType == ELF::R_AARCH64_JUMP26) &&
1695 MemMgr.allowStubAllocation()) {
1696 resolveAArch64Branch(SectionID, Value, RelI, Stubs);
1697 } else if (RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
1698 // Create new GOT entry or find existing one. If GOT entry is
1699 // to be created, then we also emit ABS64 relocation for it.
1700 uint64_t GOTOffset = findOrAllocGOTEntry(Value, GOTRelType: ELF::R_AARCH64_ABS64);
1701 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset: GOTOffset + Addend,
1702 Type: ELF::R_AARCH64_ADR_PREL_PG_HI21);
1703
1704 } else if (RelType == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
1705 uint64_t GOTOffset = findOrAllocGOTEntry(Value, GOTRelType: ELF::R_AARCH64_ABS64);
1706 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset: GOTOffset + Addend,
1707 Type: ELF::R_AARCH64_LDST64_ABS_LO12_NC);
1708 } else {
1709 processSimpleRelocation(SectionID, Offset, RelType, Value);
1710 }
1711 } else if (Arch == Triple::arm) {
1712 if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
1713 RelType == ELF::R_ARM_JUMP24) {
1714 // This is an ARM branch relocation, need to use a stub function.
1715 LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
1716 SectionEntry &Section = Sections[SectionID];
1717
1718 // Look for an existing stub.
1719 auto [It, Inserted] = Stubs.try_emplace(k: Value);
1720 if (!Inserted) {
1721 resolveRelocation(Section, Offset,
1722 Value: Section.getLoadAddressWithOffset(OffsetBytes: It->second), Type: RelType,
1723 Addend: 0);
1724 LLVM_DEBUG(dbgs() << " Stub function found\n");
1725 } else {
1726 // Create a new stub function.
1727 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1728 It->second = Section.getStubOffset();
1729 uint8_t *StubTargetAddr = createStubFunction(
1730 Addr: Section.getAddressWithOffset(OffsetBytes: Section.getStubOffset()));
1731 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
1732 ELF::R_ARM_ABS32, Value.Addend);
1733 if (Value.SymbolName)
1734 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
1735 else
1736 addRelocationForSection(RE, SectionID: Value.SectionID);
1737
1738 resolveRelocation(
1739 Section, Offset,
1740 Value: Section.getLoadAddressWithOffset(OffsetBytes: Section.getStubOffset()), Type: RelType,
1741 Addend: 0);
1742 Section.advanceStubOffset(StubSize: getMaxStubSize());
1743 }
1744 } else {
1745 uint32_t *Placeholder =
1746 reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
1747 if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
1748 RelType == ELF::R_ARM_ABS32) {
1749 Value.Addend += *Placeholder;
1750 } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
1751 // See ELF for ARM documentation
1752 Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
1753 }
1754 processSimpleRelocation(SectionID, Offset, RelType, Value);
1755 }
1756 } else if (Arch == Triple::loongarch64) {
1757 if ((RelType == ELF::R_LARCH_B26 || RelType == ELF::R_LARCH_CALL36) &&
1758 MemMgr.allowStubAllocation()) {
1759 resolveLoongArch64Branch(SectionID, Value, RelI, Stubs);
1760 } else if (RelType == ELF::R_LARCH_GOT_PC_HI20 ||
1761 RelType == ELF::R_LARCH_GOT_PC_LO12) {
1762 uint64_t GOTOffset = findOrAllocGOTEntry(Value, GOTRelType: ELF::R_LARCH_64);
1763 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset: GOTOffset + Addend,
1764 Type: RelType);
1765 } else {
1766 processSimpleRelocation(SectionID, Offset, RelType, Value);
1767 }
1768 } else if (IsMipsO32ABI) {
1769 uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
1770 computePlaceholderAddress(SectionID, Offset));
1771 uint32_t Opcode = readBytesUnaligned(Src: Placeholder, Size: 4);
1772 if (RelType == ELF::R_MIPS_26) {
1773 // This is an Mips branch relocation, need to use a stub function.
1774 LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
1775 SectionEntry &Section = Sections[SectionID];
1776
1777 // Extract the addend from the instruction.
1778 // We shift up by two since the Value will be down shifted again
1779 // when applying the relocation.
1780 uint32_t Addend = (Opcode & 0x03ffffff) << 2;
1781
1782 Value.Addend += Addend;
1783
1784 // Look up for existing stub.
1785 auto [It, Inserted] = Stubs.try_emplace(k: Value);
1786 if (!Inserted) {
1787 RelocationEntry RE(SectionID, Offset, RelType, It->second);
1788 addRelocationForSection(RE, SectionID);
1789 LLVM_DEBUG(dbgs() << " Stub function found\n");
1790 } else {
1791 // Create a new stub function.
1792 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1793 It->second = Section.getStubOffset();
1794
1795 unsigned AbiVariant = Obj.getPlatformFlags();
1796
1797 uint8_t *StubTargetAddr = createStubFunction(
1798 Addr: Section.getAddressWithOffset(OffsetBytes: Section.getStubOffset()), AbiVariant);
1799
1800 // Creating Hi and Lo relocations for the filled stub instructions.
1801 RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
1802 ELF::R_MIPS_HI16, Value.Addend);
1803 RelocationEntry RELo(SectionID,
1804 StubTargetAddr - Section.getAddress() + 4,
1805 ELF::R_MIPS_LO16, Value.Addend);
1806
1807 if (Value.SymbolName) {
1808 addRelocationForSymbol(RE: REHi, SymbolName: Value.SymbolName);
1809 addRelocationForSymbol(RE: RELo, SymbolName: Value.SymbolName);
1810 } else {
1811 addRelocationForSection(RE: REHi, SectionID: Value.SectionID);
1812 addRelocationForSection(RE: RELo, SectionID: Value.SectionID);
1813 }
1814
1815 RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
1816 addRelocationForSection(RE, SectionID);
1817 Section.advanceStubOffset(StubSize: getMaxStubSize());
1818 }
1819 } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
1820 int64_t Addend = (Opcode & 0x0000ffff) << 16;
1821 RelocationEntry RE(SectionID, Offset, RelType, Addend);
1822 PendingRelocs.push_back(Elt: std::make_pair(x&: Value, y&: RE));
1823 } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
1824 int64_t Addend = Value.Addend + SignExtend32<16>(X: Opcode & 0x0000ffff);
1825 for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
1826 const RelocationValueRef &MatchingValue = I->first;
1827 RelocationEntry &Reloc = I->second;
1828 if (MatchingValue == Value &&
1829 RelType == getMatchingLoRelocation(RelType: Reloc.RelType) &&
1830 SectionID == Reloc.SectionID) {
1831 Reloc.Addend += Addend;
1832 if (Value.SymbolName)
1833 addRelocationForSymbol(RE: Reloc, SymbolName: Value.SymbolName);
1834 else
1835 addRelocationForSection(RE: Reloc, SectionID: Value.SectionID);
1836 I = PendingRelocs.erase(CI: I);
1837 } else
1838 ++I;
1839 }
1840 RelocationEntry RE(SectionID, Offset, RelType, Addend);
1841 if (Value.SymbolName)
1842 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
1843 else
1844 addRelocationForSection(RE, SectionID: Value.SectionID);
1845 } else {
1846 if (RelType == ELF::R_MIPS_32)
1847 Value.Addend += Opcode;
1848 else if (RelType == ELF::R_MIPS_PC16)
1849 Value.Addend += SignExtend32<18>(X: (Opcode & 0x0000ffff) << 2);
1850 else if (RelType == ELF::R_MIPS_PC19_S2)
1851 Value.Addend += SignExtend32<21>(X: (Opcode & 0x0007ffff) << 2);
1852 else if (RelType == ELF::R_MIPS_PC21_S2)
1853 Value.Addend += SignExtend32<23>(X: (Opcode & 0x001fffff) << 2);
1854 else if (RelType == ELF::R_MIPS_PC26_S2)
1855 Value.Addend += SignExtend32<28>(X: (Opcode & 0x03ffffff) << 2);
1856 processSimpleRelocation(SectionID, Offset, RelType, Value);
1857 }
1858 } else if (IsMipsN32ABI || IsMipsN64ABI) {
1859 uint32_t r_type = RelType & 0xff;
1860 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1861 if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
1862 || r_type == ELF::R_MIPS_GOT_DISP) {
1863 auto [I, Inserted] = GOTSymbolOffsets.try_emplace(Key: TargetName);
1864 if (Inserted)
1865 I->second = allocateGOTEntries(no: 1);
1866 RE.SymOffset = I->second;
1867 if (Value.SymbolName)
1868 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
1869 else
1870 addRelocationForSection(RE, SectionID: Value.SectionID);
1871 } else if (RelType == ELF::R_MIPS_26) {
1872 // This is an Mips branch relocation, need to use a stub function.
1873 LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
1874 SectionEntry &Section = Sections[SectionID];
1875
1876 // Look up for existing stub.
1877 StubMap::const_iterator i = Stubs.find(x: Value);
1878 if (i != Stubs.end()) {
1879 RelocationEntry RE(SectionID, Offset, RelType, i->second);
1880 addRelocationForSection(RE, SectionID);
1881 LLVM_DEBUG(dbgs() << " Stub function found\n");
1882 } else {
1883 // Create a new stub function.
1884 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1885 Stubs[Value] = Section.getStubOffset();
1886
1887 unsigned AbiVariant = Obj.getPlatformFlags();
1888
1889 uint8_t *StubTargetAddr = createStubFunction(
1890 Addr: Section.getAddressWithOffset(OffsetBytes: Section.getStubOffset()), AbiVariant);
1891
1892 if (IsMipsN32ABI) {
1893 // Creating Hi and Lo relocations for the filled stub instructions.
1894 RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
1895 ELF::R_MIPS_HI16, Value.Addend);
1896 RelocationEntry RELo(SectionID,
1897 StubTargetAddr - Section.getAddress() + 4,
1898 ELF::R_MIPS_LO16, Value.Addend);
1899 if (Value.SymbolName) {
1900 addRelocationForSymbol(RE: REHi, SymbolName: Value.SymbolName);
1901 addRelocationForSymbol(RE: RELo, SymbolName: Value.SymbolName);
1902 } else {
1903 addRelocationForSection(RE: REHi, SectionID: Value.SectionID);
1904 addRelocationForSection(RE: RELo, SectionID: Value.SectionID);
1905 }
1906 } else {
1907 // Creating Highest, Higher, Hi and Lo relocations for the filled stub
1908 // instructions.
1909 RelocationEntry REHighest(SectionID,
1910 StubTargetAddr - Section.getAddress(),
1911 ELF::R_MIPS_HIGHEST, Value.Addend);
1912 RelocationEntry REHigher(SectionID,
1913 StubTargetAddr - Section.getAddress() + 4,
1914 ELF::R_MIPS_HIGHER, Value.Addend);
1915 RelocationEntry REHi(SectionID,
1916 StubTargetAddr - Section.getAddress() + 12,
1917 ELF::R_MIPS_HI16, Value.Addend);
1918 RelocationEntry RELo(SectionID,
1919 StubTargetAddr - Section.getAddress() + 20,
1920 ELF::R_MIPS_LO16, Value.Addend);
1921 if (Value.SymbolName) {
1922 addRelocationForSymbol(RE: REHighest, SymbolName: Value.SymbolName);
1923 addRelocationForSymbol(RE: REHigher, SymbolName: Value.SymbolName);
1924 addRelocationForSymbol(RE: REHi, SymbolName: Value.SymbolName);
1925 addRelocationForSymbol(RE: RELo, SymbolName: Value.SymbolName);
1926 } else {
1927 addRelocationForSection(RE: REHighest, SectionID: Value.SectionID);
1928 addRelocationForSection(RE: REHigher, SectionID: Value.SectionID);
1929 addRelocationForSection(RE: REHi, SectionID: Value.SectionID);
1930 addRelocationForSection(RE: RELo, SectionID: Value.SectionID);
1931 }
1932 }
1933 RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
1934 addRelocationForSection(RE, SectionID);
1935 Section.advanceStubOffset(StubSize: getMaxStubSize());
1936 }
1937 } else {
1938 processSimpleRelocation(SectionID, Offset, RelType, Value);
1939 }
1940
1941 } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
1942 if (RelType == ELF::R_PPC64_REL24) {
1943 // Determine ABI variant in use for this object.
1944 unsigned AbiVariant = Obj.getPlatformFlags();
1945 AbiVariant &= ELF::EF_PPC64_ABI;
1946 // A PPC branch relocation will need a stub function if the target is
1947 // an external symbol (either Value.SymbolName is set, or SymType is
1948 // Symbol::ST_Unknown) or if the target address is not within the
1949 // signed 24-bits branch address.
1950 SectionEntry &Section = Sections[SectionID];
1951 uint8_t *Target = Section.getAddressWithOffset(OffsetBytes: Offset);
1952 bool RangeOverflow = false;
1953 bool IsExtern = Value.SymbolName || SymType == SymbolRef::ST_Unknown;
1954 if (!IsExtern) {
1955 if (AbiVariant != 2) {
1956 // In the ELFv1 ABI, a function call may point to the .opd entry,
1957 // so the final symbol value is calculated based on the relocation
1958 // values in the .opd section.
1959 if (auto Err = findOPDEntrySection(Obj, LocalSections&: ObjSectionToID, Rel&: Value))
1960 return std::move(Err);
1961 } else {
1962 // In the ELFv2 ABI, a function symbol may provide a local entry
1963 // point, which must be used for direct calls.
1964 if (Value.SectionID == SectionID){
1965 uint8_t SymOther = Symbol->getOther();
1966 Value.Addend += ELF::decodePPC64LocalEntryOffset(Other: SymOther);
1967 }
1968 }
1969 uint8_t *RelocTarget =
1970 Sections[Value.SectionID].getAddressWithOffset(OffsetBytes: Value.Addend);
1971 int64_t delta = static_cast<int64_t>(Target - RelocTarget);
1972 // If it is within 26-bits branch range, just set the branch target
1973 if (SignExtend64<26>(x: delta) != delta) {
1974 RangeOverflow = true;
1975 } else if ((AbiVariant != 2) ||
1976 (AbiVariant == 2 && Value.SectionID == SectionID)) {
1977 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1978 addRelocationForSection(RE, SectionID: Value.SectionID);
1979 }
1980 }
1981 if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID) ||
1982 RangeOverflow) {
1983 // It is an external symbol (either Value.SymbolName is set, or
1984 // SymType is SymbolRef::ST_Unknown) or out of range.
1985 auto [It, Inserted] = Stubs.try_emplace(k: Value);
1986 if (!Inserted) {
1987 // Symbol function stub already created, just relocate to it
1988 resolveRelocation(Section, Offset,
1989 Value: Section.getLoadAddressWithOffset(OffsetBytes: It->second),
1990 Type: RelType, Addend: 0);
1991 LLVM_DEBUG(dbgs() << " Stub function found\n");
1992 } else {
1993 // Create a new stub function.
1994 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1995 It->second = Section.getStubOffset();
1996 uint8_t *StubTargetAddr = createStubFunction(
1997 Addr: Section.getAddressWithOffset(OffsetBytes: Section.getStubOffset()),
1998 AbiVariant);
1999 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
2000 ELF::R_PPC64_ADDR64, Value.Addend);
2001
2002 // Generates the 64-bits address loads as exemplified in section
2003 // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to
2004 // apply to the low part of the instructions, so we have to update
2005 // the offset according to the target endianness.
2006 uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
2007 if (!IsTargetLittleEndian)
2008 StubRelocOffset += 2;
2009
2010 RelocationEntry REhst(SectionID, StubRelocOffset + 0,
2011 ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
2012 RelocationEntry REhr(SectionID, StubRelocOffset + 4,
2013 ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
2014 RelocationEntry REh(SectionID, StubRelocOffset + 12,
2015 ELF::R_PPC64_ADDR16_HI, Value.Addend);
2016 RelocationEntry REl(SectionID, StubRelocOffset + 16,
2017 ELF::R_PPC64_ADDR16_LO, Value.Addend);
2018
2019 if (Value.SymbolName) {
2020 addRelocationForSymbol(RE: REhst, SymbolName: Value.SymbolName);
2021 addRelocationForSymbol(RE: REhr, SymbolName: Value.SymbolName);
2022 addRelocationForSymbol(RE: REh, SymbolName: Value.SymbolName);
2023 addRelocationForSymbol(RE: REl, SymbolName: Value.SymbolName);
2024 } else {
2025 addRelocationForSection(RE: REhst, SectionID: Value.SectionID);
2026 addRelocationForSection(RE: REhr, SectionID: Value.SectionID);
2027 addRelocationForSection(RE: REh, SectionID: Value.SectionID);
2028 addRelocationForSection(RE: REl, SectionID: Value.SectionID);
2029 }
2030
2031 resolveRelocation(
2032 Section, Offset,
2033 Value: Section.getLoadAddressWithOffset(OffsetBytes: Section.getStubOffset()),
2034 Type: RelType, Addend: 0);
2035 Section.advanceStubOffset(StubSize: getMaxStubSize());
2036 }
2037 if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID)) {
2038 // Restore the TOC for external calls
2039 if (AbiVariant == 2)
2040 writeInt32BE(Addr: Target + 4, Value: 0xE8410018); // ld r2,24(r1)
2041 else
2042 writeInt32BE(Addr: Target + 4, Value: 0xE8410028); // ld r2,40(r1)
2043 }
2044 }
2045 } else if (RelType == ELF::R_PPC64_TOC16 ||
2046 RelType == ELF::R_PPC64_TOC16_DS ||
2047 RelType == ELF::R_PPC64_TOC16_LO ||
2048 RelType == ELF::R_PPC64_TOC16_LO_DS ||
2049 RelType == ELF::R_PPC64_TOC16_HI ||
2050 RelType == ELF::R_PPC64_TOC16_HA) {
2051 // These relocations are supposed to subtract the TOC address from
2052 // the final value. This does not fit cleanly into the RuntimeDyld
2053 // scheme, since there may be *two* sections involved in determining
2054 // the relocation value (the section of the symbol referred to by the
2055 // relocation, and the TOC section associated with the current module).
2056 //
2057 // Fortunately, these relocations are currently only ever generated
2058 // referring to symbols that themselves reside in the TOC, which means
2059 // that the two sections are actually the same. Thus they cancel out
2060 // and we can immediately resolve the relocation right now.
2061 switch (RelType) {
2062 case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
2063 case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
2064 case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
2065 case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
2066 case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
2067 case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
2068 default: llvm_unreachable("Wrong relocation type.");
2069 }
2070
2071 RelocationValueRef TOCValue;
2072 if (auto Err = findPPC64TOCSection(Obj, LocalSections&: ObjSectionToID, Rel&: TOCValue))
2073 return std::move(Err);
2074 if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
2075 llvm_unreachable("Unsupported TOC relocation.");
2076 Value.Addend -= TOCValue.Addend;
2077 resolveRelocation(Section: Sections[SectionID], Offset, Value: Value.Addend, Type: RelType, Addend: 0);
2078 } else {
2079 // There are two ways to refer to the TOC address directly: either
2080 // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
2081 // ignored), or via any relocation that refers to the magic ".TOC."
2082 // symbols (in which case the addend is respected).
2083 if (RelType == ELF::R_PPC64_TOC) {
2084 RelType = ELF::R_PPC64_ADDR64;
2085 if (auto Err = findPPC64TOCSection(Obj, LocalSections&: ObjSectionToID, Rel&: Value))
2086 return std::move(Err);
2087 } else if (TargetName == ".TOC.") {
2088 if (auto Err = findPPC64TOCSection(Obj, LocalSections&: ObjSectionToID, Rel&: Value))
2089 return std::move(Err);
2090 Value.Addend += Addend;
2091 }
2092
2093 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
2094
2095 if (Value.SymbolName)
2096 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
2097 else
2098 addRelocationForSection(RE, SectionID: Value.SectionID);
2099 }
2100 } else if (Arch == Triple::systemz &&
2101 (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
2102 // Create function stubs for both PLT and GOT references, regardless of
2103 // whether the GOT reference is to data or code. The stub contains the
2104 // full address of the symbol, as needed by GOT references, and the
2105 // executable part only adds an overhead of 8 bytes.
2106 //
2107 // We could try to conserve space by allocating the code and data
2108 // parts of the stub separately. However, as things stand, we allocate
2109 // a stub for every relocation, so using a GOT in JIT code should be
2110 // no less space efficient than using an explicit constant pool.
2111 LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
2112 SectionEntry &Section = Sections[SectionID];
2113
2114 // Look for an existing stub.
2115 StubMap::const_iterator i = Stubs.find(x: Value);
2116 uintptr_t StubAddress;
2117 if (i != Stubs.end()) {
2118 StubAddress = uintptr_t(Section.getAddressWithOffset(OffsetBytes: i->second));
2119 LLVM_DEBUG(dbgs() << " Stub function found\n");
2120 } else {
2121 // Create a new stub function.
2122 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
2123
2124 uintptr_t BaseAddress = uintptr_t(Section.getAddress());
2125 StubAddress =
2126 alignTo(Size: BaseAddress + Section.getStubOffset(), A: getStubAlignment());
2127 unsigned StubOffset = StubAddress - BaseAddress;
2128
2129 Stubs[Value] = StubOffset;
2130 createStubFunction(Addr: (uint8_t *)StubAddress);
2131 RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
2132 Value.Offset);
2133 if (Value.SymbolName)
2134 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
2135 else
2136 addRelocationForSection(RE, SectionID: Value.SectionID);
2137 Section.advanceStubOffset(StubSize: getMaxStubSize());
2138 }
2139
2140 if (RelType == ELF::R_390_GOTENT)
2141 resolveRelocation(Section, Offset, Value: StubAddress + 8, Type: ELF::R_390_PC32DBL,
2142 Addend);
2143 else
2144 resolveRelocation(Section, Offset, Value: StubAddress, Type: RelType, Addend);
2145 } else if (Arch == Triple::x86_64) {
2146 if (RelType == ELF::R_X86_64_PLT32) {
      // The way the PLT relocations normally work is that the linker
      // allocates the PLT and this relocation makes a PC-relative call into
      // the PLT. The PLT entry will then jump to an address provided by the
      // GOT. On first call, the GOT address will point back into PLT code
      // that resolves the symbol. After the first call, the GOT entry points
      // to the actual function.
2154 //
2155 // For local functions we're ignoring all of that here and just replacing
2156 // the PLT32 relocation type with PC32, which will translate the relocation
2157 // into a PC-relative call directly to the function. For external symbols we
2158 // can't be sure the function will be within 2^32 bytes of the call site, so
2159 // we need to create a stub, which calls into the GOT. This case is
2160 // equivalent to the usual PLT implementation except that we use the stub
2161 // mechanism in RuntimeDyld (which puts stubs at the end of the section)
2162 // rather than allocating a PLT section.
2163 if (Value.SymbolName && MemMgr.allowStubAllocation()) {
2164 // This is a call to an external function.
2165 // Look for an existing stub.
2166 SectionEntry *Section = &Sections[SectionID];
2167 auto [It, Inserted] = Stubs.try_emplace(k: Value);
2168 uintptr_t StubAddress;
2169 if (!Inserted) {
2170 StubAddress = uintptr_t(Section->getAddress()) + It->second;
2171 LLVM_DEBUG(dbgs() << " Stub function found\n");
2172 } else {
2173 // Create a new stub function (equivalent to a PLT entry).
2174 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
2175
2176 uintptr_t BaseAddress = uintptr_t(Section->getAddress());
2177 StubAddress = alignTo(Size: BaseAddress + Section->getStubOffset(),
2178 A: getStubAlignment());
2179 unsigned StubOffset = StubAddress - BaseAddress;
2180 It->second = StubOffset;
2181 createStubFunction(Addr: (uint8_t *)StubAddress);
2182
2183 // Bump our stub offset counter
2184 Section->advanceStubOffset(StubSize: getMaxStubSize());
2185
2186 // Allocate a GOT Entry
2187 uint64_t GOTOffset = allocateGOTEntries(no: 1);
2188 // This potentially creates a new Section which potentially
2189 // invalidates the Section pointer, so reload it.
2190 Section = &Sections[SectionID];
2191
2192 // The load of the GOT address has an addend of -4
2193 resolveGOTOffsetRelocation(SectionID, Offset: StubOffset + 2, GOTOffset: GOTOffset - 4,
2194 Type: ELF::R_X86_64_PC32);
2195
2196 // Fill in the value of the symbol we're targeting into the GOT
2197 addRelocationForSymbol(
2198 RE: computeGOTOffsetRE(GOTOffset, SymbolOffset: 0, Type: ELF::R_X86_64_64),
2199 SymbolName: Value.SymbolName);
2200 }
2201
2202 // Make the target call a call into the stub table.
2203 resolveRelocation(Section: *Section, Offset, Value: StubAddress, Type: ELF::R_X86_64_PC32,
2204 Addend);
2205 } else {
2206 Value.Addend += support::ulittle32_t::ref(
2207 computePlaceholderAddress(SectionID, Offset));
2208 processSimpleRelocation(SectionID, Offset, RelType: ELF::R_X86_64_PC32, Value);
2209 }
2210 } else if (RelType == ELF::R_X86_64_GOTPCREL ||
2211 RelType == ELF::R_X86_64_GOTPCRELX ||
2212 RelType == ELF::R_X86_64_REX_GOTPCRELX) {
2213 uint64_t GOTOffset = allocateGOTEntries(no: 1);
2214 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset: GOTOffset + Addend,
2215 Type: ELF::R_X86_64_PC32);
2216
2217 // Fill in the value of the symbol we're targeting into the GOT
2218 RelocationEntry RE =
2219 computeGOTOffsetRE(GOTOffset, SymbolOffset: Value.Offset, Type: ELF::R_X86_64_64);
2220 if (Value.SymbolName)
2221 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
2222 else
2223 addRelocationForSection(RE, SectionID: Value.SectionID);
2224 } else if (RelType == ELF::R_X86_64_GOT64) {
2225 // Fill in a 64-bit GOT offset.
2226 uint64_t GOTOffset = allocateGOTEntries(no: 1);
2227 resolveRelocation(Section: Sections[SectionID], Offset, Value: GOTOffset,
2228 Type: ELF::R_X86_64_64, Addend: 0);
2229
2230 // Fill in the value of the symbol we're targeting into the GOT
2231 RelocationEntry RE =
2232 computeGOTOffsetRE(GOTOffset, SymbolOffset: Value.Offset, Type: ELF::R_X86_64_64);
2233 if (Value.SymbolName)
2234 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
2235 else
2236 addRelocationForSection(RE, SectionID: Value.SectionID);
2237 } else if (RelType == ELF::R_X86_64_GOTPC32) {
2238 // Materialize the address of the base of the GOT relative to the PC.
2239 // This doesn't create a GOT entry, but it does mean we need a GOT
2240 // section.
2241 (void)allocateGOTEntries(no: 0);
2242 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset: Addend, Type: ELF::R_X86_64_PC32);
2243 } else if (RelType == ELF::R_X86_64_GOTPC64) {
2244 (void)allocateGOTEntries(no: 0);
2245 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset: Addend, Type: ELF::R_X86_64_PC64);
2246 } else if (RelType == ELF::R_X86_64_GOTOFF64) {
2247 // GOTOFF relocations ultimately require a section difference relocation.
2248 (void)allocateGOTEntries(no: 0);
2249 processSimpleRelocation(SectionID, Offset, RelType, Value);
2250 } else if (RelType == ELF::R_X86_64_PC32) {
2251 Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
2252 processSimpleRelocation(SectionID, Offset, RelType, Value);
2253 } else if (RelType == ELF::R_X86_64_PC64) {
2254 Value.Addend += support::ulittle64_t::ref(
2255 computePlaceholderAddress(SectionID, Offset));
2256 processSimpleRelocation(SectionID, Offset, RelType, Value);
2257 } else if (RelType == ELF::R_X86_64_GOTTPOFF) {
2258 processX86_64GOTTPOFFRelocation(SectionID, Offset, Value, Addend);
2259 } else if (RelType == ELF::R_X86_64_TLSGD ||
2260 RelType == ELF::R_X86_64_TLSLD) {
2261 // The next relocation must be the relocation for __tls_get_addr.
2262 ++RelI;
2263 auto &GetAddrRelocation = *RelI;
2264 processX86_64TLSRelocation(SectionID, Offset, RelType, Value, Addend,
2265 GetAddrRelocation);
2266 } else {
2267 processSimpleRelocation(SectionID, Offset, RelType, Value);
2268 }
2269 } else if (Arch == Triple::riscv32 || Arch == Triple::riscv64) {
    // *_LO12 relocations receive information about the symbol from the
    // corresponding *_HI20 relocation, so we have to collect this information
    // before resolving.
2273 if (RelType == ELF::R_RISCV_GOT_HI20 ||
2274 RelType == ELF::R_RISCV_PCREL_HI20 ||
2275 RelType == ELF::R_RISCV_TPREL_HI20 ||
2276 RelType == ELF::R_RISCV_TLS_GD_HI20 ||
2277 RelType == ELF::R_RISCV_TLS_GOT_HI20) {
2278 RelocationEntry RE(SectionID, Offset, RelType, Addend);
2279 PendingRelocs.push_back(Elt: {Value, RE});
2280 }
2281 processSimpleRelocation(SectionID, Offset, RelType, Value);
2282 } else {
2283 if (Arch == Triple::x86) {
2284 Value.Addend += support::ulittle32_t::ref(
2285 computePlaceholderAddress(SectionID, Offset));
2286 }
2287 processSimpleRelocation(SectionID, Offset, RelType, Value);
2288 }
2289 return ++RelI;
2290}
2291
/// Attempt to relax an x86-64 Initial Exec TLS access (R_X86_64_GOTTPOFF)
/// into a Local Exec access (R_X86_64_TPOFF32) by rewriting the instruction
/// bytes in place, following the "x86-64 Linker Optimizations" section of the
/// ELF TLS specification. If the bytes around the relocation do not match one
/// of the two known Initial Exec sequences, fall back to allocating a real
/// GOT entry that will hold the TPOFF value.
///
/// \param SectionID Section that contains the relocation site.
/// \param Offset    Offset of the relocation within that section.
/// \param Value     Relocation target (symbol name or section + addend).
/// \param Addend    Addend of the original PC-relative GOTTPOFF relocation.
void RuntimeDyldELF::processX86_64GOTTPOFFRelocation(unsigned SectionID,
                                                     uint64_t Offset,
                                                     RelocationValueRef Value,
                                                     int64_t Addend) {
  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
  // to replace the GOTTPOFF relocation with a TPOFF relocation. The spec
  // only mentions one optimization even though there are two different
  // code sequences for the Initial Exec TLS Model. We match the code to
  // find out which one was used.

  // A possible TLS code sequence and its replacement.
  struct CodeSequence {
    // The expected code sequence
    ArrayRef<uint8_t> ExpectedCodeSequence;
    // The negative offset of the GOTTPOFF relocation to the beginning of
    // the sequence
    uint64_t TLSSequenceOffset;
    // The new code sequence
    ArrayRef<uint8_t> NewCodeSequence;
    // The offset of the new TPOFF relocation
    uint64_t TpoffRelocationOffset;
  };

  std::array<CodeSequence, 2> CodeSequences;

  // Initial Exec Code Model Sequence
  {
    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
        0x00, // mov %fs:0, %rax
        0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // add x@gotpoff(%rip),
                                                 // %rax
    };
    CodeSequences[0].ExpectedCodeSequence =
        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
    CodeSequences[0].TLSSequenceOffset = 12;

    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0, %rax
        0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax), %rax
    };
    CodeSequences[0].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
    CodeSequences[0].TpoffRelocationOffset = 12;
  }

  // Initial Exec Code Model Sequence, II
  {
    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
        0x48, 0x8b, 0x05, 0x00, 0x00, 0x00, 0x00, // mov x@gotpoff(%rip), %rax
        0x64, 0x48, 0x8b, 0x00, 0x00, 0x00, 0x00 // mov %fs:(%rax), %rax
    };
    CodeSequences[1].ExpectedCodeSequence =
        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
    CodeSequences[1].TLSSequenceOffset = 3;

    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
        0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00, // 6 byte nop
        0x64, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:x@tpoff, %rax
    };
    CodeSequences[1].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
    CodeSequences[1].TpoffRelocationOffset = 10;
  }

  // Try each known sequence in turn: locate the candidate start relative to
  // the relocation offset, compare the actual instruction bytes, and patch
  // the first one that matches.
  bool Resolved = false;
  auto &Section = Sections[SectionID];
  for (const auto &C : CodeSequences) {
    assert(C.ExpectedCodeSequence.size() == C.NewCodeSequence.size() &&
           "Old and new code sequences must have the same size");

    if (Offset < C.TLSSequenceOffset ||
        (Offset - C.TLSSequenceOffset + C.NewCodeSequence.size()) >
            Section.getSize()) {
      // This can't be a matching sequence as it doesn't fit in the current
      // section
      continue;
    }

    // Compare the instruction bytes in the section against the expected
    // sequence; a mismatch means this candidate wasn't the sequence used.
    auto TLSSequenceStartOffset = Offset - C.TLSSequenceOffset;
    auto *TLSSequence = Section.getAddressWithOffset(OffsetBytes: TLSSequenceStartOffset);
    if (ArrayRef<uint8_t>(TLSSequence, C.ExpectedCodeSequence.size()) !=
        C.ExpectedCodeSequence) {
      continue;
    }

    // Rewrite the matched Initial Exec sequence in place with its Local Exec
    // equivalent.
    memcpy(dest: TLSSequence, src: C.NewCodeSequence.data(), n: C.NewCodeSequence.size());

    // The original GOTTPOFF relocation has an addend as it is PC relative,
    // so it needs to be corrected. The TPOFF32 relocation is used as an
    // absolute value (which is an offset from %fs:0), so remove the addend
    // again.
    RelocationEntry RE(SectionID,
                       TLSSequenceStartOffset + C.TpoffRelocationOffset,
                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);

    if (Value.SymbolName)
      addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
    else
      addRelocationForSection(RE, SectionID: Value.SectionID);

    Resolved = true;
    break;
  }

  if (!Resolved) {
    // The GOTTPOFF relocation was not used in one of the sequences
    // described in the spec, so we can't optimize it to a TPOFF
    // relocation. Allocate a GOT entry instead and resolve the original
    // PC-relative reference against it; the entry itself is filled via a
    // TPOFF64 relocation on the target symbol/section.
    uint64_t GOTOffset = allocateGOTEntries(no: 1);
    resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset: GOTOffset + Addend,
                               Type: ELF::R_X86_64_PC32);
    RelocationEntry RE =
        computeGOTOffsetRE(GOTOffset, SymbolOffset: Value.Offset, Type: ELF::R_X86_64_TPOFF64);
    if (Value.SymbolName)
      addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
    else
      addRelocationForSection(RE, SectionID: Value.SectionID);
  }
}
2410
// Rewrites an x86-64 General Dynamic (TLSGD) or Local Dynamic (TLSLD) TLS
// access sequence in place into its Local Exec form. This is legal here
// because the JIT links everything statically with no additional DSOs, so
// every TLS offset can be resolved directly relative to %fs:0 and the
// __tls_get_addr call can be removed.
//
// SectionID/Offset locate the TLSGD/TLSLD relocation, Value/Addend describe
// the TLS symbol, and GetAddrRelocation is the relocation for the
// __tls_get_addr call that immediately follows; its type reveals whether the
// code was built for the small or large code model. Calls
// report_fatal_error() if the bytes don't match one of the sequences from
// the TLS spec (plus known gcc variants).
void RuntimeDyldELF::processX86_64TLSRelocation(
    unsigned SectionID, uint64_t Offset, uint64_t RelType,
    RelocationValueRef Value, int64_t Addend,
    const RelocationRef &GetAddrRelocation) {
  // Since we are statically linking and have no additional DSOs, we can resolve
  // the relocation directly without using __tls_get_addr.
  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
  // to replace it with the Local Exec relocation variant.

  // Find out whether the code was compiled with the large or small memory
  // model. For this we look at the next relocation which is the relocation
  // for the __tls_get_addr function. If it's a 32 bit relocation, it's the
  // small code model, with a 64 bit relocation it's the large code model.
  bool IsSmallCodeModel;
  // Is the relocation for the __tls_get_addr a PC-relative GOT relocation?
  bool IsGOTPCRel = false;

  switch (GetAddrRelocation.getType()) {
  case ELF::R_X86_64_GOTPCREL:
  case ELF::R_X86_64_REX_GOTPCRELX:
  case ELF::R_X86_64_GOTPCRELX:
    IsGOTPCRel = true;
    [[fallthrough]];
  case ELF::R_X86_64_PLT32:
    IsSmallCodeModel = true;
    break;
  case ELF::R_X86_64_PLTOFF64:
    IsSmallCodeModel = false;
    break;
  default:
    report_fatal_error(
        reason: "invalid TLS relocations for General/Local Dynamic TLS Model: "
        "expected PLT or GOT relocation for __tls_get_addr function");
  }

  // The negative offset to the start of the TLS code sequence relative to
  // the offset of the TLSGD/TLSLD relocation
  uint64_t TLSSequenceOffset;
  // The expected start of the code sequence
  ArrayRef<uint8_t> ExpectedCodeSequence;
  // The new TLS code sequence that will replace the existing code
  ArrayRef<uint8_t> NewCodeSequence;

  if (RelType == ELF::R_X86_64_TLSGD) {
    // The offset of the new TPOFF32 relocation (offset starting from the
    // beginning of the whole TLS sequence)
    uint64_t TpoffRelocOffset;

    if (IsSmallCodeModel) {
      if (!IsGOTPCRel) {
        static const std::initializer_list<uint8_t> CodeSequence = {
            0x66, // data16 (no-op prefix)
            0x48, 0x8d, 0x3d, 0x00, 0x00,
            0x00, 0x00,                  // lea <disp32>(%rip), %rdi
            0x66, 0x66,                  // two data16 prefixes
            0x48,                        // rex64 (no-op prefix)
            0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
        };
        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
        // The TLSGD reloc points at the lea's disp32, 4 bytes into the
        // sequence.
        TLSSequenceOffset = 4;
      } else {
        // This code sequence is not described in the TLS spec but gcc
        // generates it sometimes.
        static const std::initializer_list<uint8_t> CodeSequence = {
            0x66, // data16 (no-op prefix)
            0x48, 0x8d, 0x3d, 0x00, 0x00,
            0x00, 0x00, // lea <disp32>(%rip), %rdi
            0x66,       // data16 prefix (no-op prefix)
            0x48,       // rex64 (no-op prefix)
            0xff, 0x15, 0x00, 0x00, 0x00,
            0x00 // call *__tls_get_addr@gotpcrel(%rip)
        };
        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
        TLSSequenceOffset = 4;
      }

      // The replacement code for the small code model. It's the same for
      // both sequences.
      static const std::initializer_list<uint8_t> SmallSequence = {
          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
          0x00,                                    // mov %fs:0, %rax
          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax),
                                                   // %rax
      };
      NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
      TpoffRelocOffset = 12;
    } else {
      static const std::initializer_list<uint8_t> CodeSequence = {
          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
                                                    // %rdi
          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
          0x00,             // movabs $__tls_get_addr@pltoff, %rax
          0x48, 0x01, 0xd8, // add %rbx, %rax
          0xff, 0xd0        // call *%rax
      };
      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
      TLSSequenceOffset = 3;

      // The replacement code for the large code model
      static const std::initializer_list<uint8_t> LargeSequence = {
          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
          0x00,                                     // mov %fs:0, %rax
          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00, // lea x@tpoff(%rax),
                                                    // %rax
          0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00        // nopw 0x0(%rax,%rax,1)
      };
      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
      TpoffRelocOffset = 12;
    }

    // The TLSGD/TLSLD relocations are PC-relative, so they have an addend.
    // The new TPOFF32 relocations is used as an absolute offset from
    // %fs:0, so remove the TLSGD/TLSLD addend again.
    RelocationEntry RE(SectionID, Offset - TLSSequenceOffset + TpoffRelocOffset,
                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
    if (Value.SymbolName)
      addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
    else
      addRelocationForSection(RE, SectionID: Value.SectionID);
  } else if (RelType == ELF::R_X86_64_TLSLD) {
    // For TLSLD no new TPOFF32 relocation is needed: the rewritten sequence
    // only loads the TLS block base (%fs:0); the per-symbol DTPOFF offsets
    // elsewhere are unaffected.
    if (IsSmallCodeModel) {
      if (!IsGOTPCRel) {
        static const std::initializer_list<uint8_t> CodeSequence = {
            0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
            0x00, 0xe8, 0x00, 0x00, 0x00, 0x00  // call __tls_get_addr@plt
        };
        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
        TLSSequenceOffset = 3;

        // The replacement code for the small code model
        static const std::initializer_list<uint8_t> SmallSequence = {
            0x66, 0x66, 0x66, // three data16 prefixes (no-op)
            0x64, 0x48, 0x8b, 0x04, 0x25,
            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
        };
        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
      } else {
        // This code sequence is not described in the TLS spec but gcc
        // generates it sometimes.
        static const std::initializer_list<uint8_t> CodeSequence = {
            0x48, 0x8d, 0x3d, 0x00,
            0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
            0xff, 0x15, 0x00, 0x00,
            0x00, 0x00 // call
                       // *__tls_get_addr@gotpcrel(%rip)
        };
        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
        TLSSequenceOffset = 3;

        // The replacement is code is just like above but it needs to be
        // one byte longer.
        static const std::initializer_list<uint8_t> SmallSequence = {
            0x0f, 0x1f, 0x40, 0x00, // 4 byte nop
            0x64, 0x48, 0x8b, 0x04, 0x25,
            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
        };
        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
      }
    } else {
      // This is the same sequence as for the TLSGD sequence with the large
      // memory model above
      static const std::initializer_list<uint8_t> CodeSequence = {
          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
                                                    // %rdi
          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
          0x48,       // movabs $__tls_get_addr@pltoff, %rax
          0x01, 0xd8, // add %rbx, %rax
          0xff, 0xd0  // call *%rax
      };
      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
      TLSSequenceOffset = 3;

      // The replacement code for the large code model
      static const std::initializer_list<uint8_t> LargeSequence = {
          0x66, 0x66, 0x66, // three data16 prefixes (no-op)
          0x66, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00,
          0x00, // 10 byte nop
          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
      };
      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
    }
  } else {
    llvm_unreachable("both TLS relocations handled above");
  }

  assert(ExpectedCodeSequence.size() == NewCodeSequence.size() &&
         "Old and new code sequences must have the same size");

  // Verify the sequence fits inside the section before touching any bytes.
  auto &Section = Sections[SectionID];
  if (Offset < TLSSequenceOffset ||
      (Offset - TLSSequenceOffset + NewCodeSequence.size()) >
          Section.getSize()) {
    report_fatal_error(reason: "unexpected end of section in TLS sequence");
  }

  auto *TLSSequence = Section.getAddressWithOffset(OffsetBytes: Offset - TLSSequenceOffset);
  if (ArrayRef<uint8_t>(TLSSequence, ExpectedCodeSequence.size()) !=
      ExpectedCodeSequence) {
    report_fatal_error(
        reason: "invalid TLS sequence for Global/Local Dynamic TLS Model");
  }

  // Patch the new (Local Exec) sequence over the old one.
  memcpy(dest: TLSSequence, src: NewCodeSequence.data(), n: NewCodeSequence.size());
}
2615
2616size_t RuntimeDyldELF::getGOTEntrySize() {
2617 // We don't use the GOT in all of these cases, but it's essentially free
2618 // to put them all here.
2619 size_t Result = 0;
2620 switch (Arch) {
2621 case Triple::x86_64:
2622 case Triple::aarch64:
2623 case Triple::aarch64_be:
2624 case Triple::loongarch64:
2625 case Triple::ppc64:
2626 case Triple::ppc64le:
2627 case Triple::systemz:
2628 Result = sizeof(uint64_t);
2629 break;
2630 case Triple::x86:
2631 case Triple::arm:
2632 case Triple::thumb:
2633 Result = sizeof(uint32_t);
2634 break;
2635 case Triple::mips:
2636 case Triple::mipsel:
2637 case Triple::mips64:
2638 case Triple::mips64el:
2639 if (IsMipsO32ABI || IsMipsN32ABI)
2640 Result = sizeof(uint32_t);
2641 else if (IsMipsN64ABI)
2642 Result = sizeof(uint64_t);
2643 else
2644 llvm_unreachable("Mips ABI not handled");
2645 break;
2646 default:
2647 llvm_unreachable("Unsupported CPU type!");
2648 }
2649 return Result;
2650}
2651
2652uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned no) {
2653 if (GOTSectionID == 0) {
2654 GOTSectionID = Sections.size();
2655 // Reserve a section id. We'll allocate the section later
2656 // once we know the total size
2657 Sections.push_back(x: SectionEntry(".got", nullptr, 0, 0, 0));
2658 }
2659 uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
2660 CurrentGOTIndex += no;
2661 return StartOffset;
2662}
2663
2664uint64_t RuntimeDyldELF::findOrAllocGOTEntry(const RelocationValueRef &Value,
2665 unsigned GOTRelType) {
2666 auto E = GOTOffsetMap.insert(x: {Value, 0});
2667 if (E.second) {
2668 uint64_t GOTOffset = allocateGOTEntries(no: 1);
2669
2670 // Create relocation for newly created GOT entry
2671 RelocationEntry RE =
2672 computeGOTOffsetRE(GOTOffset, SymbolOffset: Value.Offset, Type: GOTRelType);
2673 if (Value.SymbolName)
2674 addRelocationForSymbol(RE, SymbolName: Value.SymbolName);
2675 else
2676 addRelocationForSection(RE, SectionID: Value.SectionID);
2677
2678 E.first->second = GOTOffset;
2679 }
2680
2681 return E.first->second;
2682}
2683
2684void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
2685 uint64_t Offset,
2686 uint64_t GOTOffset,
2687 uint32_t Type) {
2688 // Fill in the relative address of the GOT Entry into the stub
2689 RelocationEntry GOTRE(SectionID, Offset, Type, GOTOffset);
2690 addRelocationForSection(RE: GOTRE, SectionID: GOTSectionID);
2691}
2692
2693RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(uint64_t GOTOffset,
2694 uint64_t SymbolOffset,
2695 uint32_t Type) {
2696 return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
2697}
2698
2699void RuntimeDyldELF::processNewSymbol(const SymbolRef &ObjSymbol, SymbolTableEntry& Symbol) {
2700 // This should never return an error as `processNewSymbol` wouldn't have been
2701 // called if getFlags() returned an error before.
2702 auto ObjSymbolFlags = cantFail(ValOrErr: ObjSymbol.getFlags());
2703
2704 if (ObjSymbolFlags & SymbolRef::SF_Indirect) {
2705 if (IFuncStubSectionID == 0) {
2706 // Create a dummy section for the ifunc stubs. It will be actually
2707 // allocated in finalizeLoad() below.
2708 IFuncStubSectionID = Sections.size();
2709 Sections.push_back(
2710 x: SectionEntry(".text.__llvm_IFuncStubs", nullptr, 0, 0, 0));
2711 // First 64B are reserverd for the IFunc resolver
2712 IFuncStubOffset = 64;
2713 }
2714
2715 IFuncStubs.push_back(Elt: IFuncStub{.StubOffset: IFuncStubOffset, .OriginalSymbol: Symbol});
2716 // Modify the symbol so that it points to the ifunc stub instead of to the
2717 // resolver function.
2718 Symbol = SymbolTableEntry(IFuncStubSectionID, IFuncStubOffset,
2719 Symbol.getFlags());
2720 IFuncStubOffset += getMaxIFuncStubSize();
2721 }
2722}
2723
// Finalizes loading of one object once all its sections and relocations have
// been processed: materializes the ifunc stub section, allocates and
// zero-fills the GOT, records the MIPS section-to-GOT mapping, registers the
// object's .eh_frame section, and resets the per-object GOT/ifunc state.
// Returns an error if a required allocation fails or the object is malformed.
Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
                                   ObjSectionToIDMap &SectionMap) {
  // On MIPS O32, every HI16 relocation must have been paired with a matching
  // LO16 by now; anything left pending means the object is malformed.
  if (IsMipsO32ABI)
    if (!PendingRelocs.empty())
      return make_error<RuntimeDyldError>(Args: "Can't find matching LO16 reloc");

  // Create the IFunc stubs if necessary. This must be done before processing
  // the GOT entries, as the IFunc stubs may create some.
  if (IFuncStubSectionID != 0) {
    uint8_t *IFuncStubsAddr = MemMgr.allocateCodeSection(
        Size: IFuncStubOffset, Alignment: 1, SectionID: IFuncStubSectionID, SectionName: ".text.__llvm_IFuncStubs");
    if (!IFuncStubsAddr)
      return make_error<RuntimeDyldError>(
          Args: "Unable to allocate memory for IFunc stubs!");
    Sections[IFuncStubSectionID] =
        SectionEntry(".text.__llvm_IFuncStubs", IFuncStubsAddr, IFuncStubOffset,
                     IFuncStubOffset, 0);

    // The resolver trampoline occupies the reserved space at the start of
    // the section (offset 0); the stubs created below jump through it.
    createIFuncResolver(Addr: IFuncStubsAddr);

    LLVM_DEBUG(dbgs() << "Creating IFunc stubs SectionID: "
                      << IFuncStubSectionID << " Addr: "
                      << Sections[IFuncStubSectionID].getAddress() << '\n');
    for (auto &IFuncStub : IFuncStubs) {
      auto &Symbol = IFuncStub.OriginalSymbol;
      LLVM_DEBUG(dbgs() << "\tSectionID: " << Symbol.getSectionID()
                        << " Offset: " << format("%p", Symbol.getOffset())
                        << " IFuncStubOffset: "
                        << format("%p\n", IFuncStub.StubOffset));
      createIFuncStub(IFuncStubSectionID, IFuncResolverOffset: 0, IFuncStubOffset: IFuncStub.StubOffset,
                      IFuncSectionID: Symbol.getSectionID(), IFuncOffset: Symbol.getOffset());
    }

    // Reset per-object ifunc bookkeeping for the next object.
    IFuncStubSectionID = 0;
    IFuncStubOffset = 0;
    IFuncStubs.clear();
  }

  // If necessary, allocate the global offset table
  if (GOTSectionID != 0) {
    // Allocate memory for the section
    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
    uint8_t *Addr = MemMgr.allocateDataSection(Size: TotalSize, Alignment: getGOTEntrySize(),
                                               SectionID: GOTSectionID, SectionName: ".got", IsReadOnly: false);
    if (!Addr)
      return make_error<RuntimeDyldError>(Args: "Unable to allocate memory for GOT!");

    Sections[GOTSectionID] =
        SectionEntry(".got", Addr, TotalSize, TotalSize, 0);

    // For now, initialize all GOT entries to zero. We'll fill them in as
    // needed when GOT-based relocations are applied.
    memset(s: Addr, c: 0, n: TotalSize);
    if (IsMipsN32ABI || IsMipsN64ABI) {
      // To correctly resolve Mips GOT relocations, we need a mapping from
      // object's sections to GOTs.
      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
           SI != SE; ++SI) {
        if (SI->relocation_begin() != SI->relocation_end()) {
          Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
          if (!RelSecOrErr)
            return make_error<RuntimeDyldError>(
                Args: toString(E: RelSecOrErr.takeError()));

          section_iterator RelocatedSection = *RelSecOrErr;
          ObjSectionToIDMap::iterator i = SectionMap.find(x: *RelocatedSection);
          assert(i != SectionMap.end());
          SectionToGOTMap[i->second] = GOTSectionID;
        }
      }
      GOTSymbolOffsets.clear();
    }
  }

  // Look for and record the EH frame section. Note that only the first
  // section named ".eh_frame" is registered (the loop breaks on a match).
  ObjSectionToIDMap::iterator i, e;
  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
    const SectionRef &Section = i->first;

    StringRef Name;
    Expected<StringRef> NameOrErr = Section.getName();
    if (NameOrErr)
      Name = *NameOrErr;
    else
      consumeError(Err: NameOrErr.takeError());

    if (Name == ".eh_frame") {
      UnregisteredEHFrameSections.push_back(Elt: i->second);
      break;
    }
  }

  // Reset per-object GOT bookkeeping for the next object.
  GOTOffsetMap.clear();
  GOTSectionID = 0;
  CurrentGOTIndex = 0;

  return Error::success();
}
2822
// This dynamic linker implementation handles ELF object files only.
bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isELF();
}
2826
// Emits the shared IFunc resolver trampoline at Addr. Every ifunc stub jumps
// here (with %r11 pointing at its GOT entries) the first time it is called;
// afterwards the stub's GOT1 entry holds the resolved function directly.
// Only implemented for x86-64; other targets abort.
void RuntimeDyldELF::createIFuncResolver(uint8_t *Addr) const {
  if (Arch == Triple::x86_64) {
    // The address of the GOT1 entry is in %r11, the GOT2 entry is in %r11+8
    // (see createIFuncStub() for details)
    // The following code first saves all registers that contain the original
    // function arguments as those registers are not saved by the resolver
    // function. %r11 is saved as well so that the GOT2 entry can be updated
    // afterwards. Then it calls the actual IFunc resolver function whose
    // address is stored in GOT2. After the resolver function returns, all
    // saved registers are restored and the return value is written to GOT1.
    // Finally, jump to the now resolved function.
    // clang-format off
    const uint8_t StubCode[] = {
        0x57,                   // push %rdi
        0x56,                   // push %rsi
        0x52,                   // push %rdx
        0x51,                   // push %rcx
        0x41, 0x50,             // push %r8
        0x41, 0x51,             // push %r9
        0x41, 0x53,             // push %r11
        0x41, 0xff, 0x53, 0x08, // call *0x8(%r11)
        0x41, 0x5b,             // pop %r11
        0x41, 0x59,             // pop %r9
        0x41, 0x58,             // pop %r8
        0x59,                   // pop %rcx
        0x5a,                   // pop %rdx
        0x5e,                   // pop %rsi
        0x5f,                   // pop %rdi
        0x49, 0x89, 0x03,       // mov %rax,(%r11)
        0xff, 0xe0              // jmp *%rax
    };
    // clang-format on
    // processNewSymbol() reserves exactly 64 bytes for the resolver, so the
    // code must never grow beyond that.
    static_assert(sizeof(StubCode) <= 64,
                  "maximum size of the IFunc resolver is 64B");
    memcpy(dest: Addr, src: StubCode, n: sizeof(StubCode));
  } else {
    report_fatal_error(
        reason: "IFunc resolver is not supported for target architecture");
  }
}
2867
// Emits one ifunc stub at IFuncStubOffset within IFuncStubSectionID, plus the
// two GOT entries and three relocations that wire it to the resolver
// trampoline (at IFuncResolverOffset in the stub section) and to the actual
// ifunc resolver function (at IFuncOffset in IFuncSectionID). Only
// implemented for x86-64; other targets abort.
void RuntimeDyldELF::createIFuncStub(unsigned IFuncStubSectionID,
                                     uint64_t IFuncResolverOffset,
                                     uint64_t IFuncStubOffset,
                                     unsigned IFuncSectionID,
                                     uint64_t IFuncOffset) {
  auto &IFuncStubSection = Sections[IFuncStubSectionID];
  auto *Addr = IFuncStubSection.getAddressWithOffset(OffsetBytes: IFuncStubOffset);

  if (Arch == Triple::x86_64) {
    // The first instruction loads a PC-relative address into %r11 which is a
    // GOT entry for this stub. This initially contains the address to the
    // IFunc resolver. We can use %r11 here as it's caller saved but not used
    // to pass any arguments. In fact, x86_64 ABI even suggests using %r11 for
    // code in the PLT. The IFunc resolver will use %r11 to update the GOT
    // entry.
    //
    // The next instruction just jumps to the address contained in the GOT
    // entry. As mentioned above, we do this two-step jump by first setting
    // %r11 so that the IFunc resolver has access to it.
    //
    // The IFunc resolver of course also needs to know the actual address of
    // the actual IFunc resolver function. This will be stored in a GOT entry
    // right next to the first one for this stub. So, the IFunc resolver will
    // be able to call it with %r11+8.
    //
    // In total, two adjacent GOT entries (+relocation) and one additional
    // relocation are required:
    // GOT1: Address of the IFunc resolver.
    // GOT2: Address of the IFunc resolver function.
    // IFuncStubOffset+3: 32-bit PC-relative address of GOT1.
    uint64_t GOT1 = allocateGOTEntries(no: 2);
    uint64_t GOT2 = GOT1 + getGOTEntrySize();

    // GOT1 initially points at the resolver trampoline in the stub section;
    // GOT2 points at the real ifunc resolver function.
    RelocationEntry RE1(GOTSectionID, GOT1, ELF::R_X86_64_64,
                        IFuncResolverOffset, {});
    addRelocationForSection(RE: RE1, SectionID: IFuncStubSectionID);
    RelocationEntry RE2(GOTSectionID, GOT2, ELF::R_X86_64_64, IFuncOffset, {});
    addRelocationForSection(RE: RE2, SectionID: IFuncSectionID);

    const uint8_t StubCode[] = {
        0x4c, 0x8d, 0x1d, 0x00, 0x00, 0x00, 0x00, // leaq 0x0(%rip),%r11
        0x41, 0xff, 0x23                          // jmpq *(%r11)
    };
    assert(sizeof(StubCode) <= getMaxIFuncStubSize() &&
           "IFunc stub size must not exceed getMaxIFuncStubSize()");
    memcpy(dest: Addr, src: StubCode, n: sizeof(StubCode));

    // The PC-relative value starts 4 bytes from the end of the leaq
    // instruction, so the addend is -4.
    resolveGOTOffsetRelocation(SectionID: IFuncStubSectionID, Offset: IFuncStubOffset + 3,
                               GOTOffset: GOT1 - 4, Type: ELF::R_X86_64_PC32);
  } else {
    report_fatal_error(reason: "IFunc stub is not supported for target architecture");
  }
}
2923
2924unsigned RuntimeDyldELF::getMaxIFuncStubSize() const {
2925 if (Arch == Triple::x86_64) {
2926 return 10;
2927 }
2928 return 0;
2929}
2930
2931bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
2932 unsigned RelTy = R.getType();
2933 if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
2934 return RelTy == ELF::R_AARCH64_ADR_GOT_PAGE ||
2935 RelTy == ELF::R_AARCH64_LD64_GOT_LO12_NC;
2936
2937 if (Arch == Triple::loongarch64)
2938 return RelTy == ELF::R_LARCH_GOT_PC_HI20 ||
2939 RelTy == ELF::R_LARCH_GOT_PC_LO12;
2940
2941 if (Arch == Triple::x86_64)
2942 return RelTy == ELF::R_X86_64_GOTPCREL ||
2943 RelTy == ELF::R_X86_64_GOTPCRELX ||
2944 RelTy == ELF::R_X86_64_GOT64 ||
2945 RelTy == ELF::R_X86_64_REX_GOTPCRELX;
2946 return false;
2947}
2948
2949bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
2950 if (Arch != Triple::x86_64)
2951 return true; // Conservative answer
2952
2953 switch (R.getType()) {
2954 default:
2955 return true; // Conservative answer
2956
2957
2958 case ELF::R_X86_64_GOTPCREL:
2959 case ELF::R_X86_64_GOTPCRELX:
2960 case ELF::R_X86_64_REX_GOTPCRELX:
2961 case ELF::R_X86_64_GOTPC64:
2962 case ELF::R_X86_64_GOT64:
2963 case ELF::R_X86_64_GOTOFF64:
2964 case ELF::R_X86_64_PC32:
2965 case ELF::R_X86_64_PC64:
2966 case ELF::R_X86_64_64:
2967 // We know that these reloation types won't need a stub function. This list
2968 // can be extended as needed.
2969 return false;
2970 }
2971}
2972
2973} // namespace llvm
2974