1//===- PPC64.cpp ----------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "InputFiles.h"
10#include "OutputSections.h"
11#include "RelocScan.h"
12#include "SymbolTable.h"
13#include "Symbols.h"
14#include "SyntheticSections.h"
15#include "Target.h"
16#include "Thunks.h"
17
18using namespace llvm;
19using namespace llvm::object;
20using namespace llvm::support::endian;
21using namespace llvm::ELF;
22using namespace lld;
23using namespace lld::elf;
24
// Offset from the start of .got to the TOC base pointer; see getPPC64TocBase.
constexpr uint64_t ppc64TocOffset = 0x8000;
// 0x8000 bias applied to dynamic-thread-pointer-relative (DTPREL) values —
// presumably the standard PPC64 TLS ABI bias; used by relocation code
// elsewhere in this file.
constexpr uint64_t dynamicThreadPointerOffset = 0x8000;
27
28namespace {
// The instruction encoding of bits 21-30 from the ISA for the Xform and Dform
// instructions that can be used as part of the initial exec TLS sequence.
enum XFormOpcd {
  LBZX = 87,
  LHZX = 279,
  LWZX = 23,
  LDX = 21,
  STBX = 215,
  STHX = 407,
  STWX = 151,
  STDX = 149,
  LHAX = 343,
  LWAX = 341,
  LFSX = 535,
  LFDX = 599,
  STFSX = 663,
  STFDX = 727,
  // ADD appears when the IE sequence builds an address instead of accessing
  // memory; it is mapped to ADDI by getPPCDFormOp.
  ADD = 266,
};
48
// D-form (base + 16-bit displacement) primary opcodes corresponding to the
// X-form instructions above, plus the update (-U) variants recognized by
// isInstructionUpdateForm.
enum DFormOpcd {
  LBZ = 34,
  LBZU = 35,
  LHZ = 40,
  LHZU = 41,
  LHAU = 43,
  LWZ = 32,
  LWZU = 33,
  LFSU = 49,
  LFDU = 51,
  STB = 38,
  STBU = 39,
  STH = 44,
  STHU = 45,
  STW = 36,
  STWU = 37,
  STFSU = 53,
  STFDU = 55,
  LHA = 42,
  LFS = 48,
  LFD = 50,
  STFS = 52,
  STFD = 54,
  ADDI = 14
};
74
// DS-form primary opcodes. LD and LWA deliberately share primary opcode 58;
// the low 'XO' bits of the encoding distinguish them.
enum DSFormOpcd {
  LD = 58,
  LWA = 58,
  STD = 62
};
80
81constexpr uint32_t NOP = 0x60000000;
82
// Legacy (non-prefixed) load/store instructions eligible for conversion to
// prefixed pc-relative forms. Each value is the instruction encoding with only
// the primary opcode set — plus the low 'XO' bits for DS/DQ/paired forms,
// which share a primary opcode and are disambiguated by those bits.
enum class PPCLegacyInsn : uint32_t {
  NOINSN = 0,
  // Loads.
  LBZ = 0x88000000,
  LHZ = 0xa0000000,
  LWZ = 0x80000000,
  LHA = 0xa8000000,
  LWA = 0xe8000002,
  LD = 0xe8000000,
  LFS = 0xC0000000,
  LXSSP = 0xe4000003,
  LFD = 0xc8000000,
  LXSD = 0xe4000002,
  LXV = 0xf4000001,
  LXVP = 0x18000000,

  // Stores.
  STB = 0x98000000,
  STH = 0xb0000000,
  STW = 0x90000000,
  STD = 0xf8000000,
  STFS = 0xd0000000,
  STXSSP = 0xf4000003,
  STFD = 0xd8000000,
  STXSD = 0xf4000002,
  STXV = 0xf4000005,
  STXVP = 0x18000001
};
// Prefixed (64-bit) pc-relative instruction templates. The upper 32 bits hold
// the prefix word, the lower 32 bits the suffix. MLS-prefixed instructions
// keep the legacy primary opcode in the suffix, so their template is just the
// prefix; 8LS-prefixed instructions use a different suffix opcode, encoded
// here explicitly.
enum class PPCPrefixedInsn : uint64_t {
  NOINSN = 0,
  PREFIX_MLS = 0x0610000000000000,
  PREFIX_8LS = 0x0410000000000000,

  // Loads.
  PLBZ = PREFIX_MLS,
  PLHZ = PREFIX_MLS,
  PLWZ = PREFIX_MLS,
  PLHA = PREFIX_MLS,
  PLWA = PREFIX_8LS | 0xa4000000,
  PLD = PREFIX_8LS | 0xe4000000,
  PLFS = PREFIX_MLS,
  PLXSSP = PREFIX_8LS | 0xac000000,
  PLFD = PREFIX_MLS,
  PLXSD = PREFIX_8LS | 0xa8000000,
  PLXV = PREFIX_8LS | 0xc8000000,
  PLXVP = PREFIX_8LS | 0xe8000000,

  // Stores.
  PSTB = PREFIX_MLS,
  PSTH = PREFIX_MLS,
  PSTW = PREFIX_MLS,
  PSTD = PREFIX_8LS | 0xf4000000,
  PSTFS = PREFIX_MLS,
  PSTXSSP = PREFIX_8LS | 0xbc000000,
  PSTFD = PREFIX_MLS,
  PSTXSD = PREFIX_8LS | 0xb8000000,
  PSTXV = PREFIX_8LS | 0xd8000000,
  PSTXVP = PREFIX_8LS | 0xf8000000
};
142
// Returns true if `encoding` is exactly one of the legacy instructions
// enumerated in PPCInsns.def, i.e. one that has a pc-relative prefixed
// counterpart. Used by getPPCLegacyInsn to validate a normalized opcode.
static bool checkPPCLegacyInsn(uint32_t encoding) {
  PPCLegacyInsn insn = static_cast<PPCLegacyInsn>(encoding);
  if (insn == PPCLegacyInsn::NOINSN)
    return false;
#define PCREL_OPT(Legacy, PCRel, InsnMask) \
  if (insn == PPCLegacyInsn::Legacy) \
    return true;
#include "PPCInsns.def"
#undef PCREL_OPT
  return false;
}
154
// Masks to apply to legacy instructions when converting them to prefixed,
// pc-relative versions. For the most part, the primary opcode is shared
// between the legacy instruction and the suffix of its prefixed version.
// However, there are some instances where that isn't the case (DS-Form and
// DQ-form instructions). The mask selects which bits of the legacy encoding
// are carried over into the prefixed template (see getPCRelativeForm).
enum class LegacyToPrefixMask : uint64_t {
  NOMASK = 0x0,
  OPC_AND_RST = 0xffe00000, // Primary opc (0-5) and R[ST] (6-10).
  ONLY_RST = 0x3e00000, // [RS]T (6-10).
  ST_STX28_TO5 =
      0x8000000003e00000, // S/T (6-10) - The [S/T]X bit moves from 28 to 5.
};
167
// Target hooks for the 64-bit PowerPC ELFv2 ABI: PLT/GOT layout, TOC-relative
// addressing, thunk placement for long branches, and the TLS relaxation
// rewrites (GD/LD/IE -> LE and GOT-indirect -> TOC/pc-relative).
class PPC64 final : public TargetInfo {
public:
  PPC64(Ctx &);
  int getTlsGdRelaxSkip(RelType type) const override;
  uint32_t calcEFlags() const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void writeIplt(uint8_t *buf, const Symbol &sym,
                 uint64_t pltEntryAddr) const override;
  template <class ELFT, class RelTy>
  void scanSectionImpl(InputSectionBase &, Relocs<RelTy>);
  void scanSection(InputSectionBase &) override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  void writeGotHeader(uint8_t *buf) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
  RelExpr adjustGotPcExpr(RelType type, int64_t addend,
                          const uint8_t *loc) const override;
  // Not an override: called via static_cast from
  // tryRelaxPPC64TocIndirection and relocateAlloc.
  void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relocateAlloc(InputSection &sec, uint8_t *buf) const override;

  bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                        uint8_t stOther) const override;

private:
  // TLS sequence rewrites; each patches the instruction(s) at `loc` in place.
  void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
};
208} // namespace
209
210uint64_t elf::getPPC64TocBase(Ctx &ctx) {
211 // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
212 // TOC starts where the first of these sections starts. We always create a
213 // .got when we see a relocation that uses it, so for us the start is always
214 // the .got.
215 uint64_t tocVA = ctx.in.got->getVA();
216
217 // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
218 // thus permitting a full 64 Kbytes segment. Note that the glibc startup
219 // code (crt1.o) assumes that you can get from the TOC base to the
220 // start of the .toc section with only a single (signed) 16-bit relocation.
221 return tocVA + ppc64TocOffset;
222}
223
224unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(Ctx &ctx, uint8_t stOther) {
225 // The offset is encoded into the 3 most significant bits of the st_other
226 // field, with some special values described in section 3.4.1 of the ABI:
227 // 0 --> Zero offset between the GEP and LEP, and the function does NOT use
228 // the TOC pointer (r2). r2 will hold the same value on returning from
229 // the function as it did on entering the function.
230 // 1 --> Zero offset between the GEP and LEP, and r2 should be treated as a
231 // caller-saved register for all callers.
232 // 2-6 --> The binary logarithm of the offset eg:
233 // 2 --> 2^2 = 4 bytes --> 1 instruction.
234 // 6 --> 2^6 = 64 bytes --> 16 instructions.
235 // 7 --> Reserved.
236 uint8_t gepToLep = (stOther >> 5) & 7;
237 if (gepToLep < 2)
238 return 0;
239
240 // The value encoded in the st_other bits is the
241 // log-base-2(offset).
242 if (gepToLep < 7)
243 return 1 << gepToLep;
244
245 ErrAlways(ctx)
246 << "reserved value of 7 in the 3 most-significant-bits of st_other";
247 return 0;
248}
249
250void elf::writePrefixedInst(Ctx &ctx, uint8_t *loc, uint64_t insn) {
251 insn = ctx.arg.isLE ? insn << 32 | insn >> 32 : insn;
252 write64(ctx, p: loc, v: insn);
253}
254
255static bool addOptional(Ctx &ctx, StringRef name, uint64_t value,
256 std::vector<Defined *> &defined) {
257 Symbol *sym = ctx.symtab->find(name);
258 if (!sym || sym->isDefined())
259 return false;
260 sym->resolve(ctx, other: Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
261 STV_HIDDEN, STT_FUNC, value,
262 /*size=*/0, /*section=*/nullptr});
263 defined.push_back(x: cast<Defined>(Val: sym));
264 return true;
265}
266
267// If from is 14, write ${prefix}14: firstInsn; ${prefix}15:
268// firstInsn+0x200008; ...; ${prefix}31: firstInsn+(31-14)*0x200008; $tail
269// The labels are defined only if they exist in the symbol table.
270static void writeSequence(Ctx &ctx, const char *prefix, int from,
271 uint32_t firstInsn, ArrayRef<uint32_t> tail) {
272 std::vector<Defined *> defined;
273 char name[16];
274 int first;
275 const size_t size = 32 - from + tail.size();
276 MutableArrayRef<uint32_t> buf(ctx.bAlloc.Allocate<uint32_t>(Num: size), size);
277 uint32_t *ptr = buf.data();
278 for (int r = from; r < 32; ++r) {
279 format(Fmt: "%s%d", Vals: prefix, Vals: r).snprint(Buffer: name, BufferSize: sizeof(name));
280 if (addOptional(ctx, name, value: 4 * (r - from), defined) && defined.size() == 1)
281 first = r - from;
282 write32(ctx, p: ptr++, v: firstInsn + 0x200008 * (r - from));
283 }
284 for (uint32_t insn : tail)
285 write32(ctx, p: ptr++, v: insn);
286 assert(ptr == &*buf.end());
287
288 if (defined.empty())
289 return;
290 // The full section content has the extent of [begin, end). We drop unused
291 // instructions and write [first,end).
292 auto *sec = make<InputSection>(
293 args&: ctx.internalFile, args: ".text", args: SHT_PROGBITS, args: SHF_ALLOC, /*addralign=*/args: 4,
294 /*entsize=*/args: 0,
295 args: ArrayRef(reinterpret_cast<uint8_t *>(buf.data() + first),
296 4 * (buf.size() - first)));
297 ctx.inputSections.push_back(Elt: sec);
298 for (Defined *sym : defined) {
299 sym->section = sec;
300 sym->value -= 4 * first;
301 }
302}
303
304// Implements some save and restore functions as described by ELF V2 ABI to be
305// compatible with GCC. With GCC -Os, when the number of call-saved registers
306// exceeds a certain threshold, GCC generates _savegpr0_* _restgpr0_* calls and
307// expects the linker to define them. See
308// https://sourceware.org/pipermail/binutils/2002-February/017444.html and
309// https://sourceware.org/pipermail/binutils/2004-August/036765.html . This is
310// weird because libgcc.a would be the natural place. The linker generation
311// approach has the advantage that the linker can generate multiple copies to
312// avoid long branch thunks. However, we don't consider the advantage
313// significant enough to complicate our trunk implementation, so we take the
314// simple approach and synthesize .text sections providing the implementation.
315void elf::addPPC64SaveRestore(Ctx &ctx) {
316 constexpr uint32_t blr = 0x4e800020, mtlr_0 = 0x7c0803a6;
317
318 // _restgpr0_14: ld 14, -144(1); _restgpr0_15: ld 15, -136(1); ...
319 // Tail: ld 0, 16(1); mtlr 0; blr
320 writeSequence(ctx, prefix: "_restgpr0_", from: 14, firstInsn: 0xe9c1ff70, tail: {0xe8010010, mtlr_0, blr});
321 // _restgpr1_14: ld 14, -144(12); _restgpr1_15: ld 15, -136(12); ...
322 // Tail: blr
323 writeSequence(ctx, prefix: "_restgpr1_", from: 14, firstInsn: 0xe9ccff70, tail: {blr});
324 // _savegpr0_14: std 14, -144(1); _savegpr0_15: std 15, -136(1); ...
325 // Tail: std 0, 16(1); blr
326 writeSequence(ctx, prefix: "_savegpr0_", from: 14, firstInsn: 0xf9c1ff70, tail: {0xf8010010, blr});
327 // _savegpr1_14: std 14, -144(12); _savegpr1_15: std 15, -136(12); ...
328 // Tail: blr
329 writeSequence(ctx, prefix: "_savegpr1_", from: 14, firstInsn: 0xf9ccff70, tail: {blr});
330}
331
332// Find the R_PPC64_ADDR64 in .rela.toc with matching offset.
333template <typename ELFT>
334static std::pair<Defined *, int64_t>
335getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
336 // .rela.toc contains exclusively R_PPC64_ADDR64 relocations sorted by
337 // r_offset: 0, 8, 16, etc. For a given Offset, Offset / 8 gives us the
338 // relocation index in most cases.
339 //
340 // In rare cases a TOC entry may store a constant that doesn't need an
341 // R_PPC64_ADDR64, the corresponding r_offset is therefore missing. Offset / 8
342 // points to a relocation with larger r_offset. Do a linear probe then.
343 // Constants are extremely uncommon in .toc and the extra number of array
344 // accesses can be seen as a small constant.
345 ArrayRef<typename ELFT::Rela> relas =
346 tocSec->template relsOrRelas<ELFT>().relas;
347 if (relas.empty())
348 return {};
349 uint64_t index = std::min<uint64_t>(offset / 8, relas.size() - 1);
350 for (;;) {
351 if (relas[index].r_offset == offset) {
352 Symbol &sym = tocSec->file->getRelocTargetSym(relas[index]);
353 return {dyn_cast<Defined>(Val: &sym), getAddend<ELFT>(relas[index])};
354 }
355 if (relas[index].r_offset < offset || index == 0)
356 break;
357 --index;
358 }
359 return {};
360}
361
362// When accessing a symbol defined in another translation unit, compilers
363// reserve a .toc entry, allocate a local label and generate toc-indirect
364// instructions:
365//
366// addis 3, 2, .LC0@toc@ha # R_PPC64_TOC16_HA
367// ld 3, .LC0@toc@l(3) # R_PPC64_TOC16_LO_DS, load the address from a .toc entry
368// ld/lwa 3, 0(3) # load the value from the address
369//
370// .section .toc,"aw",@progbits
371// .LC0: .tc var[TC],var
372//
373// If var is defined, non-preemptable and addressable with a 32-bit signed
374// offset from the toc base, the address of var can be computed by adding an
375// offset to the toc base, saving a load.
376//
377// addis 3,2,var@toc@ha # this may be relaxed to a nop,
378// addi 3,3,var@toc@l # then this becomes addi 3,2,var@toc
379// ld/lwa 3, 0(3) # load the value from the address
380//
381// Returns true if the relaxation is performed.
382static bool tryRelaxPPC64TocIndirection(Ctx &ctx, const Relocation &rel,
383 uint8_t *bufLoc) {
384 assert(ctx.arg.tocOptimize);
385 if (rel.addend < 0)
386 return false;
387
388 // If the symbol is not the .toc section, this isn't a toc-indirection.
389 Defined *defSym = dyn_cast<Defined>(Val: rel.sym);
390 if (!defSym || !defSym->isSection() || defSym->section->name != ".toc")
391 return false;
392
393 Defined *d;
394 int64_t addend;
395 auto *tocISB = cast<InputSectionBase>(Val: defSym->section);
396 std::tie(args&: d, args&: addend) =
397 ctx.arg.isLE ? getRelaTocSymAndAddend<ELF64LE>(tocSec: tocISB, offset: rel.addend)
398 : getRelaTocSymAndAddend<ELF64BE>(tocSec: tocISB, offset: rel.addend);
399
400 // Only non-preemptable defined symbols can be relaxed.
401 if (!d || d->isPreemptible)
402 return false;
403
404 // R_PPC64_ADDR64 should have created a canonical PLT for the non-preemptable
405 // ifunc and changed its type to STT_FUNC.
406 assert(!d->isGnuIFunc());
407
408 // Two instructions can materialize a 32-bit signed offset from the toc base.
409 uint64_t tocRelative = d->getVA(ctx, addend) - getPPC64TocBase(ctx);
410 if (!isInt<32>(x: tocRelative))
411 return false;
412
413 // Add PPC64TocOffset that will be subtracted by PPC64::relocate().
414 static_cast<const PPC64 &>(*ctx.target)
415 .relaxGot(loc: bufLoc, rel, val: tocRelative + ppc64TocOffset);
416 return true;
417}
418
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document. The "a" (adjusted) variants round by adding 0x8000 before
// shifting, compensating for the sign extension of the low half.
static uint16_t lo(uint64_t v) { return static_cast<uint16_t>(v); }
static uint16_t hi(uint64_t v) { return static_cast<uint16_t>(v >> 16); }
// NOTE(review): ha() intentionally keeps the full (non-truncated) shifted
// value — presumably so callers can range-check before writing 16 bits.
static uint64_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
static uint16_t higher(uint64_t v) { return static_cast<uint16_t>(v >> 32); }
static uint16_t highera(uint64_t v) {
  return static_cast<uint16_t>((v + 0x8000) >> 32);
}
static uint16_t highest(uint64_t v) { return static_cast<uint16_t>(v >> 48); }
static uint16_t highesta(uint64_t v) {
  return static_cast<uint16_t>((v + 0x8000) >> 48);
}
430
// Extracts the primary opcode ('PO', instruction bits 0-5) of an encoding.
static uint8_t getPrimaryOpCode(uint32_t encoding) {
  return static_cast<uint8_t>(encoding >> 26);
}
433
// Returns true if `encoding` is a DQ-form instruction (16-byte-aligned
// displacement; the low 4 bits of the offset field are opcode 'XO' bits).
static bool isDQFormInstruction(uint32_t encoding) {
  const uint32_t po = encoding >> 26;
  // Primary opcode 6: Power10 paired loads/stores (lxvp, stxvp).
  // Primary opcode 56: the only instruction using it is `lq`.
  if (po == 6 || po == 56)
    return true;
  // Primary opcode 61 covers both DS- and DQ-form instructions; the DQ forms
  // (lxv/stxv) are the ones whose DS 'XO' bits are set to 01.
  return po == 61 && (encoding & 3) == 0x1;
}
449
450static bool isDSFormInstruction(PPCLegacyInsn insn) {
451 switch (insn) {
452 default:
453 return false;
454 case PPCLegacyInsn::LWA:
455 case PPCLegacyInsn::LD:
456 case PPCLegacyInsn::LXSD:
457 case PPCLegacyInsn::LXSSP:
458 case PPCLegacyInsn::STD:
459 case PPCLegacyInsn::STXSD:
460 case PPCLegacyInsn::STXSSP:
461 return true;
462 }
463}
464
465static PPCLegacyInsn getPPCLegacyInsn(uint32_t encoding) {
466 uint32_t opc = encoding & 0xfc000000;
467
468 // If the primary opcode is shared between multiple instructions, we need to
469 // fix it up to match the actual instruction we are after.
470 if ((opc == 0xe4000000 || opc == 0xe8000000 || opc == 0xf4000000 ||
471 opc == 0xf8000000) &&
472 !isDQFormInstruction(encoding))
473 opc = encoding & 0xfc000003;
474 else if (opc == 0xf4000000)
475 opc = encoding & 0xfc000007;
476 else if (opc == 0x18000000)
477 opc = encoding & 0xfc00000f;
478
479 // If the value is not one of the enumerators in PPCLegacyInsn, we want to
480 // return PPCLegacyInsn::NOINSN.
481 if (!checkPPCLegacyInsn(encoding: opc))
482 return PPCLegacyInsn::NOINSN;
483 return static_cast<PPCLegacyInsn>(opc);
484}
485
// Maps a legacy instruction to its prefixed pc-relative counterpart, as
// enumerated in PPCInsns.def, or NOINSN when no such form exists.
static PPCPrefixedInsn getPCRelativeForm(PPCLegacyInsn insn) {
  switch (insn) {
#define PCREL_OPT(Legacy, PCRel, InsnMask) \
  case PPCLegacyInsn::Legacy: \
    return PPCPrefixedInsn::PCRel
#include "PPCInsns.def"
#undef PCREL_OPT
  }
  return PPCPrefixedInsn::NOINSN;
}
496
// Returns the mask (from PPCInsns.def) selecting which bits of the legacy
// encoding survive the conversion to the prefixed pc-relative form.
static LegacyToPrefixMask getInsnMask(PPCLegacyInsn insn) {
  switch (insn) {
#define PCREL_OPT(Legacy, PCRel, InsnMask) \
  case PPCLegacyInsn::Legacy: \
    return LegacyToPrefixMask::InsnMask
#include "PPCInsns.def"
#undef PCREL_OPT
  }
  return LegacyToPrefixMask::NOMASK;
}
507static uint64_t getPCRelativeForm(uint32_t encoding) {
508 PPCLegacyInsn origInsn = getPPCLegacyInsn(encoding);
509 PPCPrefixedInsn pcrelInsn = getPCRelativeForm(insn: origInsn);
510 if (pcrelInsn == PPCPrefixedInsn::NOINSN)
511 return UINT64_C(-1);
512 LegacyToPrefixMask origInsnMask = getInsnMask(insn: origInsn);
513 uint64_t pcrelEncoding =
514 (uint64_t)pcrelInsn | (encoding & (uint64_t)origInsnMask);
515
516 // If the mask requires moving bit 28 to bit 5, do that now.
517 if (origInsnMask == LegacyToPrefixMask::ST_STX28_TO5)
518 pcrelEncoding |= (encoding & 0x8) << 23;
519 return pcrelEncoding;
520}
521
522static bool isInstructionUpdateForm(uint32_t encoding) {
523 switch (getPrimaryOpCode(encoding)) {
524 default:
525 return false;
526 case LBZU:
527 case LHAU:
528 case LHZU:
529 case LWZU:
530 case LFSU:
531 case LFDU:
532 case STBU:
533 case STHU:
534 case STWU:
535 case STFSU:
536 case STFDU:
537 return true;
538 // LWA has the same opcode as LD, and the DS bits is what differentiates
539 // between LD/LDU/LWA
540 case LD:
541 case STD:
542 return (encoding & 3) == 1;
543 }
544}
545
546// Compute the total displacement between the prefixed instruction that gets
547// to the start of the data and the load/store instruction that has the offset
548// into the data structure.
549// For example:
550// paddi 3, 0, 1000, 1
551// lwz 3, 20(3)
552// Should add up to 1020 for total displacement.
553static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
554 int64_t disp34 = llvm::SignExtend64(
555 X: ((prefixedInsn & 0x3ffff00000000) >> 16) | (prefixedInsn & 0xffff), B: 34);
556 int32_t disp16 = llvm::SignExtend32(X: accessInsn & 0xffff, B: 16);
557 // For DS and DQ form instructions, we need to mask out the XO bits.
558 if (isDQFormInstruction(encoding: accessInsn))
559 disp16 &= ~0xf;
560 else if (isDSFormInstruction(insn: getPPCLegacyInsn(encoding: accessInsn)))
561 disp16 &= ~0x3;
562 return disp34 + disp16;
563}
564
565// There are a number of places when we either want to read or write an
566// instruction when handling a half16 relocation type. On big-endian the buffer
567// pointer is pointing into the middle of the word we want to extract, and on
568// little-endian it is pointing to the start of the word. These 2 helpers are to
569// simplify reading and writing in that context.
570static void writeFromHalf16(Ctx &ctx, uint8_t *loc, uint32_t insn) {
571 write32(ctx, p: ctx.arg.isLE ? loc : loc - 2, v: insn);
572}
573
574static uint32_t readFromHalf16(Ctx &ctx, const uint8_t *loc) {
575 return read32(ctx, p: ctx.arg.isLE ? loc : loc - 2);
576}
577
578static uint64_t readPrefixedInst(Ctx &ctx, const uint8_t *loc) {
579 uint64_t fullInstr = read64(ctx, p: loc);
580 return ctx.arg.isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
581}
582
583PPC64::PPC64(Ctx &ctx) : TargetInfo(ctx) {
584 copyRel = R_PPC64_COPY;
585 gotRel = R_PPC64_GLOB_DAT;
586 pltRel = R_PPC64_JMP_SLOT;
587 relativeRel = R_PPC64_RELATIVE;
588 iRelativeRel = R_PPC64_IRELATIVE;
589 symbolicRel = R_PPC64_ADDR64;
590 pltHeaderSize = 60;
591 pltEntrySize = 4;
592 ipltEntrySize = 16; // PPC64PltCallStub::size
593 gotHeaderEntriesNum = 1;
594 gotPltHeaderEntriesNum = 2;
595 needsThunks = true;
596
597 tlsModuleIndexRel = R_PPC64_DTPMOD64;
598 tlsOffsetRel = R_PPC64_DTPREL64;
599
600 tlsGotRel = R_PPC64_TPREL64;
601
602 needsMoreStackNonSplit = false;
603
604 // We need 64K pages (at least under glibc/Linux, the loader won't
605 // set different permissions on a finer granularity than that).
606 defaultMaxPageSize = 65536;
607
608 // The PPC64 ELF ABI v1 spec, says:
609 //
610 // It is normally desirable to put segments with different characteristics
611 // in separate 256 Mbyte portions of the address space, to give the
612 // operating system full paging flexibility in the 64-bit address space.
613 //
614 // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
615 // use 0x10000000 as the starting address.
616 defaultImageBase = 0x10000000;
617
618 write32(ctx, p: trapInstr.data(), v: 0x7fe00008);
619}
620
621int PPC64::getTlsGdRelaxSkip(RelType type) const {
622 // A __tls_get_addr call instruction is marked with 2 relocations:
623 //
624 // R_PPC64_TLSGD / R_PPC64_TLSLD: marker relocation
625 // R_PPC64_REL24: __tls_get_addr
626 //
627 // After the relaxation we no longer call __tls_get_addr and should skip both
628 // relocations to not create a false dependence on __tls_get_addr being
629 // defined.
630 if (type == R_PPC64_TLSGD || type == R_PPC64_TLSLD)
631 return 2;
632 return 1;
633}
634
635static uint32_t getEFlags(InputFile *file) {
636 if (file->ekind == ELF64BEKind)
637 return cast<ObjFile<ELF64BE>>(Val: file)->getObj().getHeader().e_flags;
638 return cast<ObjFile<ELF64LE>>(Val: file)->getObj().getHeader().e_flags;
639}
640
641// This file implements v2 ABI. This function makes sure that all
642// object files have v2 or an unspecified version as an ABI version.
643uint32_t PPC64::calcEFlags() const {
644 for (InputFile *f : ctx.objectFiles) {
645 uint32_t flag = getEFlags(file: f);
646 if (flag == 1)
647 ErrAlways(ctx) << f << ": ABI version 1 is not supported";
648 else if (flag > 2)
649 ErrAlways(ctx) << f << ": unrecognized e_flags: " << flag;
650 }
651 return 2;
652}
653
654void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
655 switch (rel.type) {
656 case R_PPC64_TOC16_HA:
657 // Convert "addis reg, 2, .LC0@toc@h" to "addis reg, 2, var@toc@h" or "nop".
658 relocate(loc, rel, val);
659 break;
660 case R_PPC64_TOC16_LO_DS: {
661 // Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or
662 // "addi reg, 2, var@toc".
663 uint32_t insn = readFromHalf16(ctx, loc);
664 if (getPrimaryOpCode(encoding: insn) != LD)
665 ErrAlways(ctx)
666 << "expected a 'ld' for got-indirect to toc-relative relaxing";
667 writeFromHalf16(ctx, loc, insn: (insn & 0x03ffffff) | 0x38000000);
668 relocateNoSym(loc, type: R_PPC64_TOC16_LO, val);
669 break;
670 }
671 case R_PPC64_GOT_PCREL34: {
672 // Clear the first 8 bits of the prefix and the first 6 bits of the
673 // instruction (the primary opcode).
674 uint64_t insn = readPrefixedInst(ctx, loc);
675 if ((insn & 0xfc000000) != 0xe4000000)
676 ErrAlways(ctx)
677 << "expected a 'pld' for got-indirect to pc-relative relaxing";
678 insn &= ~0xff000000fc000000;
679
680 // Replace the cleared bits with the values for PADDI (0x600000038000000);
681 insn |= 0x600000038000000;
682 writePrefixedInst(ctx, loc, insn);
683 relocate(loc, rel, val);
684 break;
685 }
686 case R_PPC64_PCREL_OPT: {
687 // We can only relax this if the R_PPC64_GOT_PCREL34 at this offset can
688 // be relaxed. The eligibility for the relaxation needs to be determined
689 // on that relocation since this one does not relocate a symbol.
690 uint64_t insn = readPrefixedInst(ctx, loc);
691 uint32_t accessInsn = read32(ctx, p: loc + rel.addend);
692 uint64_t pcRelInsn = getPCRelativeForm(encoding: accessInsn);
693
694 // This error is not necessary for correctness but is emitted for now
695 // to ensure we don't miss these opportunities in real code. It can be
696 // removed at a later date.
697 if (pcRelInsn == UINT64_C(-1)) {
698 Err(ctx)
699 << "unrecognized instruction for R_PPC64_PCREL_OPT relaxation: 0x"
700 << utohexstr(X: accessInsn, LowerCase: true);
701 break;
702 }
703
704 int64_t totalDisp = getTotalDisp(prefixedInsn: insn, accessInsn);
705 if (!isInt<34>(x: totalDisp))
706 break; // Displacement doesn't fit.
707 // Convert the PADDI to the prefixed version of accessInsn and convert
708 // accessInsn to a nop.
709 writePrefixedInst(ctx, loc,
710 insn: pcRelInsn | ((totalDisp & 0x3ffff0000) << 16) |
711 (totalDisp & 0xffff));
712 write32(ctx, p: loc + rel.addend, v: NOP); // nop accessInsn.
713 break;
714 }
715 default:
716 llvm_unreachable("unexpected relocation type");
717 }
718}
719
720void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
721 uint64_t val) const {
722 // Reference: 3.7.4.2 of the 64-bit ELF V2 abi supplement.
723 // The general dynamic code sequence for a global `x` will look like:
724 // Instruction Relocation Symbol
725 // addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x
726 // addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x
727 // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x
728 // R_PPC64_REL24 __tls_get_addr
729 // nop None None
730
731 // Relaxing to local exec entails converting:
732 // addis r3, r2, x@got@tlsgd@ha into nop
733 // addi r3, r3, x@got@tlsgd@l into addis r3, r13, x@tprel@ha
734 // bl __tls_get_addr(x@tlsgd) into nop
735 // nop into addi r3, r3, x@tprel@l
736
737 switch (rel.type) {
738 case R_PPC64_GOT_TLSGD16_HA:
739 writeFromHalf16(ctx, loc, insn: NOP);
740 break;
741 case R_PPC64_GOT_TLSGD16:
742 case R_PPC64_GOT_TLSGD16_LO:
743 writeFromHalf16(ctx, loc, insn: 0x3c6d0000); // addis r3, r13
744 relocateNoSym(loc, type: R_PPC64_TPREL16_HA, val);
745 break;
746 case R_PPC64_GOT_TLSGD_PCREL34:
747 // Relax from paddi r3, 0, x@got@tlsgd@pcrel, 1 to
748 // paddi r3, r13, x@tprel, 0
749 writePrefixedInst(ctx, loc, insn: 0x06000000386d0000);
750 relocateNoSym(loc, type: R_PPC64_TPREL34, val);
751 break;
752 case R_PPC64_TLSGD: {
753 // PC Relative Relaxation:
754 // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
755 // nop
756 // TOC Relaxation:
757 // Relax from bl __tls_get_addr(x@tlsgd)
758 // nop
759 // to
760 // nop
761 // addi r3, r3, x@tprel@l
762 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
763 if (locAsInt % 4 == 0) {
764 write32(ctx, p: loc, v: NOP); // nop
765 write32(ctx, p: loc + 4, v: 0x38630000); // addi r3, r3
766 // Since we are relocating a half16 type relocation and Loc + 4 points to
767 // the start of an instruction we need to advance the buffer by an extra
768 // 2 bytes on BE.
769 relocateNoSym(loc: loc + 4 + (ctx.arg.ekind == ELF64BEKind ? 2 : 0),
770 type: R_PPC64_TPREL16_LO, val);
771 } else if (locAsInt % 4 == 1) {
772 write32(ctx, p: loc - 1, v: NOP);
773 } else {
774 Err(ctx) << "R_PPC64_TLSGD has unexpected byte alignment";
775 }
776 break;
777 }
778 default:
779 llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
780 }
781}
782
783void PPC64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
784 uint64_t val) const {
785 // Reference: 3.7.4.3 of the 64-bit ELF V2 abi supplement.
786 // The local dynamic code sequence for a global `x` will look like:
787 // Instruction Relocation Symbol
788 // addis r3, r2, x@got@tlsld@ha R_PPC64_GOT_TLSLD16_HA x
789 // addi r3, r3, x@got@tlsld@l R_PPC64_GOT_TLSLD16_LO x
790 // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSLD x
791 // R_PPC64_REL24 __tls_get_addr
792 // nop None None
793
794 // Relaxing to local exec entails converting:
795 // addis r3, r2, x@got@tlsld@ha into nop
796 // addi r3, r3, x@got@tlsld@l into addis r3, r13, 0
797 // bl __tls_get_addr(x@tlsgd) into nop
798 // nop into addi r3, r3, 4096
799
800 switch (rel.type) {
801 case R_PPC64_GOT_TLSLD16_HA:
802 writeFromHalf16(ctx, loc, insn: NOP);
803 break;
804 case R_PPC64_GOT_TLSLD16_LO:
805 writeFromHalf16(ctx, loc, insn: 0x3c6d0000); // addis r3, r13, 0
806 break;
807 case R_PPC64_GOT_TLSLD_PCREL34:
808 // Relax from paddi r3, 0, x1@got@tlsld@pcrel, 1 to
809 // paddi r3, r13, 0x1000, 0
810 writePrefixedInst(ctx, loc, insn: 0x06000000386d1000);
811 break;
812 case R_PPC64_TLSLD: {
813 // PC Relative Relaxation:
814 // Relax from bl __tls_get_addr@notoc(x@tlsld)
815 // to
816 // nop
817 // TOC Relaxation:
818 // Relax from bl __tls_get_addr(x@tlsld)
819 // nop
820 // to
821 // nop
822 // addi r3, r3, 4096
823 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
824 if (locAsInt % 4 == 0) {
825 write32(ctx, p: loc, v: NOP);
826 write32(ctx, p: loc + 4, v: 0x38631000); // addi r3, r3, 4096
827 } else if (locAsInt % 4 == 1) {
828 write32(ctx, p: loc - 1, v: NOP);
829 } else {
830 Err(ctx) << "R_PPC64_TLSLD has unexpected byte alignment";
831 }
832 break;
833 }
834 case R_PPC64_DTPREL16:
835 case R_PPC64_DTPREL16_HA:
836 case R_PPC64_DTPREL16_HI:
837 case R_PPC64_DTPREL16_DS:
838 case R_PPC64_DTPREL16_LO:
839 case R_PPC64_DTPREL16_LO_DS:
840 case R_PPC64_DTPREL34:
841 relocate(loc, rel, val);
842 break;
843 default:
844 llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
845 }
846}
847
848// Map X-Form instructions to their DS-Form counterparts, if applicable.
849// The full encoding is returned here to distinguish between the different
850// DS-Form instructions.
851unsigned elf::getPPCDSFormOp(unsigned secondaryOp) {
852 switch (secondaryOp) {
853 case LWAX:
854 return (LWA << 26) | 0x2;
855 case LDX:
856 return LD << 26;
857 case STDX:
858 return STD << 26;
859 default:
860 return 0;
861 }
862}
863
864unsigned elf::getPPCDFormOp(unsigned secondaryOp) {
865 switch (secondaryOp) {
866 case LBZX:
867 return LBZ << 26;
868 case LHZX:
869 return LHZ << 26;
870 case LWZX:
871 return LWZ << 26;
872 case STBX:
873 return STB << 26;
874 case STHX:
875 return STH << 26;
876 case STWX:
877 return STW << 26;
878 case LHAX:
879 return LHA << 26;
880 case LFSX:
881 return LFS << 26;
882 case LFDX:
883 return LFD << 26;
884 case STFSX:
885 return STFS << 26;
886 case STFDX:
887 return STFD << 26;
888 case ADD:
889 return ADDI << 26;
890 default:
891 return 0;
892 }
893}
894
// Rewrite an initial-exec (IE) TLS access into a local-exec (LE) access in
// place. IE reads the TP offset from a GOT entry; LE computes the address
// directly from the thread pointer (r13), so the GOT load is eliminated.
void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                           uint64_t val) const {
  // The initial exec code sequence for a global `x` will look like:
  //   Instruction                    Relocation                  Symbol
  //   addis r9, r2, x@got@tprel@ha   R_PPC64_GOT_TPREL16_HA      x
  //   ld r9, x@got@tprel@l(r9)       R_PPC64_GOT_TPREL16_LO_DS   x
  //   add r9, r9, x@tls              R_PPC64_TLS                 x

  // Relaxing to local exec entails converting:
  //   addis r9, r2, x@got@tprel@ha   into nop
  //   ld r9, x@got@tprel@l(r9)       into addis r9, r13, x@tprel@ha
  //   add r9, r9, x@tls              into addi r9, r9, x@tprel@l

  // x@tls R_PPC64_TLS is a relocation which does not compute anything,
  // it is replaced with r13 (thread pointer).

  // The add instruction in the initial exec sequence has multiple variations
  // that need to be handled. If we are building an address it will use an add
  // instruction, if we are accessing memory it will use any of the X-form
  // indexed load or store instructions.

  // On big-endian the 16-bit immediate is in the high half of the word, so
  // half16 relocations point 2 bytes past the start of the instruction.
  unsigned offset = (ctx.arg.ekind == ELF64BEKind) ? 2 : 0;
  switch (rel.type) {
  case R_PPC64_GOT_TPREL16_HA:
    // The GOT-entry high-adjusted materialization is no longer needed.
    write32(ctx, p: loc - offset, v: NOP);
    break;
  case R_PPC64_GOT_TPREL16_LO_DS:
  case R_PPC64_GOT_TPREL16_DS: {
    // Keep the target register of the original load and re-base it on r13.
    uint32_t regNo = read32(ctx, p: loc - offset) & 0x03e00000; // bits 6-10
    write32(ctx, p: loc - offset, v: 0x3c0d0000 | regNo); // addis RegNo, r13
    relocateNoSym(loc, type: R_PPC64_TPREL16_HA, val);
    break;
  }
  case R_PPC64_GOT_TPREL_PCREL34: {
    // Convert `pld rT, sym@got@tprel@pcrel` into `paddi rT, r13, sym@tprel`,
    // preserving the target register of the pld.
    const uint64_t pldRT = readPrefixedInst(ctx, loc) & 0x0000000003e00000;
    // paddi RT(from pld), r13, symbol@tprel, 0
    writePrefixedInst(ctx, loc, insn: 0x06000000380d0000 | pldRT);
    relocateNoSym(loc, type: R_PPC64_TPREL34, val);
    break;
  }
  case R_PPC64_TLS: {
    const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
    if (locAsInt % 4 == 0) {
      // TOC-based sequence: rewrite the X-form memory op into its D/DS-form
      // counterpart carrying a TPREL16 displacement.
      uint32_t primaryOp = getPrimaryOpCode(encoding: read32(ctx, p: loc));
      if (primaryOp != 31)
        ErrAlways(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
      uint32_t secondaryOp = (read32(ctx, p: loc) & 0x000007fe) >> 1; // bits 21-30
      uint32_t dFormOp = getPPCDFormOp(secondaryOp);
      uint32_t finalReloc;
      if (dFormOp == 0) { // Expecting a DS-Form instruction.
        dFormOp = getPPCDSFormOp(secondaryOp);
        if (dFormOp == 0)
          ErrAlways(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
        finalReloc = R_PPC64_TPREL16_LO_DS;
      } else
        finalReloc = R_PPC64_TPREL16_LO;
      // Keep RT and RA (bits 6-15) from the original instruction.
      write32(ctx, p: loc, v: dFormOp | (read32(ctx, p: loc) & 0x03ff0000));
      relocateNoSym(loc: loc + offset, type: finalReloc, val);
    } else if (locAsInt % 4 == 1) {
      // If the offset is not 4 byte aligned then we have a PCRel type reloc.
      // This version of the relocation is offset by one byte from the
      // instruction it references.
      uint32_t tlsInstr = read32(ctx, p: loc - 1);
      uint32_t primaryOp = getPrimaryOpCode(encoding: tlsInstr);
      if (primaryOp != 31)
        Err(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
      uint32_t secondaryOp = (tlsInstr & 0x000007FE) >> 1; // bits 21-30
      // The add is a special case and should be turned into a nop. The paddi
      // that comes before it will already have computed the address of the
      // symbol.
      if (secondaryOp == 266) {
        // Check if the add uses the same result register as the input register.
        uint32_t rt = (tlsInstr & 0x03E00000) >> 21; // bits 6-10
        uint32_t ra = (tlsInstr & 0x001F0000) >> 16; // bits 11-15
        if (ra == rt) {
          // add rt, rt, r13 with matching registers is a pure no-op here.
          write32(ctx, p: loc - 1, v: NOP);
        } else {
          // mr rt, ra
          write32(ctx, p: loc - 1,
                  v: 0x7C000378 | (rt << 16) | (ra << 21) | (ra << 11));
        }
      } else {
        uint32_t dFormOp = getPPCDFormOp(secondaryOp);
        if (dFormOp == 0) { // Expecting a DS-Form instruction.
          dFormOp = getPPCDSFormOp(secondaryOp);
          if (dFormOp == 0)
            Err(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
        }
        write32(ctx, p: loc - 1, v: (dFormOp | (tlsInstr & 0x03ff0000)));
      }
    } else {
      Err(ctx) << "R_PPC64_TLS must be either 4 byte aligned or one byte "
                  "offset from 4 byte aligned";
    }
    break;
  }
  default:
    llvm_unreachable("unknown relocation for IE to LE");
    break;
  }
}
996
// Classify how each PPC64 relocation type's value is computed (absolute,
// PC-relative, GOT/TOC-relative, or one of the TLS flavours). The returned
// RelExpr drives both relocation scanning and final value computation.
RelExpr PPC64::getRelExpr(RelType type, const Symbol &s,
                          const uint8_t *loc) const {
  switch (type) {
  case R_PPC64_NONE:
    return R_NONE;
  // Absolute address relocations.
  case R_PPC64_ADDR16:
  case R_PPC64_ADDR16_DS:
  case R_PPC64_ADDR16_HA:
  case R_PPC64_ADDR16_HI:
  case R_PPC64_ADDR16_HIGH:
  case R_PPC64_ADDR16_HIGHER:
  case R_PPC64_ADDR16_HIGHERA:
  case R_PPC64_ADDR16_HIGHEST:
  case R_PPC64_ADDR16_HIGHESTA:
  case R_PPC64_ADDR16_LO:
  case R_PPC64_ADDR16_LO_DS:
  case R_PPC64_ADDR32:
  case R_PPC64_ADDR64:
    return R_ABS;
  // Small-code-model GOT accesses (offsets relative to the GOT).
  case R_PPC64_GOT16:
  case R_PPC64_GOT16_DS:
  case R_PPC64_GOT16_HA:
  case R_PPC64_GOT16_HI:
  case R_PPC64_GOT16_LO:
  case R_PPC64_GOT16_LO_DS:
    return R_GOT_OFF;
  // TOC-relative accesses.
  case R_PPC64_TOC16:
  case R_PPC64_TOC16_DS:
  case R_PPC64_TOC16_HI:
  case R_PPC64_TOC16_LO:
    return R_GOTREL;
  case R_PPC64_GOT_PCREL34:
  case R_PPC64_GOT_TPREL_PCREL34:
  case R_PPC64_PCREL_OPT:
    return R_GOT_PC;
  // The HA/LO_DS pair may be relaxed to skip the TOC indirection when
  // --toc-optimize is in effect; see RE_PPC64_RELAX_TOC in relocateAlloc().
  case R_PPC64_TOC16_HA:
  case R_PPC64_TOC16_LO_DS:
    return ctx.arg.tocOptimize ? RE_PPC64_RELAX_TOC : R_GOTREL;
  case R_PPC64_TOC:
    return RE_PPC64_TOCBASE;
  case R_PPC64_REL14:
  case R_PPC64_REL24:
    return RE_PPC64_CALL_PLT;
  case R_PPC64_REL24_NOTOC:
    return R_PLT_PC;
  // Plain PC-relative relocations.
  case R_PPC64_REL16_LO:
  case R_PPC64_REL16_HA:
  case R_PPC64_REL16_HI:
  case R_PPC64_REL32:
  case R_PPC64_REL64:
  case R_PPC64_PCREL34:
    return R_PC;
  // TLS general-dynamic.
  case R_PPC64_GOT_TLSGD16:
  case R_PPC64_GOT_TLSGD16_HA:
  case R_PPC64_GOT_TLSGD16_HI:
  case R_PPC64_GOT_TLSGD16_LO:
    return R_TLSGD_GOT;
  case R_PPC64_GOT_TLSGD_PCREL34:
    return R_TLSGD_PC;
  // TLS local-dynamic.
  case R_PPC64_GOT_TLSLD16:
  case R_PPC64_GOT_TLSLD16_HA:
  case R_PPC64_GOT_TLSLD16_HI:
  case R_PPC64_GOT_TLSLD16_LO:
    return R_TLSLD_GOT;
  case R_PPC64_GOT_TLSLD_PCREL34:
    return R_TLSLD_PC;
  // TLS initial-exec (GOT entry holds the TP offset).
  case R_PPC64_GOT_TPREL16_HA:
  case R_PPC64_GOT_TPREL16_LO_DS:
  case R_PPC64_GOT_TPREL16_DS:
  case R_PPC64_GOT_TPREL16_HI:
    return R_GOT_OFF;
  case R_PPC64_GOT_DTPREL16_HA:
  case R_PPC64_GOT_DTPREL16_LO_DS:
  case R_PPC64_GOT_DTPREL16_DS:
  case R_PPC64_GOT_DTPREL16_HI:
    return R_TLSLD_GOT_OFF;
  // TLS local-exec (offset from the thread pointer).
  case R_PPC64_TPREL16:
  case R_PPC64_TPREL16_HA:
  case R_PPC64_TPREL16_LO:
  case R_PPC64_TPREL16_HI:
  case R_PPC64_TPREL16_DS:
  case R_PPC64_TPREL16_LO_DS:
  case R_PPC64_TPREL16_HIGHER:
  case R_PPC64_TPREL16_HIGHERA:
  case R_PPC64_TPREL16_HIGHEST:
  case R_PPC64_TPREL16_HIGHESTA:
  case R_PPC64_TPREL34:
    return R_TPREL;
  // Offsets within the module's dynamic TLS block.
  case R_PPC64_DTPREL16:
  case R_PPC64_DTPREL16_DS:
  case R_PPC64_DTPREL16_HA:
  case R_PPC64_DTPREL16_HI:
  case R_PPC64_DTPREL16_HIGHER:
  case R_PPC64_DTPREL16_HIGHERA:
  case R_PPC64_DTPREL16_HIGHEST:
  case R_PPC64_DTPREL16_HIGHESTA:
  case R_PPC64_DTPREL16_LO:
  case R_PPC64_DTPREL16_LO_DS:
  case R_PPC64_DTPREL64:
  case R_PPC64_DTPREL34:
    return R_DTPREL;
  // Marker relocations on the `bl __tls_get_addr` / following nop; they act
  // as relaxation hints and do not compute a value themselves.
  case R_PPC64_TLSGD:
    return R_TLSDESC_CALL;
  case R_PPC64_TLSLD:
    return R_TLSLD_HINT;
  case R_PPC64_TLS:
    return R_TLSIE_HINT;
  default:
    Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v
             << ") against symbol " << &s;
    return R_NONE;
  }
}
1110
1111RelType PPC64::getDynRel(RelType type) const {
1112 if (type == R_PPC64_ADDR64 || type == R_PPC64_TOC)
1113 return R_PPC64_ADDR64;
1114 return R_PPC64_NONE;
1115}
1116
1117int64_t PPC64::getImplicitAddend(const uint8_t *buf, RelType type) const {
1118 switch (type) {
1119 case R_PPC64_NONE:
1120 case R_PPC64_GLOB_DAT:
1121 case R_PPC64_JMP_SLOT:
1122 return 0;
1123 case R_PPC64_REL32:
1124 return SignExtend64<32>(x: read32(ctx, p: buf));
1125 case R_PPC64_ADDR64:
1126 case R_PPC64_REL64:
1127 case R_PPC64_RELATIVE:
1128 case R_PPC64_IRELATIVE:
1129 case R_PPC64_DTPMOD64:
1130 case R_PPC64_DTPREL64:
1131 case R_PPC64_TPREL64:
1132 return read64(ctx, p: buf);
1133 default:
1134 InternalErr(ctx, buf) << "cannot read addend for relocation " << type;
1135 return 0;
1136 }
1137}
1138
1139void PPC64::writeGotHeader(uint8_t *buf) const {
1140 write64(ctx, p: buf, v: getPPC64TocBase(ctx));
1141}
1142
// Write the lazy-binding resolver stub (__glink_PLTresolve) at the start of
// .plt. Every PLT entry branches back here (see writePlt); the stub computes
// its own address via bcl/mflr, derives the calling entry's index from r12
// (presumably set to the entry's address by the call -- confirm against the
// ELFv2 glink convention), then loads and jumps to the resolver in .got.plt.
void PPC64::writePltHeader(uint8_t *buf) const {
  // The generic resolver stub goes first.
  write32(ctx, p: buf + 0, v: 0x7c0802a6);  // mflr r0
  write32(ctx, p: buf + 4, v: 0x429f0005);  // bcl 20,4*cr7+so,8 <_glink+0x8>
  write32(ctx, p: buf + 8, v: 0x7d6802a6);  // mflr r11
  write32(ctx, p: buf + 12, v: 0x7c0803a6); // mtlr r0
  write32(ctx, p: buf + 16, v: 0x7d8b6050); // subf r12, r11, r12
  write32(ctx, p: buf + 20, v: 0x380cffcc); // subi r0,r12,52
  write32(ctx, p: buf + 24, v: 0x7800f082); // srdi r0,r0,62,2
  write32(ctx, p: buf + 28, v: 0xe98b002c); // ld r12,44(r11)
  write32(ctx, p: buf + 32, v: 0x7d6c5a14); // add r11,r12,r11
  write32(ctx, p: buf + 36, v: 0xe98b0000); // ld r12,0(r11)
  write32(ctx, p: buf + 40, v: 0xe96b0008); // ld r11,8(r11)
  write32(ctx, p: buf + 44, v: 0x7d8903a6); // mtctr r12
  write32(ctx, p: buf + 48, v: 0x4e800420); // bctr

  // The 'bcl' instruction will set the link register to the address of the
  // following instruction ('mflr r11'). Here we store the offset from that
  // instruction to the first entry in the GotPlt section.
  int64_t gotPltOffset = ctx.in.gotPlt->getVA() - (ctx.in.plt->getVA() + 8);
  write64(ctx, p: buf + 52, v: gotPltOffset);
}
1165
1166void PPC64::writePlt(uint8_t *buf, const Symbol &sym,
1167 uint64_t /*pltEntryAddr*/) const {
1168 int32_t offset = pltHeaderSize + sym.getPltIdx(ctx) * pltEntrySize;
1169 // bl __glink_PLTresolve
1170 write32(ctx, p: buf, v: 0x48000000 | ((-offset) & 0x03fffffc));
1171}
1172
1173void PPC64::writeIplt(uint8_t *buf, const Symbol &sym,
1174 uint64_t /*pltEntryAddr*/) const {
1175 writePPC64LoadAndBranch(ctx, buf,
1176 offset: sym.getGotPltVA(ctx) - getPPC64TocBase(ctx));
1177}
1178
// Rewrite TOC-relative, GOT-indirect and DTP-relative relocation types to the
// corresponding R_PPC64_ADDR16* form, folding the relevant bias into the
// value, so that PPC64::relocate() can share one set of ADDR16 cases.
// Types with no ADDR16 counterpart are returned unchanged.
static std::pair<RelType, uint64_t> toAddr16Rel(RelType type, uint64_t val) {
  // Relocations relative to the toc-base need to be adjusted by the Toc offset.
  uint64_t tocBiasedVal = val - ppc64TocOffset;
  // Relocations relative to dtv[dtpmod] need to be adjusted by the DTP offset.
  uint64_t dtpBiasedVal = val - dynamicThreadPointerOffset;

  switch (type) {
  // TOC biased relocation.
  case R_PPC64_GOT16:
  case R_PPC64_GOT_TLSGD16:
  case R_PPC64_GOT_TLSLD16:
  case R_PPC64_TOC16:
    return {R_PPC64_ADDR16, tocBiasedVal};
  case R_PPC64_GOT16_DS:
  case R_PPC64_TOC16_DS:
  case R_PPC64_GOT_TPREL16_DS:
  case R_PPC64_GOT_DTPREL16_DS:
    return {R_PPC64_ADDR16_DS, tocBiasedVal};
  case R_PPC64_GOT16_HA:
  case R_PPC64_GOT_TLSGD16_HA:
  case R_PPC64_GOT_TLSLD16_HA:
  case R_PPC64_GOT_TPREL16_HA:
  case R_PPC64_GOT_DTPREL16_HA:
  case R_PPC64_TOC16_HA:
    return {R_PPC64_ADDR16_HA, tocBiasedVal};
  case R_PPC64_GOT16_HI:
  case R_PPC64_GOT_TLSGD16_HI:
  case R_PPC64_GOT_TLSLD16_HI:
  case R_PPC64_GOT_TPREL16_HI:
  case R_PPC64_GOT_DTPREL16_HI:
  case R_PPC64_TOC16_HI:
    return {R_PPC64_ADDR16_HI, tocBiasedVal};
  case R_PPC64_GOT16_LO:
  case R_PPC64_GOT_TLSGD16_LO:
  case R_PPC64_GOT_TLSLD16_LO:
  case R_PPC64_TOC16_LO:
    return {R_PPC64_ADDR16_LO, tocBiasedVal};
  case R_PPC64_GOT16_LO_DS:
  case R_PPC64_TOC16_LO_DS:
  case R_PPC64_GOT_TPREL16_LO_DS:
  case R_PPC64_GOT_DTPREL16_LO_DS:
    return {R_PPC64_ADDR16_LO_DS, tocBiasedVal};

  // Dynamic Thread pointer biased relocation types.
  case R_PPC64_DTPREL16:
    return {R_PPC64_ADDR16, dtpBiasedVal};
  case R_PPC64_DTPREL16_DS:
    return {R_PPC64_ADDR16_DS, dtpBiasedVal};
  case R_PPC64_DTPREL16_HA:
    return {R_PPC64_ADDR16_HA, dtpBiasedVal};
  case R_PPC64_DTPREL16_HI:
    return {R_PPC64_ADDR16_HI, dtpBiasedVal};
  case R_PPC64_DTPREL16_HIGHER:
    return {R_PPC64_ADDR16_HIGHER, dtpBiasedVal};
  case R_PPC64_DTPREL16_HIGHERA:
    return {R_PPC64_ADDR16_HIGHERA, dtpBiasedVal};
  case R_PPC64_DTPREL16_HIGHEST:
    return {R_PPC64_ADDR16_HIGHEST, dtpBiasedVal};
  case R_PPC64_DTPREL16_HIGHESTA:
    return {R_PPC64_ADDR16_HIGHESTA, dtpBiasedVal};
  case R_PPC64_DTPREL16_LO:
    return {R_PPC64_ADDR16_LO, dtpBiasedVal};
  case R_PPC64_DTPREL16_LO_DS:
    return {R_PPC64_ADDR16_LO_DS, dtpBiasedVal};
  case R_PPC64_DTPREL64:
    return {R_PPC64_ADDR64, dtpBiasedVal};

  default:
    return {type, val};
  }
}
1250
1251static bool isTocOptType(RelType type) {
1252 switch (type) {
1253 case R_PPC64_GOT16_HA:
1254 case R_PPC64_GOT16_LO_DS:
1255 case R_PPC64_TOC16_HA:
1256 case R_PPC64_TOC16_LO_DS:
1257 case R_PPC64_TOC16_LO:
1258 return true;
1259 default:
1260 return false;
1261 }
1262}
1263
1264// R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for
1265// General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is
1266// found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the
1267// instructions are generated by very old IBM XL compilers. Work around the
1268// issue by disabling GD/LD to IE/LE relaxation.
// Scan `rels` once per input file: if GD/LD GOT relocations appear without
// any R_PPC64_TLSGD/R_PPC64_TLSLD marker, set ppc64DisableTLSRelax on the
// file (see the comment above for the old-XL-compiler rationale).
template <class RelTy>
static void checkPPC64TLSRelax(InputSectionBase &sec, Relocs<RelTy> rels) {
  // Skip if sec is synthetic (sec.file is null) or if sec has been marked.
  if (!sec.file || sec.file->ppc64DisableTLSRelax)
    return;
  bool hasGDLD = false;
  for (const RelTy &rel : rels) {
    RelType type = rel.getType(false);
    switch (type) {
    case R_PPC64_TLSGD:
    case R_PPC64_TLSLD:
      return; // Found a marker
    case R_PPC64_GOT_TLSGD16:
    case R_PPC64_GOT_TLSGD16_HA:
    case R_PPC64_GOT_TLSGD16_HI:
    case R_PPC64_GOT_TLSGD16_LO:
    case R_PPC64_GOT_TLSLD16:
    case R_PPC64_GOT_TLSLD16_HA:
    case R_PPC64_GOT_TLSLD16_HI:
    case R_PPC64_GOT_TLSLD16_LO:
      hasGDLD = true;
      break;
    }
  }
  // GD/LD GOT relocations were seen but no marker: assume old-style codegen
  // and disable relaxation for the whole file.
  if (hasGDLD) {
    sec.file->ppc64DisableTLSRelax = true;
    Warn(ctx&: sec.file->ctx)
        << sec.file
        << ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations "
           "without "
           "R_PPC64_TLSGD/R_PPC64_TLSLD relocations";
  }
}
1302
// Scan one input section's relocations: classify each one, record PPC64
// specific bookkeeping (small-code-model TOC use, non-relaxable TOC entries,
// NOTOC TLS marker offsets), and hand off to the generic RelocScan.
template <class ELFT, class RelTy>
void PPC64::scanSectionImpl(InputSectionBase &sec, Relocs<RelTy> rels) {
  RelocScan rs(ctx, &sec);
  sec.relocations.reserve(N: rels.size());
  checkPPC64TLSRelax<RelTy>(sec, rels);
  // Uses an explicit iterator so TLS handling below can consume multiple
  // relocations per step.
  for (auto it = rels.begin(); it != rels.end(); ++it) {
    const RelTy &rel = *it;
    uint64_t offset = rel.r_offset;
    uint32_t symIdx = rel.getSymbol(false);
    Symbol &sym = sec.getFile<ELFT>()->getSymbol(symIdx);
    RelType type = rel.getType(false);
    RelExpr expr = getRelExpr(type, s: sym, loc: sec.content().data() + offset);
    if (expr == R_NONE)
      continue;
    if (sym.isUndefined() && symIdx != 0 &&
        rs.maybeReportUndefined(sym&: cast<Undefined>(Val&: sym), offset))
      continue;

    auto addend = getAddend<ELFT>(rel);
    if (ctx.arg.isPic && type == R_PPC64_TOC)
      addend += getPPC64TocBase(ctx);

    // We can separate the small code model relocations into 2 categories:
    //   1) Those that access the compiler generated .toc sections.
    //   2) Those that access the linker allocated got entries.
    // lld allocates got entries to symbols on demand. Since we don't try to
    // sort the got entries in any way, we don't have to track which objects
    // have got-based small code model relocs. The .toc sections get placed
    // after the end of the linker allocated .got section and we do sort those
    // so sections addressed with small code model relocations come first.
    if (type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS)
      sec.file->ppc64SmallCodeModelTocRelocs = true;

    // Record the TOC entry (.toc + addend) as not relaxable. See the comment in
    // PPC64::relocateAlloc().
    if (type == R_PPC64_TOC16_LO && sym.isSection() && isa<Defined>(Val: sym) &&
        cast<Defined>(Val&: sym).section->name == ".toc")
      ctx.ppc64noTocRelax.insert({&sym, addend});

    if ((type == R_PPC64_TLSGD && expr == R_TLSDESC_CALL) ||
        (type == R_PPC64_TLSLD && expr == R_TLSLD_HINT)) {
      // The marker must be followed by the relocation on the
      // `bl __tls_get_addr` itself.
      auto it1 = it;
      ++it1;
      if (it1 == rels.end()) {
        auto diag = Err(ctx);
        diag << "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last "
                "relocation";
        printLocation(s&: diag, sec, sym, off: offset);
        continue;
      }

      // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC
      // case, so we can discern it later from the toc-case.
      if (it1->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC)
        ++offset;
    }

    if (oneof<R_GOTREL, RE_PPC64_TOCBASE, RE_PPC64_RELAX_TOC>(expr))
      ctx.in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);

    // TLS relaxation may consume more than one relocation; skip what it ate.
    if (sym.isTls()) {
      if (unsigned processed =
              rs.handleTlsRelocation(expr, type, offset, sym, addend)) {
        it += processed - 1;
        continue;
      }
    }
    rs.process(expr, type, offset, sym, addend);
  }
}
1373
1374void PPC64::scanSection(InputSectionBase &sec) {
1375 if (ctx.arg.isLE)
1376 elf::scanSection1<PPC64, ELF64LE>(target&: *this, sec);
1377 else
1378 elf::scanSection1<PPC64, ELF64BE>(target&: *this, sec);
1379}
1380
// Apply `rel` at `loc`, where `val` is the relocation value computed by the
// caller. TOC-/GOT-/DTP-relative types are first mapped to the matching
// ADDR16 form (with the bias folded into val) by toAddr16Rel() so the switch
// below only has to handle the ADDR16 flavours once.
void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  RelType type = rel.type;
  // Capture TOC-optimization eligibility before `type` is rewritten below.
  bool shouldTocOptimize = isTocOptType(type);
  // For dynamic thread pointer relative, toc-relative, and got-indirect
  // relocations, proceed in terms of the corresponding ADDR16 relocation type.
  std::tie(args&: type, args&: val) = toAddr16Rel(type, val);

  switch (type) {
  case R_PPC64_ADDR14: {
    checkAlignment(ctx, loc, v: val, n: 4, rel);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t aalk = loc[3];
    write16(ctx, p: loc + 2, v: (aalk & 3) | (val & 0xfffc));
    break;
  }
  case R_PPC64_ADDR16:
    checkIntUInt(ctx, loc, v: val, n: 16, rel);
    write16(ctx, p: loc, v: val);
    break;
  case R_PPC64_ADDR32:
    checkIntUInt(ctx, loc, v: val, n: 32, rel);
    write32(ctx, p: loc, v: val);
    break;
  case R_PPC64_ADDR16_DS:
  case R_PPC64_TPREL16_DS: {
    checkInt(ctx, loc, v: val, n: 16, rel);
    // DQ-form instructions use bits 28-31 as part of the instruction encoding
    // DS-form instructions only use bits 30-31.
    uint16_t mask = isDQFormInstruction(encoding: readFromHalf16(ctx, loc)) ? 0xf : 0x3;
    checkAlignment(ctx, loc, v: lo(v: val), n: mask + 1, rel);
    write16(ctx, p: loc, v: (read16(ctx, p: loc) & mask) | lo(v: val));
  } break;
  case R_PPC64_ADDR16_HA:
  case R_PPC64_REL16_HA:
  case R_PPC64_TPREL16_HA:
    // A zero high-adjusted part makes the addis redundant; nop it out so the
    // paired _LO relocation can address relative to r2 directly.
    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(v: val) == 0)
      writeFromHalf16(ctx, loc, insn: NOP);
    else {
      checkInt(ctx, loc, v: val + 0x8000, n: 32, rel);
      write16(ctx, p: loc, v: ha(v: val));
    }
    break;
  case R_PPC64_ADDR16_HI:
  case R_PPC64_REL16_HI:
  case R_PPC64_TPREL16_HI:
    checkInt(ctx, loc, v: val, n: 32, rel);
    write16(ctx, p: loc, v: hi(v: val));
    break;
  case R_PPC64_ADDR16_HIGH:
    write16(ctx, p: loc, v: hi(v: val));
    break;
  case R_PPC64_ADDR16_HIGHER:
  case R_PPC64_TPREL16_HIGHER:
    write16(ctx, p: loc, v: higher(v: val));
    break;
  case R_PPC64_ADDR16_HIGHERA:
  case R_PPC64_TPREL16_HIGHERA:
    write16(ctx, p: loc, v: highera(v: val));
    break;
  case R_PPC64_ADDR16_HIGHEST:
  case R_PPC64_TPREL16_HIGHEST:
    write16(ctx, p: loc, v: highest(v: val));
    break;
  case R_PPC64_ADDR16_HIGHESTA:
  case R_PPC64_TPREL16_HIGHESTA:
    write16(ctx, p: loc, v: highesta(v: val));
    break;
  case R_PPC64_ADDR16_LO:
  case R_PPC64_REL16_LO:
  case R_PPC64_TPREL16_LO:
    // When the high-adjusted part of a toc relocation evaluates to 0, it is
    // changed into a nop. The lo part then needs to be updated to use the
    // toc-pointer register r2, as the base register.
    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(v: val) == 0) {
      uint32_t insn = readFromHalf16(ctx, loc);
      if (isInstructionUpdateForm(encoding: insn))
        Err(ctx) << getErrorLoc(ctx, loc)
                 << "can't toc-optimize an update instruction: 0x"
                 << utohexstr(X: insn, LowerCase: true);
      // Set RA = r2 (0x00020000) and the low displacement.
      writeFromHalf16(ctx, loc, insn: (insn & 0xffe00000) | 0x00020000 | lo(v: val));
    } else {
      write16(ctx, p: loc, v: lo(v: val));
    }
    break;
  case R_PPC64_ADDR16_LO_DS:
  case R_PPC64_TPREL16_LO_DS: {
    // DQ-form instructions use bits 28-31 as part of the instruction encoding
    // DS-form instructions only use bits 30-31.
    uint32_t insn = readFromHalf16(ctx, loc);
    uint16_t mask = isDQFormInstruction(encoding: insn) ? 0xf : 0x3;
    checkAlignment(ctx, loc, v: lo(v: val), n: mask + 1, rel);
    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(v: val) == 0) {
      // When the high-adjusted part of a toc relocation evaluates to 0, it is
      // changed into a nop. The lo part then needs to be updated to use the toc
      // pointer register r2, as the base register.
      if (isInstructionUpdateForm(encoding: insn))
        Err(ctx) << getErrorLoc(ctx, loc)
                 << "can't toc-optimize an update instruction: 0x"
                 << utohexstr(X: insn, LowerCase: true);
      insn &= 0xffe00000 | mask;
      writeFromHalf16(ctx, loc, insn: insn | 0x00020000 | lo(v: val));
    } else {
      write16(ctx, p: loc, v: (read16(ctx, p: loc) & mask) | lo(v: val));
    }
  } break;
  case R_PPC64_TPREL16:
    checkInt(ctx, loc, v: val, n: 16, rel);
    write16(ctx, p: loc, v: val);
    break;
  case R_PPC64_REL32:
    checkInt(ctx, loc, v: val, n: 32, rel);
    write32(ctx, p: loc, v: val);
    break;
  case R_PPC64_ADDR64:
  case R_PPC64_REL64:
  case R_PPC64_TOC:
    write64(ctx, p: loc, v: val);
    break;
  case R_PPC64_REL14: {
    // 16-bit branch displacement; low 2 bits are AA/LK and stay untouched.
    uint32_t mask = 0x0000FFFC;
    checkInt(ctx, loc, v: val, n: 16, rel);
    checkAlignment(ctx, loc, v: val, n: 4, rel);
    write32(ctx, p: loc, v: (read32(ctx, p: loc) & ~mask) | (val & mask));
    break;
  }
  case R_PPC64_REL24:
  case R_PPC64_REL24_NOTOC: {
    // 26-bit branch displacement; low 2 bits are AA/LK and stay untouched.
    uint32_t mask = 0x03FFFFFC;
    checkInt(ctx, loc, v: val, n: 26, rel);
    checkAlignment(ctx, loc, v: val, n: 4, rel);
    write32(ctx, p: loc, v: (read32(ctx, p: loc) & ~mask) | (val & mask));
    break;
  }
  case R_PPC64_DTPREL64:
    write64(ctx, p: loc, v: val - dynamicThreadPointerOffset);
    break;
  case R_PPC64_DTPREL34:
    // The Dynamic Thread Vector actually points 0x8000 bytes past the start
    // of the TLS block. Therefore, in the case of R_PPC64_DTPREL34 we first
    // need to subtract that value then fallthrough to the general case.
    val -= dynamicThreadPointerOffset;
    [[fallthrough]];
  case R_PPC64_PCREL34:
  case R_PPC64_GOT_PCREL34:
  case R_PPC64_GOT_TLSGD_PCREL34:
  case R_PPC64_GOT_TLSLD_PCREL34:
  case R_PPC64_GOT_TPREL_PCREL34:
  case R_PPC64_TPREL34: {
    // Prefixed instructions split the 34-bit immediate into si0 (high 18
    // bits, in the prefix word) and si1 (low 16 bits, in the suffix word).
    const uint64_t si0Mask = 0x00000003ffff0000;
    const uint64_t si1Mask = 0x000000000000ffff;
    const uint64_t fullMask = 0x0003ffff0000ffff;
    checkInt(ctx, loc, v: val, n: 34, rel);

    uint64_t instr = readPrefixedInst(ctx, loc) & ~fullMask;
    writePrefixedInst(ctx, loc,
                      insn: instr | ((val & si0Mask) << 16) | (val & si1Mask));
    break;
  }
  // If we encounter a PCREL_OPT relocation that we won't optimize.
  case R_PPC64_PCREL_OPT:
    break;
  default:
    llvm_unreachable("unknown relocation");
  }
}
1546
// Decide whether a branch from `branchAddr` to `s` (with addend `a`) needs a
// thunk: a PLT call stub, a TOC save/restore stub, or a plain
// range-extension thunk.
bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                       uint64_t branchAddr, const Symbol &s, int64_t a) const {
  // Only branch relocations can need thunks.
  if (type != R_PPC64_REL14 && type != R_PPC64_REL24 &&
      type != R_PPC64_REL24_NOTOC)
    return false;

  // If a function is in the Plt it needs to be called with a call-stub.
  if (s.isInPlt(ctx))
    return true;

  // This check looks at the st_other bits of the callee with relocation
  // R_PPC64_REL14 or R_PPC64_REL24. If the value is 1, then the callee
  // clobbers the TOC and we need an R2 save stub.
  if (type != R_PPC64_REL24_NOTOC && (s.stOther >> 5) == 1)
    return true;

  // A NOTOC caller has no valid TOC pointer, so a callee with distinct
  // global/local entry points (st_other > 1) needs a stub to set one up.
  if (type == R_PPC64_REL24_NOTOC && (s.stOther >> 5) > 1)
    return true;

  // An undefined weak symbol not in a PLT does not need a thunk. If it is
  // hidden, its binding has been converted to local, so we just check
  // isUndefined() here. A undefined non-weak symbol has been errored.
  if (s.isUndefined())
    return false;

  // If the offset exceeds the range of the branch type then it will need
  // a range-extending thunk.
  // See the comment in getRelocTargetVA() about RE_PPC64_CALL.
  return !inBranchRange(
      type, src: branchAddr,
      dst: s.getVA(ctx, addend: a) + getPPC64GlobalEntryToLocalEntryOffset(ctx, stOther: s.stOther));
}
1579
1580uint32_t PPC64::getThunkSectionSpacing() const {
1581 // See comment in Arch/ARM.cpp for a more detailed explanation of
1582 // getThunkSectionSpacing(). For PPC64 we pick the constant here based on
1583 // R_PPC64_REL24, which is used by unconditional branch instructions.
1584 // 0x2000000 = (1 << 24-1) * 4
1585 return 0x2000000;
1586}
1587
1588bool PPC64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
1589 int64_t offset = dst - src;
1590 if (type == R_PPC64_REL14)
1591 return isInt<16>(x: offset);
1592 if (type == R_PPC64_REL24 || type == R_PPC64_REL24_NOTOC)
1593 return isInt<26>(x: offset);
1594 llvm_unreachable("unsupported relocation type used in branch");
1595}
1596
1597RelExpr PPC64::adjustTlsExpr(RelType type, RelExpr expr) const {
1598 if (type != R_PPC64_GOT_TLSGD_PCREL34 && expr == R_RELAX_TLS_GD_TO_IE)
1599 return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
1600 if (expr == R_RELAX_TLS_LD_TO_LE)
1601 return R_RELAX_TLS_LD_TO_LE_ABS;
1602 return expr;
1603}
1604
1605RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
1606 const uint8_t *loc) const {
1607 if ((type == R_PPC64_GOT_PCREL34 || type == R_PPC64_PCREL_OPT) &&
1608 ctx.arg.pcRelOptimize) {
1609 // It only makes sense to optimize pld since paddi means that the address
1610 // of the object in the GOT is required rather than the object itself.
1611 if ((readPrefixedInst(ctx, loc) & 0xfc000000) == 0xe4000000)
1612 return RE_PPC64_RELAX_GOT_PC;
1613 }
1614 return R_GOT_PC;
1615}
1616
1617// Reference: 3.7.4.1 of the 64-bit ELF V2 abi supplement.
1618// The general dynamic code sequence for a global `x` uses 4 instructions.
1619// Instruction Relocation Symbol
1620// addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x
1621// addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x
1622// bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x
1623// R_PPC64_REL24 __tls_get_addr
1624// nop None None
1625//
1626// Relaxing to initial-exec entails:
1627// 1) Convert the addis/addi pair that builds the address of the tls_index
1628// struct for 'x' to an addis/ld pair that loads an offset from a got-entry.
1629// 2) Convert the call to __tls_get_addr to a nop.
1630// 3) Convert the nop following the call to an add of the loaded offset to the
1631// thread pointer.
1632// Since the nop must directly follow the call, the R_PPC64_TLSGD relocation is
1633// used as the relaxation hint for both steps 2 and 3.
void PPC64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                           uint64_t val) const {
  switch (rel.type) {
  case R_PPC64_GOT_TLSGD16_HA:
    // This is relaxed from addis rT, r2, sym@got@tlsgd@ha to
    // addis rT, r2, sym@got@tprel@ha.
    // Only the relocation changes; the instruction bytes are already addis.
    relocateNoSym(loc, type: R_PPC64_GOT_TPREL16_HA, val);
    return;
  case R_PPC64_GOT_TLSGD16:
  case R_PPC64_GOT_TLSGD16_LO: {
    // Relax from addi r3, rA, sym@got@tlsgd@l to
    // ld r3, sym@got@tprel@l(rA)
    // Preserve RA (bits 11-15) from the original addi.
    uint32_t ra = (readFromHalf16(ctx, loc) & (0x1f << 16));
    writeFromHalf16(ctx, loc, insn: 0xe8600000 | ra);
    relocateNoSym(loc, type: R_PPC64_GOT_TPREL16_LO_DS, val);
    return;
  }
  case R_PPC64_GOT_TLSGD_PCREL34: {
    // Relax from paddi r3, 0, sym@got@tlsgd@pcrel, 1 to
    // pld r3, sym@got@tprel@pcrel
    writePrefixedInst(ctx, loc, insn: 0x04100000e4600000);
    relocateNoSym(loc, type: R_PPC64_GOT_TPREL_PCREL34, val);
    return;
  }
  case R_PPC64_TLSGD: {
    // PC Relative Relaxation:
    // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
    //            nop
    // TOC Relaxation:
    // Relax from bl __tls_get_addr(x@tlsgd)
    //            nop
    // to
    //            nop
    //            add r3, r3, r13
    // An offset of loc % 4 == 1 marks the NOTOC (PC-relative) form; see the
    // ++offset adjustment in scanSectionImpl().
    const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
    if (locAsInt % 4 == 0) {
      write32(ctx, p: loc, v: NOP);            // bl __tls_get_addr(sym@tlsgd) --> nop
      write32(ctx, p: loc + 4, v: 0x7c636a14); // nop --> add r3, r3, r13
    } else if (locAsInt % 4 == 1) {
      // bl __tls_get_addr(sym@tlsgd) --> add r3, r3, r13
      write32(ctx, p: loc - 1, v: 0x7c636a14);
    } else {
      Err(ctx) << "R_PPC64_TLSGD has unexpected byte alignment";
    }
    return;
  }
  default:
    llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
  }
}
1684
// Apply all relocations of an allocated section, dispatching on the RelExpr
// so PPC64-specific relaxations (GOT PC-relative, TOC indirection, TLS
// GD/LD/IE sequences, TOC-save nop patching) run before the plain
// relocate() path.
void PPC64::relocateAlloc(InputSection &sec, uint8_t *buf) const {
  uint64_t secAddr = sec.getOutputSection()->addr + sec.outSecOff;
  // Offset of the most recently relaxed R_PPC64_GOT_PCREL34; -1 = none yet.
  uint64_t lastPPCRelaxedRelocOff = -1;
  for (const Relocation &rel : sec.relocs()) {
    uint8_t *loc = buf + rel.offset;
    const uint64_t val = sec.getRelocTargetVA(ctx, r: rel, p: secAddr + rel.offset);
    switch (rel.expr) {
    case RE_PPC64_RELAX_GOT_PC: {
      // The R_PPC64_PCREL_OPT relocation must appear immediately after
      // R_PPC64_GOT_PCREL34 in the relocations table at the same offset.
      // We can only relax R_PPC64_PCREL_OPT if we have also relaxed
      // the associated R_PPC64_GOT_PCREL34 since only the latter has an
      // associated symbol. So save the offset when relaxing R_PPC64_GOT_PCREL34
      // and only relax the other if the saved offset matches.
      if (rel.type == R_PPC64_GOT_PCREL34)
        lastPPCRelaxedRelocOff = rel.offset;
      if (rel.type == R_PPC64_PCREL_OPT && rel.offset != lastPPCRelaxedRelocOff)
        break;
      relaxGot(loc, rel, val);
      break;
    }
    case RE_PPC64_RELAX_TOC:
      // rel.sym refers to the STT_SECTION symbol associated to the .toc input
      // section. If an R_PPC64_TOC16_LO (.toc + addend) references the TOC
      // entry, there may be R_PPC64_TOC16_HA not paired with
      // R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
      // opportunities but is safe.
      if (ctx.ppc64noTocRelax.contains(V: {rel.sym, rel.addend}) ||
          !tryRelaxPPC64TocIndirection(ctx, rel, bufLoc: loc))
        relocate(loc, rel, val);
      break;
    case RE_PPC64_CALL:
      // If this is a call to __tls_get_addr, it may be part of a TLS
      // sequence that has been relaxed and turned into a nop. In this
      // case, we don't want to handle it as a call.
      if (read32(ctx, p: loc) == 0x60000000) // nop
        break;

      // Patch a nop (0x60000000) to a ld.
      if (rel.sym->needsTocRestore()) {
        // gcc/gfortran 5.4, 6.3 and earlier versions do not add nop for
        // recursive calls even if the function is preemptible. This is not
        // wrong in the common case where the function is not preempted at
        // runtime. Just ignore.
        if ((rel.offset + 8 > sec.content().size() ||
             read32(ctx, p: loc + 4) != 0x60000000) &&
            rel.sym->file != sec.file) {
          // Use substr(6) to remove the "__plt_" prefix.
          Err(ctx) << getErrorLoc(ctx, loc) << "call to "
                   << toStr(ctx, *rel.sym).substr(pos: 6)
                   << " lacks nop, can't restore toc";
          break;
        }
        write32(ctx, p: loc + 4, v: 0xe8410018); // ld %r2, 24(%r1)
      }
      relocate(loc, rel, val);
      break;
    case R_RELAX_TLS_GD_TO_IE:
    case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
      relaxTlsGdToIe(loc, rel, val);
      break;
    case R_RELAX_TLS_GD_TO_LE:
      relaxTlsGdToLe(loc, rel, val);
      break;
    case R_RELAX_TLS_LD_TO_LE_ABS:
      relaxTlsLdToLe(loc, rel, val);
      break;
    case R_RELAX_TLS_IE_TO_LE:
      relaxTlsIeToLe(loc, rel, val);
      break;
    default:
      relocate(loc, rel, val);
      break;
    }
  }
}
1761
1762// The prologue for a split-stack function is expected to look roughly
1763// like this:
1764// .Lglobal_entry_point:
1765// # TOC pointer initialization.
1766// ...
1767// .Llocal_entry_point:
// # load the __private_ss member of the thread's tcbhead.
1769// ld r0,-0x7000-64(r13)
// # subtract the function's stack size from the stack pointer.
1771// addis r12, r1, ha(-stack-frame size)
1772// addi r12, r12, l(-stack-frame size)
1773// # compare needed to actual and branch to allocate_more_stack if more
1774// # space is needed, otherwise fallthrough to 'normal' function body.
1775// cmpld cr7,r12,r0
1776// blt- cr7, .Lallocate_more_stack
1777//
1778// -) The allocate_more_stack block might be placed after the split-stack
1779// prologue and the `blt-` replaced with a `bge+ .Lnormal_func_body`
1780// instead.
// -) If either the addis or addi is not needed due to the stack size being
//    smaller than 32K or a multiple of 64K they will be replaced with a nop,
//    but there will always be 2 instructions the linker can overwrite for the
//    adjusted stack size.
1785//
// The linker's job here is to increase the stack size used in the addis/addi
1787// pair by split-stack-size-adjust.
1788// addis r12, r1, ha(-stack-frame size - split-stack-adjust-size)
1789// addi r12, r12, l(-stack-frame size - split-stack-adjust-size)
1790bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
1791 uint8_t stOther) const {
1792 // If the caller has a global entry point adjust the buffer past it. The start
1793 // of the split-stack prologue will be at the local entry point.
1794 loc += getPPC64GlobalEntryToLocalEntryOffset(ctx, stOther);
1795
1796 // At the very least we expect to see a load of some split-stack data from the
1797 // tcb, and 2 instructions that calculate the ending stack address this
1798 // function will require. If there is not enough room for at least 3
1799 // instructions it can't be a split-stack prologue.
1800 if (loc + 12 >= end)
1801 return false;
1802
1803 // First instruction must be `ld r0, -0x7000-64(r13)`
1804 if (read32(ctx, p: loc) != 0xe80d8fc0)
1805 return false;
1806
1807 int16_t hiImm = 0;
1808 int16_t loImm = 0;
1809 // First instruction can be either an addis if the frame size is larger then
1810 // 32K, or an addi if the size is less then 32K.
1811 int32_t firstInstr = read32(ctx, p: loc + 4);
1812 if (getPrimaryOpCode(encoding: firstInstr) == 15) {
1813 hiImm = firstInstr & 0xFFFF;
1814 } else if (getPrimaryOpCode(encoding: firstInstr) == 14) {
1815 loImm = firstInstr & 0xFFFF;
1816 } else {
1817 return false;
1818 }
1819
1820 // Second instruction is either an addi or a nop. If the first instruction was
1821 // an addi then LoImm is set and the second instruction must be a nop.
1822 uint32_t secondInstr = read32(ctx, p: loc + 8);
1823 if (!loImm && getPrimaryOpCode(encoding: secondInstr) == 14) {
1824 loImm = secondInstr & 0xFFFF;
1825 } else if (secondInstr != NOP) {
1826 return false;
1827 }
1828
1829 // The register operands of the first instruction should be the stack-pointer
1830 // (r1) as the input (RA) and r12 as the output (RT). If the second
1831 // instruction is not a nop, then it should use r12 as both input and output.
1832 auto checkRegOperands = [](uint32_t instr, uint8_t expectedRT,
1833 uint8_t expectedRA) {
1834 return ((instr & 0x3E00000) >> 21 == expectedRT) &&
1835 ((instr & 0x1F0000) >> 16 == expectedRA);
1836 };
1837 if (!checkRegOperands(firstInstr, 12, 1))
1838 return false;
1839 if (secondInstr != NOP && !checkRegOperands(secondInstr, 12, 12))
1840 return false;
1841
1842 int32_t stackFrameSize = (hiImm * 65536) + loImm;
1843 // Check that the adjusted size doesn't overflow what we can represent with 2
1844 // instructions.
1845 if (stackFrameSize < ctx.arg.splitStackAdjustSize + INT32_MIN) {
1846 Err(ctx) << getErrorLoc(ctx, loc)
1847 << "split-stack prologue adjustment overflows";
1848 return false;
1849 }
1850
1851 int32_t adjustedStackFrameSize =
1852 stackFrameSize - ctx.arg.splitStackAdjustSize;
1853
1854 loImm = adjustedStackFrameSize & 0xFFFF;
1855 hiImm = (adjustedStackFrameSize + 0x8000) >> 16;
1856 if (hiImm) {
1857 write32(ctx, p: loc + 4, v: 0x3d810000 | (uint16_t)hiImm);
1858 // If the low immediate is zero the second instruction will be a nop.
1859 secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : NOP;
1860 write32(ctx, p: loc + 8, v: secondInstr);
1861 } else {
1862 // addi r12, r1, imm
1863 write32(ctx, p: loc + 4, v: (0x39810000) | (uint16_t)loImm);
1864 write32(ctx, p: loc + 8, v: NOP);
1865 }
1866
1867 return true;
1868}
1869
1870void elf::setPPC64TargetInfo(Ctx &ctx) { ctx.target.reset(p: new PPC64(ctx)); }
1871