1//===- PPC64.cpp ----------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "InputFiles.h"
10#include "OutputSections.h"
11#include "RelocScan.h"
12#include "SymbolTable.h"
13#include "Symbols.h"
14#include "SyntheticSections.h"
15#include "Target.h"
16#include "Thunks.h"
17
18using namespace llvm;
19using namespace llvm::object;
20using namespace llvm::support::endian;
21using namespace llvm::ELF;
22using namespace lld;
23using namespace lld::elf;
24
// The TOC base is the start of the TOC area (.got) plus 0x8000, so a signed
// 16-bit displacement can reach the full 64 KiB around it.
constexpr uint64_t ppc64TocOffset = 0x8000;
// Bias applied to offsets relative to the dynamic thread pointer (DTP) as
// mandated by the 64-bit PowerPC ELF ABI for TLS.
constexpr uint64_t dynamicThreadPointerOffset = 0x8000;
27
28namespace {
29// The instruction encoding of bits 21-30 from the ISA for the Xform and Dform
30// instructions that can be used as part of the initial exec TLS sequence.
// X-form (register + register indexed) secondary opcodes, i.e. the value of
// instruction bits 21-30.
enum XFormOpcd {
  LBZX = 87,
  LHZX = 279,
  LWZX = 23,
  LDX = 21,
  STBX = 215,
  STHX = 407,
  STWX = 151,
  STDX = 149,
  LHAX = 343,
  LWAX = 341,
  LFSX = 535,
  LFDX = 599,
  STFSX = 663,
  STFDX = 727,
  ADD = 266,
};
48
// D-form (register + 16-bit displacement) primary opcodes (instruction
// bits 0-5).
enum DFormOpcd {
  LBZ = 34,
  LBZU = 35,
  LHZ = 40,
  LHZU = 41,
  LHAU = 43,
  LWZ = 32,
  LWZU = 33,
  LFSU = 49,
  LFDU = 51,
  STB = 38,
  STBU = 39,
  STH = 44,
  STHU = 45,
  STW = 36,
  STWU = 37,
  STFSU = 53,
  STFDU = 55,
  LHA = 42,
  LFS = 48,
  LFD = 50,
  STFS = 52,
  STFD = 54,
  ADDI = 14
};
74
// DS-form primary opcodes. LD and LWA deliberately share primary opcode 58;
// they are distinguished by the low two 'XO' bits of the full encoding.
enum DSFormOpcd {
  LD = 58,
  LWA = 58,
  STD = 62
};
80
// "ori 0, 0, 0" — the canonical PowerPC no-op encoding.
constexpr uint32_t NOP = 0x60000000;
82
// Full 32-bit encodings (primary opcode plus any distinguishing XO bits) of
// the legacy load/store instructions that have prefixed, PC-relative
// counterparts (see PPCInsns.def).
enum class PPCLegacyInsn : uint32_t {
  NOINSN = 0,
  // Loads.
  LBZ = 0x88000000,
  LHZ = 0xa0000000,
  LWZ = 0x80000000,
  LHA = 0xa8000000,
  LWA = 0xe8000002,
  LD = 0xe8000000,
  LFS = 0xC0000000,
  LXSSP = 0xe4000003,
  LFD = 0xc8000000,
  LXSD = 0xe4000002,
  LXV = 0xf4000001,
  LXVP = 0x18000000,

  // Stores.
  STB = 0x98000000,
  STH = 0xb0000000,
  STW = 0x90000000,
  STD = 0xf8000000,
  STFS = 0xd0000000,
  STXSSP = 0xf4000003,
  STFD = 0xd8000000,
  STXSD = 0xf4000002,
  STXV = 0xf4000005,
  STXVP = 0x18000001
};
// 64-bit encodings of the prefixed (Power10) PC-relative instructions. The
// high word is the prefix (MLS or 8LS form); for MLS-form instructions the
// suffix reuses the legacy primary opcode, so only the prefix is stored here.
enum class PPCPrefixedInsn : uint64_t {
  NOINSN = 0,
  PREFIX_MLS = 0x0610000000000000,
  PREFIX_8LS = 0x0410000000000000,

  // Loads.
  PLBZ = PREFIX_MLS,
  PLHZ = PREFIX_MLS,
  PLWZ = PREFIX_MLS,
  PLHA = PREFIX_MLS,
  PLWA = PREFIX_8LS | 0xa4000000,
  PLD = PREFIX_8LS | 0xe4000000,
  PLFS = PREFIX_MLS,
  PLXSSP = PREFIX_8LS | 0xac000000,
  PLFD = PREFIX_MLS,
  PLXSD = PREFIX_8LS | 0xa8000000,
  PLXV = PREFIX_8LS | 0xc8000000,
  PLXVP = PREFIX_8LS | 0xe8000000,

  // Stores.
  PSTB = PREFIX_MLS,
  PSTH = PREFIX_MLS,
  PSTW = PREFIX_MLS,
  PSTD = PREFIX_8LS | 0xf4000000,
  PSTFS = PREFIX_MLS,
  PSTXSSP = PREFIX_8LS | 0xbc000000,
  PSTFD = PREFIX_MLS,
  PSTXSD = PREFIX_8LS | 0xb8000000,
  PSTXV = PREFIX_8LS | 0xd8000000,
  PSTXVP = PREFIX_8LS | 0xf8000000
};
142
// Returns true if `encoding` matches one of the PPCLegacyInsn enumerators,
// i.e. a legacy instruction listed in PPCInsns.def that has a prefixed
// PC-relative form.
static bool checkPPCLegacyInsn(uint32_t encoding) {
  PPCLegacyInsn insn = static_cast<PPCLegacyInsn>(encoding);
  if (insn == PPCLegacyInsn::NOINSN)
    return false;
// Expand one comparison per entry in PPCInsns.def.
#define PCREL_OPT(Legacy, PCRel, InsnMask)                                     \
  if (insn == PPCLegacyInsn::Legacy)                                           \
    return true;
#include "PPCInsns.def"
#undef PCREL_OPT
  return false;
}
154
155// Masks to apply to legacy instructions when converting them to prefixed,
156// pc-relative versions. For the most part, the primary opcode is shared
157// between the legacy instruction and the suffix of its prefixed version.
158// However, there are some instances where that isn't the case (DS-Form and
159// DQ-form instructions).
// Each value is an AND-mask applied to the legacy encoding to select the
// bits carried over into the prefixed (PC-relative) encoding.
enum class LegacyToPrefixMask : uint64_t {
  NOMASK = 0x0,
  OPC_AND_RST = 0xffe00000, // Primary opc (0-5) and R[ST] (6-10).
  ONLY_RST = 0x3e00000,     // [RS]T (6-10).
  ST_STX28_TO5 =
      0x8000000003e00000, // S/T (6-10) - The [S/T]X bit moves from 28 to 5.
};
167
// PPC64 ELFv2 target hooks: relocation processing, PLT/GOT layout, thunk
// policy, and the TLS/TOC relaxations defined by the 64-bit ELF V2 ABI.
class PPC64 final : public TargetInfo {
public:
  PPC64(Ctx &);
  uint32_t calcEFlags() const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void writeIplt(uint8_t *buf, const Symbol &sym,
                 uint64_t pltEntryAddr) const override;
  template <class ELFT, class RelTy>
  void scanSectionImpl(InputSectionBase &, Relocs<RelTy>);
  void scanSection(InputSectionBase &) override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  void writeGotHeader(uint8_t *buf) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  RelExpr adjustGotPcExpr(RelType type, int64_t addend,
                          const uint8_t *loc) const override;
  // Not an override: also invoked directly by tryRelaxPPC64TocIndirection().
  void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relocateAlloc(InputSection &sec, uint8_t *buf) const override;

  bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                        uint8_t stOther) const override;

private:
  // Rewrites of TLS code sequences to cheaper access models.
  void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
};
206} // namespace
207
// Returns the value the TOC pointer (r2) holds at function local entry
// points: the start of the TOC area plus the 0x8000 bias.
uint64_t elf::getPPC64TocBase(Ctx &ctx) {
  // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
  // TOC starts where the first of these sections starts. We always create a
  // .got when we see a relocation that uses it, so for us the start is always
  // the .got.
  uint64_t tocVA = ctx.in.got->getVA();

  // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
  // thus permitting a full 64 Kbytes segment. Note that the glibc startup
  // code (crt1.o) assumes that you can get from the TOC base to the
  // start of the .toc section with only a single (signed) 16-bit relocation.
  return tocVA + ppc64TocOffset;
}
221
222unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(Ctx &ctx, uint8_t stOther) {
223 // The offset is encoded into the 3 most significant bits of the st_other
224 // field, with some special values described in section 3.4.1 of the ABI:
225 // 0 --> Zero offset between the GEP and LEP, and the function does NOT use
226 // the TOC pointer (r2). r2 will hold the same value on returning from
227 // the function as it did on entering the function.
228 // 1 --> Zero offset between the GEP and LEP, and r2 should be treated as a
229 // caller-saved register for all callers.
230 // 2-6 --> The binary logarithm of the offset eg:
231 // 2 --> 2^2 = 4 bytes --> 1 instruction.
232 // 6 --> 2^6 = 64 bytes --> 16 instructions.
233 // 7 --> Reserved.
234 uint8_t gepToLep = (stOther >> 5) & 7;
235 if (gepToLep < 2)
236 return 0;
237
238 // The value encoded in the st_other bits is the
239 // log-base-2(offset).
240 if (gepToLep < 7)
241 return 1 << gepToLep;
242
243 ErrAlways(ctx)
244 << "reserved value of 7 in the 3 most-significant-bits of st_other";
245 return 0;
246}
247
// Writes a 64-bit prefixed instruction. The two 32-bit halves (prefix word
// and suffix word) are swapped on little-endian targets so that the prefix
// word comes first in memory.
void elf::writePrefixedInst(Ctx &ctx, uint8_t *loc, uint64_t insn) {
  insn = ctx.arg.isLE ? insn << 32 | insn >> 32 : insn;
  write64(ctx, p: loc, v: insn);
}
252
// If `name` is referenced but not yet defined, defines it as a hidden
// STT_FUNC at `value`, appends it to `defined`, and returns true. Returns
// false when the symbol is absent or already has a definition.
static bool addOptional(Ctx &ctx, StringRef name, uint64_t value,
                        std::vector<Defined *> &defined) {
  Symbol *sym = ctx.symtab->find(name);
  if (!sym || sym->isDefined())
    return false;
  sym->resolve(ctx, other: Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
                           STV_HIDDEN, STT_FUNC, value,
                           /*size=*/0, /*section=*/nullptr});
  defined.push_back(x: cast<Defined>(Val: sym));
  return true;
}
264
// If from is 14, write ${prefix}14: firstInsn; ${prefix}15:
// firstInsn+0x200008; ...; ${prefix}31: firstInsn+(31-14)*0x200008; $tail
// The labels are defined only if they exist in the symbol table.
static void writeSequence(Ctx &ctx, const char *prefix, int from,
                          uint32_t firstInsn, ArrayRef<uint32_t> tail) {
  std::vector<Defined *> defined;
  char name[16];
  // Index of the first instruction actually referenced by a symbol. Left
  // uninitialized here but guaranteed to be set before use: it is assigned
  // when the first element is added to `defined`, and the only use below is
  // guarded by the defined.empty() early return.
  int first;
  const size_t size = 32 - from + tail.size();
  MutableArrayRef<uint32_t> buf(ctx.bAlloc.Allocate<uint32_t>(Num: size), size);
  uint32_t *ptr = buf.data();
  for (int r = from; r < 32; ++r) {
    format(Fmt: "%s%d", Vals: prefix, Vals: r).snprint(Buffer: name, BufferSize: sizeof(name));
    if (addOptional(ctx, name, value: 4 * (r - from), defined) && defined.size() == 1)
      first = r - from;
    // Each successive register save/restore bumps RT by 1 (bit 21) and the
    // displacement by 8, hence the 0x200008 stride.
    write32(ctx, p: ptr++, v: firstInsn + 0x200008 * (r - from));
  }
  for (uint32_t insn : tail)
    write32(ctx, p: ptr++, v: insn);
  assert(ptr == &*buf.end());

  if (defined.empty())
    return;
  // The full section content has the extent of [begin, end). We drop unused
  // instructions and write [first,end).
  auto *sec = make<InputSection>(
      args&: ctx.internalFile, args: ".text", args: SHT_PROGBITS, args: SHF_ALLOC, /*addralign=*/args: 4,
      /*entsize=*/args: 0,
      args: ArrayRef(reinterpret_cast<uint8_t *>(buf.data() + first),
               4 * (buf.size() - first)));
  ctx.inputSections.push_back(Elt: sec);
  // Rebase the symbols onto the trimmed section.
  for (Defined *sym : defined) {
    sym->section = sec;
    sym->value -= 4 * first;
  }
}
301
302// Implements some save and restore functions as described by ELF V2 ABI to be
303// compatible with GCC. With GCC -Os, when the number of call-saved registers
304// exceeds a certain threshold, GCC generates _savegpr0_* _restgpr0_* calls and
305// expects the linker to define them. See
306// https://sourceware.org/pipermail/binutils/2002-February/017444.html and
307// https://sourceware.org/pipermail/binutils/2004-August/036765.html . This is
308// weird because libgcc.a would be the natural place. The linker generation
309// approach has the advantage that the linker can generate multiple copies to
310// avoid long branch thunks. However, we don't consider the advantage
311// significant enough to complicate our trunk implementation, so we take the
312// simple approach and synthesize .text sections providing the implementation.
void elf::addPPC64SaveRestore(Ctx &ctx) {
  // Encodings of "blr" and "mtlr 0".
  constexpr uint32_t blr = 0x4e800020, mtlr_0 = 0x7c0803a6;

  // _restgpr0_14: ld 14, -144(1); _restgpr0_15: ld 15, -136(1); ...
  // Tail: ld 0, 16(1); mtlr 0; blr
  writeSequence(ctx, prefix: "_restgpr0_", from: 14, firstInsn: 0xe9c1ff70, tail: {0xe8010010, mtlr_0, blr});
  // _restgpr1_14: ld 14, -144(12); _restgpr1_15: ld 15, -136(12); ...
  // Tail: blr
  writeSequence(ctx, prefix: "_restgpr1_", from: 14, firstInsn: 0xe9ccff70, tail: {blr});
  // _savegpr0_14: std 14, -144(1); _savegpr0_15: std 15, -136(1); ...
  // Tail: std 0, 16(1); blr
  writeSequence(ctx, prefix: "_savegpr0_", from: 14, firstInsn: 0xf9c1ff70, tail: {0xf8010010, blr});
  // _savegpr1_14: std 14, -144(12); _savegpr1_15: std 15, -136(12); ...
  // Tail: blr
  writeSequence(ctx, prefix: "_savegpr1_", from: 14, firstInsn: 0xf9ccff70, tail: {blr});
}
329
330// Find the R_PPC64_ADDR64 in .rela.toc with matching offset.
// Finds the R_PPC64_ADDR64 in .rela.toc whose r_offset equals `offset` and
// returns its target symbol and addend; returns {nullptr, 0} when no such
// relocation exists (or the target is not a Defined).
template <typename ELFT>
static std::pair<Defined *, int64_t>
getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
  // .rela.toc contains exclusively R_PPC64_ADDR64 relocations sorted by
  // r_offset: 0, 8, 16, etc. For a given Offset, Offset / 8 gives us the
  // relocation index in most cases.
  //
  // In rare cases a TOC entry may store a constant that doesn't need an
  // R_PPC64_ADDR64, the corresponding r_offset is therefore missing. Offset / 8
  // points to a relocation with larger r_offset. Do a linear probe then.
  // Constants are extremely uncommon in .toc and the extra number of array
  // accesses can be seen as a small constant.
  ArrayRef<typename ELFT::Rela> relas =
      tocSec->template relsOrRelas<ELFT>().relas;
  if (relas.empty())
    return {};
  uint64_t index = std::min<uint64_t>(offset / 8, relas.size() - 1);
  // Walk backwards from the guessed slot until we hit a match, pass the
  // target offset, or run off the front of the array.
  for (;;) {
    if (relas[index].r_offset == offset) {
      Symbol &sym = tocSec->file->getRelocTargetSym(relas[index]);
      return {dyn_cast<Defined>(Val: &sym), getAddend<ELFT>(relas[index])};
    }
    if (relas[index].r_offset < offset || index == 0)
      break;
    --index;
  }
  return {};
}
359
360// When accessing a symbol defined in another translation unit, compilers
361// reserve a .toc entry, allocate a local label and generate toc-indirect
362// instructions:
363//
364// addis 3, 2, .LC0@toc@ha # R_PPC64_TOC16_HA
365// ld 3, .LC0@toc@l(3) # R_PPC64_TOC16_LO_DS, load the address from a .toc entry
366// ld/lwa 3, 0(3) # load the value from the address
367//
368// .section .toc,"aw",@progbits
369// .LC0: .tc var[TC],var
370//
371// If var is defined, non-preemptable and addressable with a 32-bit signed
372// offset from the toc base, the address of var can be computed by adding an
373// offset to the toc base, saving a load.
374//
375// addis 3,2,var@toc@ha # this may be relaxed to a nop,
376// addi 3,3,var@toc@l # then this becomes addi 3,2,var@toc
377// ld/lwa 3, 0(3) # load the value from the address
378//
379// Returns true if the relaxation is performed.
// See the comment block above: rewrites a TOC-indirect load of a symbol's
// address into a direct TOC-relative address computation when the target is
// non-preemptable and within 32-bit signed reach of the TOC base.
// Returns true if the relaxation is performed.
static bool tryRelaxPPC64TocIndirection(Ctx &ctx, const Relocation &rel,
                                        uint8_t *bufLoc) {
  assert(ctx.arg.tocOptimize);
  if (rel.addend < 0)
    return false;

  // If the symbol is not the .toc section, this isn't a toc-indirection.
  Defined *defSym = dyn_cast<Defined>(Val: rel.sym);
  if (!defSym || !defSym->isSection() || defSym->section->name != ".toc")
    return false;

  // Look through the .toc entry to the symbol it holds the address of.
  Defined *d;
  int64_t addend;
  auto *tocISB = cast<InputSectionBase>(Val: defSym->section);
  std::tie(args&: d, args&: addend) =
      ctx.arg.isLE ? getRelaTocSymAndAddend<ELF64LE>(tocSec: tocISB, offset: rel.addend)
                   : getRelaTocSymAndAddend<ELF64BE>(tocSec: tocISB, offset: rel.addend);

  // Only non-preemptable defined symbols can be relaxed.
  if (!d || d->isPreemptible)
    return false;

  // R_PPC64_ADDR64 should have created a canonical PLT for the non-preemptable
  // ifunc and changed its type to STT_FUNC.
  assert(!d->isGnuIFunc());

  // Two instructions can materialize a 32-bit signed offset from the toc base.
  uint64_t tocRelative = d->getVA(ctx, addend) - getPPC64TocBase(ctx);
  if (!isInt<32>(x: tocRelative))
    return false;

  // Add PPC64TocOffset that will be subtracted by PPC64::relocate().
  static_cast<const PPC64 &>(*ctx.target)
      .relaxGot(loc: bufLoc, rel, val: tocRelative + ppc64TocOffset);
  return true;
}
416
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.
static uint16_t lo(uint64_t v) { return v; }
static uint16_t hi(uint64_t v) { return v >> 16; }
// The "a" (adjusted) variants add 0x8000 before shifting so that the
// sign-extension of a subsequent #lo() contribution is compensated for.
// Note: ha() returns uint16_t like its siblings; the half16 field that gets
// written into an instruction is 16 bits wide, and returning a wider type
// would make comparisons such as "ha(v) == 0" disagree with the value
// actually emitted.
static uint16_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
static uint16_t higher(uint64_t v) { return v >> 32; }
static uint16_t highera(uint64_t v) { return (v + 0x8000) >> 32; }
static uint16_t highest(uint64_t v) { return v >> 48; }
static uint16_t highesta(uint64_t v) { return (v + 0x8000) >> 48; }
428
// Extracts the 'PO' (primary opcode) field, instruction bits 0-5.
static uint8_t getPrimaryOpCode(uint32_t encoding) {
  return static_cast<uint8_t>(encoding >> 26);
}
431
432static bool isDQFormInstruction(uint32_t encoding) {
433 switch (getPrimaryOpCode(encoding)) {
434 default:
435 return false;
436 case 6: // Power10 paired loads/stores (lxvp, stxvp).
437 case 56:
438 // The only instruction with a primary opcode of 56 is `lq`.
439 return true;
440 case 61:
441 // There are both DS and DQ instruction forms with this primary opcode.
442 // Namely `lxv` and `stxv` are the DQ-forms that use it.
443 // The DS 'XO' bits being set to 01 is restricted to DQ form.
444 return (encoding & 3) == 0x1;
445 }
446}
447
448static bool isDSFormInstruction(PPCLegacyInsn insn) {
449 switch (insn) {
450 default:
451 return false;
452 case PPCLegacyInsn::LWA:
453 case PPCLegacyInsn::LD:
454 case PPCLegacyInsn::LXSD:
455 case PPCLegacyInsn::LXSSP:
456 case PPCLegacyInsn::STD:
457 case PPCLegacyInsn::STXSD:
458 case PPCLegacyInsn::STXSSP:
459 return true;
460 }
461}
462
// Maps a raw 32-bit encoding to its PPCLegacyInsn enumerator, or NOINSN if
// the instruction has no prefixed PC-relative form.
static PPCLegacyInsn getPPCLegacyInsn(uint32_t encoding) {
  uint32_t opc = encoding & 0xfc000000;

  // If the primary opcode is shared between multiple instructions, we need to
  // fix it up to match the actual instruction we are after.
  if ((opc == 0xe4000000 || opc == 0xe8000000 || opc == 0xf4000000 ||
       opc == 0xf8000000) &&
      !isDQFormInstruction(encoding))
    opc = encoding & 0xfc000003; // DS-form: keep the two XO bits.
  else if (opc == 0xf4000000)
    opc = encoding & 0xfc000007; // DQ-form lxv/stxv: keep three low bits.
  else if (opc == 0x18000000)
    opc = encoding & 0xfc00000f; // lxvp/stxvp: keep four low bits.

  // If the value is not one of the enumerators in PPCLegacyInsn, we want to
  // return PPCLegacyInsn::NOINSN.
  if (!checkPPCLegacyInsn(encoding: opc))
    return PPCLegacyInsn::NOINSN;
  return static_cast<PPCLegacyInsn>(opc);
}
483
// Returns the prefixed, PC-relative counterpart of a legacy instruction, or
// NOINSN when no counterpart is listed in PPCInsns.def.
static PPCPrefixedInsn getPCRelativeForm(PPCLegacyInsn insn) {
  switch (insn) {
#define PCREL_OPT(Legacy, PCRel, InsnMask)                                     \
  case PPCLegacyInsn::Legacy:                                                  \
    return PPCPrefixedInsn::PCRel
#include "PPCInsns.def"
#undef PCREL_OPT
  }
  return PPCPrefixedInsn::NOINSN;
}
494
// Returns the bit mask used to carry fields of a legacy instruction over
// into its prefixed form (table in PPCInsns.def); NOMASK if unlisted.
static LegacyToPrefixMask getInsnMask(PPCLegacyInsn insn) {
  switch (insn) {
#define PCREL_OPT(Legacy, PCRel, InsnMask)                                     \
  case PPCLegacyInsn::Legacy:                                                  \
    return LegacyToPrefixMask::InsnMask
#include "PPCInsns.def"
#undef PCREL_OPT
  }
  return LegacyToPrefixMask::NOMASK;
}
// Builds the full 64-bit prefixed, PC-relative encoding corresponding to a
// legacy instruction encoding; returns UINT64_C(-1) if there is none.
static uint64_t getPCRelativeForm(uint32_t encoding) {
  PPCLegacyInsn origInsn = getPPCLegacyInsn(encoding);
  PPCPrefixedInsn pcrelInsn = getPCRelativeForm(insn: origInsn);
  if (pcrelInsn == PPCPrefixedInsn::NOINSN)
    return UINT64_C(-1);
  LegacyToPrefixMask origInsnMask = getInsnMask(insn: origInsn);
  // Keep the masked fields of the legacy encoding and OR in the prefix.
  uint64_t pcrelEncoding =
      (uint64_t)pcrelInsn | (encoding & (uint64_t)origInsnMask);

  // If the mask requires moving bit 28 to bit 5, do that now.
  if (origInsnMask == LegacyToPrefixMask::ST_STX28_TO5)
    pcrelEncoding |= (encoding & 0x8) << 23;
  return pcrelEncoding;
}
519
// Returns true if `encoding` is an update-form load/store, i.e. one that
// writes the effective address back into the base register (RA).
static bool isInstructionUpdateForm(uint32_t encoding) {
  switch (getPrimaryOpCode(encoding)) {
  default:
    return false;
  case LBZU:
  case LHAU:
  case LHZU:
  case LWZU:
  case LFSU:
  case LFDU:
  case STBU:
  case STHU:
  case STWU:
  case STFSU:
  case STFDU:
    return true;
    // LWA has the same opcode as LD, and the DS bits is what differentiates
    // between LD/LDU/LWA
  case LD:
  case STD:
    return (encoding & 3) == 1;
  }
}
543
544// Compute the total displacement between the prefixed instruction that gets
545// to the start of the data and the load/store instruction that has the offset
546// into the data structure.
547// For example:
548// paddi 3, 0, 1000, 1
549// lwz 3, 20(3)
550// Should add up to 1020 for total displacement.
// Compute the total displacement between the prefixed instruction that gets
// to the start of the data and the load/store instruction that has the offset
// into the data structure.
// For example:
// paddi 3, 0, 1000, 1
// lwz 3, 20(3)
// Should add up to 1020 for total displacement.
static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
  // The prefixed displacement is 34 bits: 18 bits in the prefix word and 16
  // bits in the suffix word.
  int64_t disp34 = llvm::SignExtend64(
      X: ((prefixedInsn & 0x3ffff00000000) >> 16) | (prefixedInsn & 0xffff), B: 34);
  int32_t disp16 = llvm::SignExtend32(X: accessInsn & 0xffff, B: 16);
  // For DS and DQ form instructions, we need to mask out the XO bits.
  if (isDQFormInstruction(encoding: accessInsn))
    disp16 &= ~0xf;
  else if (isDSFormInstruction(insn: getPPCLegacyInsn(encoding: accessInsn)))
    disp16 &= ~0x3;
  return disp34 + disp16;
}
562
563// There are a number of places when we either want to read or write an
564// instruction when handling a half16 relocation type. On big-endian the buffer
565// pointer is pointing into the middle of the word we want to extract, and on
566// little-endian it is pointing to the start of the word. These 2 helpers are to
567// simplify reading and writing in that context.
// Writes `insn` over the whole word containing a half16 relocation. On BE
// the relocation location is the middle of the word, so back up 2 bytes.
static void writeFromHalf16(Ctx &ctx, uint8_t *loc, uint32_t insn) {
  write32(ctx, p: ctx.arg.isLE ? loc : loc - 2, v: insn);
}
571
// Reads the whole instruction word containing a half16 relocation (see the
// comment above writeFromHalf16 about the BE adjustment).
static uint32_t readFromHalf16(Ctx &ctx, const uint8_t *loc) {
  return read32(ctx, p: ctx.arg.isLE ? loc : loc - 2);
}
575
// Reads a 64-bit prefixed instruction, swapping the two 32-bit halves on LE
// so the prefix word ends up in the high half (inverse of writePrefixedInst).
static uint64_t readPrefixedInst(Ctx &ctx, const uint8_t *loc) {
  uint64_t fullInstr = read64(ctx, p: loc);
  return ctx.arg.isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
}
580
// Sets up the PPC64-specific dynamic relocation types, PLT/GOT geometry and
// layout defaults used by the generic linker code.
PPC64::PPC64(Ctx &ctx) : TargetInfo(ctx) {
  copyRel = R_PPC64_COPY;
  gotRel = R_PPC64_GLOB_DAT;
  pltRel = R_PPC64_JMP_SLOT;
  relativeRel = R_PPC64_RELATIVE;
  iRelativeRel = R_PPC64_IRELATIVE;
  symbolicRel = R_PPC64_ADDR64;
  pltHeaderSize = 60;
  pltEntrySize = 4;
  ipltEntrySize = 16; // PPC64PltCallStub::size
  gotHeaderEntriesNum = 1;
  gotPltHeaderEntriesNum = 2;
  needsThunks = true;

  tlsModuleIndexRel = R_PPC64_DTPMOD64;
  tlsOffsetRel = R_PPC64_DTPREL64;

  tlsGotRel = R_PPC64_TPREL64;

  needsMoreStackNonSplit = false;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  defaultMaxPageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  defaultImageBase = 0x10000000;

  // 0x7fe00008 encodes "trap" (tw 31, 0, 0).
  write32(ctx, p: trapInstr.data(), v: 0x7fe00008);
}
618
// Reads e_flags from an input object's ELF header, dispatching on endianness.
static uint32_t getEFlags(InputFile *file) {
  if (file->ekind == ELF64BEKind)
    return cast<ObjFile<ELF64BE>>(Val: file)->getObj().getHeader().e_flags;
  return cast<ObjFile<ELF64LE>>(Val: file)->getObj().getHeader().e_flags;
}
624
625// This file implements v2 ABI. This function makes sure that all
626// object files have v2 or an unspecified version as an ABI version.
// This file implements v2 ABI. Verifies every object declares ABI v2 (or
// leaves the version unspecified) and always emits 2 in the output e_flags.
uint32_t PPC64::calcEFlags() const {
  for (InputFile *f : ctx.objectFiles) {
    uint32_t flag = getEFlags(file: f);
    if (flag == 1)
      ErrAlways(ctx) << f << ": ABI version 1 is not supported";
    else if (flag > 2)
      ErrAlways(ctx) << f << ": unrecognized e_flags: " << flag;
  }
  return 2;
}
637
// Rewrites GOT/TOC-indirect sequences into direct address computation:
// TOC16_* cases come from tryRelaxPPC64TocIndirection(); the PCREL34 and
// PCREL_OPT cases relax Power10 pld-based GOT loads.
void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  switch (rel.type) {
  case R_PPC64_TOC16_HA:
    // Convert "addis reg, 2, .LC0@toc@h" to "addis reg, 2, var@toc@h" or "nop".
    relocate(loc, rel, val);
    break;
  case R_PPC64_TOC16_LO_DS: {
    // Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or
    // "addi reg, 2, var@toc".
    uint32_t insn = readFromHalf16(ctx, loc);
    if (getPrimaryOpCode(encoding: insn) != LD)
      ErrAlways(ctx)
          << "expected a 'ld' for got-indirect to toc-relative relaxing";
    // Keep bits 6-31 (registers and displacement) and substitute primary
    // opcode 14 (addi, 14 << 26 == 0x38000000).
    writeFromHalf16(ctx, loc, insn: (insn & 0x03ffffff) | 0x38000000);
    relocateNoSym(loc, type: R_PPC64_TOC16_LO, val);
    break;
  }
  case R_PPC64_GOT_PCREL34: {
    // Clear the first 8 bits of the prefix and the first 6 bits of the
    // instruction (the primary opcode).
    uint64_t insn = readPrefixedInst(ctx, loc);
    if ((insn & 0xfc000000) != 0xe4000000)
      ErrAlways(ctx)
          << "expected a 'pld' for got-indirect to pc-relative relaxing";
    insn &= ~0xff000000fc000000;

    // Replace the cleared bits with the values for PADDI (0x600000038000000);
    insn |= 0x600000038000000;
    writePrefixedInst(ctx, loc, insn);
    relocate(loc, rel, val);
    break;
  }
  case R_PPC64_PCREL_OPT: {
    // We can only relax this if the R_PPC64_GOT_PCREL34 at this offset can
    // be relaxed. The eligibility for the relaxation needs to be determined
    // on that relocation since this one does not relocate a symbol.
    uint64_t insn = readPrefixedInst(ctx, loc);
    uint32_t accessInsn = read32(ctx, p: loc + rel.addend);
    uint64_t pcRelInsn = getPCRelativeForm(encoding: accessInsn);

    // This error is not necessary for correctness but is emitted for now
    // to ensure we don't miss these opportunities in real code. It can be
    // removed at a later date.
    if (pcRelInsn == UINT64_C(-1)) {
      Err(ctx)
          << "unrecognized instruction for R_PPC64_PCREL_OPT relaxation: 0x"
          << utohexstr(X: accessInsn, LowerCase: true);
      break;
    }

    int64_t totalDisp = getTotalDisp(prefixedInsn: insn, accessInsn);
    if (!isInt<34>(x: totalDisp))
      break; // Displacement doesn't fit.
    // Convert the PADDI to the prefixed version of accessInsn and convert
    // accessInsn to a nop.
    writePrefixedInst(ctx, loc,
                      insn: pcRelInsn | ((totalDisp & 0x3ffff0000) << 16) |
                          (totalDisp & 0xffff));
    write32(ctx, p: loc + rel.addend, v: NOP); // nop accessInsn.
    break;
  }
  default:
    llvm_unreachable("unexpected relocation type");
  }
}
703
// Rewrites a general-dynamic TLS access sequence into local-exec form (no
// __tls_get_addr call; the offset is computed directly from r13).
void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                           uint64_t val) const {
  // Reference: 3.7.4.2 of the 64-bit ELF V2 abi supplement.
  // The general dynamic code sequence for a global `x` will look like:
  // Instruction                    Relocation                Symbol
  // addis r3, r2, x@got@tlsgd@ha   R_PPC64_GOT_TLSGD16_HA      x
  // addi  r3, r3, x@got@tlsgd@l    R_PPC64_GOT_TLSGD16_LO      x
  // bl __tls_get_addr(x@tlsgd)     R_PPC64_TLSGD               x
  //                                R_PPC64_REL24               __tls_get_addr
  // nop                            None                       None

  // Relaxing to local exec entails converting:
  // addis r3, r2, x@got@tlsgd@ha    into      nop
  // addi  r3, r3, x@got@tlsgd@l     into      addis r3, r13, x@tprel@ha
  // bl __tls_get_addr(x@tlsgd)      into      nop
  // nop                             into      addi r3, r3, x@tprel@l

  switch (rel.type) {
  case R_PPC64_GOT_TLSGD16_HA:
    writeFromHalf16(ctx, loc, insn: NOP);
    break;
  case R_PPC64_GOT_TLSGD16:
  case R_PPC64_GOT_TLSGD16_LO:
    writeFromHalf16(ctx, loc, insn: 0x3c6d0000); // addis r3, r13
    relocateNoSym(loc, type: R_PPC64_TPREL16_HA, val);
    break;
  case R_PPC64_GOT_TLSGD_PCREL34:
    // Relax from paddi r3, 0, x@got@tlsgd@pcrel, 1 to
    //            paddi r3, r13, x@tprel, 0
    writePrefixedInst(ctx, loc, insn: 0x06000000386d0000);
    relocateNoSym(loc, type: R_PPC64_TPREL34, val);
    break;
  case R_PPC64_TLSGD: {
    // PC Relative Relaxation:
    // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
    //            nop
    // TOC Relaxation:
    // Relax from bl __tls_get_addr(x@tlsgd)
    //            nop
    // to
    //            nop
    //            addi r3, r3, x@tprel@l
    const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
    if (locAsInt % 4 == 0) {
      write32(ctx, p: loc, v: NOP);            // nop
      write32(ctx, p: loc + 4, v: 0x38630000); // addi r3, r3
      // Since we are relocating a half16 type relocation and Loc + 4 points to
      // the start of an instruction we need to advance the buffer by an extra
      // 2 bytes on BE.
      relocateNoSym(loc: loc + 4 + (ctx.arg.ekind == ELF64BEKind ? 2 : 0),
                    type: R_PPC64_TPREL16_LO, val);
    } else if (locAsInt % 4 == 1) {
      // The R_PPC64_TLSGD of a PC-relative sequence is biased by 1 byte.
      write32(ctx, p: loc - 1, v: NOP);
    } else {
      Err(ctx) << "R_PPC64_TLSGD has unexpected byte alignment";
    }
    break;
  }
  default:
    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
  }
}
766
// Rewrites a local-dynamic TLS access sequence into local-exec form. The
// 4096 (0x1000) added below is the dynamicThreadPointerOffset bias folded
// into the sequence.
void PPC64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
                           uint64_t val) const {
  // Reference: 3.7.4.3 of the 64-bit ELF V2 abi supplement.
  // The local dynamic code sequence for a global `x` will look like:
  // Instruction                    Relocation                Symbol
  // addis r3, r2, x@got@tlsld@ha   R_PPC64_GOT_TLSLD16_HA      x
  // addi  r3, r3, x@got@tlsld@l    R_PPC64_GOT_TLSLD16_LO      x
  // bl __tls_get_addr(x@tlsgd)     R_PPC64_TLSLD               x
  //                                R_PPC64_REL24               __tls_get_addr
  // nop                            None                       None

  // Relaxing to local exec entails converting:
  // addis r3, r2, x@got@tlsld@ha   into      nop
  // addi  r3, r3, x@got@tlsld@l    into      addis r3, r13, 0
  // bl __tls_get_addr(x@tlsgd)     into      nop
  // nop                            into      addi r3, r3, 4096

  switch (rel.type) {
  case R_PPC64_GOT_TLSLD16_HA:
    writeFromHalf16(ctx, loc, insn: NOP);
    break;
  case R_PPC64_GOT_TLSLD16_LO:
    writeFromHalf16(ctx, loc, insn: 0x3c6d0000); // addis r3, r13, 0
    break;
  case R_PPC64_GOT_TLSLD_PCREL34:
    // Relax from paddi r3, 0, x1@got@tlsld@pcrel, 1 to
    //            paddi r3, r13, 0x1000, 0
    writePrefixedInst(ctx, loc, insn: 0x06000000386d1000);
    break;
  case R_PPC64_TLSLD: {
    // PC Relative Relaxation:
    // Relax from bl __tls_get_addr@notoc(x@tlsld)
    // to
    //            nop
    // TOC Relaxation:
    // Relax from bl __tls_get_addr(x@tlsld)
    //            nop
    // to
    //            nop
    //            addi r3, r3, 4096
    const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
    if (locAsInt % 4 == 0) {
      write32(ctx, p: loc, v: NOP);
      write32(ctx, p: loc + 4, v: 0x38631000); // addi r3, r3, 4096
    } else if (locAsInt % 4 == 1) {
      // PC-relative form: the relocation is biased by 1 byte.
      write32(ctx, p: loc - 1, v: NOP);
    } else {
      Err(ctx) << "R_PPC64_TLSLD has unexpected byte alignment";
    }
    break;
  }
  default:
    llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
  }
}
822
823// Map X-Form instructions to their DS-Form counterparts, if applicable.
824// The full encoding is returned here to distinguish between the different
825// DS-Form instructions.
826unsigned elf::getPPCDSFormOp(unsigned secondaryOp) {
827 switch (secondaryOp) {
828 case LWAX:
829 return (LWA << 26) | 0x2;
830 case LDX:
831 return LD << 26;
832 case STDX:
833 return STD << 26;
834 default:
835 return 0;
836 }
837}
838
839unsigned elf::getPPCDFormOp(unsigned secondaryOp) {
840 switch (secondaryOp) {
841 case LBZX:
842 return LBZ << 26;
843 case LHZX:
844 return LHZ << 26;
845 case LWZX:
846 return LWZ << 26;
847 case STBX:
848 return STB << 26;
849 case STHX:
850 return STH << 26;
851 case STWX:
852 return STW << 26;
853 case LHAX:
854 return LHA << 26;
855 case LFSX:
856 return LFS << 26;
857 case LFDX:
858 return LFD << 26;
859 case STFSX:
860 return STFS << 26;
861 case STFDX:
862 return STFD << 26;
863 case ADD:
864 return ADDI << 26;
865 default:
866 return 0;
867 }
868}
869
// Rewrite an initial-exec TLS access sequence into a local-exec one in place.
// `loc` points at the relocated field, `val` is the computed relocation value.
void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                           uint64_t val) const {
  // The initial exec code sequence for a global `x` will look like:
  // Instruction                    Relocation                 Symbol
  // addis r9, r2, x@got@tprel@ha   R_PPC64_GOT_TPREL16_HA     x
  // ld r9, x@got@tprel@l(r9)       R_PPC64_GOT_TPREL16_LO_DS  x
  // add r9, r9, x@tls              R_PPC64_TLS                x

  // Relaxing to local exec entails converting:
  // addis r9, r2, x@got@tprel@ha   into   nop
  // ld r9, x@got@tprel@l(r9)       into   addis r9, r13, x@tprel@ha
  // add r9, r9, x@tls              into   addi r9, r9, x@tprel@l

  // x@tls R_PPC64_TLS is a relocation which does not compute anything,
  // it is replaced with r13 (thread pointer).

  // The add instruction in the initial exec sequence has multiple variations
  // that need to be handled. If we are building an address it will use an add
  // instruction, if we are accessing memory it will use any of the X-form
  // indexed load or store instructions.

  // On big-endian targets the relocated 16-bit field sits 2 bytes into the
  // 32-bit instruction word; subtract `offset` from `loc` to address the
  // whole instruction.
  unsigned offset = (ctx.arg.ekind == ELF64BEKind) ? 2 : 0;
  switch (rel.type) {
  case R_PPC64_GOT_TPREL16_HA:
    write32(ctx, p: loc - offset, v: NOP);
    break;
  case R_PPC64_GOT_TPREL16_LO_DS:
  case R_PPC64_GOT_TPREL16_DS: {
    // Keep the target register of the original load; only the base register
    // and opcode change (ld rT,...(rA) -> addis rT, r13, ...).
    uint32_t regNo = read32(ctx, p: loc - offset) & 0x03e00000; // bits 6-10
    write32(ctx, p: loc - offset, v: 0x3c0d0000 | regNo);           // addis RegNo, r13
    relocateNoSym(loc, type: R_PPC64_TPREL16_HA, val);
    break;
  }
  case R_PPC64_GOT_TPREL_PCREL34: {
    // Keep the target register of the pld and rewrite it as a paddi off the
    // thread pointer.
    const uint64_t pldRT = readPrefixedInst(ctx, loc) & 0x0000000003e00000;
    // paddi RT(from pld), r13, symbol@tprel, 0
    writePrefixedInst(ctx, loc, insn: 0x06000000380d0000 | pldRT);
    relocateNoSym(loc, type: R_PPC64_TPREL34, val);
    break;
  }
  case R_PPC64_TLS: {
    // scanSectionImpl bumps the offset by one byte for the REL24_NOTOC
    // (PC-relative) form; the misalignment distinguishes the two variants.
    const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
    if (locAsInt % 4 == 0) {
      // Expect an X-form (primary opcode 31) load/store or add.
      uint32_t primaryOp = getPrimaryOpCode(encoding: read32(ctx, p: loc));
      if (primaryOp != 31)
        ErrAlways(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
      uint32_t secondaryOp = (read32(ctx, p: loc) & 0x000007fe) >> 1; // bits 21-30
      // Convert the X-form instruction into its D-form (or DS-form)
      // counterpart and apply the matching @tprel@l relocation.
      uint32_t dFormOp = getPPCDFormOp(secondaryOp);
      uint32_t finalReloc;
      if (dFormOp == 0) { // Expecting a DS-Form instruction.
        dFormOp = getPPCDSFormOp(secondaryOp);
        if (dFormOp == 0)
          ErrAlways(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
        finalReloc = R_PPC64_TPREL16_LO_DS;
      } else
        finalReloc = R_PPC64_TPREL16_LO;
      write32(ctx, p: loc, v: dFormOp | (read32(ctx, p: loc) & 0x03ff0000));
      relocateNoSym(loc: loc + offset, type: finalReloc, val);
    } else if (locAsInt % 4 == 1) {
      // If the offset is not 4 byte aligned then we have a PCRel type reloc.
      // This version of the relocation is offset by one byte from the
      // instruction it references.
      uint32_t tlsInstr = read32(ctx, p: loc - 1);
      uint32_t primaryOp = getPrimaryOpCode(encoding: tlsInstr);
      if (primaryOp != 31)
        Err(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
      uint32_t secondaryOp = (tlsInstr & 0x000007FE) >> 1; // bits 21-30
      // The add is a special case and should be turned into a nop. The paddi
      // that comes before it will already have computed the address of the
      // symbol.
      if (secondaryOp == 266) {
        // Check if the add uses the same result register as the input register.
        uint32_t rt = (tlsInstr & 0x03E00000) >> 21; // bits 6-10
        uint32_t ra = (tlsInstr & 0x001F0000) >> 16; // bits 11-15
        if (ra == rt) {
          write32(ctx, p: loc - 1, v: NOP);
        } else {
          // mr rt, ra
          write32(ctx, p: loc - 1,
                  v: 0x7C000378 | (rt << 16) | (ra << 21) | (ra << 11));
        }
      } else {
        uint32_t dFormOp = getPPCDFormOp(secondaryOp);
        if (dFormOp == 0) { // Expecting a DS-Form instruction.
          dFormOp = getPPCDSFormOp(secondaryOp);
          if (dFormOp == 0)
            Err(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
        }
        write32(ctx, p: loc - 1, v: (dFormOp | (tlsInstr & 0x03ff0000)));
      }
    } else {
      Err(ctx) << "R_PPC64_TLS must be either 4 byte aligned or one byte "
                  "offset from 4 byte aligned";
    }
    break;
  }
  default:
    llvm_unreachable("unknown relocation for IE to LE");
    break;
  }
}
971
972// Only needed to support relocations used by relocateNonAlloc and relocateEh.
973RelExpr PPC64::getRelExpr(RelType type, const Symbol &s,
974 const uint8_t *loc) const {
975 switch (type) {
976 case R_PPC64_NONE:
977 return R_NONE;
978 case R_PPC64_ADDR16:
979 case R_PPC64_ADDR32:
980 case R_PPC64_ADDR64:
981 return R_ABS;
982 case R_PPC64_REL32:
983 case R_PPC64_REL64:
984 return R_PC;
985 case R_PPC64_DTPREL64:
986 return R_DTPREL;
987 default:
988 Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v
989 << ") against symbol " << &s;
990 return R_NONE;
991 }
992}
993
994RelType PPC64::getDynRel(RelType type) const {
995 if (type == R_PPC64_ADDR64 || type == R_PPC64_TOC)
996 return R_PPC64_ADDR64;
997 return R_PPC64_NONE;
998}
999
// Return the addend stored at the relocated location `buf` for relocation
// `type` (0 for types that carry no addend in the relocated field).
int64_t PPC64::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  case R_PPC64_NONE:
  case R_PPC64_GLOB_DAT:
  case R_PPC64_JMP_SLOT:
    return 0;
  case R_PPC64_REL32:
    // 32-bit field, sign-extended to 64 bits.
    return SignExtend64<32>(x: read32(ctx, p: buf));
  case R_PPC64_ADDR64:
  case R_PPC64_REL64:
  case R_PPC64_RELATIVE:
  case R_PPC64_IRELATIVE:
  case R_PPC64_DTPMOD64:
  case R_PPC64_DTPREL64:
  case R_PPC64_TPREL64:
    // Full 64-bit fields.
    return read64(ctx, p: buf);
  default:
    InternalErr(ctx, buf) << "cannot read addend for relocation " << type;
    return 0;
  }
}
1021
1022void PPC64::writeGotHeader(uint8_t *buf) const {
1023 write64(ctx, p: buf, v: getPPC64TocBase(ctx));
1024}
1025
void PPC64::writePltHeader(uint8_t *buf) const {
  // The generic resolver stub goes first. The bcl/mflr pair below lets the
  // stub discover its own runtime address; combined with the .got.plt offset
  // stored at buf+52 it computes the address of the first .got.plt entry,
  // loads two doublewords from it, and branches to the first via CTR.
  write32(ctx, p: buf + 0, v: 0x7c0802a6);  // mflr r0
  write32(ctx, p: buf + 4, v: 0x429f0005);  // bcl 20,4*cr7+so,8 <_glink+0x8>
  write32(ctx, p: buf + 8, v: 0x7d6802a6);  // mflr r11
  write32(ctx, p: buf + 12, v: 0x7c0803a6); // mtlr r0
  write32(ctx, p: buf + 16, v: 0x7d8b6050); // subf r12, r11, r12
  write32(ctx, p: buf + 20, v: 0x380cffcc); // subi r0,r12,52
  write32(ctx, p: buf + 24, v: 0x7800f082); // srdi r0,r0,62,2
  write32(ctx, p: buf + 28, v: 0xe98b002c); // ld r12,44(r11)
  write32(ctx, p: buf + 32, v: 0x7d6c5a14); // add r11,r12,r11
  write32(ctx, p: buf + 36, v: 0xe98b0000); // ld r12,0(r11)
  write32(ctx, p: buf + 40, v: 0xe96b0008); // ld r11,8(r11)
  write32(ctx, p: buf + 44, v: 0x7d8903a6); // mtctr r12
  write32(ctx, p: buf + 48, v: 0x4e800420); // bctr

  // The 'bcl' instruction will set the link register to the address of the
  // following instruction ('mflr r11'). Here we store the offset from that
  // instruction to the first entry in the GotPlt section.
  int64_t gotPltOffset = ctx.in.gotPlt->getVA() - (ctx.in.plt->getVA() + 8);
  write64(ctx, p: buf + 52, v: gotPltOffset);
}
1048
1049void PPC64::writePlt(uint8_t *buf, const Symbol &sym,
1050 uint64_t /*pltEntryAddr*/) const {
1051 int32_t offset = pltHeaderSize + sym.getPltIdx(ctx) * pltEntrySize;
1052 // bl __glink_PLTresolve
1053 write32(ctx, p: buf, v: 0x48000000 | ((-offset) & 0x03fffffc));
1054}
1055
1056void PPC64::writeIplt(uint8_t *buf, const Symbol &sym,
1057 uint64_t /*pltEntryAddr*/) const {
1058 writePPC64LoadAndBranch(ctx, buf,
1059 offset: sym.getGotPltVA(ctx) - getPPC64TocBase(ctx));
1060}
1061
1062static bool isTocOptType(RelType type) {
1063 switch (type) {
1064 case R_PPC64_GOT16_HA:
1065 case R_PPC64_GOT16_LO_DS:
1066 case R_PPC64_TOC16_HA:
1067 case R_PPC64_TOC16_LO_DS:
1068 case R_PPC64_TOC16_LO:
1069 return true;
1070 default:
1071 return false;
1072 }
1073}
1074
1075// Return true if the section has GD/LD GOT relocations without
1076// R_PPC64_TLSGD/R_PPC64_TLSLD markers. Old IBM XL compilers generate GD/LD code
1077// sequences without markers; disable GD/LD to IE/LE relaxation for the section.
1078template <class RelTy>
1079static bool missingTlsGdLdMarker(InputSectionBase &sec, Relocs<RelTy> rels) {
1080 bool hasGotGdLd = false;
1081 for (const RelTy &rel : rels) {
1082 RelType type = rel.getType(false);
1083 switch (type) {
1084 case R_PPC64_TLSGD:
1085 case R_PPC64_TLSLD:
1086 return false; // Found a marker
1087 case R_PPC64_GOT_TLSGD16:
1088 case R_PPC64_GOT_TLSGD16_HA:
1089 case R_PPC64_GOT_TLSGD16_HI:
1090 case R_PPC64_GOT_TLSGD16_LO:
1091 case R_PPC64_GOT_TLSLD16:
1092 case R_PPC64_GOT_TLSLD16_HA:
1093 case R_PPC64_GOT_TLSLD16_HI:
1094 case R_PPC64_GOT_TLSLD16_LO:
1095 hasGotGdLd = true;
1096 break;
1097 }
1098 }
1099 if (hasGotGdLd) {
1100 Warn(ctx&: sec.file->ctx)
1101 << sec.file
1102 << ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations "
1103 "without "
1104 "R_PPC64_TLSGD/R_PPC64_TLSLD relocations";
1105 }
1106 return hasGotGdLd;
1107}
1108
// Scan all relocations of `sec`, classifying each into a RelExpr and recording
// Relocation entries via RelocScan / sec.addReloc. TLS GD/LD sequences are
// marked for relaxation when marker relocations are present and the link is
// not -shared.
template <class ELFT, class RelTy>
void PPC64::scanSectionImpl(InputSectionBase &sec, Relocs<RelTy> rels) {
  RelocScan rs(ctx, &sec);
  sec.relocations.reserve(N: rels.size());
  // GD/LD -> IE/LE relaxation is only attempted when marker relocations are
  // present (see missingTlsGdLdMarker) and we are not building a DSO.
  bool optimizeTlsGdLd =
      !missingTlsGdLdMarker<RelTy>(sec, rels) && !ctx.arg.shared;
  for (auto it = rels.begin(); it != rels.end(); ++it) {
    RelType type = it->getType(false);
    uint32_t symIdx = it->getSymbol(false);
    Symbol &sym = sec.getFile<ELFT>()->getSymbol(symIdx);
    uint64_t offset = it->r_offset;
    if (sym.isUndefined() && symIdx != 0 &&
        rs.maybeReportUndefined(sym&: cast<Undefined>(Val&: sym), offset))
      continue;
    int64_t addend = rs.getAddend<ELFT>(*it, type);
    // In PIC links the TOC base is folded into the addend for R_PPC64_TOC.
    if (ctx.arg.isPic && type == R_PPC64_TOC)
      addend += getPPC64TocBase(ctx);

    RelExpr expr;
    // Relocation types that only need a RelExpr set `expr` and break out of
    // the switch to reach rs.process(). Types that need special handling
    // (fast-path helpers, TLS) call a handler and use `continue`.
    switch (type) {
    case R_PPC64_NONE:
      continue;
    // Absolute relocations:
    case R_PPC64_ADDR16:
    case R_PPC64_ADDR16_DS:
    case R_PPC64_ADDR16_HA:
    case R_PPC64_ADDR16_HI:
    case R_PPC64_ADDR16_HIGH:
    case R_PPC64_ADDR16_HIGHER:
    case R_PPC64_ADDR16_HIGHERA:
    case R_PPC64_ADDR16_HIGHEST:
    case R_PPC64_ADDR16_HIGHESTA:
    case R_PPC64_ADDR16_LO:
    case R_PPC64_ADDR16_LO_DS:
    case R_PPC64_ADDR32:
    case R_PPC64_ADDR64:
      expr = R_ABS;
      break;

    // PC-relative relocations:
    case R_PPC64_REL16_LO:
    case R_PPC64_REL16_HA:
    case R_PPC64_REL16_HI:
    case R_PPC64_REL32:
    case R_PPC64_REL64:
    case R_PPC64_PCREL34:
      rs.processR_PC(type, offset, addend, sym);
      continue;

    // GOT-generating relocations:
    case R_PPC64_GOT16:
    case R_PPC64_GOT16_DS:
    case R_PPC64_GOT16_HA:
    case R_PPC64_GOT16_HI:
    case R_PPC64_GOT16_LO:
    case R_PPC64_GOT16_LO_DS:
      expr = R_GOT_OFF;
      break;
    case R_PPC64_GOT_PCREL34:
      expr = R_GOT_PC;
      break;
    case R_PPC64_PCREL_OPT:
      // May relax the paired pld to a direct PC-relative access (see
      // adjustGotPcExpr); record GOT usage if relaxation is chosen.
      expr = adjustGotPcExpr(type, addend, loc: sec.content().data() + offset);
      if (expr == R_RELAX_GOT_PC)
        ctx.in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);
      rs.processAux(expr, type, offset, sym, addend);
      continue;

    // TOC-relative relocations:
    case R_PPC64_TOC16:
    case R_PPC64_TOC16_DS:
      sec.file->ppc64SmallCodeModelTocRelocs = true;
      expr = R_GOTREL;
      break;
    case R_PPC64_TOC16_HI:
      expr = R_GOTREL;
      break;
    case R_PPC64_TOC16_LO:
      // Record the TOC entry (.toc + addend) as not relaxable.
      if (sym.isSection() && isa<Defined>(Val: sym) &&
          cast<Defined>(Val&: sym).section->name == ".toc")
        ctx.ppc64noTocRelax.insert(V: {&sym, addend});
      expr = R_GOTREL;
      break;
    case R_PPC64_TOC16_HA:
    case R_PPC64_TOC16_LO_DS:
      expr = R_GOTREL;
      break;
    case R_PPC64_TOC:
      expr = RE_PPC64_TOCBASE;
      break;

    // PLT-generating relocations:
    case R_PPC64_REL14:
    case R_PPC64_REL24:
      expr = RE_PPC64_CALL_PLT;
      break;
    case R_PPC64_REL24_NOTOC:
      rs.processR_PLT_PC(type, offset, addend, sym);
      continue;

    // TLS relocations:

    // TLS LE:
    case R_PPC64_TPREL16:
    case R_PPC64_TPREL16_HA:
    case R_PPC64_TPREL16_LO:
    case R_PPC64_TPREL16_HI:
    case R_PPC64_TPREL16_DS:
    case R_PPC64_TPREL16_LO_DS:
    case R_PPC64_TPREL16_HIGHER:
    case R_PPC64_TPREL16_HIGHERA:
    case R_PPC64_TPREL16_HIGHEST:
    case R_PPC64_TPREL16_HIGHESTA:
    case R_PPC64_TPREL34:
      if (rs.checkTlsLe(offset, sym, type))
        continue;
      expr = R_TPREL;
      break;

    // TLS IE:
    case R_PPC64_GOT_TPREL16_HA:
    case R_PPC64_GOT_TPREL16_LO_DS:
    case R_PPC64_GOT_TPREL16_DS:
    case R_PPC64_GOT_TPREL16_HI:
      rs.handleTlsIe(ieExpr: R_GOT_OFF, type, offset, addend, sym);
      continue;
    case R_PPC64_GOT_TPREL_PCREL34:
      rs.handleTlsIe(ieExpr: R_GOT_PC, type, offset, addend, sym);
      continue;
    case R_PPC64_TLS:
      // Only record the marker when IE->LE relaxation will happen
      // (non-shared, non-preemptible); otherwise it computes nothing.
      if (!ctx.arg.shared && !sym.isPreemptible)
        sec.addReloc(r: {.expr: R_TPREL, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      continue;

    // TLS GD:
    case R_PPC64_GOT_TLSGD16:
    case R_PPC64_GOT_TLSGD16_HA:
    case R_PPC64_GOT_TLSGD16_HI:
    case R_PPC64_GOT_TLSGD16_LO:
    case R_PPC64_GOT_TLSGD_PCREL34: {
      bool isPCRel = type == R_PPC64_GOT_TLSGD_PCREL34;
      if (optimizeTlsGdLd) {
        // GD -> IE for preemptible symbols, GD -> LE otherwise.
        if (sym.isPreemptible) {
          ctx.hasTlsIe.store(i: true, m: std::memory_order_relaxed);
          sym.setFlags(NEEDS_TLSIE);
          sec.addReloc(
              r: {.expr: isPCRel ? R_GOT_PC : R_GOT_OFF, .type: type, .offset: offset, .addend: addend, .sym: &sym});
        } else {
          sec.addReloc(r: {.expr: R_TPREL, .type: type, .offset: offset, .addend: addend, .sym: &sym});
        }
      } else {
        sym.setFlags(NEEDS_TLSGD);
        sec.addReloc(
            r: {.expr: isPCRel ? R_TLSGD_PC : R_TLSGD_GOT, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      }
      continue;
    }
    // bl __tls_get_addr(x@tlsgd) is relocated by R_PPC64_TLSGD and
    // R_PPC64_REL24. After optimization we no longer call __tls_get_addr
    // and should skip both relocations to avoid a false dependence on
    // __tls_get_addr being defined.
    case R_PPC64_TLSGD:
    case R_PPC64_TLSLD: {
      auto it1 = it;
      ++it1;
      if (it1 == rels.end()) {
        auto diag = Err(ctx);
        diag << "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last "
                "relocation";
        printLocation(s&: diag, sec, sym, off: offset);
        continue;
      }
      // Increment the offset for the NOTOC case so that relaxTlsGdToIe
      // and relaxTlsGdToLe can distinguish it from the TOC case.
      if (it1->getType(false) == R_PPC64_REL24_NOTOC)
        ++offset;
      if (optimizeTlsGdLd) {
        sec.addReloc(r: {.expr: sym.isPreemptible ? R_GOT_OFF : R_TPREL, .type: type, .offset: offset,
                       .addend: addend, .sym: &sym});
        ++it; // skip REL24
      }
      continue;
    }

    // TLS LD:
    case R_PPC64_GOT_TLSLD16:
    case R_PPC64_GOT_TLSLD16_HA:
    case R_PPC64_GOT_TLSLD16_HI:
    case R_PPC64_GOT_TLSLD16_LO:
    case R_PPC64_GOT_TLSLD_PCREL34:
      if (optimizeTlsGdLd) {
        sec.addReloc(r: {.expr: R_TPREL, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      } else {
        ctx.needsTlsLd.store(i: true, m: std::memory_order_relaxed);
        sec.addReloc(
            r: {.expr: type == R_PPC64_GOT_TLSLD_PCREL34 ? R_TLSLD_PC : R_TLSLD_GOT, .type: type,
               .offset: offset, .addend: addend, .sym: &sym});
      }
      continue;
    case R_PPC64_DTPREL16:
    case R_PPC64_DTPREL16_DS:
    case R_PPC64_DTPREL16_HA:
    case R_PPC64_DTPREL16_HI:
    case R_PPC64_DTPREL16_HIGHER:
    case R_PPC64_DTPREL16_HIGHERA:
    case R_PPC64_DTPREL16_HIGHEST:
    case R_PPC64_DTPREL16_HIGHESTA:
    case R_PPC64_DTPREL16_LO:
    case R_PPC64_DTPREL16_LO_DS:
    case R_PPC64_DTPREL64:
    case R_PPC64_DTPREL34:
      sec.addReloc(r: {.expr: R_DTPREL, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      continue;
    case R_PPC64_GOT_DTPREL16_HA:
    case R_PPC64_GOT_DTPREL16_LO_DS:
    case R_PPC64_GOT_DTPREL16_DS:
    case R_PPC64_GOT_DTPREL16_HI:
      sym.setFlags(NEEDS_GOT_DTPREL);
      sec.addReloc(r: {.expr: R_TLSLD_GOT_OFF, .type: type, .offset: offset, .addend: addend, .sym: &sym});
      continue;

    default:
      Err(ctx) << getErrorLoc(ctx, loc: sec.content().data() + offset)
               << "unknown relocation (" << type.v << ") against symbol "
               << &sym;
      continue;
    }
    // GOT-relative / TOC-base references address the GOT directly; note it.
    if (oneof<R_GOTREL, RE_PPC64_TOCBASE>(expr))
      ctx.in.got->hasGotOffRel.store(i: true, m: std::memory_order_relaxed);
    rs.process(expr, type, offset, sym, addend);
  }
}
1345
1346void PPC64::scanSection(InputSectionBase &sec) {
1347 if (ctx.arg.isLE)
1348 elf::scanSection1<PPC64, ELF64LE>(target&: *this, sec);
1349 else
1350 elf::scanSection1<PPC64, ELF64BE>(target&: *this, sec);
1351
1352 // Sort relocations by offset for .toc sections. This is needed so that
1353 // sections addressed with small code model relocations come first.
1354 if (sec.name == ".toc")
1355 llvm::stable_sort(Range: sec.relocs(),
1356 C: [](const Relocation &lhs, const Relocation &rhs) {
1357 return lhs.offset < rhs.offset;
1358 });
1359}
1360
// Apply relocation `rel` with value `val` at `loc`. TLS relocations whose
// RelExpr was rewritten during scanning (R_TPREL / R_GOT_OFF / R_GOT_PC) are
// dispatched to the relax* handlers first; everything else is written
// directly.
void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  RelType type = rel.type;
  bool shouldTocOptimize = isTocOptType(type);

  // Handle TLS optimization.
  switch (type) {
  case R_PPC64_GOT_TLSGD16:
  case R_PPC64_GOT_TLSGD16_HA:
  case R_PPC64_GOT_TLSGD16_HI:
  case R_PPC64_GOT_TLSGD16_LO:
  case R_PPC64_GOT_TLSGD_PCREL34:
  case R_PPC64_TLSGD:
    if (rel.expr == R_TPREL) {
      relaxTlsGdToLe(loc, rel, val);
      return;
    }
    if (oneof<R_GOT_OFF, R_GOT_PC>(expr: rel.expr)) {
      relaxTlsGdToIe(loc, rel, val);
      return;
    }
    break;
  case R_PPC64_GOT_TLSLD16:
  case R_PPC64_GOT_TLSLD16_HA:
  case R_PPC64_GOT_TLSLD16_HI:
  case R_PPC64_GOT_TLSLD16_LO:
  case R_PPC64_GOT_TLSLD_PCREL34:
  case R_PPC64_TLSLD:
    if (rel.expr == R_TPREL) {
      relaxTlsLdToLe(loc, rel, val);
      return;
    }
    break;
  case R_PPC64_GOT_TPREL16_HA:
  case R_PPC64_GOT_TPREL16_LO_DS:
  case R_PPC64_GOT_TPREL16_DS:
  case R_PPC64_GOT_TPREL16_HI:
  case R_PPC64_GOT_TPREL_PCREL34:
  case R_PPC64_TLS:
    if (rel.expr == R_TPREL) {
      relaxTlsIeToLe(loc, rel, val);
      return;
    }
    break;
  default:
    break;
  }

  switch (type) {
  case R_PPC64_ADDR14: {
    checkAlignment(ctx, loc, v: val, n: 4, rel);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t aalk = loc[3];
    write16(ctx, p: loc + 2, v: (aalk & 3) | (val & 0xfffc));
    break;
  }
  case R_PPC64_GOT16:
  case R_PPC64_GOT_TLSGD16:
  case R_PPC64_GOT_TLSLD16:
  case R_PPC64_TOC16:
  case R_PPC64_DTPREL16: // semantically subtracts DTP offset (== tocOffset)
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16:
    checkIntUInt(ctx, loc, v: val, n: 16, rel);
    write16(ctx, p: loc, v: val);
    break;
  case R_PPC64_ADDR32:
    checkIntUInt(ctx, loc, v: val, n: 32, rel);
    write32(ctx, p: loc, v: val);
    break;
  case R_PPC64_GOT16_DS:
  case R_PPC64_TOC16_DS:
  case R_PPC64_GOT_DTPREL16_DS:
  case R_PPC64_GOT_TPREL16_DS:
  case R_PPC64_DTPREL16_DS:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_DS:
  case R_PPC64_TPREL16_DS: {
    checkInt(ctx, loc, v: val, n: 16, rel);
    // DQ-form instructions use bits 28-31 as part of the instruction encoding
    // DS-form instructions only use bits 30-31.
    uint16_t mask = isDQFormInstruction(encoding: readFromHalf16(ctx, loc)) ? 0xf : 0x3;
    checkAlignment(ctx, loc, v: lo(v: val), n: mask + 1, rel);
    write16(ctx, p: loc, v: (read16(ctx, p: loc) & mask) | lo(v: val));
  } break;
  case R_PPC64_GOT16_HA:
  case R_PPC64_GOT_TLSGD16_HA:
  case R_PPC64_GOT_TLSLD16_HA:
  case R_PPC64_GOT_TPREL16_HA:
  case R_PPC64_GOT_DTPREL16_HA:
  case R_PPC64_TOC16_HA:
  case R_PPC64_DTPREL16_HA:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_HA:
  case R_PPC64_REL16_HA:
  case R_PPC64_TPREL16_HA:
    // With --toc-optimize, a high-adjusted value of 0 makes the addis
    // redundant; replace it with a nop (the paired lo part switches to r2).
    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(v: val) == 0)
      writeFromHalf16(ctx, loc, insn: NOP);
    else {
      checkInt(ctx, loc, v: val + 0x8000, n: 32, rel);
      write16(ctx, p: loc, v: ha(v: val));
    }
    break;
  case R_PPC64_GOT16_HI:
  case R_PPC64_GOT_TLSGD16_HI:
  case R_PPC64_GOT_TLSLD16_HI:
  case R_PPC64_GOT_TPREL16_HI:
  case R_PPC64_GOT_DTPREL16_HI:
  case R_PPC64_TOC16_HI:
  case R_PPC64_DTPREL16_HI:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_HI:
  case R_PPC64_REL16_HI:
  case R_PPC64_TPREL16_HI:
    checkInt(ctx, loc, v: val, n: 32, rel);
    write16(ctx, p: loc, v: hi(v: val));
    break;
  case R_PPC64_ADDR16_HIGH:
    write16(ctx, p: loc, v: hi(v: val));
    break;
  case R_PPC64_DTPREL16_HIGHER:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_HIGHER:
  case R_PPC64_TPREL16_HIGHER:
    write16(ctx, p: loc, v: higher(v: val));
    break;
  case R_PPC64_DTPREL16_HIGHERA:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_HIGHERA:
  case R_PPC64_TPREL16_HIGHERA:
    write16(ctx, p: loc, v: highera(v: val));
    break;
  case R_PPC64_DTPREL16_HIGHEST:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_HIGHEST:
  case R_PPC64_TPREL16_HIGHEST:
    write16(ctx, p: loc, v: highest(v: val));
    break;
  case R_PPC64_DTPREL16_HIGHESTA:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_HIGHESTA:
  case R_PPC64_TPREL16_HIGHESTA:
    write16(ctx, p: loc, v: highesta(v: val));
    break;
  case R_PPC64_GOT16_LO:
  case R_PPC64_GOT_TLSGD16_LO:
  case R_PPC64_GOT_TLSLD16_LO:
  case R_PPC64_TOC16_LO:
  case R_PPC64_DTPREL16_LO:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_LO:
  case R_PPC64_REL16_LO:
  case R_PPC64_TPREL16_LO:
    // When the high-adjusted part of a toc relocation evaluates to 0, it is
    // changed into a nop. The lo part then needs to be updated to use the
    // toc-pointer register r2, as the base register.
    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(v: val) == 0) {
      uint32_t insn = readFromHalf16(ctx, loc);
      if (isInstructionUpdateForm(encoding: insn))
        Err(ctx) << getErrorLoc(ctx, loc)
                 << "can't toc-optimize an update instruction: 0x"
                 << utohexstr(X: insn, LowerCase: true);
      writeFromHalf16(ctx, loc, insn: (insn & 0xffe00000) | 0x00020000 | lo(v: val));
    } else {
      write16(ctx, p: loc, v: lo(v: val));
    }
    break;
  case R_PPC64_GOT16_LO_DS:
  case R_PPC64_GOT_TPREL16_LO_DS:
  case R_PPC64_GOT_DTPREL16_LO_DS:
  case R_PPC64_TOC16_LO_DS:
  case R_PPC64_DTPREL16_LO_DS:
    val -= ppc64TocOffset;
    [[fallthrough]];
  case R_PPC64_ADDR16_LO_DS:
  case R_PPC64_TPREL16_LO_DS: {
    // DQ-form instructions use bits 28-31 as part of the instruction encoding
    // DS-form instructions only use bits 30-31.
    uint32_t insn = readFromHalf16(ctx, loc);
    uint16_t mask = isDQFormInstruction(encoding: insn) ? 0xf : 0x3;
    checkAlignment(ctx, loc, v: lo(v: val), n: mask + 1, rel);
    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(v: val) == 0) {
      // When the high-adjusted part of a toc relocation evaluates to 0, it is
      // changed into a nop. The lo part then needs to be updated to use the toc
      // pointer register r2, as the base register.
      if (isInstructionUpdateForm(encoding: insn))
        Err(ctx) << getErrorLoc(ctx, loc)
                 << "can't toc-optimize an update instruction: 0x"
                 << utohexstr(X: insn, LowerCase: true);
      insn &= 0xffe00000 | mask;
      writeFromHalf16(ctx, loc, insn: insn | 0x00020000 | lo(v: val));
    } else {
      write16(ctx, p: loc, v: (read16(ctx, p: loc) & mask) | lo(v: val));
    }
  } break;
  case R_PPC64_TPREL16:
    checkInt(ctx, loc, v: val, n: 16, rel);
    write16(ctx, p: loc, v: val);
    break;
  case R_PPC64_REL32:
    checkInt(ctx, loc, v: val, n: 32, rel);
    write32(ctx, p: loc, v: val);
    break;
  case R_PPC64_DTPREL64:
    val -= dynamicThreadPointerOffset;
    [[fallthrough]];
  case R_PPC64_ADDR64:
  case R_PPC64_REL64:
  case R_PPC64_TOC:
    write64(ctx, p: loc, v: val);
    break;
  case R_PPC64_REL14: {
    uint32_t mask = 0x0000FFFC;
    checkInt(ctx, loc, v: val, n: 16, rel);
    checkAlignment(ctx, loc, v: val, n: 4, rel);
    write32(ctx, p: loc, v: (read32(ctx, p: loc) & ~mask) | (val & mask));
    break;
  }
  case R_PPC64_REL24:
  case R_PPC64_REL24_NOTOC: {
    uint32_t mask = 0x03FFFFFC;
    checkInt(ctx, loc, v: val, n: 26, rel);
    checkAlignment(ctx, loc, v: val, n: 4, rel);
    write32(ctx, p: loc, v: (read32(ctx, p: loc) & ~mask) | (val & mask));
    break;
  }
  case R_PPC64_DTPREL34:
    val -= dynamicThreadPointerOffset;
    [[fallthrough]];
  case R_PPC64_PCREL34:
  case R_PPC64_GOT_PCREL34:
  case R_PPC64_GOT_TLSGD_PCREL34:
  case R_PPC64_GOT_TLSLD_PCREL34:
  case R_PPC64_GOT_TPREL_PCREL34:
  case R_PPC64_TPREL34: {
    // Prefixed instructions carry a 34-bit immediate split across the prefix
    // word (si0, bits 16-33) and the suffix word (si1, bits 0-15).
    const uint64_t si0Mask = 0x00000003ffff0000;
    const uint64_t si1Mask = 0x000000000000ffff;
    const uint64_t fullMask = 0x0003ffff0000ffff;
    checkInt(ctx, loc, v: val, n: 34, rel);

    uint64_t instr = readPrefixedInst(ctx, loc) & ~fullMask;
    writePrefixedInst(ctx, loc,
                      insn: instr | ((val & si0Mask) << 16) | (val & si1Mask));
    break;
  }
  // If we encounter a PCREL_OPT relocation that we won't optimize.
  case R_PPC64_PCREL_OPT:
    break;
  default:
    llvm_unreachable("unknown relocation");
  }
}
1621
1622bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
1623 uint64_t branchAddr, const Symbol &s, int64_t a) const {
1624 if (type != R_PPC64_REL14 && type != R_PPC64_REL24 &&
1625 type != R_PPC64_REL24_NOTOC)
1626 return false;
1627
1628 // If a function is in the Plt it needs to be called with a call-stub.
1629 if (s.isInPlt(ctx))
1630 return true;
1631
1632 // This check looks at the st_other bits of the callee with relocation
1633 // R_PPC64_REL14 or R_PPC64_REL24. If the value is 1, then the callee
1634 // clobbers the TOC and we need an R2 save stub.
1635 if (type != R_PPC64_REL24_NOTOC && (s.stOther >> 5) == 1)
1636 return true;
1637
1638 if (type == R_PPC64_REL24_NOTOC && (s.stOther >> 5) > 1)
1639 return true;
1640
1641 // An undefined weak symbol not in a PLT does not need a thunk. If it is
1642 // hidden, its binding has been converted to local, so we just check
1643 // isUndefined() here. A undefined non-weak symbol has been errored.
1644 if (s.isUndefined())
1645 return false;
1646
1647 // If the offset exceeds the range of the branch type then it will need
1648 // a range-extending thunk.
1649 // See the comment in getRelocTargetVA() about RE_PPC64_CALL.
1650 return !inBranchRange(
1651 type, src: branchAddr,
1652 dst: s.getVA(ctx, addend: a) + getPPC64GlobalEntryToLocalEntryOffset(ctx, stOther: s.stOther));
1653}
1654
1655uint32_t PPC64::getThunkSectionSpacing() const {
1656 // See comment in Arch/ARM.cpp for a more detailed explanation of
1657 // getThunkSectionSpacing(). For PPC64 we pick the constant here based on
1658 // R_PPC64_REL24, which is used by unconditional branch instructions.
1659 // 0x2000000 = (1 << 24-1) * 4
1660 return 0x2000000;
1661}
1662
1663bool PPC64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
1664 int64_t offset = dst - src;
1665 if (type == R_PPC64_REL14)
1666 return isInt<16>(x: offset);
1667 if (type == R_PPC64_REL24 || type == R_PPC64_REL24_NOTOC)
1668 return isInt<26>(x: offset);
1669 llvm_unreachable("unsupported relocation type used in branch");
1670}
1671
1672RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
1673 const uint8_t *loc) const {
1674 if ((type == R_PPC64_GOT_PCREL34 || type == R_PPC64_PCREL_OPT) &&
1675 ctx.arg.pcRelOptimize) {
1676 // It only makes sense to optimize pld since paddi means that the address
1677 // of the object in the GOT is required rather than the object itself.
1678 if ((readPrefixedInst(ctx, loc) & 0xfc000000) == 0xe4000000)
1679 return R_RELAX_GOT_PC;
1680 }
1681 return R_GOT_PC;
1682}
1683
// Reference: 3.7.4.1 of the 64-bit ELF V2 abi supplement.
// The general dynamic code sequence for a global `x` uses 4 instructions.
// Instruction                    Relocation             Symbol
// addis r3, r2, x@got@tlsgd@ha   R_PPC64_GOT_TLSGD16_HA x
// addi  r3, r3, x@got@tlsgd@l    R_PPC64_GOT_TLSGD16_LO x
// bl __tls_get_addr(x@tlsgd)     R_PPC64_TLSGD          x
//                                R_PPC64_REL24          __tls_get_addr
// nop                            None                   None
//
// Relaxing to initial-exec entails:
// 1) Convert the addis/addi pair that builds the address of the tls_index
//    struct for 'x' to an addis/ld pair that loads an offset from a got-entry.
// 2) Convert the call to __tls_get_addr to a nop.
// 3) Convert the nop following the call to an add of the loaded offset to the
//    thread pointer.
// Since the nop must directly follow the call, the R_PPC64_TLSGD relocation is
// used as the relaxation hint for both steps 2 and 3.
void PPC64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                           uint64_t val) const {
  switch (rel.type) {
  case R_PPC64_GOT_TLSGD16_HA:
    // This is relaxed from addis rT, r2, sym@got@tlsgd@ha to
    //                     addis rT, r2, sym@got@tprel@ha.
    // Only the relocation changes; the instruction bytes stay as-is.
    relocateNoSym(loc, type: R_PPC64_GOT_TPREL16_HA, val);
    return;
  case R_PPC64_GOT_TLSGD16:
  case R_PPC64_GOT_TLSGD16_LO: {
    // Relax from addi  r3, rA, sym@got@tlsgd@l to
    //            ld r3, sym@got@tprel@l(rA)
    // Preserve the original base register rA (bits 11-15).
    uint32_t ra = (readFromHalf16(ctx, loc) & (0x1f << 16));
    writeFromHalf16(ctx, loc, insn: 0xe8600000 | ra);
    relocateNoSym(loc, type: R_PPC64_GOT_TPREL16_LO_DS, val);
    return;
  }
  case R_PPC64_GOT_TLSGD_PCREL34: {
    // Relax from paddi r3, 0, sym@got@tlsgd@pcrel, 1 to
    //            pld r3, sym@got@tprel@pcrel
    writePrefixedInst(ctx, loc, insn: 0x04100000e4600000);
    relocateNoSym(loc, type: R_PPC64_GOT_TPREL_PCREL34, val);
    return;
  }
  case R_PPC64_TLSGD: {
    // PC Relative Relaxation:
    // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
    //            nop
    // TOC Relaxation:
    // Relax from bl __tls_get_addr(x@tlsgd)
    //            nop
    // to
    //            nop
    //            add r3, r3, r13
    // scanSectionImpl bumps the offset by one byte for the REL24_NOTOC form;
    // the misalignment of `loc` distinguishes the two variants here.
    const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
    if (locAsInt % 4 == 0) {
      write32(ctx, p: loc, v: NOP);            // bl __tls_get_addr(sym@tlsgd) --> nop
      write32(ctx, p: loc + 4, v: 0x7c636a14); // nop --> add r3, r3, r13
    } else if (locAsInt % 4 == 1) {
      // bl __tls_get_addr(sym@tlsgd) --> add r3, r3, r13
      write32(ctx, p: loc - 1, v: 0x7c636a14);
    } else {
      Err(ctx) << "R_PPC64_TLSGD has unexpected byte alignment";
    }
    return;
  }
  default:
    llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
  }
}
1751
// Apply all scanned relocations of `sec` into the output buffer, performing
// PPC64-specific pre-processing (GOT PC-relative relaxation, TOC indirection
// relaxation, and TOC-restore nop patching after calls) before delegating to
// relocate().
void PPC64::relocateAlloc(InputSection &sec, uint8_t *buf) const {
  uint64_t secAddr = sec.getOutputSection()->addr + sec.outSecOff;
  // Offset of the most recently relaxed R_PPC64_GOT_PCREL34; used to pair a
  // following R_PPC64_PCREL_OPT at the same offset.
  uint64_t lastPPCRelaxedRelocOff = -1;
  for (const Relocation &rel : sec.relocs()) {
    uint8_t *loc = buf + rel.offset;
    const uint64_t val = sec.getRelocTargetVA(ctx, r: rel, p: secAddr + rel.offset);
    switch (rel.type) {
    case R_PPC64_GOT_PCREL34:
      if (rel.expr == R_RELAX_GOT_PC) {
        lastPPCRelaxedRelocOff = rel.offset;
        relaxGot(loc, rel, val);
        continue;
      }
      break;
    case R_PPC64_PCREL_OPT:
      // R_PPC64_PCREL_OPT must appear immediately after R_PPC64_GOT_PCREL34
      // at the same offset. Only relax if the associated GOT_PCREL34 was
      // relaxed.
      if (rel.expr == R_RELAX_GOT_PC && rel.offset == lastPPCRelaxedRelocOff) {
        relaxGot(loc, rel, val);
        continue;
      }
      break;
    case R_PPC64_TOC16_HA:
    case R_PPC64_TOC16_LO_DS:
      // rel.sym refers to the STT_SECTION symbol associated to the .toc input
      // section. If an R_PPC64_TOC16_LO (.toc + addend) references the TOC
      // entry, there may be R_PPC64_TOC16_HA not paired with
      // R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
      // opportunities but is safe.
      if (ctx.arg.tocOptimize &&
          !ctx.ppc64noTocRelax.contains(V: {rel.sym, rel.addend}) &&
          tryRelaxPPC64TocIndirection(ctx, rel, bufLoc: loc))
        continue;
      break;
    case R_PPC64_REL14:
    case R_PPC64_REL24:
      // If this is a call to __tls_get_addr, it may be part of a TLS
      // sequence that has been relaxed and turned into a nop. In this
      // case, we don't want to handle it as a call.
      if (read32(ctx, p: loc) == NOP)
        continue;

      // Patch a nop (0x60000000) to a ld.
      if (rel.sym->needsTocRestore()) {
        // gcc/gfortran 5.4, 6.3 and earlier versions do not add nop for
        // recursive calls even if the function is preemptible. This is not
        // wrong in the common case where the function is not preempted at
        // runtime. Just ignore.
        if ((rel.offset + 8 > sec.content().size() ||
             read32(ctx, p: loc + 4) != NOP) &&
            rel.sym->file != sec.file) {
          // Use substr(6) to remove the "__plt_" prefix.
          Err(ctx) << getErrorLoc(ctx, loc) << "call to "
                   << toStr(ctx, *rel.sym).substr(pos: 6)
                   << " lacks nop, can't restore toc";
          continue;
        }
        write32(ctx, p: loc + 4, v: 0xe8410018); // ld %r2, 24(%r1)
      }
      break;
    }
    relocate(loc, rel, val);
  }
}
1817
1818// The prologue for a split-stack function is expected to look roughly
1819// like this:
1820// .Lglobal_entry_point:
1821// # TOC pointer initialization.
1822// ...
1823// .Llocal_entry_point:
1824// # load the __private_ss member of the threads tcbhead.
1825// ld r0,-0x7000-64(r13)
// # subtract the function's stack size from the stack pointer.
1827// addis r12, r1, ha(-stack-frame size)
1828// addi r12, r12, l(-stack-frame size)
1829// # compare needed to actual and branch to allocate_more_stack if more
1830// # space is needed, otherwise fallthrough to 'normal' function body.
1831// cmpld cr7,r12,r0
1832// blt- cr7, .Lallocate_more_stack
1833//
1834// -) The allocate_more_stack block might be placed after the split-stack
1835// prologue and the `blt-` replaced with a `bge+ .Lnormal_func_body`
1836// instead.
// -) If either the addis or addi is not needed due to the stack size being
//    smaller than 32K or a multiple of 64K, it will be replaced with a nop,
//    but there will always be 2 instructions the linker can overwrite for the
//    adjusted stack size.
1841//
// The linker's job here is to increase the stack size used in the addis/addi
// pair by split-stack-size-adjust.
1844// addis r12, r1, ha(-stack-frame size - split-stack-adjust-size)
1845// addi r12, r12, l(-stack-frame size - split-stack-adjust-size)
1846bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
1847 uint8_t stOther) const {
1848 // If the caller has a global entry point adjust the buffer past it. The start
1849 // of the split-stack prologue will be at the local entry point.
1850 loc += getPPC64GlobalEntryToLocalEntryOffset(ctx, stOther);
1851
1852 // At the very least we expect to see a load of some split-stack data from the
1853 // tcb, and 2 instructions that calculate the ending stack address this
1854 // function will require. If there is not enough room for at least 3
1855 // instructions it can't be a split-stack prologue.
1856 if (loc + 12 >= end)
1857 return false;
1858
1859 // First instruction must be `ld r0, -0x7000-64(r13)`
1860 if (read32(ctx, p: loc) != 0xe80d8fc0)
1861 return false;
1862
1863 int16_t hiImm = 0;
1864 int16_t loImm = 0;
1865 // First instruction can be either an addis if the frame size is larger then
1866 // 32K, or an addi if the size is less then 32K.
1867 int32_t firstInstr = read32(ctx, p: loc + 4);
1868 if (getPrimaryOpCode(encoding: firstInstr) == 15) {
1869 hiImm = firstInstr & 0xFFFF;
1870 } else if (getPrimaryOpCode(encoding: firstInstr) == 14) {
1871 loImm = firstInstr & 0xFFFF;
1872 } else {
1873 return false;
1874 }
1875
1876 // Second instruction is either an addi or a nop. If the first instruction was
1877 // an addi then LoImm is set and the second instruction must be a nop.
1878 uint32_t secondInstr = read32(ctx, p: loc + 8);
1879 if (!loImm && getPrimaryOpCode(encoding: secondInstr) == 14) {
1880 loImm = secondInstr & 0xFFFF;
1881 } else if (secondInstr != NOP) {
1882 return false;
1883 }
1884
1885 // The register operands of the first instruction should be the stack-pointer
1886 // (r1) as the input (RA) and r12 as the output (RT). If the second
1887 // instruction is not a nop, then it should use r12 as both input and output.
1888 auto checkRegOperands = [](uint32_t instr, uint8_t expectedRT,
1889 uint8_t expectedRA) {
1890 return ((instr & 0x3E00000) >> 21 == expectedRT) &&
1891 ((instr & 0x1F0000) >> 16 == expectedRA);
1892 };
1893 if (!checkRegOperands(firstInstr, 12, 1))
1894 return false;
1895 if (secondInstr != NOP && !checkRegOperands(secondInstr, 12, 12))
1896 return false;
1897
1898 int32_t stackFrameSize = (hiImm * 65536) + loImm;
1899 // Check that the adjusted size doesn't overflow what we can represent with 2
1900 // instructions.
1901 if (stackFrameSize < ctx.arg.splitStackAdjustSize + INT32_MIN) {
1902 Err(ctx) << getErrorLoc(ctx, loc)
1903 << "split-stack prologue adjustment overflows";
1904 return false;
1905 }
1906
1907 int32_t adjustedStackFrameSize =
1908 stackFrameSize - ctx.arg.splitStackAdjustSize;
1909
1910 loImm = adjustedStackFrameSize & 0xFFFF;
1911 hiImm = (adjustedStackFrameSize + 0x8000) >> 16;
1912 if (hiImm) {
1913 write32(ctx, p: loc + 4, v: 0x3d810000 | (uint16_t)hiImm);
1914 // If the low immediate is zero the second instruction will be a nop.
1915 secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : NOP;
1916 write32(ctx, p: loc + 8, v: secondInstr);
1917 } else {
1918 // addi r12, r1, imm
1919 write32(ctx, p: loc + 4, v: (0x39810000) | (uint16_t)loImm);
1920 write32(ctx, p: loc + 8, v: NOP);
1921 }
1922
1923 return true;
1924}
1925
1926void elf::setPPC64TargetInfo(Ctx &ctx) { ctx.target.reset(p: new PPC64(ctx)); }
1927