1//===- PPC64.cpp ----------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "InputFiles.h"
10#include "OutputSections.h"
11#include "SymbolTable.h"
12#include "Symbols.h"
13#include "SyntheticSections.h"
14#include "Target.h"
15#include "Thunks.h"
16#include "lld/Common/CommonLinkerContext.h"
17#include "llvm/Support/Endian.h"
18
19using namespace llvm;
20using namespace llvm::object;
21using namespace llvm::support::endian;
22using namespace llvm::ELF;
23using namespace lld;
24using namespace lld::elf;
25
26constexpr uint64_t ppc64TocOffset = 0x8000;
27constexpr uint64_t dynamicThreadPointerOffset = 0x8000;
28
29namespace {
30// The instruction encoding of bits 21-30 from the ISA for the Xform and Dform
31// instructions that can be used as part of the initial exec TLS sequence.
32enum XFormOpcd {
33 LBZX = 87,
34 LHZX = 279,
35 LWZX = 23,
36 LDX = 21,
37 STBX = 215,
38 STHX = 407,
39 STWX = 151,
40 STDX = 149,
41 LHAX = 343,
42 LWAX = 341,
43 LFSX = 535,
44 LFDX = 599,
45 STFSX = 663,
46 STFDX = 727,
47 ADD = 266,
48};
49
50enum DFormOpcd {
51 LBZ = 34,
52 LBZU = 35,
53 LHZ = 40,
54 LHZU = 41,
55 LHAU = 43,
56 LWZ = 32,
57 LWZU = 33,
58 LFSU = 49,
59 LFDU = 51,
60 STB = 38,
61 STBU = 39,
62 STH = 44,
63 STHU = 45,
64 STW = 36,
65 STWU = 37,
66 STFSU = 53,
67 STFDU = 55,
68 LHA = 42,
69 LFS = 48,
70 LFD = 50,
71 STFS = 52,
72 STFD = 54,
73 ADDI = 14
74};
75
76enum DSFormOpcd {
77 LD = 58,
78 LWA = 58,
79 STD = 62
80};
81
82constexpr uint32_t NOP = 0x60000000;
83
84enum class PPCLegacyInsn : uint32_t {
85 NOINSN = 0,
86 // Loads.
87 LBZ = 0x88000000,
88 LHZ = 0xa0000000,
89 LWZ = 0x80000000,
90 LHA = 0xa8000000,
91 LWA = 0xe8000002,
92 LD = 0xe8000000,
93 LFS = 0xC0000000,
94 LXSSP = 0xe4000003,
95 LFD = 0xc8000000,
96 LXSD = 0xe4000002,
97 LXV = 0xf4000001,
98 LXVP = 0x18000000,
99
100 // Stores.
101 STB = 0x98000000,
102 STH = 0xb0000000,
103 STW = 0x90000000,
104 STD = 0xf8000000,
105 STFS = 0xd0000000,
106 STXSSP = 0xf4000003,
107 STFD = 0xd8000000,
108 STXSD = 0xf4000002,
109 STXV = 0xf4000005,
110 STXVP = 0x18000001
111};
112enum class PPCPrefixedInsn : uint64_t {
113 NOINSN = 0,
114 PREFIX_MLS = 0x0610000000000000,
115 PREFIX_8LS = 0x0410000000000000,
116
117 // Loads.
118 PLBZ = PREFIX_MLS,
119 PLHZ = PREFIX_MLS,
120 PLWZ = PREFIX_MLS,
121 PLHA = PREFIX_MLS,
122 PLWA = PREFIX_8LS | 0xa4000000,
123 PLD = PREFIX_8LS | 0xe4000000,
124 PLFS = PREFIX_MLS,
125 PLXSSP = PREFIX_8LS | 0xac000000,
126 PLFD = PREFIX_MLS,
127 PLXSD = PREFIX_8LS | 0xa8000000,
128 PLXV = PREFIX_8LS | 0xc8000000,
129 PLXVP = PREFIX_8LS | 0xe8000000,
130
131 // Stores.
132 PSTB = PREFIX_MLS,
133 PSTH = PREFIX_MLS,
134 PSTW = PREFIX_MLS,
135 PSTD = PREFIX_8LS | 0xf4000000,
136 PSTFS = PREFIX_MLS,
137 PSTXSSP = PREFIX_8LS | 0xbc000000,
138 PSTFD = PREFIX_MLS,
139 PSTXSD = PREFIX_8LS | 0xb8000000,
140 PSTXV = PREFIX_8LS | 0xd8000000,
141 PSTXVP = PREFIX_8LS | 0xf8000000
142};
143
144static bool checkPPCLegacyInsn(uint32_t encoding) {
145 PPCLegacyInsn insn = static_cast<PPCLegacyInsn>(encoding);
146 if (insn == PPCLegacyInsn::NOINSN)
147 return false;
148#define PCREL_OPT(Legacy, PCRel, InsnMask) \
149 if (insn == PPCLegacyInsn::Legacy) \
150 return true;
151#include "PPCInsns.def"
152#undef PCREL_OPT
153 return false;
154}
155
156// Masks to apply to legacy instructions when converting them to prefixed,
157// pc-relative versions. For the most part, the primary opcode is shared
158// between the legacy instruction and the suffix of its prefixed version.
159// However, there are some instances where that isn't the case (DS-Form and
160// DQ-form instructions).
161enum class LegacyToPrefixMask : uint64_t {
162 NOMASK = 0x0,
163 OPC_AND_RST = 0xffe00000, // Primary opc (0-5) and R[ST] (6-10).
164 ONLY_RST = 0x3e00000, // [RS]T (6-10).
165 ST_STX28_TO5 =
166 0x8000000003e00000, // S/T (6-10) - The [S/T]X bit moves from 28 to 5.
167};
168
169class PPC64 final : public TargetInfo {
170public:
171 PPC64();
172 int getTlsGdRelaxSkip(RelType type) const override;
173 uint32_t calcEFlags() const override;
174 RelExpr getRelExpr(RelType type, const Symbol &s,
175 const uint8_t *loc) const override;
176 RelType getDynRel(RelType type) const override;
177 int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
178 void writePltHeader(uint8_t *buf) const override;
179 void writePlt(uint8_t *buf, const Symbol &sym,
180 uint64_t pltEntryAddr) const override;
181 void writeIplt(uint8_t *buf, const Symbol &sym,
182 uint64_t pltEntryAddr) const override;
183 void relocate(uint8_t *loc, const Relocation &rel,
184 uint64_t val) const override;
185 void writeGotHeader(uint8_t *buf) const override;
186 bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
187 uint64_t branchAddr, const Symbol &s,
188 int64_t a) const override;
189 uint32_t getThunkSectionSpacing() const override;
190 bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
191 RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
192 RelExpr adjustGotPcExpr(RelType type, int64_t addend,
193 const uint8_t *loc) const override;
194 void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const;
195 void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
196
197 bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
198 uint8_t stOther) const override;
199
200private:
201 void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
202 void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
203 void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
204 void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
205};
206} // namespace
207
208uint64_t elf::getPPC64TocBase() {
209 // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
210 // TOC starts where the first of these sections starts. We always create a
211 // .got when we see a relocation that uses it, so for us the start is always
212 // the .got.
213 uint64_t tocVA = in.got->getVA();
214
215 // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
216 // thus permitting a full 64 Kbytes segment. Note that the glibc startup
217 // code (crt1.o) assumes that you can get from the TOC base to the
218 // start of the .toc section with only a single (signed) 16-bit relocation.
219 return tocVA + ppc64TocOffset;
220}
221
222unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther) {
223 // The offset is encoded into the 3 most significant bits of the st_other
224 // field, with some special values described in section 3.4.1 of the ABI:
225 // 0 --> Zero offset between the GEP and LEP, and the function does NOT use
226 // the TOC pointer (r2). r2 will hold the same value on returning from
227 // the function as it did on entering the function.
228 // 1 --> Zero offset between the GEP and LEP, and r2 should be treated as a
229 // caller-saved register for all callers.
230 // 2-6 --> The binary logarithm of the offset eg:
231 // 2 --> 2^2 = 4 bytes --> 1 instruction.
232 // 6 --> 2^6 = 64 bytes --> 16 instructions.
233 // 7 --> Reserved.
234 uint8_t gepToLep = (stOther >> 5) & 7;
235 if (gepToLep < 2)
236 return 0;
237
238 // The value encoded in the st_other bits is the
239 // log-base-2(offset).
240 if (gepToLep < 7)
241 return 1 << gepToLep;
242
243 error(msg: "reserved value of 7 in the 3 most-significant-bits of st_other");
244 return 0;
245}
246
247void elf::writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
248 insn = config->isLE ? insn << 32 | insn >> 32 : insn;
249 write64(p: loc, v: insn);
250}
251
252static bool addOptional(StringRef name, uint64_t value,
253 std::vector<Defined *> &defined) {
254 Symbol *sym = symtab.find(name);
255 if (!sym || sym->isDefined())
256 return false;
257 sym->resolve(other: Defined{ctx.internalFile, StringRef(), STB_GLOBAL, STV_HIDDEN,
258 STT_FUNC, value,
259 /*size=*/0, /*section=*/nullptr});
260 defined.push_back(x: cast<Defined>(Val: sym));
261 return true;
262}
263
264// If from is 14, write ${prefix}14: firstInsn; ${prefix}15:
265// firstInsn+0x200008; ...; ${prefix}31: firstInsn+(31-14)*0x200008; $tail
266// The labels are defined only if they exist in the symbol table.
267static void writeSequence(MutableArrayRef<uint32_t> buf, const char *prefix,
268 int from, uint32_t firstInsn,
269 ArrayRef<uint32_t> tail) {
270 std::vector<Defined *> defined;
271 char name[16];
272 int first;
273 uint32_t *ptr = buf.data();
274 for (int r = from; r < 32; ++r) {
275 format(Fmt: "%s%d", Vals: prefix, Vals: r).snprint(Buffer: name, BufferSize: sizeof(name));
276 if (addOptional(name, value: 4 * (r - from), defined) && defined.size() == 1)
277 first = r - from;
278 write32(p: ptr++, v: firstInsn + 0x200008 * (r - from));
279 }
280 for (uint32_t insn : tail)
281 write32(p: ptr++, v: insn);
282 assert(ptr == &*buf.end());
283
284 if (defined.empty())
285 return;
286 // The full section content has the extent of [begin, end). We drop unused
287 // instructions and write [first,end).
288 auto *sec = make<InputSection>(
289 args&: ctx.internalFile, args: SHF_ALLOC, args: SHT_PROGBITS, args: 4,
290 args: ArrayRef(reinterpret_cast<uint8_t *>(buf.data() + first),
291 4 * (buf.size() - first)),
292 args: ".text");
293 ctx.inputSections.push_back(Elt: sec);
294 for (Defined *sym : defined) {
295 sym->section = sec;
296 sym->value -= 4 * first;
297 }
298}
299
300// Implements some save and restore functions as described by ELF V2 ABI to be
301// compatible with GCC. With GCC -Os, when the number of call-saved registers
302// exceeds a certain threshold, GCC generates _savegpr0_* _restgpr0_* calls and
303// expects the linker to define them. See
304// https://sourceware.org/pipermail/binutils/2002-February/017444.html and
305// https://sourceware.org/pipermail/binutils/2004-August/036765.html . This is
306// weird because libgcc.a would be the natural place. The linker generation
307// approach has the advantage that the linker can generate multiple copies to
308// avoid long branch thunks. However, we don't consider the advantage
309// significant enough to complicate our trunk implementation, so we take the
310// simple approach and synthesize .text sections providing the implementation.
311void elf::addPPC64SaveRestore() {
312 static uint32_t savegpr0[20], restgpr0[21], savegpr1[19], restgpr1[19];
313 constexpr uint32_t blr = 0x4e800020, mtlr_0 = 0x7c0803a6;
314
315 // _restgpr0_14: ld 14, -144(1); _restgpr0_15: ld 15, -136(1); ...
316 // Tail: ld 0, 16(1); mtlr 0; blr
317 writeSequence(buf: restgpr0, prefix: "_restgpr0_", from: 14, firstInsn: 0xe9c1ff70,
318 tail: {0xe8010010, mtlr_0, blr});
319 // _restgpr1_14: ld 14, -144(12); _restgpr1_15: ld 15, -136(12); ...
320 // Tail: blr
321 writeSequence(buf: restgpr1, prefix: "_restgpr1_", from: 14, firstInsn: 0xe9ccff70, tail: {blr});
322 // _savegpr0_14: std 14, -144(1); _savegpr0_15: std 15, -136(1); ...
323 // Tail: std 0, 16(1); blr
324 writeSequence(buf: savegpr0, prefix: "_savegpr0_", from: 14, firstInsn: 0xf9c1ff70, tail: {0xf8010010, blr});
325 // _savegpr1_14: std 14, -144(12); _savegpr1_15: std 15, -136(12); ...
326 // Tail: blr
327 writeSequence(buf: savegpr1, prefix: "_savegpr1_", from: 14, firstInsn: 0xf9ccff70, tail: {blr});
328}
329
330// Find the R_PPC64_ADDR64 in .rela.toc with matching offset.
331template <typename ELFT>
332static std::pair<Defined *, int64_t>
333getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
334 // .rela.toc contains exclusively R_PPC64_ADDR64 relocations sorted by
335 // r_offset: 0, 8, 16, etc. For a given Offset, Offset / 8 gives us the
336 // relocation index in most cases.
337 //
338 // In rare cases a TOC entry may store a constant that doesn't need an
339 // R_PPC64_ADDR64, the corresponding r_offset is therefore missing. Offset / 8
340 // points to a relocation with larger r_offset. Do a linear probe then.
341 // Constants are extremely uncommon in .toc and the extra number of array
342 // accesses can be seen as a small constant.
343 ArrayRef<typename ELFT::Rela> relas =
344 tocSec->template relsOrRelas<ELFT>().relas;
345 if (relas.empty())
346 return {};
347 uint64_t index = std::min<uint64_t>(offset / 8, relas.size() - 1);
348 for (;;) {
349 if (relas[index].r_offset == offset) {
350 Symbol &sym = tocSec->file->getRelocTargetSym(relas[index]);
351 return {dyn_cast<Defined>(Val: &sym), getAddend<ELFT>(relas[index])};
352 }
353 if (relas[index].r_offset < offset || index == 0)
354 break;
355 --index;
356 }
357 return {};
358}
359
360// When accessing a symbol defined in another translation unit, compilers
361// reserve a .toc entry, allocate a local label and generate toc-indirect
362// instructions:
363//
364// addis 3, 2, .LC0@toc@ha # R_PPC64_TOC16_HA
365// ld 3, .LC0@toc@l(3) # R_PPC64_TOC16_LO_DS, load the address from a .toc entry
366// ld/lwa 3, 0(3) # load the value from the address
367//
368// .section .toc,"aw",@progbits
369// .LC0: .tc var[TC],var
370//
371// If var is defined, non-preemptable and addressable with a 32-bit signed
372// offset from the toc base, the address of var can be computed by adding an
373// offset to the toc base, saving a load.
374//
375// addis 3,2,var@toc@ha # this may be relaxed to a nop,
376// addi 3,3,var@toc@l # then this becomes addi 3,2,var@toc
377// ld/lwa 3, 0(3) # load the value from the address
378//
379// Returns true if the relaxation is performed.
380static bool tryRelaxPPC64TocIndirection(const Relocation &rel,
381 uint8_t *bufLoc) {
382 assert(config->tocOptimize);
383 if (rel.addend < 0)
384 return false;
385
386 // If the symbol is not the .toc section, this isn't a toc-indirection.
387 Defined *defSym = dyn_cast<Defined>(Val: rel.sym);
388 if (!defSym || !defSym->isSection() || defSym->section->name != ".toc")
389 return false;
390
391 Defined *d;
392 int64_t addend;
393 auto *tocISB = cast<InputSectionBase>(Val: defSym->section);
394 std::tie(args&: d, args&: addend) =
395 config->isLE ? getRelaTocSymAndAddend<ELF64LE>(tocSec: tocISB, offset: rel.addend)
396 : getRelaTocSymAndAddend<ELF64BE>(tocSec: tocISB, offset: rel.addend);
397
398 // Only non-preemptable defined symbols can be relaxed.
399 if (!d || d->isPreemptible)
400 return false;
401
402 // R_PPC64_ADDR64 should have created a canonical PLT for the non-preemptable
403 // ifunc and changed its type to STT_FUNC.
404 assert(!d->isGnuIFunc());
405
406 // Two instructions can materialize a 32-bit signed offset from the toc base.
407 uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase();
408 if (!isInt<32>(x: tocRelative))
409 return false;
410
411 // Add PPC64TocOffset that will be subtracted by PPC64::relocate().
412 static_cast<const PPC64 &>(*target).relaxGot(loc: bufLoc, rel,
413 val: tocRelative + ppc64TocOffset);
414 return true;
415}
416
417// Relocation masks following the #lo(value), #hi(value), #ha(value),
418// #higher(value), #highera(value), #highest(value), and #highesta(value)
419// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
420// document.
421static uint16_t lo(uint64_t v) { return v; }
422static uint16_t hi(uint64_t v) { return v >> 16; }
423static uint64_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
424static uint16_t higher(uint64_t v) { return v >> 32; }
425static uint16_t highera(uint64_t v) { return (v + 0x8000) >> 32; }
426static uint16_t highest(uint64_t v) { return v >> 48; }
427static uint16_t highesta(uint64_t v) { return (v + 0x8000) >> 48; }
428
429// Extracts the 'PO' field of an instruction encoding.
430static uint8_t getPrimaryOpCode(uint32_t encoding) { return (encoding >> 26); }
431
432static bool isDQFormInstruction(uint32_t encoding) {
433 switch (getPrimaryOpCode(encoding)) {
434 default:
435 return false;
436 case 6: // Power10 paired loads/stores (lxvp, stxvp).
437 case 56:
438 // The only instruction with a primary opcode of 56 is `lq`.
439 return true;
440 case 61:
441 // There are both DS and DQ instruction forms with this primary opcode.
442 // Namely `lxv` and `stxv` are the DQ-forms that use it.
443 // The DS 'XO' bits being set to 01 is restricted to DQ form.
444 return (encoding & 3) == 0x1;
445 }
446}
447
448static bool isDSFormInstruction(PPCLegacyInsn insn) {
449 switch (insn) {
450 default:
451 return false;
452 case PPCLegacyInsn::LWA:
453 case PPCLegacyInsn::LD:
454 case PPCLegacyInsn::LXSD:
455 case PPCLegacyInsn::LXSSP:
456 case PPCLegacyInsn::STD:
457 case PPCLegacyInsn::STXSD:
458 case PPCLegacyInsn::STXSSP:
459 return true;
460 }
461}
462
463static PPCLegacyInsn getPPCLegacyInsn(uint32_t encoding) {
464 uint32_t opc = encoding & 0xfc000000;
465
466 // If the primary opcode is shared between multiple instructions, we need to
467 // fix it up to match the actual instruction we are after.
468 if ((opc == 0xe4000000 || opc == 0xe8000000 || opc == 0xf4000000 ||
469 opc == 0xf8000000) &&
470 !isDQFormInstruction(encoding))
471 opc = encoding & 0xfc000003;
472 else if (opc == 0xf4000000)
473 opc = encoding & 0xfc000007;
474 else if (opc == 0x18000000)
475 opc = encoding & 0xfc00000f;
476
477 // If the value is not one of the enumerators in PPCLegacyInsn, we want to
478 // return PPCLegacyInsn::NOINSN.
479 if (!checkPPCLegacyInsn(encoding: opc))
480 return PPCLegacyInsn::NOINSN;
481 return static_cast<PPCLegacyInsn>(opc);
482}
483
484static PPCPrefixedInsn getPCRelativeForm(PPCLegacyInsn insn) {
485 switch (insn) {
486#define PCREL_OPT(Legacy, PCRel, InsnMask) \
487 case PPCLegacyInsn::Legacy: \
488 return PPCPrefixedInsn::PCRel
489#include "PPCInsns.def"
490#undef PCREL_OPT
491 }
492 return PPCPrefixedInsn::NOINSN;
493}
494
495static LegacyToPrefixMask getInsnMask(PPCLegacyInsn insn) {
496 switch (insn) {
497#define PCREL_OPT(Legacy, PCRel, InsnMask) \
498 case PPCLegacyInsn::Legacy: \
499 return LegacyToPrefixMask::InsnMask
500#include "PPCInsns.def"
501#undef PCREL_OPT
502 }
503 return LegacyToPrefixMask::NOMASK;
504}
505static uint64_t getPCRelativeForm(uint32_t encoding) {
506 PPCLegacyInsn origInsn = getPPCLegacyInsn(encoding);
507 PPCPrefixedInsn pcrelInsn = getPCRelativeForm(insn: origInsn);
508 if (pcrelInsn == PPCPrefixedInsn::NOINSN)
509 return UINT64_C(-1);
510 LegacyToPrefixMask origInsnMask = getInsnMask(insn: origInsn);
511 uint64_t pcrelEncoding =
512 (uint64_t)pcrelInsn | (encoding & (uint64_t)origInsnMask);
513
514 // If the mask requires moving bit 28 to bit 5, do that now.
515 if (origInsnMask == LegacyToPrefixMask::ST_STX28_TO5)
516 pcrelEncoding |= (encoding & 0x8) << 23;
517 return pcrelEncoding;
518}
519
520static bool isInstructionUpdateForm(uint32_t encoding) {
521 switch (getPrimaryOpCode(encoding)) {
522 default:
523 return false;
524 case LBZU:
525 case LHAU:
526 case LHZU:
527 case LWZU:
528 case LFSU:
529 case LFDU:
530 case STBU:
531 case STHU:
532 case STWU:
533 case STFSU:
534 case STFDU:
535 return true;
536 // LWA has the same opcode as LD, and the DS bits is what differentiates
537 // between LD/LDU/LWA
538 case LD:
539 case STD:
540 return (encoding & 3) == 1;
541 }
542}
543
544// Compute the total displacement between the prefixed instruction that gets
545// to the start of the data and the load/store instruction that has the offset
546// into the data structure.
547// For example:
548// paddi 3, 0, 1000, 1
549// lwz 3, 20(3)
550// Should add up to 1020 for total displacement.
551static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
552 int64_t disp34 = llvm::SignExtend64(
553 X: ((prefixedInsn & 0x3ffff00000000) >> 16) | (prefixedInsn & 0xffff), B: 34);
554 int32_t disp16 = llvm::SignExtend32(X: accessInsn & 0xffff, B: 16);
555 // For DS and DQ form instructions, we need to mask out the XO bits.
556 if (isDQFormInstruction(encoding: accessInsn))
557 disp16 &= ~0xf;
558 else if (isDSFormInstruction(insn: getPPCLegacyInsn(encoding: accessInsn)))
559 disp16 &= ~0x3;
560 return disp34 + disp16;
561}
562
563// There are a number of places when we either want to read or write an
564// instruction when handling a half16 relocation type. On big-endian the buffer
565// pointer is pointing into the middle of the word we want to extract, and on
566// little-endian it is pointing to the start of the word. These 2 helpers are to
567// simplify reading and writing in that context.
568static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
569 write32(p: config->isLE ? loc : loc - 2, v: insn);
570}
571
572static uint32_t readFromHalf16(const uint8_t *loc) {
573 return read32(p: config->isLE ? loc : loc - 2);
574}
575
576static uint64_t readPrefixedInstruction(const uint8_t *loc) {
577 uint64_t fullInstr = read64(p: loc);
578 return config->isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
579}
580
581PPC64::PPC64() {
582 copyRel = R_PPC64_COPY;
583 gotRel = R_PPC64_GLOB_DAT;
584 pltRel = R_PPC64_JMP_SLOT;
585 relativeRel = R_PPC64_RELATIVE;
586 iRelativeRel = R_PPC64_IRELATIVE;
587 symbolicRel = R_PPC64_ADDR64;
588 pltHeaderSize = 60;
589 pltEntrySize = 4;
590 ipltEntrySize = 16; // PPC64PltCallStub::size
591 gotHeaderEntriesNum = 1;
592 gotPltHeaderEntriesNum = 2;
593 needsThunks = true;
594
595 tlsModuleIndexRel = R_PPC64_DTPMOD64;
596 tlsOffsetRel = R_PPC64_DTPREL64;
597
598 tlsGotRel = R_PPC64_TPREL64;
599
600 needsMoreStackNonSplit = false;
601
602 // We need 64K pages (at least under glibc/Linux, the loader won't
603 // set different permissions on a finer granularity than that).
604 defaultMaxPageSize = 65536;
605
606 // The PPC64 ELF ABI v1 spec, says:
607 //
608 // It is normally desirable to put segments with different characteristics
609 // in separate 256 Mbyte portions of the address space, to give the
610 // operating system full paging flexibility in the 64-bit address space.
611 //
612 // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
613 // use 0x10000000 as the starting address.
614 defaultImageBase = 0x10000000;
615
616 write32(p: trapInstr.data(), v: 0x7fe00008);
617}
618
619int PPC64::getTlsGdRelaxSkip(RelType type) const {
620 // A __tls_get_addr call instruction is marked with 2 relocations:
621 //
622 // R_PPC64_TLSGD / R_PPC64_TLSLD: marker relocation
623 // R_PPC64_REL24: __tls_get_addr
624 //
625 // After the relaxation we no longer call __tls_get_addr and should skip both
626 // relocations to not create a false dependence on __tls_get_addr being
627 // defined.
628 if (type == R_PPC64_TLSGD || type == R_PPC64_TLSLD)
629 return 2;
630 return 1;
631}
632
633static uint32_t getEFlags(InputFile *file) {
634 if (file->ekind == ELF64BEKind)
635 return cast<ObjFile<ELF64BE>>(Val: file)->getObj().getHeader().e_flags;
636 return cast<ObjFile<ELF64LE>>(Val: file)->getObj().getHeader().e_flags;
637}
638
639// This file implements v2 ABI. This function makes sure that all
640// object files have v2 or an unspecified version as an ABI version.
641uint32_t PPC64::calcEFlags() const {
642 for (InputFile *f : ctx.objectFiles) {
643 uint32_t flag = getEFlags(file: f);
644 if (flag == 1)
645 error(msg: toString(f) + ": ABI version 1 is not supported");
646 else if (flag > 2)
647 error(msg: toString(f) + ": unrecognized e_flags: " + Twine(flag));
648 }
649 return 2;
650}
651
652void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
653 switch (rel.type) {
654 case R_PPC64_TOC16_HA:
655 // Convert "addis reg, 2, .LC0@toc@h" to "addis reg, 2, var@toc@h" or "nop".
656 relocate(loc, rel, val);
657 break;
658 case R_PPC64_TOC16_LO_DS: {
659 // Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or
660 // "addi reg, 2, var@toc".
661 uint32_t insn = readFromHalf16(loc);
662 if (getPrimaryOpCode(encoding: insn) != LD)
663 error(msg: "expected a 'ld' for got-indirect to toc-relative relaxing");
664 writeFromHalf16(loc, insn: (insn & 0x03ffffff) | 0x38000000);
665 relocateNoSym(loc, type: R_PPC64_TOC16_LO, val);
666 break;
667 }
668 case R_PPC64_GOT_PCREL34: {
669 // Clear the first 8 bits of the prefix and the first 6 bits of the
670 // instruction (the primary opcode).
671 uint64_t insn = readPrefixedInstruction(loc);
672 if ((insn & 0xfc000000) != 0xe4000000)
673 error(msg: "expected a 'pld' for got-indirect to pc-relative relaxing");
674 insn &= ~0xff000000fc000000;
675
676 // Replace the cleared bits with the values for PADDI (0x600000038000000);
677 insn |= 0x600000038000000;
678 writePrefixedInstruction(loc, insn);
679 relocate(loc, rel, val);
680 break;
681 }
682 case R_PPC64_PCREL_OPT: {
683 // We can only relax this if the R_PPC64_GOT_PCREL34 at this offset can
684 // be relaxed. The eligibility for the relaxation needs to be determined
685 // on that relocation since this one does not relocate a symbol.
686 uint64_t insn = readPrefixedInstruction(loc);
687 uint32_t accessInsn = read32(p: loc + rel.addend);
688 uint64_t pcRelInsn = getPCRelativeForm(encoding: accessInsn);
689
690 // This error is not necessary for correctness but is emitted for now
691 // to ensure we don't miss these opportunities in real code. It can be
692 // removed at a later date.
693 if (pcRelInsn == UINT64_C(-1)) {
694 errorOrWarn(
695 msg: "unrecognized instruction for R_PPC64_PCREL_OPT relaxation: 0x" +
696 Twine::utohexstr(Val: accessInsn));
697 break;
698 }
699
700 int64_t totalDisp = getTotalDisp(prefixedInsn: insn, accessInsn);
701 if (!isInt<34>(x: totalDisp))
702 break; // Displacement doesn't fit.
703 // Convert the PADDI to the prefixed version of accessInsn and convert
704 // accessInsn to a nop.
705 writePrefixedInstruction(loc, insn: pcRelInsn |
706 ((totalDisp & 0x3ffff0000) << 16) |
707 (totalDisp & 0xffff));
708 write32(p: loc + rel.addend, v: NOP); // nop accessInsn.
709 break;
710 }
711 default:
712 llvm_unreachable("unexpected relocation type");
713 }
714}
715
716void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
717 uint64_t val) const {
718 // Reference: 3.7.4.2 of the 64-bit ELF V2 abi supplement.
719 // The general dynamic code sequence for a global `x` will look like:
720 // Instruction Relocation Symbol
721 // addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x
722 // addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x
723 // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x
724 // R_PPC64_REL24 __tls_get_addr
725 // nop None None
726
727 // Relaxing to local exec entails converting:
728 // addis r3, r2, x@got@tlsgd@ha into nop
729 // addi r3, r3, x@got@tlsgd@l into addis r3, r13, x@tprel@ha
730 // bl __tls_get_addr(x@tlsgd) into nop
731 // nop into addi r3, r3, x@tprel@l
732
733 switch (rel.type) {
734 case R_PPC64_GOT_TLSGD16_HA:
735 writeFromHalf16(loc, insn: NOP);
736 break;
737 case R_PPC64_GOT_TLSGD16:
738 case R_PPC64_GOT_TLSGD16_LO:
739 writeFromHalf16(loc, insn: 0x3c6d0000); // addis r3, r13
740 relocateNoSym(loc, type: R_PPC64_TPREL16_HA, val);
741 break;
742 case R_PPC64_GOT_TLSGD_PCREL34:
743 // Relax from paddi r3, 0, x@got@tlsgd@pcrel, 1 to
744 // paddi r3, r13, x@tprel, 0
745 writePrefixedInstruction(loc, insn: 0x06000000386d0000);
746 relocateNoSym(loc, type: R_PPC64_TPREL34, val);
747 break;
748 case R_PPC64_TLSGD: {
749 // PC Relative Relaxation:
750 // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
751 // nop
752 // TOC Relaxation:
753 // Relax from bl __tls_get_addr(x@tlsgd)
754 // nop
755 // to
756 // nop
757 // addi r3, r3, x@tprel@l
758 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
759 if (locAsInt % 4 == 0) {
760 write32(p: loc, v: NOP); // nop
761 write32(p: loc + 4, v: 0x38630000); // addi r3, r3
762 // Since we are relocating a half16 type relocation and Loc + 4 points to
763 // the start of an instruction we need to advance the buffer by an extra
764 // 2 bytes on BE.
765 relocateNoSym(loc: loc + 4 + (config->ekind == ELF64BEKind ? 2 : 0),
766 type: R_PPC64_TPREL16_LO, val);
767 } else if (locAsInt % 4 == 1) {
768 write32(p: loc - 1, v: NOP);
769 } else {
770 errorOrWarn(msg: "R_PPC64_TLSGD has unexpected byte alignment");
771 }
772 break;
773 }
774 default:
775 llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
776 }
777}
778
779void PPC64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
780 uint64_t val) const {
781 // Reference: 3.7.4.3 of the 64-bit ELF V2 abi supplement.
782 // The local dynamic code sequence for a global `x` will look like:
783 // Instruction Relocation Symbol
784 // addis r3, r2, x@got@tlsld@ha R_PPC64_GOT_TLSLD16_HA x
785 // addi r3, r3, x@got@tlsld@l R_PPC64_GOT_TLSLD16_LO x
786 // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSLD x
787 // R_PPC64_REL24 __tls_get_addr
788 // nop None None
789
790 // Relaxing to local exec entails converting:
791 // addis r3, r2, x@got@tlsld@ha into nop
792 // addi r3, r3, x@got@tlsld@l into addis r3, r13, 0
793 // bl __tls_get_addr(x@tlsgd) into nop
794 // nop into addi r3, r3, 4096
795
796 switch (rel.type) {
797 case R_PPC64_GOT_TLSLD16_HA:
798 writeFromHalf16(loc, insn: NOP);
799 break;
800 case R_PPC64_GOT_TLSLD16_LO:
801 writeFromHalf16(loc, insn: 0x3c6d0000); // addis r3, r13, 0
802 break;
803 case R_PPC64_GOT_TLSLD_PCREL34:
804 // Relax from paddi r3, 0, x1@got@tlsld@pcrel, 1 to
805 // paddi r3, r13, 0x1000, 0
806 writePrefixedInstruction(loc, insn: 0x06000000386d1000);
807 break;
808 case R_PPC64_TLSLD: {
809 // PC Relative Relaxation:
810 // Relax from bl __tls_get_addr@notoc(x@tlsld)
811 // to
812 // nop
813 // TOC Relaxation:
814 // Relax from bl __tls_get_addr(x@tlsld)
815 // nop
816 // to
817 // nop
818 // addi r3, r3, 4096
819 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
820 if (locAsInt % 4 == 0) {
821 write32(p: loc, v: NOP);
822 write32(p: loc + 4, v: 0x38631000); // addi r3, r3, 4096
823 } else if (locAsInt % 4 == 1) {
824 write32(p: loc - 1, v: NOP);
825 } else {
826 errorOrWarn(msg: "R_PPC64_TLSLD has unexpected byte alignment");
827 }
828 break;
829 }
830 case R_PPC64_DTPREL16:
831 case R_PPC64_DTPREL16_HA:
832 case R_PPC64_DTPREL16_HI:
833 case R_PPC64_DTPREL16_DS:
834 case R_PPC64_DTPREL16_LO:
835 case R_PPC64_DTPREL16_LO_DS:
836 case R_PPC64_DTPREL34:
837 relocate(loc, rel, val);
838 break;
839 default:
840 llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
841 }
842}
843
844// Map X-Form instructions to their DS-Form counterparts, if applicable.
845// The full encoding is returned here to distinguish between the different
846// DS-Form instructions.
847unsigned elf::getPPCDSFormOp(unsigned secondaryOp) {
848 switch (secondaryOp) {
849 case LWAX:
850 return (LWA << 26) | 0x2;
851 case LDX:
852 return LD << 26;
853 case STDX:
854 return STD << 26;
855 default:
856 return 0;
857 }
858}
859
860unsigned elf::getPPCDFormOp(unsigned secondaryOp) {
861 switch (secondaryOp) {
862 case LBZX:
863 return LBZ << 26;
864 case LHZX:
865 return LHZ << 26;
866 case LWZX:
867 return LWZ << 26;
868 case STBX:
869 return STB << 26;
870 case STHX:
871 return STH << 26;
872 case STWX:
873 return STW << 26;
874 case LHAX:
875 return LHA << 26;
876 case LFSX:
877 return LFS << 26;
878 case LFDX:
879 return LFD << 26;
880 case STFSX:
881 return STFS << 26;
882 case STFDX:
883 return STFD << 26;
884 case ADD:
885 return ADDI << 26;
886 default:
887 return 0;
888 }
889}
890
891void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
892 uint64_t val) const {
893 // The initial exec code sequence for a global `x` will look like:
894 // Instruction Relocation Symbol
895 // addis r9, r2, x@got@tprel@ha R_PPC64_GOT_TPREL16_HA x
896 // ld r9, x@got@tprel@l(r9) R_PPC64_GOT_TPREL16_LO_DS x
897 // add r9, r9, x@tls R_PPC64_TLS x
898
899 // Relaxing to local exec entails converting:
900 // addis r9, r2, x@got@tprel@ha into nop
901 // ld r9, x@got@tprel@l(r9) into addis r9, r13, x@tprel@ha
902 // add r9, r9, x@tls into addi r9, r9, x@tprel@l
903
904 // x@tls R_PPC64_TLS is a relocation which does not compute anything,
905 // it is replaced with r13 (thread pointer).
906
907 // The add instruction in the initial exec sequence has multiple variations
908 // that need to be handled. If we are building an address it will use an add
909 // instruction, if we are accessing memory it will use any of the X-form
910 // indexed load or store instructions.
911
912 unsigned offset = (config->ekind == ELF64BEKind) ? 2 : 0;
913 switch (rel.type) {
914 case R_PPC64_GOT_TPREL16_HA:
915 write32(p: loc - offset, v: NOP);
916 break;
917 case R_PPC64_GOT_TPREL16_LO_DS:
918 case R_PPC64_GOT_TPREL16_DS: {
919 uint32_t regNo = read32(p: loc - offset) & 0x03E00000; // bits 6-10
920 write32(p: loc - offset, v: 0x3C0D0000 | regNo); // addis RegNo, r13
921 relocateNoSym(loc, type: R_PPC64_TPREL16_HA, val);
922 break;
923 }
924 case R_PPC64_GOT_TPREL_PCREL34: {
925 const uint64_t pldRT = readPrefixedInstruction(loc) & 0x0000000003e00000;
926 // paddi RT(from pld), r13, symbol@tprel, 0
927 writePrefixedInstruction(loc, insn: 0x06000000380d0000 | pldRT);
928 relocateNoSym(loc, type: R_PPC64_TPREL34, val);
929 break;
930 }
931 case R_PPC64_TLS: {
932 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
933 if (locAsInt % 4 == 0) {
934 uint32_t primaryOp = getPrimaryOpCode(encoding: read32(p: loc));
935 if (primaryOp != 31)
936 error(msg: "unrecognized instruction for IE to LE R_PPC64_TLS");
937 uint32_t secondaryOp = (read32(p: loc) & 0x000007FE) >> 1; // bits 21-30
938 uint32_t dFormOp = getPPCDFormOp(secondaryOp);
939 uint32_t finalReloc;
940 if (dFormOp == 0) { // Expecting a DS-Form instruction.
941 dFormOp = getPPCDSFormOp(secondaryOp);
942 if (dFormOp == 0)
943 error(msg: "unrecognized instruction for IE to LE R_PPC64_TLS");
944 finalReloc = R_PPC64_TPREL16_LO_DS;
945 } else
946 finalReloc = R_PPC64_TPREL16_LO;
947 write32(p: loc, v: dFormOp | (read32(p: loc) & 0x03ff0000));
948 relocateNoSym(loc: loc + offset, type: finalReloc, val);
949 } else if (locAsInt % 4 == 1) {
950 // If the offset is not 4 byte aligned then we have a PCRel type reloc.
951 // This version of the relocation is offset by one byte from the
952 // instruction it references.
953 uint32_t tlsInstr = read32(p: loc - 1);
954 uint32_t primaryOp = getPrimaryOpCode(encoding: tlsInstr);
955 if (primaryOp != 31)
956 errorOrWarn(msg: "unrecognized instruction for IE to LE R_PPC64_TLS");
957 uint32_t secondaryOp = (tlsInstr & 0x000007FE) >> 1; // bits 21-30
958 // The add is a special case and should be turned into a nop. The paddi
959 // that comes before it will already have computed the address of the
960 // symbol.
961 if (secondaryOp == 266) {
962 // Check if the add uses the same result register as the input register.
963 uint32_t rt = (tlsInstr & 0x03E00000) >> 21; // bits 6-10
964 uint32_t ra = (tlsInstr & 0x001F0000) >> 16; // bits 11-15
965 if (ra == rt) {
966 write32(p: loc - 1, v: NOP);
967 } else {
968 // mr rt, ra
969 write32(p: loc - 1, v: 0x7C000378 | (rt << 16) | (ra << 21) | (ra << 11));
970 }
971 } else {
972 uint32_t dFormOp = getPPCDFormOp(secondaryOp);
973 if (dFormOp == 0) { // Expecting a DS-Form instruction.
974 dFormOp = getPPCDSFormOp(secondaryOp);
975 if (dFormOp == 0)
976 errorOrWarn(msg: "unrecognized instruction for IE to LE R_PPC64_TLS");
977 }
978 write32(p: loc - 1, v: (dFormOp | (tlsInstr & 0x03ff0000)));
979 }
980 } else {
981 errorOrWarn(msg: "R_PPC64_TLS must be either 4 byte aligned or one byte "
982 "offset from 4 byte aligned");
983 }
984 break;
985 }
986 default:
987 llvm_unreachable("unknown relocation for IE to LE");
988 break;
989 }
990}
991
992RelExpr PPC64::getRelExpr(RelType type, const Symbol &s,
993 const uint8_t *loc) const {
994 switch (type) {
995 case R_PPC64_NONE:
996 return R_NONE;
997 case R_PPC64_ADDR16:
998 case R_PPC64_ADDR16_DS:
999 case R_PPC64_ADDR16_HA:
1000 case R_PPC64_ADDR16_HI:
1001 case R_PPC64_ADDR16_HIGH:
1002 case R_PPC64_ADDR16_HIGHER:
1003 case R_PPC64_ADDR16_HIGHERA:
1004 case R_PPC64_ADDR16_HIGHEST:
1005 case R_PPC64_ADDR16_HIGHESTA:
1006 case R_PPC64_ADDR16_LO:
1007 case R_PPC64_ADDR16_LO_DS:
1008 case R_PPC64_ADDR32:
1009 case R_PPC64_ADDR64:
1010 return R_ABS;
1011 case R_PPC64_GOT16:
1012 case R_PPC64_GOT16_DS:
1013 case R_PPC64_GOT16_HA:
1014 case R_PPC64_GOT16_HI:
1015 case R_PPC64_GOT16_LO:
1016 case R_PPC64_GOT16_LO_DS:
1017 return R_GOT_OFF;
1018 case R_PPC64_TOC16:
1019 case R_PPC64_TOC16_DS:
1020 case R_PPC64_TOC16_HI:
1021 case R_PPC64_TOC16_LO:
1022 return R_GOTREL;
1023 case R_PPC64_GOT_PCREL34:
1024 case R_PPC64_GOT_TPREL_PCREL34:
1025 case R_PPC64_PCREL_OPT:
1026 return R_GOT_PC;
1027 case R_PPC64_TOC16_HA:
1028 case R_PPC64_TOC16_LO_DS:
1029 return config->tocOptimize ? R_PPC64_RELAX_TOC : R_GOTREL;
1030 case R_PPC64_TOC:
1031 return R_PPC64_TOCBASE;
1032 case R_PPC64_REL14:
1033 case R_PPC64_REL24:
1034 return R_PPC64_CALL_PLT;
1035 case R_PPC64_REL24_NOTOC:
1036 return R_PLT_PC;
1037 case R_PPC64_REL16_LO:
1038 case R_PPC64_REL16_HA:
1039 case R_PPC64_REL16_HI:
1040 case R_PPC64_REL32:
1041 case R_PPC64_REL64:
1042 case R_PPC64_PCREL34:
1043 return R_PC;
1044 case R_PPC64_GOT_TLSGD16:
1045 case R_PPC64_GOT_TLSGD16_HA:
1046 case R_PPC64_GOT_TLSGD16_HI:
1047 case R_PPC64_GOT_TLSGD16_LO:
1048 return R_TLSGD_GOT;
1049 case R_PPC64_GOT_TLSGD_PCREL34:
1050 return R_TLSGD_PC;
1051 case R_PPC64_GOT_TLSLD16:
1052 case R_PPC64_GOT_TLSLD16_HA:
1053 case R_PPC64_GOT_TLSLD16_HI:
1054 case R_PPC64_GOT_TLSLD16_LO:
1055 return R_TLSLD_GOT;
1056 case R_PPC64_GOT_TLSLD_PCREL34:
1057 return R_TLSLD_PC;
1058 case R_PPC64_GOT_TPREL16_HA:
1059 case R_PPC64_GOT_TPREL16_LO_DS:
1060 case R_PPC64_GOT_TPREL16_DS:
1061 case R_PPC64_GOT_TPREL16_HI:
1062 return R_GOT_OFF;
1063 case R_PPC64_GOT_DTPREL16_HA:
1064 case R_PPC64_GOT_DTPREL16_LO_DS:
1065 case R_PPC64_GOT_DTPREL16_DS:
1066 case R_PPC64_GOT_DTPREL16_HI:
1067 return R_TLSLD_GOT_OFF;
1068 case R_PPC64_TPREL16:
1069 case R_PPC64_TPREL16_HA:
1070 case R_PPC64_TPREL16_LO:
1071 case R_PPC64_TPREL16_HI:
1072 case R_PPC64_TPREL16_DS:
1073 case R_PPC64_TPREL16_LO_DS:
1074 case R_PPC64_TPREL16_HIGHER:
1075 case R_PPC64_TPREL16_HIGHERA:
1076 case R_PPC64_TPREL16_HIGHEST:
1077 case R_PPC64_TPREL16_HIGHESTA:
1078 case R_PPC64_TPREL34:
1079 return R_TPREL;
1080 case R_PPC64_DTPREL16:
1081 case R_PPC64_DTPREL16_DS:
1082 case R_PPC64_DTPREL16_HA:
1083 case R_PPC64_DTPREL16_HI:
1084 case R_PPC64_DTPREL16_HIGHER:
1085 case R_PPC64_DTPREL16_HIGHERA:
1086 case R_PPC64_DTPREL16_HIGHEST:
1087 case R_PPC64_DTPREL16_HIGHESTA:
1088 case R_PPC64_DTPREL16_LO:
1089 case R_PPC64_DTPREL16_LO_DS:
1090 case R_PPC64_DTPREL64:
1091 case R_PPC64_DTPREL34:
1092 return R_DTPREL;
1093 case R_PPC64_TLSGD:
1094 return R_TLSDESC_CALL;
1095 case R_PPC64_TLSLD:
1096 return R_TLSLD_HINT;
1097 case R_PPC64_TLS:
1098 return R_TLSIE_HINT;
1099 default:
1100 error(msg: getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
1101 ") against symbol " + toString(s));
1102 return R_NONE;
1103 }
1104}
1105
1106RelType PPC64::getDynRel(RelType type) const {
1107 if (type == R_PPC64_ADDR64 || type == R_PPC64_TOC)
1108 return R_PPC64_ADDR64;
1109 return R_PPC64_NONE;
1110}
1111
1112int64_t PPC64::getImplicitAddend(const uint8_t *buf, RelType type) const {
1113 switch (type) {
1114 case R_PPC64_NONE:
1115 case R_PPC64_GLOB_DAT:
1116 case R_PPC64_JMP_SLOT:
1117 return 0;
1118 case R_PPC64_REL32:
1119 return SignExtend64<32>(x: read32(p: buf));
1120 case R_PPC64_ADDR64:
1121 case R_PPC64_REL64:
1122 case R_PPC64_RELATIVE:
1123 case R_PPC64_IRELATIVE:
1124 case R_PPC64_DTPMOD64:
1125 case R_PPC64_DTPREL64:
1126 case R_PPC64_TPREL64:
1127 return read64(p: buf);
1128 default:
1129 internalLinkerError(loc: getErrorLocation(loc: buf),
1130 msg: "cannot read addend for relocation " + toString(type));
1131 return 0;
1132 }
1133}
1134
1135void PPC64::writeGotHeader(uint8_t *buf) const {
1136 write64(p: buf, v: getPPC64TocBase());
1137}
1138
1139void PPC64::writePltHeader(uint8_t *buf) const {
1140 // The generic resolver stub goes first.
1141 write32(p: buf + 0, v: 0x7c0802a6); // mflr r0
1142 write32(p: buf + 4, v: 0x429f0005); // bcl 20,4*cr7+so,8 <_glink+0x8>
1143 write32(p: buf + 8, v: 0x7d6802a6); // mflr r11
1144 write32(p: buf + 12, v: 0x7c0803a6); // mtlr r0
1145 write32(p: buf + 16, v: 0x7d8b6050); // subf r12, r11, r12
1146 write32(p: buf + 20, v: 0x380cffcc); // subi r0,r12,52
1147 write32(p: buf + 24, v: 0x7800f082); // srdi r0,r0,62,2
1148 write32(p: buf + 28, v: 0xe98b002c); // ld r12,44(r11)
1149 write32(p: buf + 32, v: 0x7d6c5a14); // add r11,r12,r11
1150 write32(p: buf + 36, v: 0xe98b0000); // ld r12,0(r11)
1151 write32(p: buf + 40, v: 0xe96b0008); // ld r11,8(r11)
1152 write32(p: buf + 44, v: 0x7d8903a6); // mtctr r12
1153 write32(p: buf + 48, v: 0x4e800420); // bctr
1154
1155 // The 'bcl' instruction will set the link register to the address of the
1156 // following instruction ('mflr r11'). Here we store the offset from that
1157 // instruction to the first entry in the GotPlt section.
1158 int64_t gotPltOffset = in.gotPlt->getVA() - (in.plt->getVA() + 8);
1159 write64(p: buf + 52, v: gotPltOffset);
1160}
1161
1162void PPC64::writePlt(uint8_t *buf, const Symbol &sym,
1163 uint64_t /*pltEntryAddr*/) const {
1164 int32_t offset = pltHeaderSize + sym.getPltIdx() * pltEntrySize;
1165 // bl __glink_PLTresolve
1166 write32(p: buf, v: 0x48000000 | ((-offset) & 0x03FFFFFc));
1167}
1168
1169void PPC64::writeIplt(uint8_t *buf, const Symbol &sym,
1170 uint64_t /*pltEntryAddr*/) const {
1171 writePPC64LoadAndBranch(buf, offset: sym.getGotPltVA() - getPPC64TocBase());
1172}
1173
1174static std::pair<RelType, uint64_t> toAddr16Rel(RelType type, uint64_t val) {
1175 // Relocations relative to the toc-base need to be adjusted by the Toc offset.
1176 uint64_t tocBiasedVal = val - ppc64TocOffset;
1177 // Relocations relative to dtv[dtpmod] need to be adjusted by the DTP offset.
1178 uint64_t dtpBiasedVal = val - dynamicThreadPointerOffset;
1179
1180 switch (type) {
1181 // TOC biased relocation.
1182 case R_PPC64_GOT16:
1183 case R_PPC64_GOT_TLSGD16:
1184 case R_PPC64_GOT_TLSLD16:
1185 case R_PPC64_TOC16:
1186 return {R_PPC64_ADDR16, tocBiasedVal};
1187 case R_PPC64_GOT16_DS:
1188 case R_PPC64_TOC16_DS:
1189 case R_PPC64_GOT_TPREL16_DS:
1190 case R_PPC64_GOT_DTPREL16_DS:
1191 return {R_PPC64_ADDR16_DS, tocBiasedVal};
1192 case R_PPC64_GOT16_HA:
1193 case R_PPC64_GOT_TLSGD16_HA:
1194 case R_PPC64_GOT_TLSLD16_HA:
1195 case R_PPC64_GOT_TPREL16_HA:
1196 case R_PPC64_GOT_DTPREL16_HA:
1197 case R_PPC64_TOC16_HA:
1198 return {R_PPC64_ADDR16_HA, tocBiasedVal};
1199 case R_PPC64_GOT16_HI:
1200 case R_PPC64_GOT_TLSGD16_HI:
1201 case R_PPC64_GOT_TLSLD16_HI:
1202 case R_PPC64_GOT_TPREL16_HI:
1203 case R_PPC64_GOT_DTPREL16_HI:
1204 case R_PPC64_TOC16_HI:
1205 return {R_PPC64_ADDR16_HI, tocBiasedVal};
1206 case R_PPC64_GOT16_LO:
1207 case R_PPC64_GOT_TLSGD16_LO:
1208 case R_PPC64_GOT_TLSLD16_LO:
1209 case R_PPC64_TOC16_LO:
1210 return {R_PPC64_ADDR16_LO, tocBiasedVal};
1211 case R_PPC64_GOT16_LO_DS:
1212 case R_PPC64_TOC16_LO_DS:
1213 case R_PPC64_GOT_TPREL16_LO_DS:
1214 case R_PPC64_GOT_DTPREL16_LO_DS:
1215 return {R_PPC64_ADDR16_LO_DS, tocBiasedVal};
1216
1217 // Dynamic Thread pointer biased relocation types.
1218 case R_PPC64_DTPREL16:
1219 return {R_PPC64_ADDR16, dtpBiasedVal};
1220 case R_PPC64_DTPREL16_DS:
1221 return {R_PPC64_ADDR16_DS, dtpBiasedVal};
1222 case R_PPC64_DTPREL16_HA:
1223 return {R_PPC64_ADDR16_HA, dtpBiasedVal};
1224 case R_PPC64_DTPREL16_HI:
1225 return {R_PPC64_ADDR16_HI, dtpBiasedVal};
1226 case R_PPC64_DTPREL16_HIGHER:
1227 return {R_PPC64_ADDR16_HIGHER, dtpBiasedVal};
1228 case R_PPC64_DTPREL16_HIGHERA:
1229 return {R_PPC64_ADDR16_HIGHERA, dtpBiasedVal};
1230 case R_PPC64_DTPREL16_HIGHEST:
1231 return {R_PPC64_ADDR16_HIGHEST, dtpBiasedVal};
1232 case R_PPC64_DTPREL16_HIGHESTA:
1233 return {R_PPC64_ADDR16_HIGHESTA, dtpBiasedVal};
1234 case R_PPC64_DTPREL16_LO:
1235 return {R_PPC64_ADDR16_LO, dtpBiasedVal};
1236 case R_PPC64_DTPREL16_LO_DS:
1237 return {R_PPC64_ADDR16_LO_DS, dtpBiasedVal};
1238 case R_PPC64_DTPREL64:
1239 return {R_PPC64_ADDR64, dtpBiasedVal};
1240
1241 default:
1242 return {type, val};
1243 }
1244}
1245
1246static bool isTocOptType(RelType type) {
1247 switch (type) {
1248 case R_PPC64_GOT16_HA:
1249 case R_PPC64_GOT16_LO_DS:
1250 case R_PPC64_TOC16_HA:
1251 case R_PPC64_TOC16_LO_DS:
1252 case R_PPC64_TOC16_LO:
1253 return true;
1254 default:
1255 return false;
1256 }
1257}
1258
1259void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
1260 RelType type = rel.type;
1261 bool shouldTocOptimize = isTocOptType(type);
1262 // For dynamic thread pointer relative, toc-relative, and got-indirect
1263 // relocations, proceed in terms of the corresponding ADDR16 relocation type.
1264 std::tie(args&: type, args&: val) = toAddr16Rel(type, val);
1265
1266 switch (type) {
1267 case R_PPC64_ADDR14: {
1268 checkAlignment(loc, v: val, n: 4, rel);
1269 // Preserve the AA/LK bits in the branch instruction
1270 uint8_t aalk = loc[3];
1271 write16(p: loc + 2, v: (aalk & 3) | (val & 0xfffc));
1272 break;
1273 }
1274 case R_PPC64_ADDR16:
1275 checkIntUInt(loc, v: val, n: 16, rel);
1276 write16(p: loc, v: val);
1277 break;
1278 case R_PPC64_ADDR32:
1279 checkIntUInt(loc, v: val, n: 32, rel);
1280 write32(p: loc, v: val);
1281 break;
1282 case R_PPC64_ADDR16_DS:
1283 case R_PPC64_TPREL16_DS: {
1284 checkInt(loc, v: val, n: 16, rel);
1285 // DQ-form instructions use bits 28-31 as part of the instruction encoding
1286 // DS-form instructions only use bits 30-31.
1287 uint16_t mask = isDQFormInstruction(encoding: readFromHalf16(loc)) ? 0xf : 0x3;
1288 checkAlignment(loc, v: lo(v: val), n: mask + 1, rel);
1289 write16(p: loc, v: (read16(p: loc) & mask) | lo(v: val));
1290 } break;
1291 case R_PPC64_ADDR16_HA:
1292 case R_PPC64_REL16_HA:
1293 case R_PPC64_TPREL16_HA:
1294 if (config->tocOptimize && shouldTocOptimize && ha(v: val) == 0)
1295 writeFromHalf16(loc, insn: NOP);
1296 else {
1297 checkInt(loc, v: val + 0x8000, n: 32, rel);
1298 write16(p: loc, v: ha(v: val));
1299 }
1300 break;
1301 case R_PPC64_ADDR16_HI:
1302 case R_PPC64_REL16_HI:
1303 case R_PPC64_TPREL16_HI:
1304 checkInt(loc, v: val, n: 32, rel);
1305 write16(p: loc, v: hi(v: val));
1306 break;
1307 case R_PPC64_ADDR16_HIGH:
1308 write16(p: loc, v: hi(v: val));
1309 break;
1310 case R_PPC64_ADDR16_HIGHER:
1311 case R_PPC64_TPREL16_HIGHER:
1312 write16(p: loc, v: higher(v: val));
1313 break;
1314 case R_PPC64_ADDR16_HIGHERA:
1315 case R_PPC64_TPREL16_HIGHERA:
1316 write16(p: loc, v: highera(v: val));
1317 break;
1318 case R_PPC64_ADDR16_HIGHEST:
1319 case R_PPC64_TPREL16_HIGHEST:
1320 write16(p: loc, v: highest(v: val));
1321 break;
1322 case R_PPC64_ADDR16_HIGHESTA:
1323 case R_PPC64_TPREL16_HIGHESTA:
1324 write16(p: loc, v: highesta(v: val));
1325 break;
1326 case R_PPC64_ADDR16_LO:
1327 case R_PPC64_REL16_LO:
1328 case R_PPC64_TPREL16_LO:
1329 // When the high-adjusted part of a toc relocation evaluates to 0, it is
1330 // changed into a nop. The lo part then needs to be updated to use the
1331 // toc-pointer register r2, as the base register.
1332 if (config->tocOptimize && shouldTocOptimize && ha(v: val) == 0) {
1333 uint32_t insn = readFromHalf16(loc);
1334 if (isInstructionUpdateForm(encoding: insn))
1335 error(msg: getErrorLocation(loc) +
1336 "can't toc-optimize an update instruction: 0x" +
1337 utohexstr(X: insn));
1338 writeFromHalf16(loc, insn: (insn & 0xffe00000) | 0x00020000 | lo(v: val));
1339 } else {
1340 write16(p: loc, v: lo(v: val));
1341 }
1342 break;
1343 case R_PPC64_ADDR16_LO_DS:
1344 case R_PPC64_TPREL16_LO_DS: {
1345 // DQ-form instructions use bits 28-31 as part of the instruction encoding
1346 // DS-form instructions only use bits 30-31.
1347 uint32_t insn = readFromHalf16(loc);
1348 uint16_t mask = isDQFormInstruction(encoding: insn) ? 0xf : 0x3;
1349 checkAlignment(loc, v: lo(v: val), n: mask + 1, rel);
1350 if (config->tocOptimize && shouldTocOptimize && ha(v: val) == 0) {
1351 // When the high-adjusted part of a toc relocation evaluates to 0, it is
1352 // changed into a nop. The lo part then needs to be updated to use the toc
1353 // pointer register r2, as the base register.
1354 if (isInstructionUpdateForm(encoding: insn))
1355 error(msg: getErrorLocation(loc) +
1356 "Can't toc-optimize an update instruction: 0x" +
1357 Twine::utohexstr(Val: insn));
1358 insn &= 0xffe00000 | mask;
1359 writeFromHalf16(loc, insn: insn | 0x00020000 | lo(v: val));
1360 } else {
1361 write16(p: loc, v: (read16(p: loc) & mask) | lo(v: val));
1362 }
1363 } break;
1364 case R_PPC64_TPREL16:
1365 checkInt(loc, v: val, n: 16, rel);
1366 write16(p: loc, v: val);
1367 break;
1368 case R_PPC64_REL32:
1369 checkInt(loc, v: val, n: 32, rel);
1370 write32(p: loc, v: val);
1371 break;
1372 case R_PPC64_ADDR64:
1373 case R_PPC64_REL64:
1374 case R_PPC64_TOC:
1375 write64(p: loc, v: val);
1376 break;
1377 case R_PPC64_REL14: {
1378 uint32_t mask = 0x0000FFFC;
1379 checkInt(loc, v: val, n: 16, rel);
1380 checkAlignment(loc, v: val, n: 4, rel);
1381 write32(p: loc, v: (read32(p: loc) & ~mask) | (val & mask));
1382 break;
1383 }
1384 case R_PPC64_REL24:
1385 case R_PPC64_REL24_NOTOC: {
1386 uint32_t mask = 0x03FFFFFC;
1387 checkInt(loc, v: val, n: 26, rel);
1388 checkAlignment(loc, v: val, n: 4, rel);
1389 write32(p: loc, v: (read32(p: loc) & ~mask) | (val & mask));
1390 break;
1391 }
1392 case R_PPC64_DTPREL64:
1393 write64(p: loc, v: val - dynamicThreadPointerOffset);
1394 break;
1395 case R_PPC64_DTPREL34:
1396 // The Dynamic Thread Vector actually points 0x8000 bytes past the start
1397 // of the TLS block. Therefore, in the case of R_PPC64_DTPREL34 we first
1398 // need to subtract that value then fallthrough to the general case.
1399 val -= dynamicThreadPointerOffset;
1400 [[fallthrough]];
1401 case R_PPC64_PCREL34:
1402 case R_PPC64_GOT_PCREL34:
1403 case R_PPC64_GOT_TLSGD_PCREL34:
1404 case R_PPC64_GOT_TLSLD_PCREL34:
1405 case R_PPC64_GOT_TPREL_PCREL34:
1406 case R_PPC64_TPREL34: {
1407 const uint64_t si0Mask = 0x00000003ffff0000;
1408 const uint64_t si1Mask = 0x000000000000ffff;
1409 const uint64_t fullMask = 0x0003ffff0000ffff;
1410 checkInt(loc, v: val, n: 34, rel);
1411
1412 uint64_t instr = readPrefixedInstruction(loc) & ~fullMask;
1413 writePrefixedInstruction(loc, insn: instr | ((val & si0Mask) << 16) |
1414 (val & si1Mask));
1415 break;
1416 }
1417 // If we encounter a PCREL_OPT relocation that we won't optimize.
1418 case R_PPC64_PCREL_OPT:
1419 break;
1420 default:
1421 llvm_unreachable("unknown relocation");
1422 }
1423}
1424
1425bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
1426 uint64_t branchAddr, const Symbol &s, int64_t a) const {
1427 if (type != R_PPC64_REL14 && type != R_PPC64_REL24 &&
1428 type != R_PPC64_REL24_NOTOC)
1429 return false;
1430
1431 // If a function is in the Plt it needs to be called with a call-stub.
1432 if (s.isInPlt())
1433 return true;
1434
1435 // This check looks at the st_other bits of the callee with relocation
1436 // R_PPC64_REL14 or R_PPC64_REL24. If the value is 1, then the callee
1437 // clobbers the TOC and we need an R2 save stub.
1438 if (type != R_PPC64_REL24_NOTOC && (s.stOther >> 5) == 1)
1439 return true;
1440
1441 if (type == R_PPC64_REL24_NOTOC && (s.stOther >> 5) > 1)
1442 return true;
1443
1444 // An undefined weak symbol not in a PLT does not need a thunk. If it is
1445 // hidden, its binding has been converted to local, so we just check
1446 // isUndefined() here. A undefined non-weak symbol has been errored.
1447 if (s.isUndefined())
1448 return false;
1449
1450 // If the offset exceeds the range of the branch type then it will need
1451 // a range-extending thunk.
1452 // See the comment in getRelocTargetVA() about R_PPC64_CALL.
1453 return !inBranchRange(type, src: branchAddr,
1454 dst: s.getVA(addend: a) +
1455 getPPC64GlobalEntryToLocalEntryOffset(stOther: s.stOther));
1456}
1457
1458uint32_t PPC64::getThunkSectionSpacing() const {
1459 // See comment in Arch/ARM.cpp for a more detailed explanation of
1460 // getThunkSectionSpacing(). For PPC64 we pick the constant here based on
1461 // R_PPC64_REL24, which is used by unconditional branch instructions.
1462 // 0x2000000 = (1 << 24-1) * 4
1463 return 0x2000000;
1464}
1465
1466bool PPC64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
1467 int64_t offset = dst - src;
1468 if (type == R_PPC64_REL14)
1469 return isInt<16>(x: offset);
1470 if (type == R_PPC64_REL24 || type == R_PPC64_REL24_NOTOC)
1471 return isInt<26>(x: offset);
1472 llvm_unreachable("unsupported relocation type used in branch");
1473}
1474
1475RelExpr PPC64::adjustTlsExpr(RelType type, RelExpr expr) const {
1476 if (type != R_PPC64_GOT_TLSGD_PCREL34 && expr == R_RELAX_TLS_GD_TO_IE)
1477 return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
1478 if (expr == R_RELAX_TLS_LD_TO_LE)
1479 return R_RELAX_TLS_LD_TO_LE_ABS;
1480 return expr;
1481}
1482
1483RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
1484 const uint8_t *loc) const {
1485 if ((type == R_PPC64_GOT_PCREL34 || type == R_PPC64_PCREL_OPT) &&
1486 config->pcRelOptimize) {
1487 // It only makes sense to optimize pld since paddi means that the address
1488 // of the object in the GOT is required rather than the object itself.
1489 if ((readPrefixedInstruction(loc) & 0xfc000000) == 0xe4000000)
1490 return R_PPC64_RELAX_GOT_PC;
1491 }
1492 return R_GOT_PC;
1493}
1494
1495// Reference: 3.7.4.1 of the 64-bit ELF V2 abi supplement.
1496// The general dynamic code sequence for a global `x` uses 4 instructions.
1497// Instruction Relocation Symbol
1498// addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x
1499// addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x
1500// bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x
1501// R_PPC64_REL24 __tls_get_addr
1502// nop None None
1503//
1504// Relaxing to initial-exec entails:
1505// 1) Convert the addis/addi pair that builds the address of the tls_index
1506// struct for 'x' to an addis/ld pair that loads an offset from a got-entry.
1507// 2) Convert the call to __tls_get_addr to a nop.
1508// 3) Convert the nop following the call to an add of the loaded offset to the
1509// thread pointer.
1510// Since the nop must directly follow the call, the R_PPC64_TLSGD relocation is
1511// used as the relaxation hint for both steps 2 and 3.
1512void PPC64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
1513 uint64_t val) const {
1514 switch (rel.type) {
1515 case R_PPC64_GOT_TLSGD16_HA:
1516 // This is relaxed from addis rT, r2, sym@got@tlsgd@ha to
1517 // addis rT, r2, sym@got@tprel@ha.
1518 relocateNoSym(loc, type: R_PPC64_GOT_TPREL16_HA, val);
1519 return;
1520 case R_PPC64_GOT_TLSGD16:
1521 case R_PPC64_GOT_TLSGD16_LO: {
1522 // Relax from addi r3, rA, sym@got@tlsgd@l to
1523 // ld r3, sym@got@tprel@l(rA)
1524 uint32_t ra = (readFromHalf16(loc) & (0x1f << 16));
1525 writeFromHalf16(loc, insn: 0xe8600000 | ra);
1526 relocateNoSym(loc, type: R_PPC64_GOT_TPREL16_LO_DS, val);
1527 return;
1528 }
1529 case R_PPC64_GOT_TLSGD_PCREL34: {
1530 // Relax from paddi r3, 0, sym@got@tlsgd@pcrel, 1 to
1531 // pld r3, sym@got@tprel@pcrel
1532 writePrefixedInstruction(loc, insn: 0x04100000e4600000);
1533 relocateNoSym(loc, type: R_PPC64_GOT_TPREL_PCREL34, val);
1534 return;
1535 }
1536 case R_PPC64_TLSGD: {
1537 // PC Relative Relaxation:
1538 // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
1539 // nop
1540 // TOC Relaxation:
1541 // Relax from bl __tls_get_addr(x@tlsgd)
1542 // nop
1543 // to
1544 // nop
1545 // add r3, r3, r13
1546 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
1547 if (locAsInt % 4 == 0) {
1548 write32(p: loc, v: NOP); // bl __tls_get_addr(sym@tlsgd) --> nop
1549 write32(p: loc + 4, v: 0x7c636A14); // nop --> add r3, r3, r13
1550 } else if (locAsInt % 4 == 1) {
1551 // bl __tls_get_addr(sym@tlsgd) --> add r3, r3, r13
1552 write32(p: loc - 1, v: 0x7c636a14);
1553 } else {
1554 errorOrWarn(msg: "R_PPC64_TLSGD has unexpected byte alignment");
1555 }
1556 return;
1557 }
1558 default:
1559 llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
1560 }
1561}
1562
void PPC64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
  uint64_t secAddr = sec.getOutputSection()->addr;
  if (auto *s = dyn_cast<InputSection>(&sec))
    secAddr += s->outSecOff;
  else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
    secAddr += ehIn->getParent()->outSecOff;
  uint64_t lastPPCRelaxedRelocOff = -1;
  for (const Relocation &rel : sec.relocs()) {
    uint8_t *loc = buf + rel.offset;
    const uint64_t val =
        sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
                             secAddr + rel.offset, *rel.sym, rel.expr);
    switch (rel.expr) {
    case R_PPC64_RELAX_GOT_PC: {
      // The R_PPC64_PCREL_OPT relocation must appear immediately after
      // R_PPC64_GOT_PCREL34 in the relocations table at the same offset.
      // We can only relax R_PPC64_PCREL_OPT if we have also relaxed the
      // associated R_PPC64_GOT_PCREL34, since only the latter has an
      // associated symbol. So save the offset when relaxing
      // R_PPC64_GOT_PCREL34 and only relax the other if the saved offset
      // matches.
      if (rel.type == R_PPC64_GOT_PCREL34)
        lastPPCRelaxedRelocOff = rel.offset;
      if (rel.type == R_PPC64_PCREL_OPT && rel.offset != lastPPCRelaxedRelocOff)
        break;
      relaxGot(loc, rel, val);
      break;
    }
    case R_PPC64_RELAX_TOC:
      // rel.sym refers to the STT_SECTION symbol associated with the .toc
      // input section. If an R_PPC64_TOC16_LO (.toc + addend) references the
      // TOC entry, there may be an R_PPC64_TOC16_HA not paired with an
      // R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
      // opportunities but is safe.
      if (ppc64noTocRelax.count({rel.sym, rel.addend}) ||
          !tryRelaxPPC64TocIndirection(rel, loc))
        relocate(loc, rel, val);
      break;
    case R_PPC64_CALL:
      // If this is a call to __tls_get_addr, it may be part of a TLS
      // sequence that has been relaxed and turned into a nop. In this
      // case, we don't want to handle it as a call.
      if (read32(loc) == 0x60000000) // nop
        break;

      // Patch a nop (0x60000000) to an ld.
      if (rel.sym->needsTocRestore()) {
        // gcc/gfortran 5.4, 6.3 and earlier versions do not add a nop for
        // recursive calls even if the function is preemptible. This is not
        // wrong in the common case where the function is not preempted at
        // runtime. Just ignore.
        if ((rel.offset + 8 > sec.content().size() ||
             read32(loc + 4) != 0x60000000) &&
            rel.sym->file != sec.file) {
          // Use substr(6) to remove the "__plt_" prefix.
          errorOrWarn(getErrorLocation(loc) + "call to " +
                      lld::toString(*rel.sym).substr(6) +
                      " lacks nop, can't restore toc");
          break;
        }
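        // The ELFv2 ABI reserves the doubleword at r1+24 as the TOC save
        // slot, so reload r2 from there after the call returns.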
        write32(loc + 4, 0xe8410018); // ld %r2, 24(%r1)
      }
      relocate(loc, rel, val);
      break;
    case R_RELAX_TLS_GD_TO_IE:
    case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
      relaxTlsGdToIe(loc, rel, val);
      break;
    case R_RELAX_TLS_GD_TO_LE:
      relaxTlsGdToLe(loc, rel, val);
      break;
    case R_RELAX_TLS_LD_TO_LE_ABS:
      relaxTlsLdToLe(loc, rel, val);
      break;
    case R_RELAX_TLS_IE_TO_LE:
      relaxTlsIeToLe(loc, rel, val);
      break;
    default:
      relocate(loc, rel, val);
      break;
    }
  }
}

// The prologue for a split-stack function is expected to look roughly
// like this:
//    .Lglobal_entry_point:
//      # TOC pointer initialization.
//      ...
//    .Llocal_entry_point:
//      # load the __private_ss member of the thread's tcbhead.
//      ld r0,-0x7000-64(r13)
//      # subtract the function's stack size from the stack pointer.
//      addis r12, r1, ha(-stack-frame size)
//      addi  r12, r12, l(-stack-frame size)
//      # compare needed to actual and branch to allocate_more_stack if more
//      # space is needed, otherwise fallthrough to 'normal' function body.
//      cmpld cr7, r12, r0
//      blt- cr7, .Lallocate_more_stack
//
// -) The allocate_more_stack block might be placed after the split-stack
//    prologue and the `blt-` replaced with a `bge+ .Lnormal_func_body`
//    instead.
// -) If either the addis or addi is not needed due to the stack size being
//    smaller than 32K or a multiple of 64K it will be replaced with a nop,
//    but there will always be 2 instructions the linker can overwrite for the
//    adjusted stack size.
//
// The linker's job here is to increase the stack size used in the addis/addi
// pair by split-stack-size-adjust:
//    addis r12, r1, ha(-stack-frame size - split-stack-adjust-size)
//    addi  r12, r12, l(-stack-frame size - split-stack-adjust-size)
bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                             uint8_t stOther) const {
  // If the caller has a global entry point, adjust the buffer past it. The
  // start of the split-stack prologue will be at the local entry point.
  loc += getPPC64GlobalEntryToLocalEntryOffset(stOther);

  // At the very least we expect to see a load of some split-stack data from
  // the tcb, and 2 instructions that calculate the ending stack address this
  // function will require. If there is not enough room for at least 3
  // instructions it can't be a split-stack prologue.
  if (loc + 12 >= end)
    return false;

  // First instruction must be `ld r0, -0x7000-64(r13)`.
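  // 0xe80d8fc0 is exactly that encoding: opcode 58 (ld), RT = 0, RA = 13,
  // with a displacement of -0x7040 (-0x7000 - 64).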
  if (read32(loc) != 0xe80d8fc0)
    return false;

  int16_t hiImm = 0;
  int16_t loImm = 0;
  // First instruction can be either an addis if the frame size is larger than
  // 32K, or an addi if the size is less than 32K.
  int32_t firstInstr = read32(loc + 4);
  if (getPrimaryOpCode(firstInstr) == 15) {
    hiImm = firstInstr & 0xFFFF;
  } else if (getPrimaryOpCode(firstInstr) == 14) {
    loImm = firstInstr & 0xFFFF;
  } else {
    return false;
  }

  // Second instruction is either an addi or a nop. If the first instruction
  // was an addi then loImm is set and the second instruction must be a nop.
  uint32_t secondInstr = read32(loc + 8);
  if (!loImm && getPrimaryOpCode(secondInstr) == 14) {
    loImm = secondInstr & 0xFFFF;
  } else if (secondInstr != NOP) {
    return false;
  }

  // The register operands of the first instruction should be the stack-pointer
  // (r1) as the input (RA) and r12 as the output (RT). If the second
  // instruction is not a nop, then it should use r12 as both input and output.
  auto checkRegOperands = [](uint32_t instr, uint8_t expectedRT,
                             uint8_t expectedRA) {
    return ((instr & 0x3E00000) >> 21 == expectedRT) &&
           ((instr & 0x1F0000) >> 16 == expectedRA);
  };
  if (!checkRegOperands(firstInstr, 12, 1))
    return false;
  if (secondInstr != NOP && !checkRegOperands(secondInstr, 12, 12))
    return false;

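  // Reconstruct the frame size from the two immediates. loImm is sign-extended
  // (it came from an addi), so the high immediate already compensates for a
  // negative low half.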
  int32_t stackFrameSize = (hiImm * 65536) + loImm;
  // Check that the adjusted size doesn't overflow what we can represent with 2
  // instructions.
  if (stackFrameSize < config->splitStackAdjustSize + INT32_MIN) {
    error(getErrorLocation(loc) + "split-stack prologue adjustment overflows");
    return false;
  }

  int32_t adjustedStackFrameSize =
      stackFrameSize - config->splitStackAdjustSize;

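  // Split the adjusted size back into @ha/@l style halves: adding 0x8000
  // before the shift rounds the high half up when the sign-extended low 16
  // bits are negative.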
  loImm = adjustedStackFrameSize & 0xFFFF;
  hiImm = (adjustedStackFrameSize + 0x8000) >> 16;
  if (hiImm) {
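    // addis r12, r1, ha(imm)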
    write32(loc + 4, 0x3D810000 | (uint16_t)hiImm);
    // If the low immediate is zero the second instruction will be a nop.
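    // Otherwise 0x398C0000 is `addi r12, r12, 0` with the low immediate
    // OR-ed in.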
    secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : NOP;
    write32(loc + 8, secondInstr);
  } else {
    // addi r12, r1, imm
    write32(loc + 4, 0x39810000 | (uint16_t)loImm);
    write32(loc + 8, NOP);
  }

  return true;
}

TargetInfo *elf::getPPC64TargetInfo() {
  static PPC64 target;
  return &target;
}