//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCAsmInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
  MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                              Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch9", 5, 9, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch16", 5, 16, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};

    // Fixup kinds from raw relocation types and .reloc directives force
    // relocations and do not need these fields.
    if (mc::isRelocation(Kind))
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) <
               AArch64::NumTargetFixupKinds &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
                  MutableArrayRef<char> Data, uint64_t Value,
                  bool IsResolved) override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
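/// For example, fixup_aarch64_movw occupies instruction bits 20:5, so it can
/// only alter the low three bytes of the 4-byte instruction word.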
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

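// An ADR/ADRP immediate is split across two instruction fields: the low two
// bits of the value (immlo) live in bits 30:29, and the remaining 19 bits
// (immhi) live in bits 23:5. For example, a value of 0x7 yields lo2 = 0b11
// in bits 30:29 and hi19 = 0b1 in bits 23:5.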
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4.
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
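  // The ldst_imm12_scaleN fixups encode an unsigned byte offset that must be
  // a multiple of the access size N; the encoded immediate is the byte
  // offset divided by N. For example, an 8-byte load at :lo12: offset 24
  // uses scale8 and encodes 24 >> 3 = 3.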
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate.
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2.
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4.
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8.
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16.
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64::Specifier RefKind =
        static_cast<AArch64::Specifier>(Target.getSpecifier());
    if (AArch64::getSymbolLoc(RefKind) != AArch64::S_ABS &&
        AArch64::getSymbolLoc(RefKind) != AArch64::S_SABS) {
      if (!RefKind) {
        // The fixup is an expression.
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // GOTTPREL, TPREL and DTPREL are movw fixups, but they can't ever be
        // resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

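    // Each group selector picks one 16-bit fragment of the value: g0 keeps
    // bits 15:0, g1 shifts in bits 31:16, g2 bits 47:32, and g3 bits 63:48.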
    if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64::S_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64::S_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        Value = Value >> 16;
        break;
      case AArch64::S_G2:
        Value = Value >> 32;
        break;
      case AArch64::S_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64::S_NC) {
      Value &= 0xFFFF;
    } else if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch9:
    // Signed 11-bit byte offset; only 9 bits are encoded, since the low two
    // bits are implicit.
    if (!isInt<11>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x1ff;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit byte offset; only 14 bits are encoded.
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, so invert the negative immediate.
    SignedValue = -SignedValue;
    Value = static_cast<uint64_t>(SignedValue);
    // Check valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset.
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit byte offset; only 26 bits are encoded.
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

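/// Resolve .reloc directive names for ELF targets: both raw relocation type
/// names (e.g. R_AARCH64_ABS64) and the BFD_RELOC_* aliases are accepted.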
std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved in big-endian byte swapping, or 0 if the item is little endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian.
    return 0;
  }
}

static bool shouldForceRelocation(const MCFixup &Fixup) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  return Fixup.getTargetKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21;
}

void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved) {
  if (shouldForceRelocation(Fixup))
    IsResolved = false;
  maybeAddReloc(F, Fixup, Target, Value, IsResolved);
  MCFixupKind Kind = Fixup.getKind();
  if (mc::isRelocation(Kind))
    return;

  if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64::Specifier>(Target.getSpecifier());
    AArch64::Specifier SymLoc = AArch64::getSymbolLoc(RefKind);
    if (SymLoc == AArch64::S_AUTH || SymLoc == AArch64::S_AUTHADDR) {
      const auto *Expr = dyn_cast<AArch64AuthMCExpr>(Fixup.getValue());
      if (!Expr) {
        getContext().reportError(Fixup.getValue()->getLoc(),
                                 "expected relocatable expression");
        return;
      }
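      // Pack the ptrauth signing schema into the value: the 16-bit
      // discriminator in bits 47:32, the key in bits 61:60, and the
      // address-diversity flag in bit 63.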
      assert(Value == 0);
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian.
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian.
    assert(Offset + FullSizeInBytes <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64::Specifier RefKind =
      static_cast<AArch64::Specifier>(Target.getSpecifier());
  if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

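  // 0xd503201f is the A64 NOP encoding, written here in little-endian byte
  // order; instructions are little endian even on big-endian targets.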
  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
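  /// For example, a 64-byte frame encodes as (64 / 16) << 12 = 0x4000,
  /// placing the scaled size in bits 23:12 of the encoding.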
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    uint64_t StackSize = 0;

    uint64_t CompactUnwindEncoding = 0;
    int64_t CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        MCRegister XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

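        // Each "(CompactUnwindEncoding & Mask) == 0" guard below rejects a
        // pair that arrives out of register-number order: a pair is accepted
        // only if no higher-numbered pair bit has been recorded yet.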
        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of
      // up to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
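      // e.g. a frameless function with a 32-byte frame yields
      // 0x02000000 | ((32 / 16) << 12) = 0x02002000.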
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {

class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
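  // ILP32 is selected via the triple's environment component (e.g. the
  // aarch64-linux-gnu_ilp32 triple).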
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}