//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCAsmInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                             Offset (bits) Size (bits) Flags
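        // For example, fixup_aarch64_add_imm12 patches the 12-bit immediate
        // field at bits [21:10] of an ADD/SUB instruction word, hence
        // TargetOffset 10 and TargetSize 12.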
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, 0},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, 0},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, 0},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch9", 5, 9, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, 0},
        {"fixup_aarch64_pcrel_branch16", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch19", 5, 19, 0},
        {"fixup_aarch64_pcrel_branch26", 0, 26, 0},
        {"fixup_aarch64_pcrel_call26", 0, 26, 0}};

    // Fixup kinds from raw relocation types and .reloc directives force
    // relocations and do not need these fields.
    if (mc::isRelocation(Kind))
      return {};

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) <
               AArch64::NumTargetFixupKinds &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
                  uint8_t *Data, uint64_t Value, bool IsResolved) override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

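// The ADR/ADRP immediate is split across the instruction word: the low two
// bits go into the immlo field (bits 30:29) and the high 19 bits into the
// immhi field (bits 23:5). For example, a 21-bit value of 0x12345 encodes as
// immlo = 0b01 and immhi = 0x48d1.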
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64::Specifier RefKind =
        static_cast<AArch64::Specifier>(Target.getSpecifier());
    if (AArch64::getSymbolLoc(RefKind) != AArch64::S_ABS &&
        AArch64::getSymbolLoc(RefKind) != AArch64::S_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(),
                      "unresolved movw fixup not yet implemented");
      return Value;
    }

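    // The :abs_gN[_nc]: specifier selects the Nth 16-bit chunk of the value,
    // so shift the appropriate (signed or unsigned) form of the value down by
    // 16 * N bits before the range check below.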
    if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64::S_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64::S_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        Value = Value >> 16;
        break;
      case AArch64::S_G2:
        Value = Value >> 32;
        break;
      case AArch64::S_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64::S_NC) {
      Value &= 0xFFFF;
    } else if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch9:
    // Signed 11-bit (9 bits + 2-bit shift) label
    if (!isInt<11>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x1ff;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, so invert the negative immediate.
    SignedValue = -SignedValue;
    Value = static_cast<uint64_t>(SignedValue);
    // Check valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset.
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

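// Support the .reloc directive: on ELF targets, map a relocation name such as
// R_AARCH64_ABS64 (or one of the GNU-style BFD_RELOC_* aliases) to its raw
// relocation type.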
std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved in a big-endian fixup, or 0 if the data is little-endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

static bool shouldForceRelocation(const MCFixup &Fixup) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  return Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21;
}

void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                                   const MCValue &Target, uint8_t *Data,
                                   uint64_t Value, bool IsResolved) {
  if (shouldForceRelocation(Fixup))
    IsResolved = false;
  maybeAddReloc(F, Fixup, Target, Value, IsResolved);
  MCFixupKind Kind = Fixup.getKind();
  if (mc::isRelocation(Kind))
    return;

  if (Fixup.getKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64::Specifier>(Target.getSpecifier());
    AArch64::Specifier SymLoc = AArch64::getSymbolLoc(RefKind);
    if (SymLoc == AArch64::S_AUTH || SymLoc == AArch64::S_AUTHADDR) {
      const auto *Expr = dyn_cast<AArch64AuthMCExpr>(Fixup.getValue());
      if (!Expr) {
        getContext().reportError(Fixup.getValue()->getLoc(),
                                 "expected relocatable expression");
        return;
      }
      assert(Value == 0);
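      // Pack the pointer-authentication schema into the 64-bit data slot:
      // the 16-bit discriminator goes in bits [47:32], the 2-bit key in bits
      // [61:60], and the address-diversity flag in bit 63.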
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
         "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian.
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian.
    assert(Fixup.getOffset() + FullSizeInBytes <= F.getSize() &&
           "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64::Specifier RefKind =
      static_cast<AArch64::Specifier>(Target.getSpecifier());
  if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS ||
      (!RefKind && Fixup.getKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[3] &= ~(1 << 6);
    else
      Data[3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
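  // Each NOP is the A64 instruction 0xd503201f; instruction words are always
  // stored little-endian, hence the byte order below.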
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
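  /// For example, a 176-byte frame is encoded as (176 / 16) << 12 = 0xb000.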
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    // MTE-tagged frames must use DWARF unwinding because compact unwind
    // doesn't handle MTE tags.
    if (FI->IsMTETaggedFrame)
      return CU::UNWIND_ARM64_MODE_DWARF;

    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    uint64_t StackSize = 0;

    uint64_t CompactUnwindEncoding = 0;
    int64_t CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        MCRegister XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // CFA registers other than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of
      // up to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {

class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI,
                                  /*IsLittleEndian=*/false, IsILP32);
}