1//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64FixupKinds.h"
10#include "MCTargetDesc/AArch64MCAsmInfo.h"
11#include "MCTargetDesc/AArch64MCTargetDesc.h"
12#include "Utils/AArch64BaseInfo.h"
13#include "llvm/BinaryFormat/MachO.h"
14#include "llvm/MC/MCAsmBackend.h"
15#include "llvm/MC/MCAssembler.h"
16#include "llvm/MC/MCContext.h"
17#include "llvm/MC/MCELFObjectWriter.h"
18#include "llvm/MC/MCObjectWriter.h"
19#include "llvm/MC/MCRegisterInfo.h"
20#include "llvm/MC/MCSubtargetInfo.h"
21#include "llvm/MC/MCTargetOptions.h"
22#include "llvm/MC/MCValue.h"
23#include "llvm/MC/TargetRegistry.h"
24#include "llvm/Support/ErrorHandling.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/TargetParser/Triple.h"
27using namespace llvm;
28
29namespace {
30
/// Common AArch64 assembler backend. Owns target-fixup metadata and the
/// fixup-application logic shared by the Mach-O, ELF and COFF subclasses
/// declared later in this file.
class AArch64AsmBackend : public MCAsmBackend {
protected:
  // Target triple, consulted for object-format-specific fixup behavior.
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  /// Map a .reloc directive name to a fixup kind (ELF only; see definition).
  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  /// Describe where each target fixup lives inside its instruction word.
  MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, 0},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, 0},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, 0},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch9", 5, 9, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, 0},
        {"fixup_aarch64_pcrel_branch16", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch19", 5, 19, 0},
        {"fixup_aarch64_pcrel_branch26", 0, 26, 0},
        {"fixup_aarch64_pcrel_call26", 0, 26, 0}};

    // Fixup kinds from raw relocation types and .reloc directives force
    // relocations and do not need these fields.
    if (mc::isRelocation(Kind))
      return {};

    // Generic (non-target) kinds are described by the base class.
    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) <
               AArch64::NumTargetFixupKinds &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
                  uint8_t *Data, uint64_t Value, bool IsResolved) override;

  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  // NOTE: "Containere" is a long-standing typo preserved for compatibility.
  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;
};
89
90} // end anonymous namespace
91
92/// The number of bytes the fixup may change.
93static unsigned getFixupKindNumBytes(unsigned Kind) {
94 switch (Kind) {
95 default:
96 llvm_unreachable("Unknown fixup kind!");
97
98 case FK_Data_1:
99 return 1;
100
101 case FK_Data_2:
102 case FK_SecRel_2:
103 return 2;
104
105 case AArch64::fixup_aarch64_movw:
106 case AArch64::fixup_aarch64_pcrel_branch9:
107 case AArch64::fixup_aarch64_pcrel_branch14:
108 case AArch64::fixup_aarch64_pcrel_branch16:
109 case AArch64::fixup_aarch64_add_imm12:
110 case AArch64::fixup_aarch64_ldst_imm12_scale1:
111 case AArch64::fixup_aarch64_ldst_imm12_scale2:
112 case AArch64::fixup_aarch64_ldst_imm12_scale4:
113 case AArch64::fixup_aarch64_ldst_imm12_scale8:
114 case AArch64::fixup_aarch64_ldst_imm12_scale16:
115 case AArch64::fixup_aarch64_ldr_pcrel_imm19:
116 case AArch64::fixup_aarch64_pcrel_branch19:
117 return 3;
118
119 case AArch64::fixup_aarch64_pcrel_adr_imm21:
120 case AArch64::fixup_aarch64_pcrel_adrp_imm21:
121 case AArch64::fixup_aarch64_pcrel_branch26:
122 case AArch64::fixup_aarch64_pcrel_call26:
123 case FK_Data_4:
124 case FK_SecRel_4:
125 return 4;
126
127 case FK_Data_8:
128 return 8;
129 }
130}
131
/// Scatter a 21-bit ADR/ADRP immediate into its encoded position:
/// the low two bits become immlo (bits 30:29), the remaining 19 bits
/// become immhi (bits 23:5).
static unsigned AdrImmBits(unsigned Value) {
  unsigned ImmLo = (Value & 0x3u) << 29;
  unsigned ImmHi = ((Value >> 2) & 0x7ffffu) << 5;
  return ImmHi | ImmLo;
}
137
/// Range-check and re-encode a raw fixup value for its instruction field.
/// Errors are reported via \p Ctx (assembly continues with a best-effort
/// value). The result is field-relative; the caller shifts it into place
/// using the table-driven TargetOffset.
static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    // ADRP is always forced to a relocation (see shouldForceRelocation), so
    // the assembler never fully resolves it here.
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    // Page delta: drop the low 12 bits before encoding.
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    // COFF relocations only carry the low 12 bits of the address.
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64::Specifier RefKind =
        static_cast<AArch64::Specifier>(Target.getSpecifier());
    if (AArch64::getSymbolLoc(RefKind) != AArch64::S_ABS &&
        AArch64::getSymbolLoc(RefKind) != AArch64::S_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    // Select the 16-bit chunk requested by the :abs_gN[_s][_nc]: specifier.
    // Signed (S_SABS) shifts operate on SignedValue so sign information
    // survives for the MOVN/MOVZ selection done by the caller.
    if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64::S_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64::S_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        Value = Value >> 16;
        break;
      case AArch64::S_G2:
        Value = Value >> 32;
        break;
      case AArch64::S_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    // "No check" (_nc) variants truncate silently; otherwise range-check.
    if (RefKind & AArch64::S_NC) {
      Value &= 0xFFFF;
    } else if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch9:
    // Signed 11-bit(9bits + 2 shifts) label
    if (!isInt<11>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x1ff;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, so invert the negative immediate.
    SignedValue = -SignedValue;
    Value = static_cast<uint64_t>(SignedValue);
    // Check valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    // Plain data fixups need no adjustment.
    return Value;
  }
}
344
345std::optional<MCFixupKind>
346AArch64AsmBackend::getFixupKind(StringRef Name) const {
347 if (!TheTriple.isOSBinFormatELF())
348 return std::nullopt;
349
350 unsigned Type = llvm::StringSwitch<unsigned>(Name)
351#define ELF_RELOC(X, Y) .Case(#X, Y)
352#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
353#undef ELF_RELOC
354 .Case(S: "BFD_RELOC_NONE", Value: ELF::R_AARCH64_NONE)
355 .Case(S: "BFD_RELOC_16", Value: ELF::R_AARCH64_ABS16)
356 .Case(S: "BFD_RELOC_32", Value: ELF::R_AARCH64_ABS32)
357 .Case(S: "BFD_RELOC_64", Value: ELF::R_AARCH64_ABS64)
358 .Default(Value: -1u);
359 if (Type == -1u)
360 return std::nullopt;
361 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
362}
363
364/// getFixupKindContainereSizeInBytes - The number of bytes of the
365/// container involved in big endian or 0 if the item is little endian
366unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
367 if (Endian == llvm::endianness::little)
368 return 0;
369
370 switch (Kind) {
371 default:
372 llvm_unreachable("Unknown fixup kind!");
373
374 case FK_Data_1:
375 return 1;
376 case FK_Data_2:
377 return 2;
378 case FK_Data_4:
379 return 4;
380 case FK_Data_8:
381 return 8;
382
383 case AArch64::fixup_aarch64_movw:
384 case AArch64::fixup_aarch64_pcrel_branch9:
385 case AArch64::fixup_aarch64_pcrel_branch14:
386 case AArch64::fixup_aarch64_pcrel_branch16:
387 case AArch64::fixup_aarch64_add_imm12:
388 case AArch64::fixup_aarch64_ldst_imm12_scale1:
389 case AArch64::fixup_aarch64_ldst_imm12_scale2:
390 case AArch64::fixup_aarch64_ldst_imm12_scale4:
391 case AArch64::fixup_aarch64_ldst_imm12_scale8:
392 case AArch64::fixup_aarch64_ldst_imm12_scale16:
393 case AArch64::fixup_aarch64_ldr_pcrel_imm19:
394 case AArch64::fixup_aarch64_pcrel_branch19:
395 case AArch64::fixup_aarch64_pcrel_adr_imm21:
396 case AArch64::fixup_aarch64_pcrel_adrp_imm21:
397 case AArch64::fixup_aarch64_pcrel_branch26:
398 case AArch64::fixup_aarch64_pcrel_call26:
399 // Instructions are always little endian
400 return 0;
401 }
402}
403
404static bool shouldForceRelocation(const MCFixup &Fixup) {
405 // The ADRP instruction adds some multiple of 0x1000 to the current PC &
406 // ~0xfff. This means that the required offset to reach a symbol can vary by
407 // up to one step depending on where the ADRP is in memory. For example:
408 //
409 // ADRP x0, there
410 // there:
411 //
412 // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
413 // we'll need that as an offset. At any other address "there" will be in the
414 // same page as the ADRP and the instruction should encode 0x0. Assuming the
415 // section isn't 0x1000-aligned, we therefore need to delegate this decision
416 // to the linker -- a relocation!
417 return Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21;
418}
419
/// Patch the encoded bytes of fragment \p F in place for \p Fixup.
/// Emits a relocation instead (and returns early) when the fixup cannot be
/// resolved at assembly time; otherwise ORs the adjusted value into the
/// fragment data and, for movw fixups, selects MOVN vs MOVZ.
void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                                   const MCValue &Target, uint8_t *Data,
                                   uint64_t Value, bool IsResolved) {
  // ADRP must always be left for the linker (page-offset ambiguity).
  if (shouldForceRelocation(Fixup))
    IsResolved = false;
  maybeAddReloc(F, Fixup, Target, Value, IsResolved);
  MCFixupKind Kind = Fixup.getKind();
  // Raw relocation kinds carry no encodable payload here.
  if (mc::isRelocation(Kind))
    return;

  // ELF pointer-auth data: pack discriminator/key/diversity bits into the
  // 64-bit slot (the address itself is filled in by the relocation).
  if (Fixup.getKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64::Specifier>(Target.getSpecifier());
    AArch64::Specifier SymLoc = AArch64::getSymbolLoc(RefKind);
    if (SymLoc == AArch64::S_AUTH || SymLoc == AArch64::S_AUTHADDR) {
      const auto *Expr = dyn_cast<AArch64AuthMCExpr>(Fixup.getValue());
      if (!Expr) {
        getContext().reportError(Fixup.getValue()->getLoc(),
                                 "expected relocatable expression");
        return;
      }
      assert(Value == 0);
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = getContext();
  // Keep the pre-adjustment signed value for the MOVN/MOVZ choice below.
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
         "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert(Fixup.getOffset() + FulleSizeInBytes <= F.getSize() &&
           "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64::Specifier RefKind =
      static_cast<AArch64::Specifier>(Target.getSpecifier());
  if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS ||
      (!RefKind && Fixup.getKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[3] &= ~(1 << 6);
    else
      Data[3] |= (1 << 6);
  }
}
497
498bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
499 const MCSubtargetInfo *STI) const {
500 // If the count is not 4-byte aligned, we must be writing data into the text
501 // section (otherwise we have unaligned instructions, and thus have far
502 // bigger problems), so just write zeros instead.
503 OS.write_zeros(NumZeros: Count % 4);
504
505 // We are properly aligned, so write NOPs as requested.
506 Count /= 4;
507 for (uint64_t i = 0; i != Count; ++i)
508 OS.write(Ptr: "\x1f\x20\x03\xd5", Size: 4);
509 return true;
510}
511
512namespace {
513
namespace CU {

/// Compact unwind encoding values (Darwin __compact_unwind format).
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23-bits of
  /// the compact unwind encoding is the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace
550
// FIXME: This should be in a separate file.
/// Mach-O (Darwin) backend: adds the Mach-O object writer and translates CFI
/// directives into the Darwin compact unwind encoding where possible,
/// falling back to DWARF for anything the compact format cannot express.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  /// Pattern-matches the specific CFI sequence the AArch64 frame lowering
  /// emits; any unexpected directive or ordering bails out to DWARF mode.
  uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    // MTE-tagged frames must use DWARF unwinding because compact unwind
    // doesn't handle MTE tags
    if (FI->IsMTETaggedFrame)
      return CU::UNWIND_ARM64_MODE_DWARF;

    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    // Non-canonical personalities only get compact unwind when explicitly
    // enabled; otherwise fall back to DWARF.
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    uint64_t StackSize = 0;

    uint64_t CompactUnwindEncoding = 0;
    // Tracks the stack offset of the last save matched, so that pair saves
    // are verified to be contiguous (each 8 bytes below the previous).
    int64_t CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        MCRegister XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Expect the LR and FP saves to follow immediately.
        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // FP must be stored 8 bytes below LR (a stp fp, lr pair).
        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Only a single stack-size definition is representable.
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.
        // The mask checks below reject out-of-order pairs: a pair may only
        // be added while no higher-numbered pair bit is already set.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};
736
737} // end anonymous namespace
738
739namespace {
740
/// ELF flavor of the AArch64 backend: records the OS/ABI byte and ILP32-ness
/// and wires up the ELF object writer.
class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  // ELF header OS/ABI identification byte (EI_OSABI).
  uint8_t OSABI;
  // True for the ILP32 (32-bit pointer) AArch64 ELF ABI.
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};
756
757}
758
759namespace {
/// COFF (Windows) flavor of the AArch64 backend; always little endian.
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};
770}
771
772MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
773 const MCSubtargetInfo &STI,
774 const MCRegisterInfo &MRI,
775 const MCTargetOptions &Options) {
776 const Triple &TheTriple = STI.getTargetTriple();
777 if (TheTriple.isOSBinFormatMachO()) {
778 return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
779 }
780
781 if (TheTriple.isOSBinFormatCOFF())
782 return new COFFAArch64AsmBackend(T, TheTriple);
783
784 assert(TheTriple.isOSBinFormatELF() && "Invalid target");
785
786 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType: TheTriple.getOS());
787 bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
788 return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
789 IsILP32);
790}
791
792MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
793 const MCSubtargetInfo &STI,
794 const MCRegisterInfo &MRI,
795 const MCTargetOptions &Options) {
796 const Triple &TheTriple = STI.getTargetTriple();
797 assert(TheTriple.isOSBinFormatELF() &&
798 "Big endian is only supported for ELF targets!");
799 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType: TheTriple.getOS());
800 bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
801 return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
802 IsILP32);
803}
804