1 | //===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "MCTargetDesc/AArch64FixupKinds.h" |
10 | #include "MCTargetDesc/AArch64MCExpr.h" |
11 | #include "MCTargetDesc/AArch64MCTargetDesc.h" |
12 | #include "Utils/AArch64BaseInfo.h" |
13 | #include "llvm/BinaryFormat/MachO.h" |
14 | #include "llvm/MC/MCAsmBackend.h" |
15 | #include "llvm/MC/MCAssembler.h" |
16 | #include "llvm/MC/MCContext.h" |
17 | #include "llvm/MC/MCDirectives.h" |
18 | #include "llvm/MC/MCELFObjectWriter.h" |
19 | #include "llvm/MC/MCFixupKindInfo.h" |
20 | #include "llvm/MC/MCObjectWriter.h" |
21 | #include "llvm/MC/MCRegisterInfo.h" |
22 | #include "llvm/MC/MCSectionELF.h" |
23 | #include "llvm/MC/MCSectionMachO.h" |
24 | #include "llvm/MC/MCSubtargetInfo.h" |
25 | #include "llvm/MC/MCTargetOptions.h" |
26 | #include "llvm/MC/MCValue.h" |
27 | #include "llvm/MC/TargetRegistry.h" |
28 | #include "llvm/Support/ErrorHandling.h" |
29 | #include "llvm/Support/MathExtras.h" |
30 | #include "llvm/TargetParser/Triple.h" |
31 | using namespace llvm; |
32 | |
33 | namespace { |
34 | |
35 | class AArch64AsmBackend : public MCAsmBackend { |
36 | static const unsigned PCRelFlagVal = |
37 | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel; |
38 | protected: |
39 | Triple TheTriple; |
40 | |
41 | public: |
42 | AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian) |
43 | : MCAsmBackend(IsLittleEndian ? llvm::endianness::little |
44 | : llvm::endianness::big), |
45 | TheTriple(TT) {} |
46 | |
47 | unsigned getNumFixupKinds() const override { |
48 | return AArch64::NumTargetFixupKinds; |
49 | } |
50 | |
51 | std::optional<MCFixupKind> getFixupKind(StringRef Name) const override; |
52 | |
53 | const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override { |
54 | const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = { |
55 | // This table *must* be in the order that the fixup_* kinds are defined |
56 | // in AArch64FixupKinds.h. |
57 | // |
58 | // Name Offset (bits) Size (bits) Flags |
59 | {.Name: "fixup_aarch64_pcrel_adr_imm21" , .TargetOffset: 0, .TargetSize: 32, .Flags: PCRelFlagVal}, |
60 | {.Name: "fixup_aarch64_pcrel_adrp_imm21" , .TargetOffset: 0, .TargetSize: 32, .Flags: PCRelFlagVal}, |
61 | {.Name: "fixup_aarch64_add_imm12" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
62 | {.Name: "fixup_aarch64_ldst_imm12_scale1" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
63 | {.Name: "fixup_aarch64_ldst_imm12_scale2" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
64 | {.Name: "fixup_aarch64_ldst_imm12_scale4" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
65 | {.Name: "fixup_aarch64_ldst_imm12_scale8" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
66 | {.Name: "fixup_aarch64_ldst_imm12_scale16" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
67 | {.Name: "fixup_aarch64_ldr_pcrel_imm19" , .TargetOffset: 5, .TargetSize: 19, .Flags: PCRelFlagVal}, |
68 | {.Name: "fixup_aarch64_movw" , .TargetOffset: 5, .TargetSize: 16, .Flags: 0}, |
69 | {.Name: "fixup_aarch64_pcrel_branch14" , .TargetOffset: 5, .TargetSize: 14, .Flags: PCRelFlagVal}, |
70 | {.Name: "fixup_aarch64_pcrel_branch16" , .TargetOffset: 5, .TargetSize: 16, .Flags: PCRelFlagVal}, |
71 | {.Name: "fixup_aarch64_pcrel_branch19" , .TargetOffset: 5, .TargetSize: 19, .Flags: PCRelFlagVal}, |
72 | {.Name: "fixup_aarch64_pcrel_branch26" , .TargetOffset: 0, .TargetSize: 26, .Flags: PCRelFlagVal}, |
73 | {.Name: "fixup_aarch64_pcrel_call26" , .TargetOffset: 0, .TargetSize: 26, .Flags: PCRelFlagVal}}; |
74 | |
75 | // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not |
76 | // require any extra processing. |
77 | if (Kind >= FirstLiteralRelocationKind) |
      return MCAsmBackend::getFixupKindInfo(FK_NONE);
79 | |
80 | if (Kind < FirstTargetFixupKind) |
81 | return MCAsmBackend::getFixupKindInfo(Kind); |
82 | |
    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
85 | return Infos[Kind - FirstTargetFixupKind]; |
86 | } |
87 | |
88 | void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, |
89 | const MCValue &Target, MutableArrayRef<char> Data, |
90 | uint64_t Value, bool IsResolved, |
91 | const MCSubtargetInfo *STI) const override; |
92 | |
93 | bool fixupNeedsRelaxation(const MCFixup &Fixup, |
94 | uint64_t Value) const override; |
95 | void relaxInstruction(MCInst &Inst, |
96 | const MCSubtargetInfo &STI) const override; |
97 | bool writeNopData(raw_ostream &OS, uint64_t Count, |
98 | const MCSubtargetInfo *STI) const override; |
99 | |
  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;
101 | |
102 | bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, |
103 | const MCValue &Target, |
104 | const MCSubtargetInfo *STI) override; |
105 | }; |
106 | |
107 | } // end anonymous namespace |
108 | |
109 | /// The number of bytes the fixup may change. |
110 | static unsigned getFixupKindNumBytes(unsigned Kind) { |
111 | switch (Kind) { |
112 | default: |
113 | llvm_unreachable("Unknown fixup kind!" ); |
114 | |
115 | case FK_Data_1: |
116 | return 1; |
117 | |
118 | case FK_Data_2: |
119 | case FK_SecRel_2: |
120 | return 2; |
121 | |
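  // These fixups patch fields that sit entirely within the low 24 bits of
  // the 32-bit instruction word (see the offset/size table above), so only
  // three bytes of the fragment are touched.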
122 | case AArch64::fixup_aarch64_movw: |
123 | case AArch64::fixup_aarch64_pcrel_branch14: |
124 | case AArch64::fixup_aarch64_pcrel_branch16: |
125 | case AArch64::fixup_aarch64_add_imm12: |
126 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
127 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
128 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
129 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
130 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
131 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
132 | case AArch64::fixup_aarch64_pcrel_branch19: |
133 | return 3; |
134 | |
135 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
136 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
137 | case AArch64::fixup_aarch64_pcrel_branch26: |
138 | case AArch64::fixup_aarch64_pcrel_call26: |
139 | case FK_Data_4: |
140 | case FK_SecRel_4: |
141 | return 4; |
142 | |
143 | case FK_Data_8: |
144 | return 8; |
145 | } |
146 | } |
147 | |
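// ADR and ADRP split their 21-bit immediate across the instruction word:
// the low two bits go to immlo (bits 30:29) and the high 19 bits to immhi
// (bits 23:5). For example, Value = 0x1005 gives lo2 = 0x1 and hi19 = 0x401,
// producing (0x401 << 5) | (0x1 << 29).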
148 | static unsigned AdrImmBits(unsigned Value) { |
149 | unsigned lo2 = Value & 0x3; |
150 | unsigned hi19 = (Value & 0x1ffffc) >> 2; |
151 | return (hi19 << 5) | (lo2 << 29); |
152 | } |
153 | |
154 | static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target, |
155 | uint64_t Value, MCContext &Ctx, |
156 | const Triple &TheTriple, bool IsResolved) { |
157 | int64_t SignedValue = static_cast<int64_t>(Value); |
158 | switch (Fixup.getTargetKind()) { |
159 | default: |
160 | llvm_unreachable("Unknown fixup kind!" ); |
161 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
165 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
166 | assert(!IsResolved); |
167 | if (TheTriple.isOSBinFormatCOFF()) { |
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
173 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
174 | case AArch64::fixup_aarch64_pcrel_branch19: |
175 | // Signed 19-bit immediate which gets multiplied by 4 |
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
180 | // Low two bits are not encoded. |
181 | return (Value >> 2) & 0x7ffff; |
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
226 | case AArch64::fixup_aarch64_movw: { |
227 | AArch64MCExpr::VariantKind RefKind = |
228 | static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind()); |
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");
236 | |
237 | // Invert the negative immediate because it will feed into a MOVN. |
238 | if (SignedValue < 0) |
239 | SignedValue = ~SignedValue; |
240 | Value = static_cast<uint64_t>(SignedValue); |
241 | } else |
242 | // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't |
243 | // ever be resolved in the assembler. |
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
247 | return Value; |
248 | } |
249 | |
250 | if (!IsResolved) { |
251 | // FIXME: Figure out when this can actually happen, and verify our |
252 | // behavior. |
253 | Ctx.reportError(L: Fixup.getLoc(), Msg: "unresolved movw fixup not yet " |
254 | "implemented" ); |
255 | return Value; |
256 | } |
257 | |
    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
260 | case AArch64MCExpr::VK_G0: |
261 | break; |
262 | case AArch64MCExpr::VK_G1: |
263 | SignedValue = SignedValue >> 16; |
264 | break; |
265 | case AArch64MCExpr::VK_G2: |
266 | SignedValue = SignedValue >> 32; |
267 | break; |
268 | case AArch64MCExpr::VK_G3: |
269 | SignedValue = SignedValue >> 48; |
270 | break; |
271 | default: |
272 | llvm_unreachable("Variant kind doesn't correspond to fixup" ); |
273 | } |
274 | |
275 | } else { |
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
277 | case AArch64MCExpr::VK_G0: |
278 | break; |
279 | case AArch64MCExpr::VK_G1: |
280 | Value = Value >> 16; |
281 | break; |
282 | case AArch64MCExpr::VK_G2: |
283 | Value = Value >> 32; |
284 | break; |
285 | case AArch64MCExpr::VK_G3: |
286 | Value = Value >> 48; |
287 | break; |
288 | default: |
289 | llvm_unreachable("Variant kind doesn't correspond to fixup" ); |
290 | } |
291 | } |
292 | |
    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) ==
               AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
308 | return Value; |
309 | } |
310 | case AArch64::fixup_aarch64_pcrel_branch14: |
311 | // Signed 16-bit immediate |
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
317 | return (Value >> 2) & 0x3fff; |
318 | case AArch64::fixup_aarch64_pcrel_branch16: |
319 | // Unsigned PC-relative offset, so invert the negative immediate. |
320 | SignedValue = -SignedValue; |
321 | Value = static_cast<uint64_t>(SignedValue); |
322 | // Check valid 18-bit unsigned range. |
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
328 | return (Value >> 2) & 0xffff; |
329 | case AArch64::fixup_aarch64_pcrel_branch26: |
330 | case AArch64::fixup_aarch64_pcrel_call26: |
331 | if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) { |
332 | // MSVC link.exe and lld do not support this relocation type |
333 | // with a non-zero offset |
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
344 | return (Value >> 2) & 0x3ffffff; |
345 | case FK_Data_1: |
346 | case FK_Data_2: |
347 | case FK_Data_4: |
348 | case FK_Data_8: |
349 | case FK_SecRel_2: |
350 | case FK_SecRel_4: |
351 | return Value; |
352 | } |
353 | } |
354 | |
355 | std::optional<MCFixupKind> |
356 | AArch64AsmBackend::getFixupKind(StringRef Name) const { |
357 | if (!TheTriple.isOSBinFormatELF()) |
358 | return std::nullopt; |
359 | |
360 | unsigned Type = llvm::StringSwitch<unsigned>(Name) |
361 | #define ELF_RELOC(X, Y) .Case(#X, Y) |
362 | #include "llvm/BinaryFormat/ELFRelocs/AArch64.def" |
363 | #undef ELF_RELOC |
364 | .Case(S: "BFD_RELOC_NONE" , Value: ELF::R_AARCH64_NONE) |
365 | .Case(S: "BFD_RELOC_16" , Value: ELF::R_AARCH64_ABS16) |
366 | .Case(S: "BFD_RELOC_32" , Value: ELF::R_AARCH64_ABS32) |
367 | .Case(S: "BFD_RELOC_64" , Value: ELF::R_AARCH64_ABS64) |
368 | .Default(Value: -1u); |
369 | if (Type == -1u) |
370 | return std::nullopt; |
371 | return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type); |
372 | } |
373 | |
/// getFixupKindContainerSizeInBytes - The number of bytes of the
/// container involved in big endian, or 0 if the item is little endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
377 | if (Endian == llvm::endianness::little) |
378 | return 0; |
379 | |
380 | switch (Kind) { |
381 | default: |
382 | llvm_unreachable("Unknown fixup kind!" ); |
383 | |
384 | case FK_Data_1: |
385 | return 1; |
386 | case FK_Data_2: |
387 | return 2; |
388 | case FK_Data_4: |
389 | return 4; |
390 | case FK_Data_8: |
391 | return 8; |
392 | |
393 | case AArch64::fixup_aarch64_movw: |
394 | case AArch64::fixup_aarch64_pcrel_branch14: |
395 | case AArch64::fixup_aarch64_pcrel_branch16: |
396 | case AArch64::fixup_aarch64_add_imm12: |
397 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
398 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
399 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
400 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
401 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
402 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
403 | case AArch64::fixup_aarch64_pcrel_branch19: |
404 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
405 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
406 | case AArch64::fixup_aarch64_pcrel_branch26: |
407 | case AArch64::fixup_aarch64_pcrel_call26: |
408 | // Instructions are always little endian |
409 | return 0; |
410 | } |
411 | } |
412 | |
413 | void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, |
414 | const MCValue &Target, |
415 | MutableArrayRef<char> Data, uint64_t Value, |
416 | bool IsResolved, |
417 | const MCSubtargetInfo *STI) const { |
418 | if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) { |
419 | auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind()); |
    AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
421 | if (SymLoc == AArch64AuthMCExpr::VK_AUTH || |
422 | SymLoc == AArch64AuthMCExpr::VK_AUTHADDR) { |
423 | assert(Value == 0); |
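      // Pack the signing schema into the relocated place: discriminator in
      // bits [47:32], key in bits [61:60], and the address-diversity flag in
      // bit 63, leaving the low 32 bits free for the addend.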
      const auto *Expr = cast<AArch64AuthMCExpr>(Fixup.getValue());
425 | Value = (uint64_t(Expr->getDiscriminator()) << 32) | |
426 | (uint64_t(Expr->getKey()) << 60) | |
427 | (uint64_t(Expr->hasAddressDiversity()) << 63); |
428 | } |
429 | } |
430 | |
431 | if (!Value) |
432 | return; // Doesn't change encoding. |
433 | unsigned Kind = Fixup.getKind(); |
434 | if (Kind >= FirstLiteralRelocationKind) |
435 | return; |
436 | unsigned NumBytes = getFixupKindNumBytes(Kind); |
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
438 | MCContext &Ctx = Asm.getContext(); |
439 | int64_t SignedValue = static_cast<int64_t>(Value); |
440 | // Apply any target-specific value adjustments. |
441 | Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved); |
442 | |
443 | // Shift the value into position. |
444 | Value <<= Info.TargetOffset; |
445 | |
446 | unsigned Offset = Fixup.getOffset(); |
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
448 | |
449 | // Used to point to big endian bytes. |
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }
468 | |
469 | // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to |
470 | // handle this more cleanly. This may affect the output of -show-mc-encoding. |
471 | AArch64MCExpr::VariantKind RefKind = |
472 | static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind()); |
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
474 | (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) { |
475 | // If the immediate is negative, generate MOVN else MOVZ. |
476 | // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ. |
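    // Bit 30 of the A64 instruction word is bit 6 of byte 3, since
    // instructions are stored little-endian.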
477 | if (SignedValue < 0) |
478 | Data[Offset + 3] &= ~(1 << 6); |
479 | else |
480 | Data[Offset + 3] |= (1 << 6); |
481 | } |
482 | } |
483 | |
484 | bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, |
485 | uint64_t Value) const { |
486 | // FIXME: This isn't correct for AArch64. Just moving the "generic" logic |
487 | // into the targets for now. |
488 | // |
489 | // Relax if the value is too big for a (signed) i8. |
490 | return int64_t(Value) != int64_t(int8_t(Value)); |
491 | } |
492 | |
493 | void AArch64AsmBackend::relaxInstruction(MCInst &Inst, |
494 | const MCSubtargetInfo &STI) const { |
495 | llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented" ); |
496 | } |
497 | |
498 | bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count, |
499 | const MCSubtargetInfo *STI) const { |
500 | // If the count is not 4-byte aligned, we must be writing data into the text |
501 | // section (otherwise we have unaligned instructions, and thus have far |
502 | // bigger problems), so just write zeros instead. |
  OS.write_zeros(Count % 4);
504 | |
505 | // We are properly aligned, so write NOPs as requested. |
506 | Count /= 4; |
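  // 0xd503201f is the A64 NOP instruction, emitted here in little-endian
  // byte order.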
507 | for (uint64_t i = 0; i != Count; ++i) |
    OS.write("\x1f\x20\x03\xd5", 4);
509 | return true; |
510 | } |
511 | |
512 | bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm, |
513 | const MCFixup &Fixup, |
514 | const MCValue &Target, |
515 | const MCSubtargetInfo *STI) { |
516 | unsigned Kind = Fixup.getKind(); |
517 | if (Kind >= FirstLiteralRelocationKind) |
518 | return true; |
519 | |
520 | // The ADRP instruction adds some multiple of 0x1000 to the current PC & |
521 | // ~0xfff. This means that the required offset to reach a symbol can vary by |
522 | // up to one step depending on where the ADRP is in memory. For example: |
523 | // |
524 | // ADRP x0, there |
525 | // there: |
526 | // |
527 | // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and |
528 | // we'll need that as an offset. At any other address "there" will be in the |
529 | // same page as the ADRP and the instruction should encode 0x0. Assuming the |
530 | // section isn't 0x1000-aligned, we therefore need to delegate this decision |
531 | // to the linker -- a relocation! |
532 | if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21) |
533 | return true; |
534 | |
535 | return false; |
536 | } |
537 | |
538 | namespace { |
539 | |
540 | namespace CU { |
541 | |
542 | /// Compact unwind encoding values. |
543 | enum CompactUnwindEncodings { |
544 | /// A "frameless" leaf function, where no non-volatile registers are |
  /// saved. The return address remains in LR throughout the function.
546 | UNWIND_ARM64_MODE_FRAMELESS = 0x02000000, |
547 | |
  /// No compact unwind encoding available. Instead the low 24 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images that have only DWARF
  /// unwind info for a function.
553 | UNWIND_ARM64_MODE_DWARF = 0x03000000, |
554 | |
  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
561 | UNWIND_ARM64_MODE_FRAME = 0x04000000, |
562 | |
563 | /// Frame register pair encodings. |
564 | UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001, |
565 | UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002, |
566 | UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004, |
567 | UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008, |
568 | UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010, |
569 | UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100, |
570 | UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200, |
571 | UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400, |
572 | UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800 |
573 | }; |
574 | |
575 | } // end CU namespace |
576 | |
577 | // FIXME: This should be in a separate file. |
578 | class DarwinAArch64AsmBackend : public AArch64AsmBackend { |
579 | const MCRegisterInfo &MRI; |
580 | |
581 | /// Encode compact unwind stack adjustment for frameless functions. |
582 | /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h. |
583 | /// The stack size always needs to be 16 byte aligned. |
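  /// For example, a 496-byte frame is encoded as (496 / 16) << 12 = 0x1F000.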
584 | uint32_t encodeStackAdjustment(uint32_t StackSize) const { |
585 | return (StackSize / 16) << 12; |
586 | } |
587 | |
588 | public: |
589 | DarwinAArch64AsmBackend(const Target &T, const Triple &TT, |
590 | const MCRegisterInfo &MRI) |
591 | : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {} |
592 | |
593 | std::unique_ptr<MCObjectTargetWriter> |
594 | createObjectTargetWriter() const override { |
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
599 | } |
600 | |
601 | /// Generate the compact unwind encoding from the CFI directives. |
602 | uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI, |
603 | const MCContext *Ctxt) const override { |
604 | ArrayRef<MCCFIInstruction> Instrs = FI->Instructions; |
605 | if (Instrs.empty()) |
606 | return CU::UNWIND_ARM64_MODE_FRAMELESS; |
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
608 | !Ctxt->emitCompactUnwindNonCanonical()) |
609 | return CU::UNWIND_ARM64_MODE_DWARF; |
610 | |
611 | bool HasFP = false; |
612 | uint64_t StackSize = 0; |
613 | |
614 | uint64_t CompactUnwindEncoding = 0; |
615 | int64_t CurOffset = 0; |
616 | for (size_t i = 0, e = Instrs.size(); i != e; ++i) { |
617 | const MCCFIInstruction &Inst = Instrs[i]; |
618 | |
619 | switch (Inst.getOperation()) { |
620 | default: |
621 | // Cannot handle this directive: bail out. |
622 | return CU::UNWIND_ARM64_MODE_DWARF; |
623 | case MCCFIInstruction::OpDefCfa: { |
624 | // Defines a frame pointer. |
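        // This is expected to be the canonical frame setup, e.g. the
        // ".cfi_def_cfa w29, 16" emitted for
        // "stp x29, x30, [sp, #-16]!; mov x29, sp", followed by the two
        // .cfi_offset directives for LR and FP that are matched below.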
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));
627 | |
628 | // Other CFA registers than FP are not supported by compact unwind. |
629 | // Fallback on DWARF. |
630 | // FIXME: When opt-remarks are supported in MC, add a remark to notify |
631 | // the user. |
632 | if (XReg != AArch64::FP) |
633 | return CU::UNWIND_ARM64_MODE_DWARF; |
634 | |
635 | if (i + 2 >= e) |
636 | return CU::UNWIND_ARM64_MODE_DWARF; |
637 | |
638 | const MCCFIInstruction &LRPush = Instrs[++i]; |
639 | if (LRPush.getOperation() != MCCFIInstruction::OpOffset) |
640 | return CU::UNWIND_ARM64_MODE_DWARF; |
641 | const MCCFIInstruction &FPPush = Instrs[++i]; |
642 | if (FPPush.getOperation() != MCCFIInstruction::OpOffset) |
643 | return CU::UNWIND_ARM64_MODE_DWARF; |
644 | |
645 | if (FPPush.getOffset() + 8 != LRPush.getOffset()) |
646 | return CU::UNWIND_ARM64_MODE_DWARF; |
647 | CurOffset = FPPush.getOffset(); |
648 | |
        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);
654 | |
655 | if (LRReg != AArch64::LR || FPReg != AArch64::FP) |
656 | return CU::UNWIND_ARM64_MODE_DWARF; |
657 | |
658 | // Indicate that the function has a frame. |
659 | CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME; |
660 | HasFP = true; |
661 | break; |
662 | } |
663 | case MCCFIInstruction::OpDefCfaOffset: { |
664 | if (StackSize != 0) |
665 | return CU::UNWIND_ARM64_MODE_DWARF; |
        StackSize = std::abs(Inst.getOffset());
667 | break; |
668 | } |
669 | case MCCFIInstruction::OpOffset: { |
670 | // Registers are saved in pairs. We expect there to be two consecutive |
671 | // `.cfi_offset' instructions with the appropriate registers specified. |
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
673 | if (i + 1 == e) |
674 | return CU::UNWIND_ARM64_MODE_DWARF; |
675 | |
676 | if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8) |
677 | return CU::UNWIND_ARM64_MODE_DWARF; |
678 | CurOffset = Inst.getOffset(); |
679 | |
680 | const MCCFIInstruction &Inst2 = Instrs[++i]; |
681 | if (Inst2.getOperation() != MCCFIInstruction::OpOffset) |
682 | return CU::UNWIND_ARM64_MODE_DWARF; |
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);
684 | |
685 | if (Inst2.getOffset() != CurOffset - 8) |
686 | return CU::UNWIND_ARM64_MODE_DWARF; |
687 | CurOffset = Inst2.getOffset(); |
688 | |
689 | // N.B. The encodings must be in register number order, and the X |
690 | // registers before the D registers. |
691 | |
692 | // X19/X20 pair = 0x00000001, |
693 | // X21/X22 pair = 0x00000002, |
694 | // X23/X24 pair = 0x00000004, |
695 | // X25/X26 pair = 0x00000008, |
696 | // X27/X28 pair = 0x00000010 |
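        // Each mask below clears the current pair's bit and requires that no
        // later X pair and no D pair has been recorded yet, enforcing the
        // register-number ordering described above.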
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);
699 | |
700 | if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 && |
701 | (CompactUnwindEncoding & 0xF1E) == 0) |
702 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR; |
703 | else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 && |
704 | (CompactUnwindEncoding & 0xF1C) == 0) |
705 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR; |
706 | else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 && |
707 | (CompactUnwindEncoding & 0xF18) == 0) |
708 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR; |
709 | else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 && |
710 | (CompactUnwindEncoding & 0xF10) == 0) |
711 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR; |
712 | else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 && |
713 | (CompactUnwindEncoding & 0xF00) == 0) |
714 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR; |
715 | else { |
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);
718 | |
719 | // D8/D9 pair = 0x00000100, |
720 | // D10/D11 pair = 0x00000200, |
721 | // D12/D13 pair = 0x00000400, |
722 | // D14/D15 pair = 0x00000800 |
723 | if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 && |
724 | (CompactUnwindEncoding & 0xE00) == 0) |
725 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR; |
726 | else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 && |
727 | (CompactUnwindEncoding & 0xC00) == 0) |
728 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR; |
729 | else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 && |
730 | (CompactUnwindEncoding & 0x800) == 0) |
731 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR; |
732 | else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15) |
733 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR; |
734 | else |
735 | // A pair was pushed which we cannot handle. |
736 | return CU::UNWIND_ARM64_MODE_DWARF; |
737 | } |
738 | |
739 | break; |
740 | } |
741 | } |
742 | } |
743 | |
744 | if (!HasFP) { |
745 | // With compact unwind info we can only represent stack adjustments of up |
746 | // to 65520 bytes. |
747 | if (StackSize > 65520) |
748 | return CU::UNWIND_ARM64_MODE_DWARF; |
749 | |
750 | CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS; |
751 | CompactUnwindEncoding |= encodeStackAdjustment(StackSize); |
752 | } |
753 | |
754 | return CompactUnwindEncoding; |
755 | } |
756 | }; |
757 | |
758 | } // end anonymous namespace |
759 | |
760 | namespace { |
761 | |
762 | class ELFAArch64AsmBackend : public AArch64AsmBackend { |
763 | public: |
764 | uint8_t OSABI; |
765 | bool IsILP32; |
766 | |
767 | ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI, |
768 | bool IsLittleEndian, bool IsILP32) |
769 | : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI), |
770 | IsILP32(IsILP32) {} |
771 | |
772 | std::unique_ptr<MCObjectTargetWriter> |
773 | createObjectTargetWriter() const override { |
774 | return createAArch64ELFObjectWriter(OSABI, IsILP32); |
775 | } |
776 | }; |
777 | |
778 | } |
779 | |
780 | namespace { |
781 | class COFFAArch64AsmBackend : public AArch64AsmBackend { |
782 | public: |
783 | COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple) |
784 | : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {} |
785 | |
786 | std::unique_ptr<MCObjectTargetWriter> |
787 | createObjectTargetWriter() const override { |
788 | return createAArch64WinCOFFObjectWriter(TheTriple); |
789 | } |
790 | }; |
791 | } |
792 | |
793 | MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T, |
794 | const MCSubtargetInfo &STI, |
795 | const MCRegisterInfo &MRI, |
796 | const MCTargetOptions &Options) { |
797 | const Triple &TheTriple = STI.getTargetTriple(); |
798 | if (TheTriple.isOSBinFormatMachO()) { |
799 | return new DarwinAArch64AsmBackend(T, TheTriple, MRI); |
800 | } |
801 | |
802 | if (TheTriple.isOSBinFormatCOFF()) |
803 | return new COFFAArch64AsmBackend(T, TheTriple); |
804 | |
805 | assert(TheTriple.isOSBinFormatELF() && "Invalid target" ); |
806 | |
807 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType: TheTriple.getOS()); |
808 | bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; |
809 | return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true, |
810 | IsILP32); |
811 | } |
812 | |
813 | MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T, |
814 | const MCSubtargetInfo &STI, |
815 | const MCRegisterInfo &MRI, |
816 | const MCTargetOptions &Options) { |
817 | const Triple &TheTriple = STI.getTargetTriple(); |
818 | assert(TheTriple.isOSBinFormatELF() && |
819 | "Big endian is only supported for ELF targets!" ); |
820 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType: TheTriple.getOS()); |
821 | bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; |
822 | return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false, |
823 | IsILP32); |
824 | } |
825 | |