1//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/ARMAsmBackend.h"
10#include "MCTargetDesc/ARMAddressingModes.h"
11#include "MCTargetDesc/ARMAsmBackendDarwin.h"
12#include "MCTargetDesc/ARMAsmBackendELF.h"
13#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
14#include "MCTargetDesc/ARMFixupKinds.h"
15#include "MCTargetDesc/ARMMCAsmInfo.h"
16#include "MCTargetDesc/ARMMCTargetDesc.h"
17#include "llvm/ADT/StringSwitch.h"
18#include "llvm/BinaryFormat/ELF.h"
19#include "llvm/BinaryFormat/MachO.h"
20#include "llvm/MC/MCAsmBackend.h"
21#include "llvm/MC/MCAssembler.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCELFObjectWriter.h"
24#include "llvm/MC/MCExpr.h"
25#include "llvm/MC/MCObjectWriter.h"
26#include "llvm/MC/MCRegisterInfo.h"
27#include "llvm/MC/MCSubtargetInfo.h"
28#include "llvm/MC/MCSymbolMachO.h"
29#include "llvm/MC/MCTargetOptions.h"
30#include "llvm/MC/MCValue.h"
31#include "llvm/Support/Debug.h"
32#include "llvm/Support/EndianStream.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/MathExtras.h"
35#include "llvm/Support/raw_ostream.h"
36using namespace llvm;
37
namespace {
// Minimal ELF object-writer configuration for ARM. ARM ELF uses 32-bit
// objects (Is64Bit = false) with REL-style relocations, i.e. the addend is
// stored in the instruction bytes rather than in the relocation entry
// (HasRelocationAddend = false).
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace
46
47std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
48 return std::nullopt;
49}
50
// For ELF, .reloc directives may name relocations directly: accept every
// R_ARM_* relocation listed in ELFRelocs/ARM.def, plus the generic
// BFD_RELOC_* aliases accepted by GNU as, and map them into the literal
// relocation fixup-kind space.
std::optional<MCFixupKind>
ARMAsmBackendELF::getFixupKind(StringRef Name) const {
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
#undef ELF_RELOC
                      .Case(S: "BFD_RELOC_NONE", Value: ELF::R_ARM_NONE)
                      .Case(S: "BFD_RELOC_8", Value: ELF::R_ARM_ABS8)
                      .Case(S: "BFD_RELOC_16", Value: ELF::R_ARM_ABS16)
                      .Case(S: "BFD_RELOC_32", Value: ELF::R_ARM_ABS32)
                      .Default(Value: -1u);
  // -1u is the "unrecognized name" sentinel; 0 cannot serve because it is a
  // valid relocation type (R_ARM_NONE).
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}
66
// Describes each ARM target fixup: its name and which bits of the
// instruction word it occupies (bit offset and size). Two tables are kept
// because on big-endian targets the bit offsets are counted from the other
// end of the instruction word.
MCFixupKindInfo ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {.Name: "fixup_arm_ldst_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_ldst_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_pcrel_10_unscaled", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_pcrel_10", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_pcrel_10", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_pcrel_9", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_pcrel_9", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_ldst_abs_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_thumb_adr_pcrel_10", .TargetOffset: 0, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_adr_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_adr_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_condbranch", .TargetOffset: 0, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_uncondbranch", .TargetOffset: 0, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_t2_condbranch", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_uncondbranch", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_thumb_br", .TargetOffset: 0, .TargetSize: 16, .Flags: 0},
      {.Name: "fixup_arm_uncondbl", .TargetOffset: 0, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_condbl", .TargetOffset: 0, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_blx", .TargetOffset: 0, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_thumb_bl", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_thumb_blx", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_thumb_cb", .TargetOffset: 0, .TargetSize: 16, .Flags: 0},
      {.Name: "fixup_arm_thumb_cp", .TargetOffset: 0, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_bcc", .TargetOffset: 0, .TargetSize: 8, .Flags: 0},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
      // - 19.
      {.Name: "fixup_arm_movt_hi16", .TargetOffset: 0, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_arm_movw_lo16", .TargetOffset: 0, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_t2_movt_hi16", .TargetOffset: 0, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_t2_movw_lo16", .TargetOffset: 0, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_arm_thumb_upper_8_15", .TargetOffset: 0, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_upper_0_7", .TargetOffset: 0, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_lower_8_15", .TargetOffset: 0, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_lower_0_7", .TargetOffset: 0, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_mod_imm", .TargetOffset: 0, .TargetSize: 12, .Flags: 0},
      {.Name: "fixup_t2_so_imm", .TargetOffset: 0, .TargetSize: 26, .Flags: 0},
      {.Name: "fixup_bf_branch", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bf_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bfl_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bfc_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bfcsel_else_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_wls", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_le", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
  };
  // Big-endian variant of the table above: entries narrower than the full
  // instruction carry non-zero bit offsets.
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {.Name: "fixup_arm_ldst_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_ldst_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_pcrel_10_unscaled", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_pcrel_10", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_pcrel_10", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_pcrel_9", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_pcrel_9", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_ldst_abs_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_thumb_adr_pcrel_10", .TargetOffset: 8, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_adr_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_adr_pcrel_12", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_condbranch", .TargetOffset: 8, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_uncondbranch", .TargetOffset: 8, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_t2_condbranch", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_t2_uncondbranch", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_thumb_br", .TargetOffset: 0, .TargetSize: 16, .Flags: 0},
      {.Name: "fixup_arm_uncondbl", .TargetOffset: 8, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_condbl", .TargetOffset: 8, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_blx", .TargetOffset: 8, .TargetSize: 24, .Flags: 0},
      {.Name: "fixup_arm_thumb_bl", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_thumb_blx", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_arm_thumb_cb", .TargetOffset: 0, .TargetSize: 16, .Flags: 0},
      {.Name: "fixup_arm_thumb_cp", .TargetOffset: 8, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_bcc", .TargetOffset: 8, .TargetSize: 8, .Flags: 0},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
      // - 19.
      {.Name: "fixup_arm_movt_hi16", .TargetOffset: 12, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_arm_movw_lo16", .TargetOffset: 12, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_t2_movt_hi16", .TargetOffset: 12, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_t2_movw_lo16", .TargetOffset: 12, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_arm_thumb_upper_8_15", .TargetOffset: 24, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_upper_0_7", .TargetOffset: 24, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_lower_8_15", .TargetOffset: 24, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_thumb_lower_0_7", .TargetOffset: 24, .TargetSize: 8, .Flags: 0},
      {.Name: "fixup_arm_mod_imm", .TargetOffset: 20, .TargetSize: 12, .Flags: 0},
      {.Name: "fixup_t2_so_imm", .TargetOffset: 26, .TargetSize: 6, .Flags: 0},
      {.Name: "fixup_bf_branch", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bf_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bfl_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bfc_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_bfcsel_else_target", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_wls", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_le", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
  };

  // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
  // any extra processing.
  if (mc::isRelocation(FixupKind: Kind))
    return {};

  // Generic (non-target) fixup kinds are described by the base class.
  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < ARM::NumTargetFixupKinds &&
         "Invalid kind!");
  // Select the table matching the target byte order.
  return (Endian == llvm::endianness::little
              ? InfosLE
              : InfosBE)[Kind - FirstTargetFixupKind];
}
181
182unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
183 const MCSubtargetInfo &STI) const {
184 bool HasThumb2 = STI.hasFeature(Feature: ARM::FeatureThumb2);
185 bool HasV8MBaselineOps = STI.hasFeature(Feature: ARM::HasV8MBaselineOps);
186
187 switch (Op) {
188 default:
189 return Op;
190 case ARM::tBcc:
191 return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
192 case ARM::tLDRpci:
193 return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
194 case ARM::tADR:
195 return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
196 case ARM::tB:
197 return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
198 case ARM::tCBZ:
199 return ARM::tHINT;
200 case ARM::tCBNZ:
201 return ARM::tHINT;
202 }
203}
204
205bool ARMAsmBackend::mayNeedRelaxation(unsigned Opcode, ArrayRef<MCOperand>,
206 const MCSubtargetInfo &STI) const {
207 return getRelaxedOpcode(Op: Opcode, STI) != Opcode;
208}
209
// Checks that a pc-relative fixup value, after removing the implicit +4
// pc bias, lies within the encodable displacement range [Min, Max].
// Returns nullptr when encodable, otherwise a diagnostic string.
static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
  const int64_t Displacement = static_cast<int64_t>(Value) - 4;
  const bool InRange = Min <= Displacement && Displacement <= Max;
  return InRange ? nullptr : "out of range pc-relative fixup value";
}
216
// Returns a diagnostic string when the resolved fixup value does not fit the
// narrow instruction form (so the instruction must be relaxed), or nullptr
// when it fits. The string doubles as the error message emitted when no
// relaxed form exists.
const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch (Fixup.getKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i12 displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i9 displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  // Low-overhead-branch extension fixups: ranges follow the encodable
  // immediate of each instruction, pc-bias handled by checkPCRelOffset.
  case ARM::fixup_bf_branch:
    return checkPCRelOffset(Value, Min: 0, Max: 30);
  case ARM::fixup_bf_target:
    return checkPCRelOffset(Value, Min: -0x10000, Max: +0xfffe);
  case ARM::fixup_bfl_target:
    return checkPCRelOffset(Value, Min: -0x40000, Max: +0x3fffe);
  case ARM::fixup_bfc_target:
    return checkPCRelOffset(Value, Min: -0x1000, Max: +0xffe);
  case ARM::fixup_wls:
    return checkPCRelOffset(Value, Min: 0, Max: +0xffe);
  case ARM::fixup_le:
    // The offset field in the LE and LETP instructions is an 11-bit
    // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
    // interpreted as a negative offset from the value read from pc,
    // i.e. from instruction_address+4.
    //
    // So an LE instruction can in principle address the instruction
    // immediately after itself, or (not very usefully) the address
    // half way through the 4-byte LE.
    return checkPCRelOffset(Value, Min: -0xffe, Max: 0);
  case ARM::fixup_bfcsel_else_target: {
    // The "else" target of a BFCSEL must be one of the next two halfwords.
    if (Value != 2 && Value != 4)
      return "out of range label-relative fixup value";
    break;
  }

  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}
295
296static bool needsInterworking(const MCAssembler &Asm, const MCSymbol *Sym,
297 unsigned FixupKind) {
298 // Create relocations for unconditional branches to function symbols with
299 // different execution mode in ELF binaries.
300 if (!Sym || !Asm.getContext().isELF())
301 return false;
302 unsigned Type = static_cast<const MCSymbolELF *>(Sym)->getType();
303 if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
304 if (Asm.isThumbFunc(Func: Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
305 return true;
306 if (!Asm.isThumbFunc(Func: Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
307 FixupKind == ARM::fixup_arm_thumb_bl ||
308 FixupKind == ARM::fixup_t2_condbranch ||
309 FixupKind == ARM::fixup_t2_uncondbranch))
310 return true;
311 }
312 return false;
313}
314
315bool ARMAsmBackend::fixupNeedsRelaxationAdvanced(const MCFragment &,
316 const MCFixup &Fixup,
317 const MCValue &Target,
318 uint64_t Value,
319 bool Resolved) const {
320 const MCSymbol *Sym = Target.getAddSym();
321 if (needsInterworking(Asm: *Asm, Sym, FixupKind: Fixup.getKind()))
322 return true;
323
324 if (!Resolved)
325 return true;
326 return reasonForFixupRelaxation(Fixup, Value);
327}
328
329void ARMAsmBackend::relaxInstruction(MCInst &Inst,
330 const MCSubtargetInfo &STI) const {
331 unsigned RelaxedOp = getRelaxedOpcode(Op: Inst.getOpcode(), STI);
332 assert(RelaxedOp != Inst.getOpcode());
333
334 // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
335 // have to change the operands too.
336 if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
337 RelaxedOp == ARM::tHINT) {
338 MCInst Res;
339 Res.setOpcode(RelaxedOp);
340 Res.addOperand(Op: MCOperand::createImm(Val: 0));
341 Res.addOperand(Op: MCOperand::createImm(Val: 14));
342 Res.addOperand(Op: MCOperand::createReg(Reg: 0));
343 Inst = std::move(Res);
344 return;
345 }
346
347 // The rest of instructions we're relaxing have the same operands.
348 // We just need to update to the proper opcode.
349 Inst.setOpcode(RelaxedOp);
350}
351
352bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
353 const MCSubtargetInfo *STI) const {
354 const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
355 const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
356 const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0
357 const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
358 if (STI->hasFeature(Feature: ARM::ModeThumb)) {
359 const uint16_t nopEncoding =
360 hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
361 uint64_t NumNops = Count / 2;
362 for (uint64_t i = 0; i != NumNops; ++i)
363 support::endian::write(os&: OS, value: nopEncoding, endian: Endian);
364 if (Count & 1)
365 OS << '\0';
366 return true;
367 }
368 // ARM mode
369 const uint32_t nopEncoding =
370 hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
371 uint64_t NumNops = Count / 4;
372 for (uint64_t i = 0; i != NumNops; ++i)
373 support::endian::write(os&: OS, value: nopEncoding, endian: Endian);
374 // FIXME: should this function return false when unable to write exactly
375 // 'Count' bytes with NOP encodings?
376 switch (Count % 4) {
377 default:
378 break; // No leftover bytes to write
379 case 1:
380 OS << '\0';
381 break;
382 case 2:
383 OS.write(Ptr: "\0\0", Size: 2);
384 break;
385 case 3:
386 OS.write(Ptr: "\0\0\xa0", Size: 3);
387 break;
388 }
389
390 return true;
391}
392
// Exchange the two 16-bit halves of Value on little-endian targets.
// Thumb 32-bit instructions are stored as two halfwords, high half first,
// so on little-endian hosts the fixup bits must be swapped to line up with
// the instruction bytes; big-endian values already match.
static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (!IsLittleEndian)
    return Value;
  return (Value >> 16) | (Value << 16);
}
403
// Combine two 16-bit instruction halves into one 32-bit value, honouring
// the in-memory halfword order: on little-endian targets the second half
// occupies the high 16 bits, on big-endian targets the first half does.
// Only the low 16 bits of each input are used.
static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  const uint32_t First16 = FirstHalf & 0xFFFF;
  const uint32_t Second16 = SecondHalf & 0xFFFF;
  return IsLittleEndian ? (Second16 << 16) | First16
                        : (First16 << 16) | Second16;
}
418
419unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
420 const MCFixup &Fixup,
421 const MCValue &Target, uint64_t Value,
422 bool IsResolved, MCContext &Ctx,
423 const MCSubtargetInfo* STI) const {
424 unsigned Kind = Fixup.getKind();
425 int64_t Addend = Target.getConstant();
426
427 // For MOVW/MOVT Instructions, the fixup value must already be within a
428 // signed 16bit range.
429 if ((Kind == ARM::fixup_arm_movw_lo16 || Kind == ARM::fixup_arm_movt_hi16 ||
430 Kind == ARM::fixup_t2_movw_lo16 || Kind == ARM::fixup_t2_movt_hi16) &&
431 !IsResolved && (Addend < minIntN(N: 16) || Addend > maxIntN(N: 16))) {
432 Ctx.reportError(L: Fixup.getLoc(), Msg: "Relocation Not In Range");
433 return 0;
434 }
435
436 // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
437 // and .word relocations they put the Thumb bit into the addend if possible.
438 // Other relocation types don't want this bit though (branches couldn't encode
439 // it if it *was* present, and no other relocations exist) and it can
440 // interfere with checking valid expressions.
441 if (getContext().getObjectFileType() == MCContext::IsMachO) {
442 if (auto *SA = static_cast<const MCSymbolMachO *>(Target.getAddSym())) {
443 if (Asm.isThumbFunc(Func: SA) && SA->isExternal() &&
444 (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
445 Kind == ARM::fixup_arm_movt_hi16 ||
446 Kind == ARM::fixup_t2_movw_lo16 || Kind == ARM::fixup_t2_movt_hi16))
447 Value |= 1;
448 }
449 }
450
451 switch (Kind) {
452 default:
453 return 0;
454 case FK_Data_1:
455 case FK_Data_2:
456 case FK_Data_4:
457 return Value;
458 case FK_SecRel_2:
459 return Value;
460 case FK_SecRel_4:
461 return Value;
462 case ARM::fixup_arm_movt_hi16:
463 assert(STI != nullptr);
464 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
465 Value >>= 16;
466 [[fallthrough]];
467 case ARM::fixup_arm_movw_lo16: {
468 unsigned Hi4 = (Value & 0xF000) >> 12;
469 unsigned Lo12 = Value & 0x0FFF;
470 // inst{19-16} = Hi4;
471 // inst{11-0} = Lo12;
472 Value = (Hi4 << 16) | (Lo12);
473 return Value;
474 }
475 case ARM::fixup_t2_movt_hi16:
476 assert(STI != nullptr);
477 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
478 Value >>= 16;
479 [[fallthrough]];
480 case ARM::fixup_t2_movw_lo16: {
481 unsigned Hi4 = (Value & 0xF000) >> 12;
482 unsigned i = (Value & 0x800) >> 11;
483 unsigned Mid3 = (Value & 0x700) >> 8;
484 unsigned Lo8 = Value & 0x0FF;
485 // inst{19-16} = Hi4;
486 // inst{26} = i;
487 // inst{14-12} = Mid3;
488 // inst{7-0} = Lo8;
489 Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
490 return swapHalfWords(Value, IsLittleEndian: Endian == llvm::endianness::little);
491 }
492 case ARM::fixup_arm_thumb_upper_8_15:
493 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
494 return (Value & 0xff000000) >> 24;
495 return Value & 0xff;
496 case ARM::fixup_arm_thumb_upper_0_7:
497 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
498 return (Value & 0x00ff0000) >> 16;
499 return Value & 0xff;
500 case ARM::fixup_arm_thumb_lower_8_15:
501 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
502 return (Value & 0x0000ff00) >> 8;
503 return Value & 0xff;
504 case ARM::fixup_arm_thumb_lower_0_7:
505 return Value & 0x000000ff;
506 case ARM::fixup_arm_ldst_pcrel_12:
507 // ARM PC-relative values are offset by 8.
508 Value -= 4;
509 [[fallthrough]];
510 case ARM::fixup_t2_ldst_pcrel_12:
511 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
512 Value -= 4;
513 [[fallthrough]];
514 case ARM::fixup_arm_ldst_abs_12: {
515 bool isAdd = true;
516 if ((int64_t)Value < 0) {
517 Value = -Value;
518 isAdd = false;
519 }
520 if (Value >= 4096) {
521 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range pc-relative fixup value");
522 return 0;
523 }
524 Value |= isAdd << 23;
525
526 // Same addressing mode as fixup_arm_pcrel_10,
527 // but with 16-bit halfwords swapped.
528 if (Kind == ARM::fixup_t2_ldst_pcrel_12)
529 return swapHalfWords(Value, IsLittleEndian: Endian == llvm::endianness::little);
530
531 return Value;
532 }
533 case ARM::fixup_arm_adr_pcrel_12: {
534 // ARM PC-relative values are offset by 8.
535 Value -= 8;
536 unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
537 if ((int64_t)Value < 0) {
538 Value = -Value;
539 opc = 2; // 0b0010
540 }
541 if (ARM_AM::getSOImmVal(Arg: Value) == -1) {
542 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range pc-relative fixup value");
543 return 0;
544 }
545 // Encode the immediate and shift the opcode into place.
546 return ARM_AM::getSOImmVal(Arg: Value) | (opc << 21);
547 }
548
549 case ARM::fixup_t2_adr_pcrel_12: {
550 Value -= 4;
551 unsigned opc = 0;
552 if ((int64_t)Value < 0) {
553 Value = -Value;
554 opc = 5;
555 }
556
557 uint32_t out = (opc << 21);
558 out |= (Value & 0x800) << 15;
559 out |= (Value & 0x700) << 4;
560 out |= (Value & 0x0FF);
561
562 return swapHalfWords(Value: out, IsLittleEndian: Endian == llvm::endianness::little);
563 }
564
565 case ARM::fixup_arm_condbranch:
566 case ARM::fixup_arm_uncondbranch:
567 case ARM::fixup_arm_uncondbl:
568 case ARM::fixup_arm_condbl:
569 case ARM::fixup_arm_blx:
570 // Check that the relocation value is legal.
571 Value -= 8;
572 if (!isInt<26>(x: Value)) {
573 Ctx.reportError(L: Fixup.getLoc(), Msg: "Relocation out of range");
574 return 0;
575 }
576 // Alignment differs for blx. Because we are switching to thumb ISA, we use
577 // 16-bit alignment. Otherwise, use 32-bit.
578 if ((Kind == ARM::fixup_arm_blx && Value % 2 != 0) ||
579 (Kind != ARM::fixup_arm_blx && Value % 4 != 0)) {
580 Ctx.reportError(L: Fixup.getLoc(), Msg: "Relocation not aligned");
581 return 0;
582 }
583
584 // These values don't encode the low two bits since they're always zero.
585 // Offset by 8 just as above.
586 if (const MCSymbolRefExpr *SRE =
587 dyn_cast<MCSymbolRefExpr>(Val: Fixup.getValue()))
588 if (SRE->getSpecifier() == ARM::S_TLSCALL)
589 return 0;
590 return 0xffffff & (Value >> 2);
591 case ARM::fixup_t2_uncondbranch: {
592 if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
593 Value != 4) {
594 // MSVC link.exe and lld do not support this relocation type
595 // with a non-zero offset. ("Value" is offset by 4 at this point.)
596 Ctx.reportError(L: Fixup.getLoc(),
597 Msg: "cannot perform a PC-relative fixup with a non-zero "
598 "symbol offset");
599 }
600 Value = Value - 4;
601 if (!isInt<25>(x: Value)) {
602 Ctx.reportError(L: Fixup.getLoc(), Msg: "Relocation out of range");
603 return 0;
604 }
605
606 Value >>= 1; // Low bit is not encoded.
607
608 uint32_t out = 0;
609 bool I = Value & 0x800000;
610 bool J1 = Value & 0x400000;
611 bool J2 = Value & 0x200000;
612 J1 ^= I;
613 J2 ^= I;
614
615 out |= I << 26; // S bit
616 out |= !J1 << 13; // J1 bit
617 out |= !J2 << 11; // J2 bit
618 out |= (Value & 0x1FF800) << 5; // imm6 field
619 out |= (Value & 0x0007FF); // imm11 field
620
621 return swapHalfWords(Value: out, IsLittleEndian: Endian == llvm::endianness::little);
622 }
623 case ARM::fixup_t2_condbranch: {
624 Value = Value - 4;
625 if (!isInt<21>(x: Value)) {
626 Ctx.reportError(L: Fixup.getLoc(), Msg: "Relocation out of range");
627 return 0;
628 }
629
630 Value >>= 1; // Low bit is not encoded.
631
632 uint64_t out = 0;
633 out |= (Value & 0x80000) << 7; // S bit
634 out |= (Value & 0x40000) >> 7; // J2 bit
635 out |= (Value & 0x20000) >> 4; // J1 bit
636 out |= (Value & 0x1F800) << 5; // imm6 field
637 out |= (Value & 0x007FF); // imm11 field
638
639 return swapHalfWords(Value: out, IsLittleEndian: Endian == llvm::endianness::little);
640 }
641 case ARM::fixup_arm_thumb_bl: {
642 if (!isInt<25>(x: Value - 4) ||
643 (!STI->hasFeature(Feature: ARM::FeatureThumb2) &&
644 !STI->hasFeature(Feature: ARM::HasV8MBaselineOps) &&
645 !STI->hasFeature(Feature: ARM::HasV6MOps) &&
646 !isInt<23>(x: Value - 4))) {
647 Ctx.reportError(L: Fixup.getLoc(), Msg: "Relocation out of range");
648 return 0;
649 }
650 if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
651 Value != 4) {
652 // MSVC link.exe and lld do not support this relocation type
653 // with a non-zero offset. ("Value" is offset by 4 at this point.)
654 Ctx.reportError(L: Fixup.getLoc(),
655 Msg: "cannot perform a PC-relative fixup with a non-zero "
656 "symbol offset");
657 }
658
659 // The value doesn't encode the low bit (always zero) and is offset by
660 // four. The 32-bit immediate value is encoded as
661 // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
662 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
663 // The value is encoded into disjoint bit positions in the destination
664 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
665 // J = either J1 or J2 bit
666 //
667 // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
668 //
669 // Note that the halfwords are stored high first, low second; so we need
670 // to transpose the fixup value here to map properly.
671 uint32_t offset = (Value - 4) >> 1;
672 uint32_t signBit = (offset & 0x800000) >> 23;
673 uint32_t I1Bit = (offset & 0x400000) >> 22;
674 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
675 uint32_t I2Bit = (offset & 0x200000) >> 21;
676 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
677 uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
678 uint32_t imm11Bits = (offset & 0x000007FF);
679
680 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
681 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
682 (uint16_t)imm11Bits);
683 return joinHalfWords(FirstHalf, SecondHalf,
684 IsLittleEndian: Endian == llvm::endianness::little);
685 }
686 case ARM::fixup_arm_thumb_blx: {
687 if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
688 Value != 4) {
689 // MSVC link.exe and lld do not support this relocation type
690 // with a non-zero offset. ("Value" is offset by 4 at this point.)
691 Ctx.reportError(L: Fixup.getLoc(),
692 Msg: "cannot perform a PC-relative fixup with a non-zero "
693 "symbol offset");
694 }
695 // The value doesn't encode the low two bits (always zero) and is offset by
696 // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
697 // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
698 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
699 // The value is encoded into disjoint bit positions in the destination
700 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
701 // J = either J1 or J2 bit, 0 = zero.
702 //
703 // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
704 //
705 // Note that the halfwords are stored high first, low second; so we need
706 // to transpose the fixup value here to map properly.
707 if (Value % 4 != 0) {
708 Ctx.reportError(L: Fixup.getLoc(), Msg: "misaligned ARM call destination");
709 return 0;
710 }
711
712 uint32_t offset = (Value - 4) >> 2;
713 if (const MCSymbolRefExpr *SRE =
714 dyn_cast<MCSymbolRefExpr>(Val: Fixup.getValue()))
715 if (SRE->getSpecifier() == ARM::S_TLSCALL)
716 offset = 0;
717 uint32_t signBit = (offset & 0x400000) >> 22;
718 uint32_t I1Bit = (offset & 0x200000) >> 21;
719 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
720 uint32_t I2Bit = (offset & 0x100000) >> 20;
721 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
722 uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
723 uint32_t imm10LBits = (offset & 0x3FF);
724
725 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
726 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
727 ((uint16_t)imm10LBits) << 1);
728 return joinHalfWords(FirstHalf, SecondHalf,
729 IsLittleEndian: Endian == llvm::endianness::little);
730 }
731 case ARM::fixup_thumb_adr_pcrel_10:
732 case ARM::fixup_arm_thumb_cp:
733 // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
734 // could have an error on our hands.
735 assert(STI != nullptr);
736 if (!STI->hasFeature(Feature: ARM::FeatureThumb2) && IsResolved) {
737 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
738 if (FixupDiagnostic) {
739 Ctx.reportError(L: Fixup.getLoc(), Msg: FixupDiagnostic);
740 return 0;
741 }
742 }
743 // Offset by 4, and don't encode the low two bits.
744 return ((Value - 4) >> 2) & 0xff;
745 case ARM::fixup_arm_thumb_cb: {
746 // CB instructions can only branch to offsets in [4, 126] in multiples of 2
747 // so ensure that the raw value LSB is zero and it lies in [2, 130].
748 // An offset of 2 will be relaxed to a NOP.
749 if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
750 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range pc-relative fixup value");
751 return 0;
752 }
753 // Offset by 4 and don't encode the lower bit, which is always 0.
754 // FIXME: diagnose if no Thumb2
755 uint32_t Binary = (Value - 4) >> 1;
756 return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
757 }
758 case ARM::fixup_arm_thumb_br:
759 // Offset by 4 and don't encode the lower bit, which is always 0.
760 assert(STI != nullptr);
761 if (!STI->hasFeature(Feature: ARM::FeatureThumb2) &&
762 !STI->hasFeature(Feature: ARM::HasV8MBaselineOps)) {
763 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
764 if (FixupDiagnostic) {
765 Ctx.reportError(L: Fixup.getLoc(), Msg: FixupDiagnostic);
766 return 0;
767 }
768 }
769 return ((Value - 4) >> 1) & 0x7ff;
770 case ARM::fixup_arm_thumb_bcc:
771 // Offset by 4 and don't encode the lower bit, which is always 0.
772 assert(STI != nullptr);
773 if (!STI->hasFeature(Feature: ARM::FeatureThumb2)) {
774 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
775 if (FixupDiagnostic) {
776 Ctx.reportError(L: Fixup.getLoc(), Msg: FixupDiagnostic);
777 return 0;
778 }
779 }
780 return ((Value - 4) >> 1) & 0xff;
781 case ARM::fixup_arm_pcrel_10_unscaled: {
782 Value = Value - 8; // ARM fixups offset by an additional word and don't
783 // need to adjust for the half-word ordering.
784 bool isAdd = true;
785 if ((int64_t)Value < 0) {
786 Value = -Value;
787 isAdd = false;
788 }
789 // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
790 if (Value >= 256) {
791 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range pc-relative fixup value");
792 return 0;
793 }
794 Value = (Value & 0xf) | ((Value & 0xf0) << 4);
795 return Value | (isAdd << 23);
796 }
797 case ARM::fixup_arm_pcrel_10:
798 Value = Value - 4; // ARM fixups offset by an additional word and don't
799 // need to adjust for the half-word ordering.
800 [[fallthrough]];
801 case ARM::fixup_t2_pcrel_10: {
802 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
803 Value = Value - 4;
804 bool isAdd = true;
805 if ((int64_t)Value < 0) {
806 Value = -Value;
807 isAdd = false;
808 }
809 // These values don't encode the low two bits since they're always zero.
810 Value >>= 2;
811 if (Value >= 256) {
812 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range pc-relative fixup value");
813 return 0;
814 }
815 Value |= isAdd << 23;
816
817 // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
818 // swapped.
819 if (Kind == ARM::fixup_t2_pcrel_10)
820 return swapHalfWords(Value, IsLittleEndian: Endian == llvm::endianness::little);
821
822 return Value;
823 }
824 case ARM::fixup_arm_pcrel_9:
825 Value = Value - 4; // ARM fixups offset by an additional word and don't
826 // need to adjust for the half-word ordering.
827 [[fallthrough]];
828 case ARM::fixup_t2_pcrel_9: {
829 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
830 Value = Value - 4;
831 bool isAdd = true;
832 if ((int64_t)Value < 0) {
833 Value = -Value;
834 isAdd = false;
835 }
836 // These values don't encode the low bit since it's always zero.
837 if (Value & 1) {
838 Ctx.reportError(L: Fixup.getLoc(), Msg: "invalid value for this fixup");
839 return 0;
840 }
841 Value >>= 1;
842 if (Value >= 256) {
843 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range pc-relative fixup value");
844 return 0;
845 }
846 Value |= isAdd << 23;
847
848 // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
849 // swapped.
850 if (Kind == ARM::fixup_t2_pcrel_9)
851 return swapHalfWords(Value, IsLittleEndian: Endian == llvm::endianness::little);
852
853 return Value;
854 }
855 case ARM::fixup_arm_mod_imm:
856 Value = ARM_AM::getSOImmVal(Arg: Value);
857 if (Value >> 12) {
858 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range immediate fixup value");
859 return 0;
860 }
861 return Value;
862 case ARM::fixup_t2_so_imm: {
863 Value = ARM_AM::getT2SOImmVal(Arg: Value);
864 if ((int64_t)Value < 0) {
865 Ctx.reportError(L: Fixup.getLoc(), Msg: "out of range immediate fixup value");
866 return 0;
867 }
868 // Value will contain a 12-bit value broken up into a 4-bit shift in bits
869 // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
870 // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
871 // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
872 // half-word.
873 uint64_t EncValue = 0;
874 EncValue |= (Value & 0x800) << 15;
875 EncValue |= (Value & 0x700) << 4;
876 EncValue |= (Value & 0xff);
877 return swapHalfWords(Value: EncValue, IsLittleEndian: Endian == llvm::endianness::little);
878 }
879 case ARM::fixup_bf_branch: {
880 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
881 if (FixupDiagnostic) {
882 Ctx.reportError(L: Fixup.getLoc(), Msg: FixupDiagnostic);
883 return 0;
884 }
885 uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
886 return swapHalfWords(Value: out, IsLittleEndian: Endian == llvm::endianness::little);
887 }
888 case ARM::fixup_bf_target:
889 case ARM::fixup_bfl_target:
890 case ARM::fixup_bfc_target: {
891 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
892 if (FixupDiagnostic) {
893 Ctx.reportError(L: Fixup.getLoc(), Msg: FixupDiagnostic);
894 return 0;
895 }
896 uint32_t out = 0;
897 uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
898 Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
899 out |= (((Value - 4) >> 1) & 0x1) << 11;
900 out |= (((Value - 4) >> 1) & 0x7fe);
901 out |= (((Value - 4) >> 1) & HighBitMask) << 5;
902 return swapHalfWords(Value: out, IsLittleEndian: Endian == llvm::endianness::little);
903 }
904 case ARM::fixup_bfcsel_else_target: {
905 // If this is a fixup of a branch future's else target then it should be a
906 // constant MCExpr representing the distance between the branch targetted
907 // and the instruction after that same branch.
908 Value = Target.getConstant();
909
910 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
911 if (FixupDiagnostic) {
912 Ctx.reportError(L: Fixup.getLoc(), Msg: FixupDiagnostic);
913 return 0;
914 }
915 uint32_t out = ((Value >> 2) & 1) << 17;
916 return swapHalfWords(Value: out, IsLittleEndian: Endian == llvm::endianness::little);
917 }
918 case ARM::fixup_wls:
919 case ARM::fixup_le: {
920 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
921 if (FixupDiagnostic) {
922 Ctx.reportError(L: Fixup.getLoc(), Msg: FixupDiagnostic);
923 return 0;
924 }
925 uint64_t real_value = Value - 4;
926 uint32_t out = 0;
927 if (Kind == ARM::fixup_le)
928 real_value = -real_value;
929 out |= ((real_value >> 1) & 0x1) << 11;
930 out |= ((real_value >> 1) & 0x7fe);
931 return swapHalfWords(Value: out, IsLittleEndian: Endian == llvm::endianness::little);
932 }
933 }
934}
935
936bool ARMAsmBackend::shouldForceRelocation(const MCFixup &Fixup,
937 const MCValue &Target) {
938 const MCSymbol *Sym = Target.getAddSym();
939 const unsigned FixupKind = Fixup.getKind();
940 // Create relocations for unconditional branches to function symbols with
941 // different execution mode in ELF binaries.
942 if (needsInterworking(Asm: *Asm, Sym, FixupKind: Fixup.getKind()))
943 return true;
944 // We must always generate a relocation for BL/BLX instructions if we have
945 // a symbol to reference, as the linker relies on knowing the destination
946 // symbol's thumb-ness to get interworking right.
947 if (Sym && (FixupKind == ARM::fixup_arm_thumb_blx ||
948 FixupKind == ARM::fixup_arm_blx ||
949 FixupKind == ARM::fixup_arm_uncondbl ||
950 FixupKind == ARM::fixup_arm_condbl))
951 return true;
952 return Target.getSpecifier();
953}
954
955/// getFixupKindNumBytes - The number of bytes the fixup may change.
956static unsigned getFixupKindNumBytes(unsigned Kind) {
957 switch (Kind) {
958 default:
959 llvm_unreachable("Unknown fixup kind!");
960
961 case FK_Data_1:
962 case ARM::fixup_arm_thumb_bcc:
963 case ARM::fixup_arm_thumb_cp:
964 case ARM::fixup_thumb_adr_pcrel_10:
965 case ARM::fixup_arm_thumb_upper_8_15:
966 case ARM::fixup_arm_thumb_upper_0_7:
967 case ARM::fixup_arm_thumb_lower_8_15:
968 case ARM::fixup_arm_thumb_lower_0_7:
969 return 1;
970
971 case FK_Data_2:
972 case ARM::fixup_arm_thumb_br:
973 case ARM::fixup_arm_thumb_cb:
974 case ARM::fixup_arm_mod_imm:
975 return 2;
976
977 case ARM::fixup_arm_pcrel_10_unscaled:
978 case ARM::fixup_arm_ldst_pcrel_12:
979 case ARM::fixup_arm_pcrel_10:
980 case ARM::fixup_arm_pcrel_9:
981 case ARM::fixup_arm_ldst_abs_12:
982 case ARM::fixup_arm_adr_pcrel_12:
983 case ARM::fixup_arm_uncondbl:
984 case ARM::fixup_arm_condbl:
985 case ARM::fixup_arm_blx:
986 case ARM::fixup_arm_condbranch:
987 case ARM::fixup_arm_uncondbranch:
988 return 3;
989
990 case FK_Data_4:
991 case ARM::fixup_t2_ldst_pcrel_12:
992 case ARM::fixup_t2_condbranch:
993 case ARM::fixup_t2_uncondbranch:
994 case ARM::fixup_t2_pcrel_10:
995 case ARM::fixup_t2_pcrel_9:
996 case ARM::fixup_t2_adr_pcrel_12:
997 case ARM::fixup_arm_thumb_bl:
998 case ARM::fixup_arm_thumb_blx:
999 case ARM::fixup_arm_movt_hi16:
1000 case ARM::fixup_arm_movw_lo16:
1001 case ARM::fixup_t2_movt_hi16:
1002 case ARM::fixup_t2_movw_lo16:
1003 case ARM::fixup_t2_so_imm:
1004 case ARM::fixup_bf_branch:
1005 case ARM::fixup_bf_target:
1006 case ARM::fixup_bfl_target:
1007 case ARM::fixup_bfc_target:
1008 case ARM::fixup_bfcsel_else_target:
1009 case ARM::fixup_wls:
1010 case ARM::fixup_le:
1011 return 4;
1012
1013 case FK_SecRel_2:
1014 return 2;
1015 case FK_SecRel_4:
1016 return 4;
1017 }
1018}
1019
1020/// getFixupKindContainerSizeBytes - The number of bytes of the
1021/// container involved in big endian.
1022static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
1023 switch (Kind) {
1024 default:
1025 llvm_unreachable("Unknown fixup kind!");
1026
1027 case FK_Data_1:
1028 return 1;
1029 case FK_Data_2:
1030 return 2;
1031 case FK_Data_4:
1032 return 4;
1033
1034 case ARM::fixup_arm_thumb_bcc:
1035 case ARM::fixup_arm_thumb_cp:
1036 case ARM::fixup_thumb_adr_pcrel_10:
1037 case ARM::fixup_arm_thumb_br:
1038 case ARM::fixup_arm_thumb_cb:
1039 case ARM::fixup_arm_thumb_upper_8_15:
1040 case ARM::fixup_arm_thumb_upper_0_7:
1041 case ARM::fixup_arm_thumb_lower_8_15:
1042 case ARM::fixup_arm_thumb_lower_0_7:
1043 // Instruction size is 2 bytes.
1044 return 2;
1045
1046 case ARM::fixup_arm_pcrel_10_unscaled:
1047 case ARM::fixup_arm_ldst_pcrel_12:
1048 case ARM::fixup_arm_pcrel_10:
1049 case ARM::fixup_arm_pcrel_9:
1050 case ARM::fixup_arm_adr_pcrel_12:
1051 case ARM::fixup_arm_uncondbl:
1052 case ARM::fixup_arm_condbl:
1053 case ARM::fixup_arm_blx:
1054 case ARM::fixup_arm_condbranch:
1055 case ARM::fixup_arm_uncondbranch:
1056 case ARM::fixup_t2_ldst_pcrel_12:
1057 case ARM::fixup_t2_condbranch:
1058 case ARM::fixup_t2_uncondbranch:
1059 case ARM::fixup_t2_pcrel_10:
1060 case ARM::fixup_t2_pcrel_9:
1061 case ARM::fixup_t2_adr_pcrel_12:
1062 case ARM::fixup_arm_thumb_bl:
1063 case ARM::fixup_arm_thumb_blx:
1064 case ARM::fixup_arm_movt_hi16:
1065 case ARM::fixup_arm_movw_lo16:
1066 case ARM::fixup_t2_movt_hi16:
1067 case ARM::fixup_t2_movw_lo16:
1068 case ARM::fixup_arm_mod_imm:
1069 case ARM::fixup_t2_so_imm:
1070 case ARM::fixup_bf_branch:
1071 case ARM::fixup_bf_target:
1072 case ARM::fixup_bfl_target:
1073 case ARM::fixup_bfc_target:
1074 case ARM::fixup_bfcsel_else_target:
1075 case ARM::fixup_wls:
1076 case ARM::fixup_le:
1077 // Instruction size is 4 bytes.
1078 return 4;
1079 }
1080}
1081
1082std::optional<bool> ARMAsmBackend::evaluateFixup(const MCFragment &F,
1083 MCFixup &Fixup, MCValue &,
1084 uint64_t &Value) {
1085 // For a few PC-relative fixups in Thumb mode, offsets need to be aligned
1086 // down. We compensate here because the default handler's `Value` decrement
1087 // doesn't account for this alignment.
1088 switch (Fixup.getKind()) {
1089 case ARM::fixup_t2_ldst_pcrel_12:
1090 case ARM::fixup_t2_pcrel_10:
1091 case ARM::fixup_t2_pcrel_9:
1092 case ARM::fixup_thumb_adr_pcrel_10:
1093 case ARM::fixup_t2_adr_pcrel_12:
1094 case ARM::fixup_arm_thumb_blx:
1095 case ARM::fixup_arm_thumb_cp:
1096 Value = (Asm->getFragmentOffset(F) + Fixup.getOffset()) % 4;
1097 }
1098 return {};
1099}
1100
// Patch the fixup's value into the fragment bytes, or record a relocation
// when the value cannot (or must not) be resolved at assembly time.
void ARMAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                               const MCValue &Target, uint8_t *Data,
                               uint64_t Value, bool IsResolved) {
  // Even a resolvable fixup may need a relocation (e.g. interworking BL/BLX);
  // demote it to unresolved before deciding whether to emit a relocation.
  if (IsResolved && shouldForceRelocation(Fixup, Target))
    IsResolved = false;
  maybeAddReloc(F, Fixup, Target, Value, IsResolved);
  auto Kind = Fixup.getKind();
  // Pure relocation kinds have no instruction bits to patch.
  if (mc::isRelocation(FixupKind: Kind))
    return;
  MCContext &Ctx = getContext();
  // Split the value into the instruction's bitfields (and report range
  // errors). A zero result means no instruction bits change.
  Value = adjustFixupValue(Asm: *Asm, Fixup, Target, Value, IsResolved, Ctx,
                           STI: getSubtargetInfo(F));
  if (!Value)
    return; // Doesn't change encoding.
  const unsigned NumBytes = getFixupKindNumBytes(Kind);

  assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
         "Invalid fixup offset!");

  // Used to point to big endian bytes.
  // Only meaningful (and only read) in the big-endian case below.
  unsigned FullSizeBytes;
  if (Endian == llvm::endianness::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
    assert(Fixup.getOffset() + FullSizeBytes <= F.getSize() &&
           "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above. On big-endian targets the bytes are written from the end
  // of the container backwards.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx =
        Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i);
    Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}
1138
namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  // Mode selector: which unwinding scheme this entry uses.
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  UNWIND_ARM_MODE_FRAME = 0x01000000,   // Standard r7 frame, GPR saves only.
  UNWIND_ARM_MODE_FRAME_D = 0x02000000, // Standard frame plus D-register saves.
  UNWIND_ARM_MODE_DWARF = 0x04000000,   // Fall back to DWARF unwinding.

  // Extra stack adjustment (e.g. for var-args) encoded in these bits.
  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  // "First push" registers saved immediately below r7/lr.
  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  // "Second push" registers saved below the first group.
  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  // Count of saved D registers (used with UNWIND_ARM_MODE_FRAME_D).
  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  // Offset of the DWARF FDE when falling back to UNWIND_ARM_MODE_DWARF.
  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace
1166
/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fallback and unwind using dwarf.
uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    const MCDwarfFrameInfo *FI, const MCContext *Ctxt) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
  if (Instrs.empty())
    return 0;
  // Non-canonical personalities only get compact unwind when the context
  // explicitly allows it; otherwise defer to DWARF.
  if (!isDarwinCanonicalPersonality(Sym: FI->Personality) &&
      !Ctxt->emitCompactUnwindNonCanonical())
    return CU::UNWIND_ARM_MODE_DWARF;

  // Start off assuming CFA is at SP+0.
  MCRegister CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  // (absence from the map means "not saved"; values are CFA-relative offsets).
  DenseMap<MCRegister, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (const MCCFIInstruction &Inst : Instrs) {
    MCRegister Reg;
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = Inst.getOffset();
      CFARegister = *MRI.getLLVMRegNum(RegNum: Inst.getRegister(), isEH: true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = *MRI.getLLVMRegNum(RegNum: Inst.getRegister(), isEH: true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = *MRI.getLLVMRegNum(RegNum: Inst.getRegister(), isEH: true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        // Saves of registers outside GPR/DPR cannot be described compactly.
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertable to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode="
                          << uint8_t(Inst.getOperation()) << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister.id()
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  // With a standard frame, CFA = r7 + 8 (r7 and lr pushed). Any extra is a
  // stack adjustment that must also fit the compact encoding.
  int StackAdjust = CFARegisterOffset - 8;
  // lr must sit immediately above r7 at the standard slot.
  if (RegOffsets.lookup(Val: ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE(
        "compact-unwind",
        llvm::dbgs() << "LR not saved as standard frame, StackAdjust="
                     << StackAdjust
                     << ", CFARegisterOffset=" << CFARegisterOffset
                     << ", lr save at offset=" << RegOffsets[ARM::LR] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(Val: ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  // Only 0/4/8/12 bytes are representable (two bits, in units of 4).
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  // Table is ordered by expected save position, from just below r7 downwards.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{.Reg: ARM::R6, .Encoding: CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {.Reg: ARM::R5, .Encoding: CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {.Reg: ARM::R4, .Encoding: CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {.Reg: ARM::R12, .Encoding: CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {.Reg: ARM::R11, .Encoding: CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {.Reg: ARM::R10, .Encoding: CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {.Reg: ARM::R9, .Encoding: CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {.Reg: ARM::R8, .Encoding: CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  // Walk the table checking each saved GPR occupies the next 4-byte slot;
  // any gap or out-of-order save forces DWARF.
  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(Val: CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static MCPhysReg FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(Val: FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  // Encode the D-register count (1..4) as count-1 in bits 8-11.
  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}
1356
1357static MCAsmBackend *createARMAsmBackend(const Target &T,
1358 const MCSubtargetInfo &STI,
1359 const MCRegisterInfo &MRI,
1360 const MCTargetOptions &Options,
1361 llvm::endianness Endian) {
1362 const Triple &TheTriple = STI.getTargetTriple();
1363 switch (TheTriple.getObjectFormat()) {
1364 default:
1365 llvm_unreachable("unsupported object format");
1366 case Triple::MachO:
1367 return new ARMAsmBackendDarwin(T, STI, MRI);
1368 case Triple::COFF:
1369 assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
1370 return new ARMAsmBackendWinCOFF(T);
1371 case Triple::ELF:
1372 assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
1373 uint8_t OSABI = Options.FDPIC
1374 ? static_cast<uint8_t>(ELF::ELFOSABI_ARM_FDPIC)
1375 : MCELFObjectTargetWriter::getOSABI(OSType: TheTriple.getOS());
1376 return new ARMAsmBackendELF(T, OSABI, Endian);
1377 }
1378}
1379
1380MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1381 const MCSubtargetInfo &STI,
1382 const MCRegisterInfo &MRI,
1383 const MCTargetOptions &Options) {
1384 return createARMAsmBackend(T, STI, MRI, Options, Endian: llvm::endianness::little);
1385}
1386
1387MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1388 const MCSubtargetInfo &STI,
1389 const MCRegisterInfo &MRI,
1390 const MCTargetOptions &Options) {
1391 return createARMAsmBackend(T, STI, MRI, Options, Endian: llvm::endianness::big);
1392}
1393