//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &, MCContext &ctx) : Ctx(ctx) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getPAuthPCRelOpValue - Return the encoded value for a pointer
  /// authentication pc-relative operand.
  uint32_t getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

  template <unsigned Multiple>
  uint32_t EncodeRegAsMultipleOf(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  uint32_t EncodeZPR2StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;
  uint32_t EncodeZPR4StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const;
  template <unsigned BaseReg>
  uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

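/// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
/// attached to a load, store or prfm instruction. If operand requires a
/// relocation, record it and return zero in that part of the encoding.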
template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  MCNumFixups += 1;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 12-13 of the
/// return value.
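/// For example, sub-operands [#1, LSL #12] yield ShiftVal == 12, so the
/// returned value is 1 | (1 << 12).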
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}

/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getPAuthPCRelOpValue - Return the encoded value for a pointer
/// authentication pc-relative operand.
uint32_t
AArch64MCCodeEmitter::getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, invert the sign, as it is a negative
  // value that should be encoded as unsigned.
  if (MO.isImm())
    return -(MO.getImm());
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch16);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

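/// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
/// instruction: bit 0 is whether a shift is present, bit 1 is whether the
/// operation is a sign extend (as opposed to a zero extend).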
uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}

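/// getMoveWideImmOpValue - Return the encoded value for the immediate operand
/// of a MOVZ or MOVK instruction.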
uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
///   00 -> 0
///   01 -> 8
///   10 -> 16
///   11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

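// The vector shift helpers below encode right-shift amounts as
// (element size - shift) and left-shift amounts as (shift - element size),
// one helper per element width.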
uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 8;
}

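/// EncodeRegAsMultipleOf - Return the register operand's encoding value
/// divided by Multiple (which must be a power of two), so the instruction
/// field only has to hold the scaled-down index.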
template <unsigned Multiple>
uint32_t
AArch64MCCodeEmitter::EncodeRegAsMultipleOf(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  assert(llvm::isPowerOf2_32(Multiple) && "Multiple is not a power of 2");
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  return RegVal / Multiple;
}

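/// EncodePNR_p8to15 - Encode a predicate-as-counter register in the PN8-PN15
/// range as its offset from PN8 (a 3-bit value).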
uint32_t
AArch64MCCodeEmitter::EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - AArch64::PN8;
}

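/// EncodeZPR2StridedRegisterClass - Encode the first register of a strided
/// ZPR pair: bit 4 of its encoding becomes bit 3 of the field and the low
/// three bits are kept, giving a 4-bit value.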
uint32_t AArch64MCCodeEmitter::EncodeZPR2StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 1;
  unsigned Zt = RegVal & 0x7;
  return T | Zt;
}

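/// EncodeZPR4StridedRegisterClass - Likewise for a strided ZPR quad: bit 4 of
/// the encoding becomes bit 2 of the field and the low two bits are kept,
/// giving a 3-bit value.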
uint32_t AArch64MCCodeEmitter::EncodeZPR4StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 2;
  unsigned Zt = RegVal & 0x3;
  return T | Zt;
}

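/// EncodeMatrixTileListRegisterClass - The tile list operand is already an
/// 8-bit mask immediate, so it is passed through unchanged.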
uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned RegMask = MI.getOperand(OpIdx).getImm();
  assert(RegMask <= 0xFF && "Invalid register mask!");
  return RegMask;
}

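/// encodeMatrixIndexGPR32 - Encode a GPR32 matrix index register as its
/// distance from the template's BaseReg.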
template <unsigned BaseReg>
uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - BaseReg;
}

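/// getImm8OptLsl - Return the encoding of an 8-bit immediate with an optional
/// LSL #8 shift: the low 8 bits hold the immediate and bit 8 is set when the
/// shift is applied (e.g. "#1, LSL #8" encodes as 0x101).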
uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}

uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
    switch (A64E->getKind()) {
    case AArch64MCExpr::VK_DTPREL_G2:
    case AArch64MCExpr::VK_DTPREL_G1:
    case AArch64MCExpr::VK_DTPREL_G0:
    case AArch64MCExpr::VK_GOTTPREL_G1:
    case AArch64MCExpr::VK_TPREL_G2:
    case AArch64MCExpr::VK_TPREL_G1:
    case AArch64MCExpr::VK_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}

void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI,
                                             SmallVectorImpl<char> &CB,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(
        MCFixup::create(0, MI.getOperand(0).getExpr(),
                        MCFixupKind(FirstLiteralRelocationKind + Reloc)));
    return;
  }

  if (MI.getOpcode() == AArch64::SPACE) {
    // SPACE just increases basic block size; it emits no actual code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(CB, Binary, llvm::endianness::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

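/// fixLoadStoreExclusive - For load/store-exclusive encodings that lack an Rs
/// or Rt2 operand, force the corresponding fields (bits [20:16] and [14:10]
/// respectively) to all ones.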
template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}

#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}