//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCAsmInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &, MCContext &ctx) : Ctx(ctx) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getCondCompBranchTargetOpValue - Return the encoded value for a
  /// conditional compare-and-branch target.
  uint32_t getCondCompBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  /// getPAuthPCRelOpValue - Return the encoded value for a pointer
  /// authentication pc-relative operand.
  uint32_t getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

  template <unsigned Multiple, unsigned Min, unsigned Max>
  uint32_t EncodeRegMul_MinMax(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;
  uint32_t EncodeZK(const MCInst &MI, unsigned OpIdx,
                    SmallVectorImpl<MCFixup> &Fixups,
                    const MCSubtargetInfo &STI) const;
  uint32_t EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  uint32_t EncodeZPR2StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;
  uint32_t EncodeZPR4StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const;
  template <unsigned BaseReg>
  uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

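/// getLdStUImm12OpValue - Return encoding info for the 12-bit unsigned
/// immediate attached to a load, store or prfm instruction. If the operand
/// requires a relocation, record it and return zero in that part of the
/// encoding.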
template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  MCNumFixups += 1;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 12-13 of the
/// return value.
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (auto *A64E = dyn_cast<MCSpecifierExpr>(Expr)) {
    AArch64::Specifier RefKind = A64E->getSpecifier();
    if (RefKind == AArch64::S_TPREL_HI12 || RefKind == AArch64::S_DTPREL_HI12 ||
        RefKind == AArch64::S_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}

/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getCondCompBranchTargetOpValue - Return the encoded value for a conditional
/// compare-and-branch target.
uint32_t AArch64MCCodeEmitter::getCondCompBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch9);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getPAuthPCRelOpValue - Return the encoded value for a pointer
/// authentication pc-relative operand.
uint32_t
AArch64MCCodeEmitter::getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, invert sign as it's a negative value
  // that should be encoded as unsigned.
  if (MO.isImm())
    return -(MO.getImm());
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch16);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

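/// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
/// instruction: bit 0 is whether a shift is present, bit 1 is whether the
/// operation is a sign extend (as opposed to a zero extend).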
uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}

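/// getMoveWideImmOpValue - Return the encoded value for the immediate operand
/// of a MOVZ or MOVK instruction, recording a fixup_aarch64_movw fixup when
/// the operand is a relocatable expression.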
uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
///   00 -> 0
///   01 -> 8
///   10 -> 16
///   11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

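// The vector shift encoders below work relative to the element size in bits
// (8, 16, 32 or 64): right-shift operands are encoded as (size - imm) and
// left-shift operands as (imm - size).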
uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 8;
}

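// Encode a register operand whose hardware encoding must be a multiple of
// Multiple within the range [Min, Max]; the emitted field value is
// (RegVal - Min) / Multiple.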
template <unsigned Multiple, unsigned Min, unsigned Max>
uint32_t
AArch64MCCodeEmitter::EncodeRegMul_MinMax(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  assert(llvm::isPowerOf2_32(Multiple) && "Multiple is not a power of 2");
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  assert(RegVal >= Min && RegVal <= Max && (RegVal & (Multiple - 1)) == 0);
  return (RegVal - Min) / Multiple;
}

// Zk is the name of the control vector register Z20-Z23 or Z28-Z31, encoded in
// the "K:Zk" fields. Z20-Z23 = 000, 001, 010, 011 and Z28-Z31 = 100, 101, 110,
// 111.
uint32_t AArch64MCCodeEmitter::EncodeZK(const MCInst &MI, unsigned OpIdx,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);

  // Z28-Z31 => encoded value is 4..7 (RegVal - 24).
  if (RegOpnd > AArch64::Z27)
    return (RegVal - 24);

  assert((RegOpnd > AArch64::Z19 && RegOpnd < AArch64::Z24) &&
         "Expected ZK in Z20..Z23 or Z28..Z31");
  // Z20-Z23 => encoded value is 0..3 (RegVal - 20).
  return (RegVal - 20);
}

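// Encode a predicate-as-counter register in the range PN8-PN15 as its offset
// from PN8 (a 3-bit value).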
uint32_t
AArch64MCCodeEmitter::EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - AArch64::PN8;
}

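// Encode a strided ZPR2 tuple from its first register: bit 4 of the register
// encoding is placed in bit 3 of the result, above the low three bits (Zt).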
uint32_t AArch64MCCodeEmitter::EncodeZPR2StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 1;
  unsigned Zt = RegVal & 0x7;
  return T | Zt;
}

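// Same scheme for a strided ZPR4 tuple: bit 4 of the first register's encoding
// becomes bit 2 of the result, above the low two bits (Zt).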
uint32_t AArch64MCCodeEmitter::EncodeZPR4StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 2;
  unsigned Zt = RegVal & 0x3;
  return T | Zt;
}

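// The matrix tile list operand is already an 8-bit mask; emit it unchanged.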
uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned RegMask = MI.getOperand(OpIdx).getImm();
  assert(RegMask <= 0xFF && "Invalid register mask!");
  return RegMask;
}

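// Encode a matrix-index GPR32 as its distance from BaseReg (for example, the
// first register of the class encodes as 0 when BaseReg names that register).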
template <unsigned BaseReg>
uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - BaseReg;
}

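// Encode an 8-bit immediate with an optional LSL #8: the low eight bits hold
// the immediate and bit 8 is set when the shift is LSL #8.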
uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}

uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (auto *A64E = dyn_cast<MCSpecifierExpr>(E)) {
    switch (A64E->getSpecifier()) {
    case AArch64::S_DTPREL_G2:
    case AArch64::S_DTPREL_G1:
    case AArch64::S_DTPREL_G0:
    case AArch64::S_GOTTPREL_G1:
    case AArch64::S_TPREL_G2:
    case AArch64::S_TPREL_G1:
    case AArch64::S_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}

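/// encodeInstruction - Emit the 32-bit instruction word for MI (little-endian)
/// into CB, recording any fixups. TLSDESCCALL and SPACE are handled specially
/// and emit no bytes.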
void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI,
                                             SmallVectorImpl<char> &CB,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(MCFixup::create(0, MI.getOperand(0).getExpr(), Reloc));
    return;
  }

  if (MI.getOpcode() == AArch64::SPACE) {
    // SPACE just increases basic block size; it emits no actual code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(CB, Binary, llvm::endianness::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}

#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}