1//==-- AArch64InstPrinter.cpp - Convert AArch64 MCInst to assembly syntax --==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This class prints an AArch64 MCInst to a .s file.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64InstPrinter.h"
14#include "MCTargetDesc/AArch64AddressingModes.h"
15#include "Utils/AArch64BaseInfo.h"
16#include "llvm/ADT/StringExtras.h"
17#include "llvm/ADT/StringRef.h"
18#include "llvm/MC/MCAsmInfo.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCRegisterInfo.h"
22#include "llvm/MC/MCSubtargetInfo.h"
23#include "llvm/Support/Casting.h"
24#include "llvm/Support/ErrorHandling.h"
25#include "llvm/Support/Format.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/raw_ostream.h"
28#include <cassert>
29#include <cstdint>
30#include <string>
31
32using namespace llvm;
33
34#define DEBUG_TYPE "asm-printer"
35
36#define GET_INSTRUCTION_NAME
37#define PRINT_ALIAS_INSTR
38#include "AArch64GenAsmWriter.inc"
39#define GET_INSTRUCTION_NAME
40#define PRINT_ALIAS_INSTR
41#include "AArch64GenAsmWriter1.inc"
42
// All printer state lives in the MCInstPrinter base class; this constructor
// only forwards the MC context objects (asm info, instr info, reg info).
AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
                                       const MCInstrInfo &MII,
                                       const MCRegisterInfo &MRI)
    : MCInstPrinter(MAI, MII, MRI) {}
47
// Apple/Darwin-flavoured printer variant; any behavioral differences live in
// overridden methods elsewhere — the constructor just forwards to the generic
// AArch64 printer.
AArch64AppleInstPrinter::AArch64AppleInstPrinter(const MCAsmInfo &MAI,
                                                 const MCInstrInfo &MII,
                                                 const MCRegisterInfo &MRI)
    : AArch64InstPrinter(MAI, MII, MRI) {}
52
53bool AArch64InstPrinter::applyTargetSpecificCLOption(StringRef Opt) {
54 if (Opt == "no-aliases") {
55 PrintAliases = false;
56 return true;
57 }
58 return false;
59}
60
61void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) {
62 markup(OS, M: Markup::Register) << getRegisterName(Reg);
63}
64
65void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg,
66 unsigned AltIdx) {
67 markup(OS, M: Markup::Register) << getRegisterName(Reg, AltIdx);
68}
69
// Return the default textual name for Reg, as produced by the TableGen'erated
// getRegisterName table (no alternate-name index).
StringRef AArch64InstPrinter::getRegName(MCRegister Reg) const {
  return getRegisterName(Reg);
}
73
/// Print MI to O as assembly, preferring canonical alias forms over the raw
/// instruction where the architecture defines one (SYS -> IC/DC/AT/TLBI-style
/// aliases, bitfield-move aliases, MOV-immediate aliases, etc.). Falls back to
/// the TableGen'erated alias/instruction printers when no special case fires.
void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
                                   StringRef Annot, const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  // Check for special encodings and print the canonical alias instead.

  unsigned Opcode = MI->getOpcode();

  // SYS/SYSL/SYSP encodings that match a named system operation print as that
  // alias; otherwise fall through to the generic path below.
  if (Opcode == AArch64::SYSxt)
    if (printSysAlias(MI, STI, O)) {
      printAnnotation(OS&: O, Annot);
      return;
    }

  if (Opcode == AArch64::SYSLxt)
    if (printSyslAlias(MI, STI, O)) {
      printAnnotation(OS&: O, Annot);
      return;
    }

  if (Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR)
    if (printSyspAlias(MI, STI, O)) {
      printAnnotation(OS&: O, Annot);
      return;
    }

  // RPRFM overlaps PRFM (reg), so try to print it as RPRFM here.
  if ((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) {
    if (printRangePrefetchAlias(MI, STI, O, Annot))
      return;
  }

  // SBFM/UBFM should print to a nicer aliased form if possible.
  // Alias priority: sign/zero-extend (sxtb/uxth/...) > immediate shift
  // (lsl/lsr/asr) > sbfiz/ubfiz > sbfx/ubfx.
  if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
      Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
    const MCOperand &Op0 = MI->getOperand(i: 0);
    const MCOperand &Op1 = MI->getOperand(i: 1);
    const MCOperand &Op2 = MI->getOperand(i: 2); // immr
    const MCOperand &Op3 = MI->getOperand(i: 3); // imms

    bool IsSigned = (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri);
    bool Is64Bit = (Opcode == AArch64::SBFMXri || Opcode == AArch64::UBFMXri);
    // immr == 0 with imms 7/15/31 is the extend-alias pattern.
    if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
      const char *AsmMnemonic = nullptr;

      switch (Op3.getImm()) {
      default:
        break;
      case 7:
        if (IsSigned)
          AsmMnemonic = "sxtb";
        else if (!Is64Bit)
          AsmMnemonic = "uxtb";
        break;
      case 15:
        if (IsSigned)
          AsmMnemonic = "sxth";
        else if (!Is64Bit)
          AsmMnemonic = "uxth";
        break;
      case 31:
        // *xtw is only valid for signed 64-bit operations.
        if (Is64Bit && IsSigned)
          AsmMnemonic = "sxtw";
        break;
      }

      if (AsmMnemonic) {
        O << '\t' << AsmMnemonic << '\t';
        printRegName(OS&: O, Reg: Op0.getReg());
        O << ", ";
        // The extend source is always written as a W register, even for the
        // 64-bit forms.
        printRegName(OS&: O, Reg: getWRegFromXReg(Reg: Op1.getReg()));
        printAnnotation(OS&: O, Annot);
        return;
      }
    }

    // All immediate shifts are aliases, implemented using the Bitfield
    // instruction. In all cases the immediate shift amount must be in
    // the range 0 to (reg.size - 1).
    if (Op2.isImm() && Op3.isImm()) {
      const char *AsmMnemonic = nullptr;
      int shift = 0;
      int64_t immr = Op2.getImm();
      int64_t imms = Op3.getImm();
      if (Opcode == AArch64::UBFMWri && imms != 0x1F && ((imms + 1) == immr)) {
        AsmMnemonic = "lsl";
        shift = 31 - imms;
      } else if (Opcode == AArch64::UBFMXri && imms != 0x3f &&
                 ((imms + 1 == immr))) {
        AsmMnemonic = "lsl";
        shift = 63 - imms;
      } else if (Opcode == AArch64::UBFMWri && imms == 0x1f) {
        AsmMnemonic = "lsr";
        shift = immr;
      } else if (Opcode == AArch64::UBFMXri && imms == 0x3f) {
        AsmMnemonic = "lsr";
        shift = immr;
      } else if (Opcode == AArch64::SBFMWri && imms == 0x1f) {
        AsmMnemonic = "asr";
        shift = immr;
      } else if (Opcode == AArch64::SBFMXri && imms == 0x3f) {
        AsmMnemonic = "asr";
        shift = immr;
      }
      if (AsmMnemonic) {
        O << '\t' << AsmMnemonic << '\t';
        printRegName(OS&: O, Reg: Op0.getReg());
        O << ", ";
        printRegName(OS&: O, Reg: Op1.getReg());
        O << ", ";
        markup(OS&: O, M: Markup::Immediate) << "#" << shift;
        printAnnotation(OS&: O, Annot);
        return;
      }
    }

    // SBFIZ/UBFIZ aliases
    // (immr/imms are always immediates for these opcodes, so getImm() is safe
    // here without a further isImm() check.)
    if (Op2.getImm() > Op3.getImm()) {
      O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t';
      printRegName(OS&: O, Reg: Op0.getReg());
      O << ", ";
      printRegName(OS&: O, Reg: Op1.getReg());
      O << ", ";
      markup(OS&: O, M: Markup::Immediate) << "#" << (Is64Bit ? 64 : 32) - Op2.getImm();
      O << ", ";
      markup(OS&: O, M: Markup::Immediate) << "#" << Op3.getImm() + 1;
      printAnnotation(OS&: O, Annot);
      return;
    }

    // Otherwise SBFX/UBFX is the preferred form
    O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t';
    printRegName(OS&: O, Reg: Op0.getReg());
    O << ", ";
    printRegName(OS&: O, Reg: Op1.getReg());
    O << ", ";
    markup(OS&: O, M: Markup::Immediate) << "#" << Op2.getImm();
    O << ", ";
    markup(OS&: O, M: Markup::Immediate) << "#" << Op3.getImm() - Op2.getImm() + 1;
    printAnnotation(OS&: O, Annot);
    return;
  }

  // BFM prints as BFC (zero source, v8.2+), BFI, or BFXIL.
  if (Opcode == AArch64::BFMXri || Opcode == AArch64::BFMWri) {
    const MCOperand &Op0 = MI->getOperand(i: 0); // Op1 == Op0
    const MCOperand &Op2 = MI->getOperand(i: 2);
    int ImmR = MI->getOperand(i: 3).getImm();
    int ImmS = MI->getOperand(i: 4).getImm();

    if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
        (ImmR == 0 || ImmS < ImmR) && STI.hasFeature(Feature: AArch64::HasV8_2aOps)) {
      // BFC takes precedence over its entire range, slightly differently to BFI.
      int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
      int LSB = (BitWidth - ImmR) % BitWidth;
      int Width = ImmS + 1;

      O << "\tbfc\t";
      printRegName(OS&: O, Reg: Op0.getReg());
      O << ", ";
      markup(OS&: O, M: Markup::Immediate) << "#" << LSB;
      O << ", ";
      markup(OS&: O, M: Markup::Immediate) << "#" << Width;
      printAnnotation(OS&: O, Annot);
      return;
    } else if (ImmS < ImmR) {
      // BFI alias
      int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
      int LSB = (BitWidth - ImmR) % BitWidth;
      int Width = ImmS + 1;

      O << "\tbfi\t";
      printRegName(OS&: O, Reg: Op0.getReg());
      O << ", ";
      printRegName(OS&: O, Reg: Op2.getReg());
      O << ", ";
      markup(OS&: O, M: Markup::Immediate) << "#" << LSB;
      O << ", ";
      markup(OS&: O, M: Markup::Immediate) << "#" << Width;
      printAnnotation(OS&: O, Annot);
      return;
    }

    int LSB = ImmR;
    int Width = ImmS - ImmR + 1;
    // Otherwise BFXIL the preferred form
    O << "\tbfxil\t";
    printRegName(OS&: O, Reg: Op0.getReg());
    O << ", ";
    printRegName(OS&: O, Reg: Op2.getReg());
    O << ", ";
    markup(OS&: O, M: Markup::Immediate) << "#" << LSB;
    O << ", ";
    markup(OS&: O, M: Markup::Immediate) << "#" << Width;
    printAnnotation(OS&: O, Annot);
    return;
  }

  // Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
  // (e.g. :gottprel_g1: is always going to be "lsl #16") so it should not be
  // printed.
  if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi ||
       Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
      MI->getOperand(i: 1).isExpr()) {
    if (Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi)
      O << "\tmovz\t";
    else
      O << "\tmovn\t";

    printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg());
    O << ", ";
    {
      // Scope the markup so its closing tag is emitted after the expression.
      WithMarkup M = markup(OS&: O, M: Markup::Immediate);
      O << "#";
      MAI.printExpr(O, *MI->getOperand(i: 1).getExpr());
    }
    return;
  }

  if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
      MI->getOperand(i: 2).isExpr()) {
    O << "\tmovk\t";
    printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg());
    O << ", ";
    {
      WithMarkup M = markup(OS&: O, M: Markup::Immediate);
      O << "#";
      MAI.printExpr(O, *MI->getOperand(i: 2).getExpr());
    }
    return;
  }

  // Shared emitter for the "mov Rd, #imm" alias: prints the sign-extended
  // value, plus a disassembly comment with the value in the opposite radix.
  auto PrintMovImm = [&](uint64_t Value, int RegWidth) {
    int64_t SExtVal = SignExtend64(X: Value, B: RegWidth);
    O << "\tmov\t";
    printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg());
    O << ", ";
    markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: SExtVal);
    if (CommentStream) {
      // Do the opposite to that used for instruction operands.
      if (getPrintImmHex())
        *CommentStream << '=' << formatDec(Value: SExtVal) << '\n';
      else {
        uint64_t Mask = maskTrailingOnes<uint64_t>(N: RegWidth);
        *CommentStream << '=' << formatHex(Value: SExtVal & Mask) << '\n';
      }
    }
  };

  // MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
  // domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
  // MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction
  // that can represent the move is the MOV alias, and the rest get printed
  // normally.
  if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) &&
      MI->getOperand(i: 1).isImm() && MI->getOperand(i: 2).isImm()) {
    int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32;
    int Shift = MI->getOperand(i: 2).getImm();
    uint64_t Value = (uint64_t)MI->getOperand(i: 1).getImm() << Shift;

    if (AArch64_AM::isMOVZMovAlias(Value, Shift,
                                   RegWidth: Opcode == AArch64::MOVZXi ? 64 : 32)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }

  if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
      MI->getOperand(i: 1).isImm() && MI->getOperand(i: 2).isImm()) {
    int RegWidth = Opcode == AArch64::MOVNXi ? 64 : 32;
    int Shift = MI->getOperand(i: 2).getImm();
    // MOVN moves the bitwise NOT of the (shifted) immediate.
    uint64_t Value = ~((uint64_t)MI->getOperand(i: 1).getImm() << Shift);
    if (RegWidth == 32)
      Value = Value & 0xffffffff;

    if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }

  // "orr Rd, wzr/xzr, #logimm" prints as "mov" only when no MOVZ/MOVN alias
  // higher in the chain could have represented the same value.
  if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) &&
      (MI->getOperand(i: 1).getReg() == AArch64::XZR ||
       MI->getOperand(i: 1).getReg() == AArch64::WZR) &&
      MI->getOperand(i: 2).isImm()) {
    int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32;
    uint64_t Value = AArch64_AM::decodeLogicalImmediate(
        val: MI->getOperand(i: 2).getImm(), regSize: RegWidth);
    if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }

  // SPACE is a size-reservation pseudo; emit it only as an assembly comment.
  if (Opcode == AArch64::SPACE) {
    O << '\t' << MAI.getCommentString() << " SPACE "
      << MI->getOperand(i: 1).getImm();
    printAnnotation(OS&: O, Annot);
    return;
  }

  // No hand-written alias matched: try the TableGen'erated alias printer
  // (unless disabled via "no-aliases"), then the raw instruction printer.
  if (!PrintAliases || !printAliasInstr(MI, Address, STI, OS&: O))
    printInstruction(MI, Address, STI, O);

  printAnnotation(OS&: O, Annot);

  // Warn (via annotation) when an acquire-semantics atomic discards its
  // result into the zero register, which drops the barrier effect.
  if (atomicBarrierDroppedOnZero(Opcode) &&
      (MI->getOperand(i: 0).getReg() == AArch64::XZR ||
       MI->getOperand(i: 0).getReg() == AArch64::WZR)) {
    printAnnotation(OS&: O, Annot: "acquire semantics dropped since destination is zero");
  }
}
385
386static bool isTblTbxInstruction(unsigned Opcode, StringRef &Layout,
387 bool &IsTbx) {
388 switch (Opcode) {
389 case AArch64::TBXv8i8One:
390 case AArch64::TBXv8i8Two:
391 case AArch64::TBXv8i8Three:
392 case AArch64::TBXv8i8Four:
393 IsTbx = true;
394 Layout = ".8b";
395 return true;
396 case AArch64::TBLv8i8One:
397 case AArch64::TBLv8i8Two:
398 case AArch64::TBLv8i8Three:
399 case AArch64::TBLv8i8Four:
400 IsTbx = false;
401 Layout = ".8b";
402 return true;
403 case AArch64::TBXv16i8One:
404 case AArch64::TBXv16i8Two:
405 case AArch64::TBXv16i8Three:
406 case AArch64::TBXv16i8Four:
407 IsTbx = true;
408 Layout = ".16b";
409 return true;
410 case AArch64::TBLv16i8One:
411 case AArch64::TBLv16i8Two:
412 case AArch64::TBLv16i8Three:
413 case AArch64::TBLv16i8Four:
414 IsTbx = false;
415 Layout = ".16b";
416 return true;
417 default:
418 return false;
419 }
420}
421
// One row of the structured load/store printing table (LD1-LD4 and the
// replicating LDxR forms, per the LdStNInstInfo entries below): maps a target
// opcode to the mnemonic text, vector-arrangement suffix, and operand shape
// needed to print it.
struct LdStNInstrDesc {
  unsigned Opcode;      // Target opcode this entry describes.
  const char *Mnemonic; // Mnemonic text, e.g. "ld1", "ld2r".
  const char *Layout;   // Arrangement suffix, e.g. ".16b", or per-lane ".b".
  int ListOperand;      // Operand index of the vector register list (0-2 in
                        // the table; _POST forms shift it by one).
  bool HasLane;         // True when a lane index follows the register list.
  int NaturalOffset;    // Byte count of the transfer for post-indexed forms
                        // (0 for non-_POST entries); presumably printed as the
                        // implicit immediate writeback — confirm at use site.
};
430
431static const LdStNInstrDesc LdStNInstInfo[] = {
432 { .Opcode: AArch64::LD1i8, .Mnemonic: "ld1", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
433 { .Opcode: AArch64::LD1i16, .Mnemonic: "ld1", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
434 { .Opcode: AArch64::LD1i32, .Mnemonic: "ld1", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
435 { .Opcode: AArch64::LD1i64, .Mnemonic: "ld1", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
436 { .Opcode: AArch64::LD1i8_POST, .Mnemonic: "ld1", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 1 },
437 { .Opcode: AArch64::LD1i16_POST, .Mnemonic: "ld1", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 2 },
438 { .Opcode: AArch64::LD1i32_POST, .Mnemonic: "ld1", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 4 },
439 { .Opcode: AArch64::LD1i64_POST, .Mnemonic: "ld1", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 8 },
440 { .Opcode: AArch64::LD1Rv16b, .Mnemonic: "ld1r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
441 { .Opcode: AArch64::LD1Rv8h, .Mnemonic: "ld1r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
442 { .Opcode: AArch64::LD1Rv4s, .Mnemonic: "ld1r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
443 { .Opcode: AArch64::LD1Rv2d, .Mnemonic: "ld1r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
444 { .Opcode: AArch64::LD1Rv8b, .Mnemonic: "ld1r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
445 { .Opcode: AArch64::LD1Rv4h, .Mnemonic: "ld1r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
446 { .Opcode: AArch64::LD1Rv2s, .Mnemonic: "ld1r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
447 { .Opcode: AArch64::LD1Rv1d, .Mnemonic: "ld1r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
448 { .Opcode: AArch64::LD1Rv16b_POST, .Mnemonic: "ld1r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 1 },
449 { .Opcode: AArch64::LD1Rv8h_POST, .Mnemonic: "ld1r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
450 { .Opcode: AArch64::LD1Rv4s_POST, .Mnemonic: "ld1r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
451 { .Opcode: AArch64::LD1Rv2d_POST, .Mnemonic: "ld1r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
452 { .Opcode: AArch64::LD1Rv8b_POST, .Mnemonic: "ld1r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 1 },
453 { .Opcode: AArch64::LD1Rv4h_POST, .Mnemonic: "ld1r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
454 { .Opcode: AArch64::LD1Rv2s_POST, .Mnemonic: "ld1r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
455 { .Opcode: AArch64::LD1Rv1d_POST, .Mnemonic: "ld1r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
456 { .Opcode: AArch64::LD1Onev16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
457 { .Opcode: AArch64::LD1Onev8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
458 { .Opcode: AArch64::LD1Onev4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
459 { .Opcode: AArch64::LD1Onev2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
460 { .Opcode: AArch64::LD1Onev8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
461 { .Opcode: AArch64::LD1Onev4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
462 { .Opcode: AArch64::LD1Onev2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
463 { .Opcode: AArch64::LD1Onev1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
464 { .Opcode: AArch64::LD1Onev16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
465 { .Opcode: AArch64::LD1Onev8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
466 { .Opcode: AArch64::LD1Onev4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
467 { .Opcode: AArch64::LD1Onev2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
468 { .Opcode: AArch64::LD1Onev8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
469 { .Opcode: AArch64::LD1Onev4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
470 { .Opcode: AArch64::LD1Onev2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
471 { .Opcode: AArch64::LD1Onev1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
472 { .Opcode: AArch64::LD1Twov16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
473 { .Opcode: AArch64::LD1Twov8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
474 { .Opcode: AArch64::LD1Twov4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
475 { .Opcode: AArch64::LD1Twov2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
476 { .Opcode: AArch64::LD1Twov8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
477 { .Opcode: AArch64::LD1Twov4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
478 { .Opcode: AArch64::LD1Twov2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
479 { .Opcode: AArch64::LD1Twov1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
480 { .Opcode: AArch64::LD1Twov16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
481 { .Opcode: AArch64::LD1Twov8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
482 { .Opcode: AArch64::LD1Twov4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
483 { .Opcode: AArch64::LD1Twov2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
484 { .Opcode: AArch64::LD1Twov8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
485 { .Opcode: AArch64::LD1Twov4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
486 { .Opcode: AArch64::LD1Twov2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
487 { .Opcode: AArch64::LD1Twov1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
488 { .Opcode: AArch64::LD1Threev16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
489 { .Opcode: AArch64::LD1Threev8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
490 { .Opcode: AArch64::LD1Threev4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
491 { .Opcode: AArch64::LD1Threev2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
492 { .Opcode: AArch64::LD1Threev8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
493 { .Opcode: AArch64::LD1Threev4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
494 { .Opcode: AArch64::LD1Threev2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
495 { .Opcode: AArch64::LD1Threev1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
496 { .Opcode: AArch64::LD1Threev16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
497 { .Opcode: AArch64::LD1Threev8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
498 { .Opcode: AArch64::LD1Threev4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
499 { .Opcode: AArch64::LD1Threev2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
500 { .Opcode: AArch64::LD1Threev8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
501 { .Opcode: AArch64::LD1Threev4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
502 { .Opcode: AArch64::LD1Threev2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
503 { .Opcode: AArch64::LD1Threev1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
504 { .Opcode: AArch64::LD1Fourv16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
505 { .Opcode: AArch64::LD1Fourv8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
506 { .Opcode: AArch64::LD1Fourv4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
507 { .Opcode: AArch64::LD1Fourv2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
508 { .Opcode: AArch64::LD1Fourv8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
509 { .Opcode: AArch64::LD1Fourv4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
510 { .Opcode: AArch64::LD1Fourv2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
511 { .Opcode: AArch64::LD1Fourv1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
512 { .Opcode: AArch64::LD1Fourv16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
513 { .Opcode: AArch64::LD1Fourv8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
514 { .Opcode: AArch64::LD1Fourv4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
515 { .Opcode: AArch64::LD1Fourv2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
516 { .Opcode: AArch64::LD1Fourv8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
517 { .Opcode: AArch64::LD1Fourv4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
518 { .Opcode: AArch64::LD1Fourv2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
519 { .Opcode: AArch64::LD1Fourv1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
520 { .Opcode: AArch64::LD2i8, .Mnemonic: "ld2", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
521 { .Opcode: AArch64::LD2i16, .Mnemonic: "ld2", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
522 { .Opcode: AArch64::LD2i32, .Mnemonic: "ld2", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
523 { .Opcode: AArch64::LD2i64, .Mnemonic: "ld2", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
524 { .Opcode: AArch64::LD2i8_POST, .Mnemonic: "ld2", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 2 },
525 { .Opcode: AArch64::LD2i16_POST, .Mnemonic: "ld2", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 4 },
526 { .Opcode: AArch64::LD2i32_POST, .Mnemonic: "ld2", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 8 },
527 { .Opcode: AArch64::LD2i64_POST, .Mnemonic: "ld2", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 16 },
528 { .Opcode: AArch64::LD2Rv16b, .Mnemonic: "ld2r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
529 { .Opcode: AArch64::LD2Rv8h, .Mnemonic: "ld2r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
530 { .Opcode: AArch64::LD2Rv4s, .Mnemonic: "ld2r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
531 { .Opcode: AArch64::LD2Rv2d, .Mnemonic: "ld2r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
532 { .Opcode: AArch64::LD2Rv8b, .Mnemonic: "ld2r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
533 { .Opcode: AArch64::LD2Rv4h, .Mnemonic: "ld2r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
534 { .Opcode: AArch64::LD2Rv2s, .Mnemonic: "ld2r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
535 { .Opcode: AArch64::LD2Rv1d, .Mnemonic: "ld2r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
536 { .Opcode: AArch64::LD2Rv16b_POST, .Mnemonic: "ld2r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
537 { .Opcode: AArch64::LD2Rv8h_POST, .Mnemonic: "ld2r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
538 { .Opcode: AArch64::LD2Rv4s_POST, .Mnemonic: "ld2r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
539 { .Opcode: AArch64::LD2Rv2d_POST, .Mnemonic: "ld2r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
540 { .Opcode: AArch64::LD2Rv8b_POST, .Mnemonic: "ld2r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
541 { .Opcode: AArch64::LD2Rv4h_POST, .Mnemonic: "ld2r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
542 { .Opcode: AArch64::LD2Rv2s_POST, .Mnemonic: "ld2r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
543 { .Opcode: AArch64::LD2Rv1d_POST, .Mnemonic: "ld2r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
544 { .Opcode: AArch64::LD2Twov16b, .Mnemonic: "ld2", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
545 { .Opcode: AArch64::LD2Twov8h, .Mnemonic: "ld2", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
546 { .Opcode: AArch64::LD2Twov4s, .Mnemonic: "ld2", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
547 { .Opcode: AArch64::LD2Twov2d, .Mnemonic: "ld2", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
548 { .Opcode: AArch64::LD2Twov8b, .Mnemonic: "ld2", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
549 { .Opcode: AArch64::LD2Twov4h, .Mnemonic: "ld2", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
550 { .Opcode: AArch64::LD2Twov2s, .Mnemonic: "ld2", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
551 { .Opcode: AArch64::LD2Twov16b_POST, .Mnemonic: "ld2", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
552 { .Opcode: AArch64::LD2Twov8h_POST, .Mnemonic: "ld2", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
553 { .Opcode: AArch64::LD2Twov4s_POST, .Mnemonic: "ld2", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
554 { .Opcode: AArch64::LD2Twov2d_POST, .Mnemonic: "ld2", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
555 { .Opcode: AArch64::LD2Twov8b_POST, .Mnemonic: "ld2", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
556 { .Opcode: AArch64::LD2Twov4h_POST, .Mnemonic: "ld2", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
557 { .Opcode: AArch64::LD2Twov2s_POST, .Mnemonic: "ld2", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
558 { .Opcode: AArch64::LD3i8, .Mnemonic: "ld3", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
559 { .Opcode: AArch64::LD3i16, .Mnemonic: "ld3", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
560 { .Opcode: AArch64::LD3i32, .Mnemonic: "ld3", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
561 { .Opcode: AArch64::LD3i64, .Mnemonic: "ld3", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
562 { .Opcode: AArch64::LD3i8_POST, .Mnemonic: "ld3", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 3 },
563 { .Opcode: AArch64::LD3i16_POST, .Mnemonic: "ld3", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 6 },
564 { .Opcode: AArch64::LD3i32_POST, .Mnemonic: "ld3", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 12 },
565 { .Opcode: AArch64::LD3i64_POST, .Mnemonic: "ld3", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 24 },
566 { .Opcode: AArch64::LD3Rv16b, .Mnemonic: "ld3r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
567 { .Opcode: AArch64::LD3Rv8h, .Mnemonic: "ld3r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
568 { .Opcode: AArch64::LD3Rv4s, .Mnemonic: "ld3r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
569 { .Opcode: AArch64::LD3Rv2d, .Mnemonic: "ld3r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
570 { .Opcode: AArch64::LD3Rv8b, .Mnemonic: "ld3r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
571 { .Opcode: AArch64::LD3Rv4h, .Mnemonic: "ld3r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
572 { .Opcode: AArch64::LD3Rv2s, .Mnemonic: "ld3r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
573 { .Opcode: AArch64::LD3Rv1d, .Mnemonic: "ld3r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
574 { .Opcode: AArch64::LD3Rv16b_POST, .Mnemonic: "ld3r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 3 },
575 { .Opcode: AArch64::LD3Rv8h_POST, .Mnemonic: "ld3r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 6 },
576 { .Opcode: AArch64::LD3Rv4s_POST, .Mnemonic: "ld3r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 12 },
577 { .Opcode: AArch64::LD3Rv2d_POST, .Mnemonic: "ld3r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
578 { .Opcode: AArch64::LD3Rv8b_POST, .Mnemonic: "ld3r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 3 },
579 { .Opcode: AArch64::LD3Rv4h_POST, .Mnemonic: "ld3r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 6 },
580 { .Opcode: AArch64::LD3Rv2s_POST, .Mnemonic: "ld3r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 12 },
581 { .Opcode: AArch64::LD3Rv1d_POST, .Mnemonic: "ld3r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
582 { .Opcode: AArch64::LD3Threev16b, .Mnemonic: "ld3", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
583 { .Opcode: AArch64::LD3Threev8h, .Mnemonic: "ld3", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
584 { .Opcode: AArch64::LD3Threev4s, .Mnemonic: "ld3", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
585 { .Opcode: AArch64::LD3Threev2d, .Mnemonic: "ld3", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
586 { .Opcode: AArch64::LD3Threev8b, .Mnemonic: "ld3", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
587 { .Opcode: AArch64::LD3Threev4h, .Mnemonic: "ld3", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
588 { .Opcode: AArch64::LD3Threev2s, .Mnemonic: "ld3", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
589 { .Opcode: AArch64::LD3Threev16b_POST, .Mnemonic: "ld3", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
590 { .Opcode: AArch64::LD3Threev8h_POST, .Mnemonic: "ld3", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
591 { .Opcode: AArch64::LD3Threev4s_POST, .Mnemonic: "ld3", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
592 { .Opcode: AArch64::LD3Threev2d_POST, .Mnemonic: "ld3", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
593 { .Opcode: AArch64::LD3Threev8b_POST, .Mnemonic: "ld3", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
594 { .Opcode: AArch64::LD3Threev4h_POST, .Mnemonic: "ld3", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
595 { .Opcode: AArch64::LD3Threev2s_POST, .Mnemonic: "ld3", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
596 { .Opcode: AArch64::LD4i8, .Mnemonic: "ld4", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
597 { .Opcode: AArch64::LD4i16, .Mnemonic: "ld4", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
598 { .Opcode: AArch64::LD4i32, .Mnemonic: "ld4", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
599 { .Opcode: AArch64::LD4i64, .Mnemonic: "ld4", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
600 { .Opcode: AArch64::LD4i8_POST, .Mnemonic: "ld4", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 4 },
601 { .Opcode: AArch64::LD4i16_POST, .Mnemonic: "ld4", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 8 },
602 { .Opcode: AArch64::LD4i32_POST, .Mnemonic: "ld4", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 16 },
603 { .Opcode: AArch64::LD4i64_POST, .Mnemonic: "ld4", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 32 },
604 { .Opcode: AArch64::LD4Rv16b, .Mnemonic: "ld4r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
605 { .Opcode: AArch64::LD4Rv8h, .Mnemonic: "ld4r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
606 { .Opcode: AArch64::LD4Rv4s, .Mnemonic: "ld4r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
607 { .Opcode: AArch64::LD4Rv2d, .Mnemonic: "ld4r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
608 { .Opcode: AArch64::LD4Rv8b, .Mnemonic: "ld4r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
609 { .Opcode: AArch64::LD4Rv4h, .Mnemonic: "ld4r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
610 { .Opcode: AArch64::LD4Rv2s, .Mnemonic: "ld4r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
611 { .Opcode: AArch64::LD4Rv1d, .Mnemonic: "ld4r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
612 { .Opcode: AArch64::LD4Rv16b_POST, .Mnemonic: "ld4r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
613 { .Opcode: AArch64::LD4Rv8h_POST, .Mnemonic: "ld4r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
614 { .Opcode: AArch64::LD4Rv4s_POST, .Mnemonic: "ld4r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
615 { .Opcode: AArch64::LD4Rv2d_POST, .Mnemonic: "ld4r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
616 { .Opcode: AArch64::LD4Rv8b_POST, .Mnemonic: "ld4r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
617 { .Opcode: AArch64::LD4Rv4h_POST, .Mnemonic: "ld4r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
618 { .Opcode: AArch64::LD4Rv2s_POST, .Mnemonic: "ld4r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
619 { .Opcode: AArch64::LD4Rv1d_POST, .Mnemonic: "ld4r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
620 { .Opcode: AArch64::LD4Fourv16b, .Mnemonic: "ld4", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
621 { .Opcode: AArch64::LD4Fourv8h, .Mnemonic: "ld4", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
622 { .Opcode: AArch64::LD4Fourv4s, .Mnemonic: "ld4", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
623 { .Opcode: AArch64::LD4Fourv2d, .Mnemonic: "ld4", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
624 { .Opcode: AArch64::LD4Fourv8b, .Mnemonic: "ld4", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
625 { .Opcode: AArch64::LD4Fourv4h, .Mnemonic: "ld4", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
626 { .Opcode: AArch64::LD4Fourv2s, .Mnemonic: "ld4", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
627 { .Opcode: AArch64::LD4Fourv16b_POST, .Mnemonic: "ld4", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
628 { .Opcode: AArch64::LD4Fourv8h_POST, .Mnemonic: "ld4", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
629 { .Opcode: AArch64::LD4Fourv4s_POST, .Mnemonic: "ld4", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
630 { .Opcode: AArch64::LD4Fourv2d_POST, .Mnemonic: "ld4", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
631 { .Opcode: AArch64::LD4Fourv8b_POST, .Mnemonic: "ld4", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
632 { .Opcode: AArch64::LD4Fourv4h_POST, .Mnemonic: "ld4", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
633 { .Opcode: AArch64::LD4Fourv2s_POST, .Mnemonic: "ld4", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
634 { .Opcode: AArch64::ST1i8, .Mnemonic: "st1", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
635 { .Opcode: AArch64::ST1i16, .Mnemonic: "st1", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
636 { .Opcode: AArch64::ST1i32, .Mnemonic: "st1", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
637 { .Opcode: AArch64::ST1i64, .Mnemonic: "st1", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
638 { .Opcode: AArch64::ST1i8_POST, .Mnemonic: "st1", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 1 },
639 { .Opcode: AArch64::ST1i16_POST, .Mnemonic: "st1", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 2 },
640 { .Opcode: AArch64::ST1i32_POST, .Mnemonic: "st1", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 4 },
641 { .Opcode: AArch64::ST1i64_POST, .Mnemonic: "st1", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 8 },
642 { .Opcode: AArch64::ST1Onev16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
643 { .Opcode: AArch64::ST1Onev8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
644 { .Opcode: AArch64::ST1Onev4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
645 { .Opcode: AArch64::ST1Onev2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
646 { .Opcode: AArch64::ST1Onev8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
647 { .Opcode: AArch64::ST1Onev4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
648 { .Opcode: AArch64::ST1Onev2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
649 { .Opcode: AArch64::ST1Onev1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
650 { .Opcode: AArch64::ST1Onev16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
651 { .Opcode: AArch64::ST1Onev8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
652 { .Opcode: AArch64::ST1Onev4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
653 { .Opcode: AArch64::ST1Onev2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
654 { .Opcode: AArch64::ST1Onev8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
655 { .Opcode: AArch64::ST1Onev4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
656 { .Opcode: AArch64::ST1Onev2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
657 { .Opcode: AArch64::ST1Onev1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
658 { .Opcode: AArch64::ST1Twov16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
659 { .Opcode: AArch64::ST1Twov8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
660 { .Opcode: AArch64::ST1Twov4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
661 { .Opcode: AArch64::ST1Twov2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
662 { .Opcode: AArch64::ST1Twov8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
663 { .Opcode: AArch64::ST1Twov4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
664 { .Opcode: AArch64::ST1Twov2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
665 { .Opcode: AArch64::ST1Twov1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
666 { .Opcode: AArch64::ST1Twov16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
667 { .Opcode: AArch64::ST1Twov8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
668 { .Opcode: AArch64::ST1Twov4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
669 { .Opcode: AArch64::ST1Twov2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
670 { .Opcode: AArch64::ST1Twov8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
671 { .Opcode: AArch64::ST1Twov4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
672 { .Opcode: AArch64::ST1Twov2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
673 { .Opcode: AArch64::ST1Twov1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
674 { .Opcode: AArch64::ST1Threev16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
675 { .Opcode: AArch64::ST1Threev8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
676 { .Opcode: AArch64::ST1Threev4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
677 { .Opcode: AArch64::ST1Threev2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
678 { .Opcode: AArch64::ST1Threev8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
679 { .Opcode: AArch64::ST1Threev4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
680 { .Opcode: AArch64::ST1Threev2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
681 { .Opcode: AArch64::ST1Threev1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
682 { .Opcode: AArch64::ST1Threev16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
683 { .Opcode: AArch64::ST1Threev8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
684 { .Opcode: AArch64::ST1Threev4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
685 { .Opcode: AArch64::ST1Threev2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
686 { .Opcode: AArch64::ST1Threev8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
687 { .Opcode: AArch64::ST1Threev4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
688 { .Opcode: AArch64::ST1Threev2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
689 { .Opcode: AArch64::ST1Threev1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
690 { .Opcode: AArch64::ST1Fourv16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
691 { .Opcode: AArch64::ST1Fourv8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
692 { .Opcode: AArch64::ST1Fourv4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
693 { .Opcode: AArch64::ST1Fourv2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
694 { .Opcode: AArch64::ST1Fourv8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
695 { .Opcode: AArch64::ST1Fourv4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
696 { .Opcode: AArch64::ST1Fourv2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
697 { .Opcode: AArch64::ST1Fourv1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
698 { .Opcode: AArch64::ST1Fourv16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
699 { .Opcode: AArch64::ST1Fourv8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
700 { .Opcode: AArch64::ST1Fourv4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
701 { .Opcode: AArch64::ST1Fourv2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
702 { .Opcode: AArch64::ST1Fourv8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
703 { .Opcode: AArch64::ST1Fourv4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
704 { .Opcode: AArch64::ST1Fourv2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
705 { .Opcode: AArch64::ST1Fourv1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
706 { .Opcode: AArch64::ST2i8, .Mnemonic: "st2", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
707 { .Opcode: AArch64::ST2i16, .Mnemonic: "st2", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
708 { .Opcode: AArch64::ST2i32, .Mnemonic: "st2", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
709 { .Opcode: AArch64::ST2i64, .Mnemonic: "st2", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
710 { .Opcode: AArch64::ST2i8_POST, .Mnemonic: "st2", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 2 },
711 { .Opcode: AArch64::ST2i16_POST, .Mnemonic: "st2", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 4 },
712 { .Opcode: AArch64::ST2i32_POST, .Mnemonic: "st2", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 8 },
713 { .Opcode: AArch64::ST2i64_POST, .Mnemonic: "st2", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 16 },
714 { .Opcode: AArch64::ST2Twov16b, .Mnemonic: "st2", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
715 { .Opcode: AArch64::ST2Twov8h, .Mnemonic: "st2", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
716 { .Opcode: AArch64::ST2Twov4s, .Mnemonic: "st2", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
717 { .Opcode: AArch64::ST2Twov2d, .Mnemonic: "st2", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
718 { .Opcode: AArch64::ST2Twov8b, .Mnemonic: "st2", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
719 { .Opcode: AArch64::ST2Twov4h, .Mnemonic: "st2", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
720 { .Opcode: AArch64::ST2Twov2s, .Mnemonic: "st2", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
721 { .Opcode: AArch64::ST2Twov16b_POST, .Mnemonic: "st2", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
722 { .Opcode: AArch64::ST2Twov8h_POST, .Mnemonic: "st2", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
723 { .Opcode: AArch64::ST2Twov4s_POST, .Mnemonic: "st2", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
724 { .Opcode: AArch64::ST2Twov2d_POST, .Mnemonic: "st2", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
725 { .Opcode: AArch64::ST2Twov8b_POST, .Mnemonic: "st2", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
726 { .Opcode: AArch64::ST2Twov4h_POST, .Mnemonic: "st2", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
727 { .Opcode: AArch64::ST2Twov2s_POST, .Mnemonic: "st2", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
728 { .Opcode: AArch64::ST3i8, .Mnemonic: "st3", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
729 { .Opcode: AArch64::ST3i16, .Mnemonic: "st3", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
730 { .Opcode: AArch64::ST3i32, .Mnemonic: "st3", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
731 { .Opcode: AArch64::ST3i64, .Mnemonic: "st3", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
732 { .Opcode: AArch64::ST3i8_POST, .Mnemonic: "st3", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 3 },
733 { .Opcode: AArch64::ST3i16_POST, .Mnemonic: "st3", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 6 },
734 { .Opcode: AArch64::ST3i32_POST, .Mnemonic: "st3", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 12 },
735 { .Opcode: AArch64::ST3i64_POST, .Mnemonic: "st3", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 24 },
736 { .Opcode: AArch64::ST3Threev16b, .Mnemonic: "st3", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
737 { .Opcode: AArch64::ST3Threev8h, .Mnemonic: "st3", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
738 { .Opcode: AArch64::ST3Threev4s, .Mnemonic: "st3", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
739 { .Opcode: AArch64::ST3Threev2d, .Mnemonic: "st3", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
740 { .Opcode: AArch64::ST3Threev8b, .Mnemonic: "st3", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
741 { .Opcode: AArch64::ST3Threev4h, .Mnemonic: "st3", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
742 { .Opcode: AArch64::ST3Threev2s, .Mnemonic: "st3", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
743 { .Opcode: AArch64::ST3Threev16b_POST, .Mnemonic: "st3", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
744 { .Opcode: AArch64::ST3Threev8h_POST, .Mnemonic: "st3", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
745 { .Opcode: AArch64::ST3Threev4s_POST, .Mnemonic: "st3", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
746 { .Opcode: AArch64::ST3Threev2d_POST, .Mnemonic: "st3", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
747 { .Opcode: AArch64::ST3Threev8b_POST, .Mnemonic: "st3", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
748 { .Opcode: AArch64::ST3Threev4h_POST, .Mnemonic: "st3", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
749 { .Opcode: AArch64::ST3Threev2s_POST, .Mnemonic: "st3", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
750 { .Opcode: AArch64::ST4i8, .Mnemonic: "st4", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
751 { .Opcode: AArch64::ST4i16, .Mnemonic: "st4", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
752 { .Opcode: AArch64::ST4i32, .Mnemonic: "st4", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
753 { .Opcode: AArch64::ST4i64, .Mnemonic: "st4", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
754 { .Opcode: AArch64::ST4i8_POST, .Mnemonic: "st4", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 4 },
755 { .Opcode: AArch64::ST4i16_POST, .Mnemonic: "st4", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 8 },
756 { .Opcode: AArch64::ST4i32_POST, .Mnemonic: "st4", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 16 },
757 { .Opcode: AArch64::ST4i64_POST, .Mnemonic: "st4", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 32 },
758 { .Opcode: AArch64::ST4Fourv16b, .Mnemonic: "st4", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
759 { .Opcode: AArch64::ST4Fourv8h, .Mnemonic: "st4", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
760 { .Opcode: AArch64::ST4Fourv4s, .Mnemonic: "st4", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
761 { .Opcode: AArch64::ST4Fourv2d, .Mnemonic: "st4", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
762 { .Opcode: AArch64::ST4Fourv8b, .Mnemonic: "st4", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
763 { .Opcode: AArch64::ST4Fourv4h, .Mnemonic: "st4", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
764 { .Opcode: AArch64::ST4Fourv2s, .Mnemonic: "st4", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
765 { .Opcode: AArch64::ST4Fourv16b_POST, .Mnemonic: "st4", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
766 { .Opcode: AArch64::ST4Fourv8h_POST, .Mnemonic: "st4", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
767 { .Opcode: AArch64::ST4Fourv4s_POST, .Mnemonic: "st4", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
768 { .Opcode: AArch64::ST4Fourv2d_POST, .Mnemonic: "st4", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
769 { .Opcode: AArch64::ST4Fourv8b_POST, .Mnemonic: "st4", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
770 { .Opcode: AArch64::ST4Fourv4h_POST, .Mnemonic: "st4", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
771 { .Opcode: AArch64::ST4Fourv2s_POST, .Mnemonic: "st4", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
772};
773
774static const LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
775 for (const auto &Info : LdStNInstInfo)
776 if (Info.Opcode == Opcode)
777 return &Info;
778
779 return nullptr;
780}
781
782void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
783 StringRef Annot,
784 const MCSubtargetInfo &STI,
785 raw_ostream &O) {
786 unsigned Opcode = MI->getOpcode();
787 StringRef Layout;
788
789 bool IsTbx;
790 if (isTblTbxInstruction(Opcode: MI->getOpcode(), Layout, IsTbx)) {
791 O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t';
792 printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg(), AltIdx: AArch64::vreg);
793 O << ", ";
794
795 unsigned ListOpNum = IsTbx ? 2 : 1;
796 printVectorList(MI, OpNum: ListOpNum, STI, O, LayoutSuffix: "");
797
798 O << ", ";
799 printRegName(OS&: O, Reg: MI->getOperand(i: ListOpNum + 1).getReg(), AltIdx: AArch64::vreg);
800 printAnnotation(OS&: O, Annot);
801 return;
802 }
803
804 if (const LdStNInstrDesc *LdStDesc = getLdStNInstrDesc(Opcode)) {
805 O << "\t" << LdStDesc->Mnemonic << LdStDesc->Layout << '\t';
806
807 // Now onto the operands: first a vector list with possible lane
808 // specifier. E.g. { v0 }[2]
809 int OpNum = LdStDesc->ListOperand;
810 printVectorList(MI, OpNum: OpNum++, STI, O, LayoutSuffix: "");
811
812 if (LdStDesc->HasLane)
813 O << '[' << MI->getOperand(i: OpNum++).getImm() << ']';
814
815 // Next the address: [xN]
816 MCRegister AddrReg = MI->getOperand(i: OpNum++).getReg();
817 O << ", [";
818 printRegName(OS&: O, Reg: AddrReg);
819 O << ']';
820
821 // Finally, there might be a post-indexed offset.
822 if (LdStDesc->NaturalOffset != 0) {
823 MCRegister Reg = MI->getOperand(i: OpNum++).getReg();
824 if (Reg != AArch64::XZR) {
825 O << ", ";
826 printRegName(OS&: O, Reg);
827 } else {
828 assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
829 O << ", ";
830 markup(OS&: O, M: Markup::Immediate) << "#" << LdStDesc->NaturalOffset;
831 }
832 }
833
834 printAnnotation(OS&: O, Annot);
835 return;
836 }
837
838 AArch64InstPrinter::printInst(MI, Address, Annot, STI, O);
839}
840
841StringRef AArch64AppleInstPrinter::getRegName(MCRegister Reg) const {
842 return getRegisterName(Reg);
843}
844
845bool AArch64InstPrinter::printRangePrefetchAlias(const MCInst *MI,
846 const MCSubtargetInfo &STI,
847 raw_ostream &O,
848 StringRef Annot) {
849 unsigned Opcode = MI->getOpcode();
850
851#ifndef NDEBUG
852 assert(((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) &&
853 "Invalid opcode for RPRFM alias!");
854#endif
855
856 unsigned PRFOp = MI->getOperand(i: 0).getImm();
857 unsigned Mask = 0x18; // 0b11000
858 if ((PRFOp & Mask) != Mask)
859 return false; // Rt != '11xxx', it's a PRFM instruction.
860
861 MCRegister Rm = MI->getOperand(i: 2).getReg();
862
863 // "Rm" must be a 64-bit GPR for RPRFM.
864 if (MRI.getRegClass(i: AArch64::GPR32RegClassID).contains(Reg: Rm))
865 Rm = MRI.getMatchingSuperReg(Reg: Rm, SubIdx: AArch64::sub_32,
866 RC: &MRI.getRegClass(i: AArch64::GPR64RegClassID));
867
868 unsigned SignExtend = MI->getOperand(i: 3).getImm(); // encoded in "option<2>".
869 unsigned Shift = MI->getOperand(i: 4).getImm(); // encoded in "S".
870
871 assert((SignExtend <= 1) && "sign extend should be a single bit!");
872 assert((Shift <= 1) && "Shift should be a single bit!");
873
874 unsigned Option0 = (Opcode == AArch64::PRFMroX) ? 1 : 0;
875
876 // encoded in "option<2>:option<0>:S:Rt<2:0>".
877 unsigned RPRFOp =
878 (SignExtend << 5) | (Option0 << 4) | (Shift << 3) | (PRFOp & 0x7);
879
880 O << "\trprfm ";
881 if (auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: RPRFOp))
882 O << RPRFM->Name << ", ";
883 else
884 O << "#" << formatImm(Value: RPRFOp) << ", ";
885 O << getRegisterName(Reg: Rm);
886 O << ", [";
887 printOperand(MI, OpNo: 1, STI, O); // "Rn".
888 O << "]";
889
890 printAnnotation(OS&: O, Annot);
891
892 return true;
893}
894
895bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
896 const MCSubtargetInfo &STI,
897 raw_ostream &O) {
898#ifndef NDEBUG
899 unsigned Opcode = MI->getOpcode();
900 assert(Opcode == AArch64::SYSxt && "Invalid opcode for SYS alias!");
901#endif
902
903 const MCOperand &Op1 = MI->getOperand(i: 0);
904 const MCOperand &Cn = MI->getOperand(i: 1);
905 const MCOperand &Cm = MI->getOperand(i: 2);
906 const MCOperand &Op2 = MI->getOperand(i: 3);
907
908 unsigned Op1Val = Op1.getImm();
909 unsigned CnVal = Cn.getImm();
910 unsigned CmVal = Cm.getImm();
911 unsigned Op2Val = Op2.getImm();
912
913 uint16_t Encoding = Op2Val;
914 Encoding |= CmVal << 3;
915 Encoding |= CnVal << 7;
916 Encoding |= Op1Val << 11;
917
918 bool NeedsReg = false;
919 bool OptionalReg = false;
920 std::string Ins;
921 std::string Name;
922
923 if (CnVal == 7) {
924 switch (CmVal) {
925 default: return false;
926 // MLBI aliases
927 case 0: {
928 const AArch64MLBI::MLBI *MLBI =
929 AArch64MLBI::lookupMLBIByEncoding(Encoding);
930 if (!MLBI || !MLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
931 return false;
932
933 NeedsReg = MLBI->NeedsReg;
934 Ins = "mlbi\t";
935 Name = std::string(MLBI->Name);
936 } break;
937 // Maybe IC, maybe Prediction Restriction
938 case 1:
939 switch (Op1Val) {
940 default: return false;
941 case 0: goto Search_IC;
942 case 3: goto Search_PRCTX;
943 }
944 // Prediction Restriction aliases
945 case 3: {
946 Search_PRCTX:
947 if (Op1Val != 3 || CnVal != 7 || CmVal != 3)
948 return false;
949
950 const auto Requires =
951 Op2Val == 6 ? AArch64::FeatureSPECRES2 : AArch64::FeaturePredRes;
952 if (!(STI.hasFeature(Feature: AArch64::FeatureAll) || STI.hasFeature(Feature: Requires)))
953 return false;
954
955 NeedsReg = true;
956 switch (Op2Val) {
957 default: return false;
958 case 4: Ins = "cfp\t"; break;
959 case 5: Ins = "dvp\t"; break;
960 case 6: Ins = "cosp\t"; break;
961 case 7: Ins = "cpp\t"; break;
962 }
963 Name = "RCTX";
964 }
965 break;
966 // IC aliases
967 case 5: {
968 Search_IC:
969 const AArch64IC::IC *IC = AArch64IC::lookupICByEncoding(Encoding);
970 if (!IC || !IC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
971 return false;
972
973 NeedsReg = IC->NeedsReg;
974 Ins = "ic\t";
975 Name = std::string(IC->Name);
976 }
977 break;
978 // DC aliases
979 case 4: case 6: case 10: case 11: case 12: case 13: case 14:
980 {
981 const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
982 if (!DC || !DC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
983 return false;
984
985 NeedsReg = true;
986 Ins = "dc\t";
987 Name = std::string(DC->Name);
988 }
989 break;
990 // AT aliases
991 case 8: case 9: {
992 const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
993 if (!AT || !AT->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
994 return false;
995
996 NeedsReg = true;
997 Ins = "at\t";
998 Name = std::string(AT->Name);
999 }
1000 break;
1001 // Overlaps with AT and DC
1002 case 15: {
1003 const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
1004 const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
1005 if (AT && AT->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
1006 NeedsReg = true;
1007 Ins = "at\t";
1008 Name = std::string(AT->Name);
1009 } else if (DC && DC->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
1010 NeedsReg = true;
1011 Ins = "dc\t";
1012 Name = std::string(DC->Name);
1013 } else {
1014 return false;
1015 }
1016 } break;
1017 }
1018 } else if (CnVal == 8 || CnVal == 9) {
1019 // TLBI aliases
1020 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
1021 if (!TLBI || !TLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1022 return false;
1023
1024 NeedsReg = TLBI->NeedsReg;
1025 if (STI.hasFeature(Feature: AArch64::FeatureAll) ||
1026 STI.hasFeature(Feature: AArch64::FeatureTLBID))
1027 OptionalReg = TLBI->OptionalReg;
1028 Ins = "tlbi\t";
1029 Name = std::string(TLBI->Name);
1030 } else if (CnVal == 12) {
1031 if (CmVal != 0) {
1032 // GIC aliases
1033 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByEncoding(Encoding);
1034 if (!GIC || !GIC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1035 return false;
1036
1037 NeedsReg = GIC->NeedsReg;
1038 Ins = "gic\t";
1039 Name = std::string(GIC->Name);
1040 } else {
1041 // GSB aliases
1042 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByEncoding(Encoding);
1043 if (!GSB || !GSB->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1044 return false;
1045
1046 NeedsReg = false;
1047 Ins = "gsb\t";
1048 Name = std::string(GSB->Name);
1049 }
1050 } else if (CnVal == 10) {
1051 // PLBI aliases
1052 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByEncoding(Encoding);
1053 if (!PLBI || !PLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1054 return false;
1055
1056 NeedsReg = PLBI->NeedsReg;
1057 if (STI.hasFeature(Feature: AArch64::FeatureAll) ||
1058 STI.hasFeature(Feature: AArch64::FeatureTLBID))
1059 OptionalReg = PLBI->OptionalReg;
1060 Ins = "plbi\t";
1061 Name = std::string(PLBI->Name);
1062 } else
1063 return false;
1064
1065 StringRef Reg = getRegisterName(Reg: MI->getOperand(i: 4).getReg());
1066 bool NotXZR = Reg != "xzr";
1067
1068 // If a mandatory or optional register is not specified in the TableGen
1069 // (i.e. no register operand should be present), and the register value
1070 // is not xzr/x31, then disassemble to a SYS alias instead.
1071 if (NotXZR && !NeedsReg && !OptionalReg)
1072 return false;
1073
1074 std::string Str = Ins + Name;
1075 llvm::transform(Range&: Str, d_first: Str.begin(), F: ::tolower);
1076
1077 O << '\t' << Str;
1078
1079 // For optional registers, don't print the value if it's xzr/x31
1080 // since this defaults to xzr/x31 if register is not specified.
1081 if (NeedsReg || (OptionalReg && NotXZR))
1082 O << ", " << Reg;
1083
1084 return true;
1085}
1086
// Try to print a SYSL instruction as a named alias (currently only the GICR
// reads, found under Cn == 12, Cm == 3). Returns true if an alias was
// printed; false lets the caller fall back to the generic SYSL syntax.
bool AArch64InstPrinter::printSyslAlias(const MCInst *MI,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
#ifndef NDEBUG
  unsigned Opcode = MI->getOpcode();
  assert(Opcode == AArch64::SYSLxt && "Invalid opcode for SYSL alias!");
#endif

  // Operand order: Xt result register, then op1, Cn, Cm, op2.
  StringRef Reg = getRegisterName(Reg: MI->getOperand(i: 0).getReg());
  const MCOperand &Op1 = MI->getOperand(i: 1);
  const MCOperand &Cn = MI->getOperand(i: 2);
  const MCOperand &Cm = MI->getOperand(i: 3);
  const MCOperand &Op2 = MI->getOperand(i: 4);

  unsigned Op1Val = Op1.getImm();
  unsigned CnVal = Cn.getImm();
  unsigned CmVal = Cm.getImm();
  unsigned Op2Val = Op2.getImm();

  // Pack op1:Cn:Cm:op2 into the key used by the generated alias tables.
  uint16_t Encoding = Op2Val;
  Encoding |= CmVal << 3;
  Encoding |= CnVal << 7;
  Encoding |= Op1Val << 11;

  std::string Ins;
  std::string Name;

  if (CnVal == 12) {
    if (CmVal == 3) {
      // GICR aliases
      const AArch64GICR::GICR *GICR =
          AArch64GICR::lookupGICRByEncoding(Encoding);
      if (!GICR || !GICR->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
        return false;

      Ins = "gicr";
      Name = std::string(GICR->Name);
    } else
      return false;
  } else
    return false;

  // Table names are stored upper case; assembly output is lower case.
  llvm::transform(Range&: Name, d_first: Name.begin(), F: ::tolower);

  O << '\t' << Ins << '\t' << Reg.str() << ", " << Name;

  return true;
}
1135
// Try to print a 128-bit SYSP system instruction as a named alias (the
// TLBIP family). Returns true if an alias was printed; false lets the
// caller emit the generic SYSP form.
bool AArch64InstPrinter::printSyspAlias(const MCInst *MI,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
#ifndef NDEBUG
  unsigned Opcode = MI->getOpcode();
  assert((Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR) &&
         "Invalid opcode for SYSP alias!");
#endif

  const MCOperand &Op1 = MI->getOperand(i: 0);
  const MCOperand &Cn = MI->getOperand(i: 1);
  const MCOperand &Cm = MI->getOperand(i: 2);
  const MCOperand &Op2 = MI->getOperand(i: 3);

  unsigned Op1Val = Op1.getImm();
  unsigned CnVal = Cn.getImm();
  unsigned CmVal = Cm.getImm();
  unsigned Op2Val = Op2.getImm();

  // Pack op1:Cn:Cm:op2 into the key used by the TLBIP alias table.
  uint16_t Encoding = Op2Val;
  Encoding |= CmVal << 3;
  Encoding |= CnVal << 7;
  Encoding |= Op1Val << 11;

  std::string Ins;
  std::string Name;

  if (CnVal == 8 || CnVal == 9) {
    // TLBIP aliases

    // Cn == 9 encodes the nXS variant (requires FeatureXS). The table keys
    // store the base (Cn == 8) encoding, so clear the distinguishing bit
    // before the lookup and tack "nXS" onto the name afterwards.
    if (CnVal == 9) {
      if (!STI.hasFeature(Feature: AArch64::FeatureXS))
        return false;
      Encoding &= ~(1 << 7);
    }

    const AArch64TLBIP::TLBIP *TLBIP =
        AArch64TLBIP::lookupTLBIPByEncoding(Encoding);
    if (!TLBIP || !TLBIP->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
      return false;

    Ins = "tlbip\t";
    Name = std::string(TLBIP->Name);
    if (CnVal == 9)
      Name += "nXS";
  } else
    return false;

  std::string Str = Ins + Name;
  llvm::transform(Range&: Str, d_first: Str.begin(), F: ::tolower);

  O << '\t' << Str;
  O << ", ";
  // Operand 4 is the data register pair; XZR selects the pair-less form.
  if (MI->getOperand(i: 4).getReg() == AArch64::XZR)
    printSyspXzrPair(MI, OpNum: 4, STI, O);
  else
    printGPRSeqPairsClassOperand<64>(MI, OpNum: 4, STI, O);

  return true;
}
1196
1197template <int EltSize>
1198void AArch64InstPrinter::printMatrix(const MCInst *MI, unsigned OpNum,
1199 const MCSubtargetInfo &STI,
1200 raw_ostream &O) {
1201 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1202 assert(RegOp.isReg() && "Unexpected operand type!");
1203
1204 printRegName(OS&: O, Reg: RegOp.getReg());
1205 switch (EltSize) {
1206 case 0:
1207 break;
1208 case 8:
1209 O << ".b";
1210 break;
1211 case 16:
1212 O << ".h";
1213 break;
1214 case 32:
1215 O << ".s";
1216 break;
1217 case 64:
1218 O << ".d";
1219 break;
1220 case 128:
1221 O << ".q";
1222 break;
1223 default:
1224 llvm_unreachable("Unsupported element size");
1225 }
1226}
1227
1228template <bool IsVertical>
1229void AArch64InstPrinter::printMatrixTileVector(const MCInst *MI, unsigned OpNum,
1230 const MCSubtargetInfo &STI,
1231 raw_ostream &O) {
1232 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1233 assert(RegOp.isReg() && "Unexpected operand type!");
1234 StringRef RegName = getRegisterName(Reg: RegOp.getReg());
1235
1236 // Insert the horizontal/vertical flag before the suffix.
1237 StringRef Base, Suffix;
1238 std::tie(args&: Base, args&: Suffix) = RegName.split(Separator: '.');
1239 O << Base << (IsVertical ? "v" : "h") << '.' << Suffix;
1240}
1241
1242void AArch64InstPrinter::printMatrixTile(const MCInst *MI, unsigned OpNum,
1243 const MCSubtargetInfo &STI,
1244 raw_ostream &O) {
1245 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1246 assert(RegOp.isReg() && "Unexpected operand type!");
1247 printRegName(OS&: O, Reg: RegOp.getReg());
1248}
1249
1250void AArch64InstPrinter::printSVCROp(const MCInst *MI, unsigned OpNum,
1251 const MCSubtargetInfo &STI,
1252 raw_ostream &O) {
1253 const MCOperand &MO = MI->getOperand(i: OpNum);
1254 assert(MO.isImm() && "Unexpected operand type!");
1255 unsigned svcrop = MO.getImm();
1256 const auto *SVCR = AArch64SVCR::lookupSVCRByEncoding(Encoding: svcrop);
1257 assert(SVCR && "Unexpected SVCR operand!");
1258 O << SVCR->Name;
1259}
1260
// Generic operand printer: registers by name, immediates via printImm
// (which applies the printer's immediate formatting), anything else must
// be an expression and is printed symbolically through the MCAsmInfo.
void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(i: OpNo);
  if (Op.isReg()) {
    printRegName(OS&: O, Reg: Op.getReg());
  } else if (Op.isImm()) {
    printImm(MI, OpNo, STI, O);
  } else {
    assert(Op.isExpr() && "unknown operand kind in printOperand");
    MAI.printExpr(O, *Op.getExpr());
  }
}
1274
1275void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
1276 const MCSubtargetInfo &STI,
1277 raw_ostream &O) {
1278 const MCOperand &Op = MI->getOperand(i: OpNo);
1279 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Op.getImm());
1280}
1281
1282void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
1283 const MCSubtargetInfo &STI,
1284 raw_ostream &O) {
1285 const MCOperand &Op = MI->getOperand(i: OpNo);
1286 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%#llx", Vals: Op.getImm());
1287}
1288
// Print a signed immediate, truncating the operand to Size bits (8 or 16)
// via a signed cast so the displayed value is sign-extended from the low
// bits; any other Size prints the immediate unchanged.
template<int Size>
void AArch64InstPrinter::printSImm(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(i: OpNo);
  if (Size == 8)
    markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: (signed char)Op.getImm());
  else if (Size == 16)
    markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: (signed short)Op.getImm());
  else
    markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Op.getImm());
}
1301
// Print a post-increment amount. The operand is a register: XZR encodes
// the fixed-immediate form (print the caller-supplied Imm), any other
// register is the register post-increment form.
void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
                                             unsigned Imm, raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(i: OpNo);
  if (Op.isReg()) {
    MCRegister Reg = Op.getReg();
    if (Reg == AArch64::XZR)
      markup(OS&: O, M: Markup::Immediate) << "#" << Imm;
    else
      printRegName(OS&: O, Reg);
  } else
    llvm_unreachable("unknown operand kind in printPostIncOperand64");
}
1314
1315void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
1316 const MCSubtargetInfo &STI,
1317 raw_ostream &O) {
1318 const MCOperand &Op = MI->getOperand(i: OpNo);
1319 assert(Op.isReg() && "Non-register vreg operand!");
1320 printRegName(OS&: O, Reg: Op.getReg(), AltIdx: AArch64::vreg);
1321}
1322
1323void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
1324 const MCSubtargetInfo &STI,
1325 raw_ostream &O) {
1326 const MCOperand &Op = MI->getOperand(i: OpNo);
1327 assert(Op.isImm() && "System instruction C[nm] operands must be immediates!");
1328 O << "c" << Op.getImm();
1329}
1330
// Print an ADD/SUB immediate together with its optional "lsl #12" shift
// (stored in the next operand). The fully shifted value is emitted as an
// assembly comment. Expression operands (relocations/fixups) are printed
// symbolically with the shift appended.
void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  const MCOperand &MO = MI->getOperand(i: OpNum);
  if (MO.isImm()) {
    unsigned Val = (MO.getImm() & 0xfff); // add/sub immediates are 12 bits
    assert(Val == MO.getImm() && "Add/sub immediate out of range!");
    unsigned Shift =
        AArch64_AM::getShiftValue(Imm: MI->getOperand(i: OpNum + 1).getImm());
    markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Val);
    if (Shift != 0) {
      printShifter(MI, OpNum: OpNum + 1, STI, O);
      if (CommentStream)
        *CommentStream << '=' << formatImm(Value: Val << Shift) << '\n';
    }
  } else {
    assert(MO.isExpr() && "Unexpected operand type!");
    MAI.printExpr(O, *MO.getExpr());
    printShifter(MI, OpNum: OpNum + 1, STI, O);
  }
}
1352
// Print a logical-instruction bitmask immediate in hex, decoding the
// encoded form for a register of width 8 * sizeof(T) bits.
template <typename T>
void AArch64InstPrinter::printLogicalImm(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  uint64_t Val = MI->getOperand(i: OpNum).getImm();
  // Keep the markup region open across both stream insertions below.
  WithMarkup M = markup(OS&: O, M: Markup::Immediate);
  O << "#0x";
  O.write_hex(N: AArch64_AM::decodeLogicalImmediate(val: Val, regSize: 8 * sizeof(T)));
}
1362
// Print a shift operand as ", <type> #<amount>". LSL #0 is the canonical
// no-shift encoding and prints nothing at all.
void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  unsigned Val = MI->getOperand(i: OpNum).getImm();
  // LSL #0 should not be printed.
  if (AArch64_AM::getShiftType(Imm: Val) == AArch64_AM::LSL &&
      AArch64_AM::getShiftValue(Imm: Val) == 0)
    return;
  O << ", " << AArch64_AM::getShiftExtendName(ST: AArch64_AM::getShiftType(Imm: Val))
    << " ";
  markup(OS&: O, M: Markup::Immediate) << "#" << AArch64_AM::getShiftValue(Imm: Val);
}
1375
1376void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
1377 const MCSubtargetInfo &STI,
1378 raw_ostream &O) {
1379 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1380 printShifter(MI, OpNum: OpNum + 1, STI, O);
1381}
1382
1383void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
1384 const MCSubtargetInfo &STI,
1385 raw_ostream &O) {
1386 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1387 printArithExtend(MI, OpNum: OpNum + 1, STI, O);
1388}
1389
// Print an arithmetic extend operand (", uxtw #2" etc.). When the
// instruction's destination or first source is [W]SP, UXTW/UXTX acts as
// the LSL form: it prints as "lsl", or nothing when the shift is zero.
void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  unsigned Val = MI->getOperand(i: OpNum).getImm();
  AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getArithExtendType(Imm: Val);
  unsigned ShiftVal = AArch64_AM::getArithShiftValue(Imm: Val);

  // If the destination or first source register operand is [W]SP, print
  // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
  // all.
  if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
    MCRegister Dest = MI->getOperand(i: 0).getReg();
    MCRegister Src1 = MI->getOperand(i: 1).getReg();
    if ( ((Dest == AArch64::SP || Src1 == AArch64::SP) &&
          ExtType == AArch64_AM::UXTX) ||
         ((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
          ExtType == AArch64_AM::UXTW) ) {
      if (ShiftVal != 0) {
        O << ", lsl ";
        markup(OS&: O, M: Markup::Immediate) << "#" << ShiftVal;
      }
      return;
    }
  }
  O << ", " << AArch64_AM::getShiftExtendName(ST: ExtType);
  if (ShiftVal != 0) {
    O << " ";
    markup(OS&: O, M: Markup::Immediate) << "#" << ShiftVal;
  }
}
1420
// Shared printer for memory-operand extends. Emits "lsl" (the unsigned
// x-source case), or "[su]xt[wx]", followed — when the extend is shifted,
// or always for LSL — by the shift amount log2(Width / 8) implied by the
// access size.
void AArch64InstPrinter::printMemExtendImpl(bool SignExtend, bool DoShift,
                                            unsigned Width, char SrcRegKind,
                                            raw_ostream &O) {
  // sxtw, sxtx, uxtw or lsl (== uxtx)
  bool IsLSL = !SignExtend && SrcRegKind == 'x';
  if (IsLSL)
    O << "lsl";
  else
    O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;

  if (DoShift || IsLSL) {
    O << " ";
    markup(OS&: O, M: Markup::Immediate) << "#" << Log2_32(Value: Width / 8);
  }
}
1436
1437void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
1438 raw_ostream &O, char SrcRegKind,
1439 unsigned Width) {
1440 bool SignExtend = MI->getOperand(i: OpNum).getImm();
1441 bool DoShift = MI->getOperand(i: OpNum + 1).getImm();
1442 printMemExtendImpl(SignExtend, DoShift, Width, SrcRegKind, O);
1443}
1444
// Print a register followed by its shift/extend. A 's'/'d' Suffix adds an
// SVE element-size qualifier to the register; ExtWidth determines the
// implicit shift amount and SrcRegKind ('w' or 'x') the extend source.
template <bool SignExtend, int ExtWidth, char SrcRegKind, char Suffix>
void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
                                                 unsigned OpNum,
                                                 const MCSubtargetInfo &STI,
                                                 raw_ostream &O) {
  printOperand(MI, OpNo: OpNum, STI, O);
  if (Suffix == 's' || Suffix == 'd')
    O << '.' << Suffix;
  else
    assert(Suffix == 0 && "Unsupported suffix size");

  // An 8-bit element width implies no shift; 'w' sources always print the
  // extend even when unshifted and unsigned.
  bool DoShift = ExtWidth != 8;
  if (SignExtend || DoShift || SrcRegKind == 'w') {
    O << ", ";
    printMemExtendImpl(SignExtend, DoShift, Width: ExtWidth, SrcRegKind, O);
  }
}
1462
// Print a predicate-as-counter register as "pn<N>", optionally followed by
// the element-size suffix selected by the template parameter (0 = none).
template <int EltSize>
void AArch64InstPrinter::printPredicateAsCounter(const MCInst *MI,
                                                 unsigned OpNum,
                                                 const MCSubtargetInfo &STI,
                                                 raw_ostream &O) {
  MCRegister Reg = MI->getOperand(i: OpNum).getReg();
  if (Reg < AArch64::PN0 || Reg > AArch64::PN15)
    llvm_unreachable("Unsupported predicate-as-counter register");
  O << "pn" << Reg - AArch64::PN0;

  switch (EltSize) {
  case 0:
    break;
  case 8:
    O << ".b";
    break;
  case 16:
    O << ".h";
    break;
  case 32:
    O << ".s";
    break;
  case 64:
    O << ".d";
    break;
  default:
    llvm_unreachable("Unsupported element size");
  }
}
1492
1493void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
1494 const MCSubtargetInfo &STI,
1495 raw_ostream &O) {
1496 AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(i: OpNum).getImm();
1497 O << AArch64CC::getCondCodeName(Code: CC);
1498}
1499
1500void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
1501 const MCSubtargetInfo &STI,
1502 raw_ostream &O) {
1503 AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(i: OpNum).getImm();
1504 O << AArch64CC::getCondCodeName(Code: AArch64CC::getInvertedCondCode(Code: CC));
1505}
1506
1507void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
1508 const MCSubtargetInfo &STI,
1509 raw_ostream &O) {
1510 O << '[';
1511 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1512 O << ']';
1513}
1514
1515template <int Scale>
1516void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
1517 const MCSubtargetInfo &STI,
1518 raw_ostream &O) {
1519 markup(OS&: O, M: Markup::Immediate)
1520 << '#' << formatImm(Value: Scale * MI->getOperand(i: OpNum).getImm());
1521}
1522
1523template <int Scale, int Offset>
1524void AArch64InstPrinter::printImmRangeScale(const MCInst *MI, unsigned OpNum,
1525 const MCSubtargetInfo &STI,
1526 raw_ostream &O) {
1527 unsigned FirstImm = Scale * MI->getOperand(i: OpNum).getImm();
1528 O << formatImm(Value: FirstImm);
1529 O << ":" << formatImm(Value: FirstImm + Offset);
1530}
1531
// Print an unsigned scaled offset. The stored immediate is in units of the
// access size, so it is multiplied by Scale for display; fixup expressions
// are printed symbolically instead.
void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
                                           unsigned Scale, raw_ostream &O) {
  const MCOperand MO = MI->getOperand(i: OpNum);
  if (MO.isImm()) {
    markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: MO.getImm() * Scale);
  } else {
    assert(MO.isExpr() && "Unexpected operand type!");
    MAI.printExpr(O, *MO.getExpr());
  }
}
1542
// Print a base-plus-scaled-offset addressing mode used by writeback forms:
// "[Xn, #off]". The offset operand may instead be a fixup expression,
// which is printed symbolically.
void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
                                          unsigned Scale, raw_ostream &O) {
  const MCOperand MO1 = MI->getOperand(i: OpNum + 1);
  O << '[';
  printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
  if (MO1.isImm()) {
    O << ", ";
    markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: MO1.getImm() * Scale);
  } else {
    assert(MO1.isExpr() && "Unexpected operand type!");
    O << ", ";
    MAI.printExpr(O, *MO1.getExpr());
  }
  O << ']';
}
1558
1559void AArch64InstPrinter::printRPRFMOperand(const MCInst *MI, unsigned OpNum,
1560 const MCSubtargetInfo &STI,
1561 raw_ostream &O) {
1562 unsigned prfop = MI->getOperand(i: OpNum).getImm();
1563 if (auto PRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: prfop)) {
1564 O << PRFM->Name;
1565 return;
1566 }
1567
1568 O << '#' << formatImm(Value: prfop);
1569}
1570
// Print a prefetch operation operand by name when the encoding is in the
// relevant table (SVE and base prefetches use different tables, and the
// base table is additionally feature-gated); unknown encodings fall back
// to the raw immediate.
template <bool IsSVEPrefetch>
void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  unsigned prfop = MI->getOperand(i: OpNum).getImm();
  if (IsSVEPrefetch) {
    if (auto PRFM = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: prfop)) {
      O << PRFM->Name;
      return;
    }
  } else {
    auto PRFM = AArch64PRFM::lookupPRFMByEncoding(Encoding: prfop);
    if (PRFM && PRFM->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
      O << PRFM->Name;
      return;
    }
  }

  markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: prfop);
}
1591
1592void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
1593 const MCSubtargetInfo &STI,
1594 raw_ostream &O) {
1595 unsigned psbhintop = MI->getOperand(i: OpNum).getImm();
1596 auto PSB = AArch64PSBHint::lookupPSBByEncoding(Encoding: psbhintop);
1597 if (PSB)
1598 O << PSB->Name;
1599 else
1600 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: psbhintop);
1601}
1602
1603void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
1604 const MCSubtargetInfo &STI,
1605 raw_ostream &O) {
1606 unsigned btihintop = MI->getOperand(i: OpNum).getImm() ^ 32;
1607 auto BTI = AArch64BTIHint::lookupBTIByEncoding(Encoding: btihintop);
1608 if (BTI)
1609 O << BTI->Name;
1610 else
1611 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: btihintop);
1612}
1613
1614void AArch64InstPrinter::printCMHPriorityHintOp(const MCInst *MI,
1615 unsigned OpNum,
1616 const MCSubtargetInfo &STI,
1617 raw_ostream &O) {
1618 unsigned priorityhint_op = MI->getOperand(i: OpNum).getImm();
1619 auto PHint =
1620 AArch64CMHPriorityHint::lookupCMHPriorityHintByEncoding(Encoding: priorityhint_op);
1621 if (PHint)
1622 O << PHint->Name;
1623 else
1624 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: priorityhint_op);
1625}
1626
1627void AArch64InstPrinter::printTIndexHintOp(const MCInst *MI, unsigned OpNum,
1628 const MCSubtargetInfo &STI,
1629 raw_ostream &O) {
1630 unsigned tindexhintop = MI->getOperand(i: OpNum).getImm();
1631 auto TIndex = AArch64TIndexHint::lookupTIndexByEncoding(Encoding: tindexhintop);
1632 if (TIndex)
1633 O << TIndex->Name;
1634 else
1635 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: tindexhintop);
1636}
1637
// Print a floating-point immediate. The operand is either a raw double
// payload (isDFPImm) or an 8-bit encoded FP immediate to be expanded.
// NOTE(review): the double payload is narrowed to float here; this appears
// to rely on all permitted immediates being exactly representable as
// float — confirm before reusing for wider payloads.
void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  const MCOperand &MO = MI->getOperand(i: OpNum);
  float FPImm = MO.isDFPImm() ? bit_cast<double>(from: MO.getDFPImm())
                              : AArch64_AM::getFPImmFloat(Imm: MO.getImm());

  // 8 decimal places are enough to perfectly represent permitted floats.
  markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%.8f", Vals: FPImm);
}
1648
// Return the register Stride places after Reg within its vector register
// bank (Q, Z, or P), wrapping from the last register of a bank back to the
// first, as register lists are permitted to do. The explicit switch avoids
// assuming anything about the numeric layout of the generated register enum.
static MCRegister getNextVectorRegister(MCRegister Reg, unsigned Stride = 1) {
  while (Stride--) {
    switch (Reg.id()) {
    default:
      llvm_unreachable("Vector register expected!");
    case AArch64::Q0: Reg = AArch64::Q1; break;
    case AArch64::Q1: Reg = AArch64::Q2; break;
    case AArch64::Q2: Reg = AArch64::Q3; break;
    case AArch64::Q3: Reg = AArch64::Q4; break;
    case AArch64::Q4: Reg = AArch64::Q5; break;
    case AArch64::Q5: Reg = AArch64::Q6; break;
    case AArch64::Q6: Reg = AArch64::Q7; break;
    case AArch64::Q7: Reg = AArch64::Q8; break;
    case AArch64::Q8: Reg = AArch64::Q9; break;
    case AArch64::Q9: Reg = AArch64::Q10; break;
    case AArch64::Q10: Reg = AArch64::Q11; break;
    case AArch64::Q11: Reg = AArch64::Q12; break;
    case AArch64::Q12: Reg = AArch64::Q13; break;
    case AArch64::Q13: Reg = AArch64::Q14; break;
    case AArch64::Q14: Reg = AArch64::Q15; break;
    case AArch64::Q15: Reg = AArch64::Q16; break;
    case AArch64::Q16: Reg = AArch64::Q17; break;
    case AArch64::Q17: Reg = AArch64::Q18; break;
    case AArch64::Q18: Reg = AArch64::Q19; break;
    case AArch64::Q19: Reg = AArch64::Q20; break;
    case AArch64::Q20: Reg = AArch64::Q21; break;
    case AArch64::Q21: Reg = AArch64::Q22; break;
    case AArch64::Q22: Reg = AArch64::Q23; break;
    case AArch64::Q23: Reg = AArch64::Q24; break;
    case AArch64::Q24: Reg = AArch64::Q25; break;
    case AArch64::Q25: Reg = AArch64::Q26; break;
    case AArch64::Q26: Reg = AArch64::Q27; break;
    case AArch64::Q27: Reg = AArch64::Q28; break;
    case AArch64::Q28: Reg = AArch64::Q29; break;
    case AArch64::Q29: Reg = AArch64::Q30; break;
    case AArch64::Q30: Reg = AArch64::Q31; break;
    // Vector lists can wrap around.
    case AArch64::Q31:
      Reg = AArch64::Q0;
      break;
    case AArch64::Z0: Reg = AArch64::Z1; break;
    case AArch64::Z1: Reg = AArch64::Z2; break;
    case AArch64::Z2: Reg = AArch64::Z3; break;
    case AArch64::Z3: Reg = AArch64::Z4; break;
    case AArch64::Z4: Reg = AArch64::Z5; break;
    case AArch64::Z5: Reg = AArch64::Z6; break;
    case AArch64::Z6: Reg = AArch64::Z7; break;
    case AArch64::Z7: Reg = AArch64::Z8; break;
    case AArch64::Z8: Reg = AArch64::Z9; break;
    case AArch64::Z9: Reg = AArch64::Z10; break;
    case AArch64::Z10: Reg = AArch64::Z11; break;
    case AArch64::Z11: Reg = AArch64::Z12; break;
    case AArch64::Z12: Reg = AArch64::Z13; break;
    case AArch64::Z13: Reg = AArch64::Z14; break;
    case AArch64::Z14: Reg = AArch64::Z15; break;
    case AArch64::Z15: Reg = AArch64::Z16; break;
    case AArch64::Z16: Reg = AArch64::Z17; break;
    case AArch64::Z17: Reg = AArch64::Z18; break;
    case AArch64::Z18: Reg = AArch64::Z19; break;
    case AArch64::Z19: Reg = AArch64::Z20; break;
    case AArch64::Z20: Reg = AArch64::Z21; break;
    case AArch64::Z21: Reg = AArch64::Z22; break;
    case AArch64::Z22: Reg = AArch64::Z23; break;
    case AArch64::Z23: Reg = AArch64::Z24; break;
    case AArch64::Z24: Reg = AArch64::Z25; break;
    case AArch64::Z25: Reg = AArch64::Z26; break;
    case AArch64::Z26: Reg = AArch64::Z27; break;
    case AArch64::Z27: Reg = AArch64::Z28; break;
    case AArch64::Z28: Reg = AArch64::Z29; break;
    case AArch64::Z29: Reg = AArch64::Z30; break;
    case AArch64::Z30: Reg = AArch64::Z31; break;
    // Vector lists can wrap around.
    case AArch64::Z31:
      Reg = AArch64::Z0;
      break;
    case AArch64::P0: Reg = AArch64::P1; break;
    case AArch64::P1: Reg = AArch64::P2; break;
    case AArch64::P2: Reg = AArch64::P3; break;
    case AArch64::P3: Reg = AArch64::P4; break;
    case AArch64::P4: Reg = AArch64::P5; break;
    case AArch64::P5: Reg = AArch64::P6; break;
    case AArch64::P6: Reg = AArch64::P7; break;
    case AArch64::P7: Reg = AArch64::P8; break;
    case AArch64::P8: Reg = AArch64::P9; break;
    case AArch64::P9: Reg = AArch64::P10; break;
    case AArch64::P10: Reg = AArch64::P11; break;
    case AArch64::P11: Reg = AArch64::P12; break;
    case AArch64::P12: Reg = AArch64::P13; break;
    case AArch64::P13: Reg = AArch64::P14; break;
    case AArch64::P14: Reg = AArch64::P15; break;
    // Vector lists can wrap around.
    case AArch64::P15: Reg = AArch64::P0; break;
    }
  }
  return Reg;
}
1745
// Print a 32- or 64-bit GPR sequential (even/odd) register pair as
// "even, odd", recovering the two halves via subregister indices.
template<unsigned size>
void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
                                                      unsigned OpNum,
                                                      const MCSubtargetInfo &STI,
                                                      raw_ostream &O) {
  static_assert(size == 64 || size == 32,
                "Template parameter must be either 32 or 64");
  MCRegister Reg = MI->getOperand(i: OpNum).getReg();

  unsigned Sube = (size == 32) ? AArch64::sube32 : AArch64::sube64;
  unsigned Subo = (size == 32) ? AArch64::subo32 : AArch64::subo64;

  MCRegister Even = MRI.getSubReg(Reg, Idx: Sube);
  MCRegister Odd = MRI.getSubReg(Reg, Idx: Subo);
  printRegName(OS&: O, Reg: Even);
  O << ", ";
  printRegName(OS&: O, Reg: Odd);
}
1764
1765void AArch64InstPrinter::printMatrixTileList(const MCInst *MI, unsigned OpNum,
1766 const MCSubtargetInfo &STI,
1767 raw_ostream &O) {
1768 unsigned MaxRegs = 8;
1769 unsigned RegMask = MI->getOperand(i: OpNum).getImm();
1770
1771 unsigned NumRegs = 0;
1772 for (unsigned I = 0; I < MaxRegs; ++I)
1773 if ((RegMask & (1 << I)) != 0)
1774 ++NumRegs;
1775
1776 O << "{";
1777 unsigned Printed = 0;
1778 for (unsigned I = 0; I < MaxRegs; ++I) {
1779 unsigned Reg = RegMask & (1 << I);
1780 if (Reg == 0)
1781 continue;
1782 printRegName(OS&: O, Reg: AArch64::ZAD0 + I);
1783 if (Printed + 1 != NumRegs)
1784 O << ", ";
1785 ++Printed;
1786 }
1787 O << "}";
1788}
1789
// Print a vector register list "{ ... }". The operand is a tuple register;
// its register class determines the list length and (for SME strided
// tuples) the stride, the first element is recovered via subregister
// indices, and SVE Z/P lists of consecutive registers print as a range
// ("z0.s - z3.s") while everything else prints each element.
void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O,
                                         StringRef LayoutSuffix) {
  MCRegister Reg = MI->getOperand(i: OpNum).getReg();

  O << "{ ";

  // Work out how many registers there are in the list (if there is an actual
  // list).
  unsigned NumRegs = 1;
  if (MRI.getRegClass(i: AArch64::DDRegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::ZPR2RegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::QQRegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::PPR2RegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::ZPR2StridedRegClassID).contains(Reg))
    NumRegs = 2;
  else if (MRI.getRegClass(i: AArch64::DDDRegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::ZPR3RegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::QQQRegClassID).contains(Reg))
    NumRegs = 3;
  else if (MRI.getRegClass(i: AArch64::DDDDRegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::ZPR4RegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::QQQQRegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::ZPR4StridedRegClassID).contains(Reg))
    NumRegs = 4;

  // SME strided tuples step by 8 (2-register form) or 4 (4-register form).
  unsigned Stride = 1;
  if (MRI.getRegClass(i: AArch64::ZPR2StridedRegClassID).contains(Reg))
    Stride = 8;
  else if (MRI.getRegClass(i: AArch64::ZPR4StridedRegClassID).contains(Reg))
    Stride = 4;

  // Now forget about the list and find out what the first register is.
  if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::dsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::qsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::zsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::psub0))
    Reg = FirstReg;

  // If it's a D-reg, we need to promote it to the equivalent Q-reg before
  // printing (otherwise getRegisterName fails).
  if (MRI.getRegClass(i: AArch64::FPR64RegClassID).contains(Reg)) {
    const MCRegisterClass &FPR128RC =
        MRI.getRegClass(i: AArch64::FPR128RegClassID);
    Reg = MRI.getMatchingSuperReg(Reg, SubIdx: AArch64::dsub, RC: &FPR128RC);
  }

  if ((MRI.getRegClass(i: AArch64::ZPRRegClassID).contains(Reg) ||
       MRI.getRegClass(i: AArch64::PPRRegClassID).contains(Reg)) &&
      NumRegs > 1 && Stride == 1 &&
      // Do not print the range when the last register is lower than the first.
      // Because it is a wrap-around register.
      Reg < getNextVectorRegister(Reg, Stride: NumRegs - 1)) {
    printRegName(OS&: O, Reg);
    O << LayoutSuffix;
    if (NumRegs > 1) {
      // Set of two sve registers should be separated by ','
      StringRef split_char = NumRegs == 2 ? ", " : " - ";
      O << split_char;
      printRegName(OS&: O, Reg: (getNextVectorRegister(Reg, Stride: NumRegs - 1)));
      O << LayoutSuffix;
    }
  } else {
    for (unsigned i = 0; i < NumRegs;
         ++i, Reg = getNextVectorRegister(Reg, Stride)) {
      // wrap-around sve register
      if (MRI.getRegClass(i: AArch64::ZPRRegClassID).contains(Reg) ||
          MRI.getRegClass(i: AArch64::PPRRegClassID).contains(Reg))
        printRegName(OS&: O, Reg);
      else
        printRegName(OS&: O, Reg, AltIdx: AArch64::vreg);
      O << LayoutSuffix;
      if (i + 1 != NumRegs)
        O << ", ";
    }
  }
  O << " }";
}
1872
1873void
1874AArch64InstPrinter::printImplicitlyTypedVectorList(const MCInst *MI,
1875 unsigned OpNum,
1876 const MCSubtargetInfo &STI,
1877 raw_ostream &O) {
1878 printVectorList(MI, OpNum, STI, O, LayoutSuffix: "");
1879}
1880
1881template <unsigned NumLanes, char LaneKind>
1882void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
1883 const MCSubtargetInfo &STI,
1884 raw_ostream &O) {
1885 if (LaneKind == 0) {
1886 printVectorList(MI, OpNum, STI, O, LayoutSuffix: "");
1887 return;
1888 }
1889 std::string Suffix(".");
1890 if (NumLanes)
1891 Suffix += itostr(X: NumLanes) + LaneKind;
1892 else
1893 Suffix += LaneKind;
1894
1895 printVectorList(MI, OpNum, STI, O, LayoutSuffix: Suffix);
1896}
1897
1898template <unsigned Scale>
1899void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
1900 const MCSubtargetInfo &STI,
1901 raw_ostream &O) {
1902 O << "[" << Scale * MI->getOperand(i: OpNum).getImm() << "]";
1903}
1904
1905template <unsigned Scale>
1906void AArch64InstPrinter::printMatrixIndex(const MCInst *MI, unsigned OpNum,
1907 const MCSubtargetInfo &STI,
1908 raw_ostream &O) {
1909 O << Scale * MI->getOperand(i: OpNum).getImm();
1910}
1911
1912void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, uint64_t Address,
1913 unsigned OpNum,
1914 const MCSubtargetInfo &STI,
1915 raw_ostream &O) {
1916 // Do not print the numeric target address when symbolizing.
1917 if (SymbolizeOperands)
1918 return;
1919
1920 const MCOperand &Op = MI->getOperand(i: OpNum);
1921
1922 // If the label has already been resolved to an immediate offset (say, when
1923 // we're running the disassembler), just print the immediate.
1924 if (Op.isImm()) {
1925 int64_t Offset = Op.getImm() * 4;
1926 if (PrintBranchImmAsAddress)
1927 markup(OS&: O, M: Markup::Target) << formatHex(Value: Address + Offset);
1928 else
1929 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Offset);
1930 return;
1931 }
1932
1933 // If the branch target is simply an address then print it in hex.
1934 const MCConstantExpr *BranchTarget =
1935 dyn_cast<MCConstantExpr>(Val: MI->getOperand(i: OpNum).getExpr());
1936 int64_t TargetAddress;
1937 if (BranchTarget && BranchTarget->evaluateAsAbsolute(Res&: TargetAddress)) {
1938 markup(OS&: O, M: Markup::Target) << formatHex(Value: (uint64_t)TargetAddress);
1939 } else {
1940 // Otherwise, just print the expression.
1941 MAI.printExpr(O, *MI->getOperand(i: OpNum).getExpr());
1942 }
1943}
1944
1945void AArch64InstPrinter::printAdrAdrpLabel(const MCInst *MI, uint64_t Address,
1946 unsigned OpNum,
1947 const MCSubtargetInfo &STI,
1948 raw_ostream &O) {
1949 // Do not print the numeric target address when symbolizing.
1950 // However, do print for ADRP, as this is typically used together with an ADD
1951 // or an immediate-offset ldr/str and the label is likely at the wrong point.
1952 if (SymbolizeOperands && MI->getOpcode() != AArch64::ADRP)
1953 return;
1954
1955 const MCOperand &Op = MI->getOperand(i: OpNum);
1956
1957 // If the label has already been resolved to an immediate offset (say, when
1958 // we're running the disassembler), just print the immediate.
1959 if (Op.isImm()) {
1960 int64_t Offset = Op.getImm();
1961 if (MI->getOpcode() == AArch64::ADRP) {
1962 Offset = Offset * 4096;
1963 Address = Address & -4096;
1964 }
1965 WithMarkup M = markup(OS&: O, M: Markup::Immediate);
1966 if (PrintBranchImmAsAddress)
1967 markup(OS&: O, M: Markup::Target) << formatHex(Value: Address + Offset);
1968 else
1969 markup(OS&: O, M: Markup::Immediate) << "#" << Offset;
1970 return;
1971 }
1972
1973 // Otherwise, just print the expression.
1974 MAI.printExpr(O, *MI->getOperand(i: OpNum).getExpr());
1975}
1976
1977void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
1978 const MCSubtargetInfo &STI,
1979 raw_ostream &O) {
1980 unsigned Val = MI->getOperand(i: OpNo).getImm();
1981 unsigned Opcode = MI->getOpcode();
1982
1983 StringRef Name;
1984 if (Opcode == AArch64::ISB) {
1985 auto ISB = AArch64ISB::lookupISBByEncoding(Encoding: Val);
1986 Name = ISB ? ISB->Name : "";
1987 } else if (Opcode == AArch64::TSB) {
1988 auto TSB = AArch64TSB::lookupTSBByEncoding(Encoding: Val);
1989 Name = TSB ? TSB->Name : "";
1990 } else {
1991 auto DB = AArch64DB::lookupDBByEncoding(Encoding: Val);
1992 Name = DB ? DB->Name : "";
1993 }
1994 if (!Name.empty())
1995 O << Name;
1996 else
1997 markup(OS&: O, M: Markup::Immediate) << "#" << Val;
1998}
1999
2000void AArch64InstPrinter::printBarriernXSOption(const MCInst *MI, unsigned OpNo,
2001 const MCSubtargetInfo &STI,
2002 raw_ostream &O) {
2003 unsigned Val = MI->getOperand(i: OpNo).getImm();
2004 assert(MI->getOpcode() == AArch64::DSBnXS);
2005
2006 StringRef Name;
2007 auto DB = AArch64DBnXS::lookupDBnXSByEncoding(Encoding: Val);
2008 Name = DB ? DB->Name : "";
2009
2010 if (!Name.empty())
2011 O << Name;
2012 else
2013 markup(OS&: O, M: Markup::Immediate) << "#" << Val;
2014}
2015
2016static bool isValidSysReg(const AArch64SysReg::SysReg &Reg, bool Read,
2017 const MCSubtargetInfo &STI) {
2018 return (Read ? Reg.Readable : Reg.Writeable) &&
2019 Reg.haveFeatures(ActiveFeatures: STI.getFeatureBits());
2020}
2021
// Looks up a system register by encoding. Some system
// registers share the same encoding between different architectures;
// to work around this, tablegen returns a range of registers with the same
// encoding. We need to check each register in the range to see if it is valid.
2026static const AArch64SysReg::SysReg *lookupSysReg(unsigned Val, bool Read,
2027 const MCSubtargetInfo &STI) {
2028 auto Range = AArch64SysReg::lookupSysRegByEncoding(Encoding: Val);
2029 for (auto &Reg : Range) {
2030 if (isValidSysReg(Reg, Read, STI))
2031 return &Reg;
2032 }
2033
2034 return nullptr;
2035}
2036
2037void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
2038 const MCSubtargetInfo &STI,
2039 raw_ostream &O) {
2040 unsigned Val = MI->getOperand(i: OpNo).getImm();
2041
2042 // Horrible hack for the one register that has identical encodings but
2043 // different names in MSR and MRS. Because of this, one of MRS and MSR is
2044 // going to get the wrong entry
2045 if (Val == AArch64SysReg::DBGDTRRX_EL0) {
2046 O << "DBGDTRRX_EL0";
2047 return;
2048 }
2049
2050 // Horrible hack for two different registers having the same encoding.
2051 if (Val == AArch64SysReg::TRCEXTINSELR) {
2052 O << "TRCEXTINSELR";
2053 return;
2054 }
2055
2056 const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, Read: true /*Read*/, STI);
2057
2058 if (Reg)
2059 O << Reg->Name;
2060 else
2061 O << AArch64SysReg::genericRegisterString(Bits: Val);
2062}
2063
2064void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
2065 const MCSubtargetInfo &STI,
2066 raw_ostream &O) {
2067 unsigned Val = MI->getOperand(i: OpNo).getImm();
2068
2069 // Horrible hack for the one register that has identical encodings but
2070 // different names in MSR and MRS. Because of this, one of MRS and MSR is
2071 // going to get the wrong entry
2072 if (Val == AArch64SysReg::DBGDTRTX_EL0) {
2073 O << "DBGDTRTX_EL0";
2074 return;
2075 }
2076
2077 // Horrible hack for two different registers having the same encoding.
2078 if (Val == AArch64SysReg::TRCEXTINSELR) {
2079 O << "TRCEXTINSELR";
2080 return;
2081 }
2082
2083 const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, Read: false /*Read*/, STI);
2084
2085 if (Reg)
2086 O << Reg->Name;
2087 else
2088 O << AArch64SysReg::genericRegisterString(Bits: Val);
2089}
2090
2091void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
2092 const MCSubtargetInfo &STI,
2093 raw_ostream &O) {
2094 unsigned Val = MI->getOperand(i: OpNo).getImm();
2095
2096 auto PStateImm15 = AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: Val);
2097 auto PStateImm1 = AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: Val);
2098 if (PStateImm15 && PStateImm15->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
2099 O << PStateImm15->Name;
2100 else if (PStateImm1 && PStateImm1->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
2101 O << PStateImm1->Name;
2102 else
2103 O << "#" << formatImm(Value: Val);
2104}
2105
2106void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
2107 const MCSubtargetInfo &STI,
2108 raw_ostream &O) {
2109 unsigned RawVal = MI->getOperand(i: OpNo).getImm();
2110 uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(Imm: RawVal);
2111 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%#016llx", Vals: Val);
2112}
2113
2114template<int64_t Angle, int64_t Remainder>
2115void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
2116 const MCSubtargetInfo &STI,
2117 raw_ostream &O) {
2118 unsigned Val = MI->getOperand(i: OpNo).getImm();
2119 markup(OS&: O, M: Markup::Immediate) << "#" << (Val * Angle) + Remainder;
2120}
2121
2122void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
2123 const MCSubtargetInfo &STI,
2124 raw_ostream &O) {
2125 unsigned Val = MI->getOperand(i: OpNum).getImm();
2126 if (auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByEncoding(Encoding: Val))
2127 O << Pat->Name;
2128 else
2129 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Val);
2130}
2131
2132void AArch64InstPrinter::printSVEVecLenSpecifier(const MCInst *MI,
2133 unsigned OpNum,
2134 const MCSubtargetInfo &STI,
2135 raw_ostream &O) {
2136 unsigned Val = MI->getOperand(i: OpNum).getImm();
2137 // Pattern has only 1 bit
2138 if (Val > 1)
2139 llvm_unreachable("Invalid vector length specifier");
2140 if (auto Pat =
2141 AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByEncoding(Encoding: Val))
2142 O << Pat->Name;
2143 else
2144 llvm_unreachable("Invalid vector length specifier");
2145}
2146
2147template <char suffix>
2148void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
2149 const MCSubtargetInfo &STI,
2150 raw_ostream &O) {
2151 switch (suffix) {
2152 case 0:
2153 case 'b':
2154 case 'h':
2155 case 's':
2156 case 'd':
2157 case 'q':
2158 break;
2159 default: llvm_unreachable("Invalid kind specifier.");
2160 }
2161
2162 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2163 printRegName(OS&: O, Reg);
2164 if (suffix != 0)
2165 O << '.' << suffix;
2166}
2167
2168template <typename T>
2169void AArch64InstPrinter::printImmSVE(T Value, raw_ostream &O) {
2170 std::make_unsigned_t<T> HexValue = Value;
2171
2172 if (getPrintImmHex())
2173 markup(OS&: O, M: Markup::Immediate) << '#' << formatHex(Value: (uint64_t)HexValue);
2174 else
2175 markup(OS&: O, M: Markup::Immediate) << '#' << formatDec(Value);
2176
2177 if (CommentStream) {
2178 // Do the opposite to that used for instruction operands.
2179 if (getPrintImmHex())
2180 *CommentStream << '=' << formatDec(Value: HexValue) << '\n';
2181 else
2182 *CommentStream << '=' << formatHex(Value: (uint64_t)Value) << '\n';
2183 }
2184}
2185
2186template <typename T>
2187void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
2188 const MCSubtargetInfo &STI,
2189 raw_ostream &O) {
2190 unsigned UnscaledVal = MI->getOperand(i: OpNum).getImm();
2191 unsigned Shift = MI->getOperand(i: OpNum + 1).getImm();
2192 assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&
2193 "Unexpected shift type!");
2194
2195 // #0 lsl #8 is never pretty printed
2196 if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Imm: Shift) != 0)) {
2197 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: UnscaledVal);
2198 printShifter(MI, OpNum: OpNum + 1, STI, O);
2199 return;
2200 }
2201
2202 T Val;
2203 if (std::is_signed<T>())
2204 Val = (int8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Imm: Shift));
2205 else
2206 Val = (uint8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Imm: Shift));
2207
2208 printImmSVE(Val, O);
2209}
2210
2211template <typename T>
2212void AArch64InstPrinter::printSVELogicalImm(const MCInst *MI, unsigned OpNum,
2213 const MCSubtargetInfo &STI,
2214 raw_ostream &O) {
2215 typedef std::make_signed_t<T> SignedT;
2216 typedef std::make_unsigned_t<T> UnsignedT;
2217
2218 uint64_t Val = MI->getOperand(i: OpNum).getImm();
2219 UnsignedT PrintVal = AArch64_AM::decodeLogicalImmediate(val: Val, regSize: 64);
2220
2221 // Prefer the default format for 16bit values, hex otherwise.
2222 if ((int16_t)PrintVal == (SignedT)PrintVal)
2223 printImmSVE((T)PrintVal, O);
2224 else if ((uint16_t)PrintVal == PrintVal)
2225 printImmSVE(PrintVal, O);
2226 else
2227 markup(OS&: O, M: Markup::Immediate) << '#' << formatHex(Value: (uint64_t)PrintVal);
2228}
2229
2230template <int Width>
2231void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
2232 const MCSubtargetInfo &STI,
2233 raw_ostream &O) {
2234 unsigned Base;
2235 switch (Width) {
2236 case 8: Base = AArch64::B0; break;
2237 case 16: Base = AArch64::H0; break;
2238 case 32: Base = AArch64::S0; break;
2239 case 64: Base = AArch64::D0; break;
2240 case 128: Base = AArch64::Q0; break;
2241 default:
2242 llvm_unreachable("Unsupported width");
2243 }
2244 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2245 printRegName(OS&: O, Reg: Reg - AArch64::Z0 + Base);
2246}
2247
2248template <unsigned ImmIs0, unsigned ImmIs1>
2249void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
2250 const MCSubtargetInfo &STI,
2251 raw_ostream &O) {
2252 auto *Imm0Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmIs0);
2253 auto *Imm1Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmIs1);
2254 unsigned Val = MI->getOperand(i: OpNum).getImm();
2255 markup(OS&: O, M: Markup::Immediate)
2256 << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
2257}
2258
2259void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
2260 const MCSubtargetInfo &STI,
2261 raw_ostream &O) {
2262 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2263 printRegName(OS&: O, Reg: getWRegFromXReg(Reg));
2264}
2265
2266void AArch64InstPrinter::printGPR64x8(const MCInst *MI, unsigned OpNum,
2267 const MCSubtargetInfo &STI,
2268 raw_ostream &O) {
2269 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2270 printRegName(OS&: O, Reg: MRI.getSubReg(Reg, Idx: AArch64::x8sub_0));
2271}
2272
2273void AArch64InstPrinter::printSyspXzrPair(const MCInst *MI, unsigned OpNum,
2274 const MCSubtargetInfo &STI,
2275 raw_ostream &O) {
2276 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2277 assert(Reg == AArch64::XZR &&
2278 "MC representation of SyspXzrPair should be XZR");
2279 O << getRegisterName(Reg) << ", " << getRegisterName(Reg);
2280}
2281
2282void AArch64InstPrinter::printPHintOp(const MCInst *MI, unsigned OpNum,
2283 const MCSubtargetInfo &STI,
2284 raw_ostream &O) {
2285 unsigned Op = MI->getOperand(i: OpNum).getImm();
2286 auto PH = AArch64PHint::lookupPHintByEncoding(Op);
2287 if (PH)
2288 O << PH->Name;
2289 else
2290 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Op);
2291}
2292