1//==-- AArch64InstPrinter.cpp - Convert AArch64 MCInst to assembly syntax --==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This class prints an AArch64 MCInst to a .s file.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64InstPrinter.h"
14#include "MCTargetDesc/AArch64AddressingModes.h"
15#include "Utils/AArch64BaseInfo.h"
16#include "llvm/ADT/StringExtras.h"
17#include "llvm/ADT/StringRef.h"
18#include "llvm/MC/MCAsmInfo.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCRegisterInfo.h"
22#include "llvm/MC/MCSubtargetInfo.h"
23#include "llvm/Support/Casting.h"
24#include "llvm/Support/ErrorHandling.h"
25#include "llvm/Support/Format.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/raw_ostream.h"
28#include <cassert>
29#include <cstdint>
30#include <string>
31
32using namespace llvm;
33
34#define DEBUG_TYPE "asm-printer"
35
36#define GET_INSTRUCTION_NAME
37#define PRINT_ALIAS_INSTR
38#include "AArch64GenAsmWriter.inc"
39#define GET_INSTRUCTION_NAME
40#define PRINT_ALIAS_INSTR
41#include "AArch64GenAsmWriter1.inc"
42
43AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
44 const MCInstrInfo &MII,
45 const MCRegisterInfo &MRI)
46 : MCInstPrinter(MAI, MII, MRI) {}
47
48AArch64AppleInstPrinter::AArch64AppleInstPrinter(const MCAsmInfo &MAI,
49 const MCInstrInfo &MII,
50 const MCRegisterInfo &MRI)
51 : AArch64InstPrinter(MAI, MII, MRI) {}
52
53bool AArch64InstPrinter::applyTargetSpecificCLOption(StringRef Opt) {
54 if (Opt == "no-aliases") {
55 PrintAliases = false;
56 return true;
57 }
58 return false;
59}
60
61void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) {
62 markup(OS, M: Markup::Register) << getRegisterName(Reg);
63}
64
65void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg,
66 unsigned AltIdx) {
67 markup(OS, M: Markup::Register) << getRegisterName(Reg, AltIdx);
68}
69
70StringRef AArch64InstPrinter::getRegName(MCRegister Reg) const {
71 return getRegisterName(Reg);
72}
73
74void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
75 StringRef Annot, const MCSubtargetInfo &STI,
76 raw_ostream &O) {
77 // Check for special encodings and print the canonical alias instead.
78
79 unsigned Opcode = MI->getOpcode();
80
81 if (Opcode == AArch64::SYSxt)
82 if (printSysAlias(MI, STI, O)) {
83 printAnnotation(OS&: O, Annot);
84 return;
85 }
86
87 if (Opcode == AArch64::SYSLxt)
88 if (printSyslAlias(MI, STI, O)) {
89 printAnnotation(OS&: O, Annot);
90 return;
91 }
92
93 if (Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR)
94 if (printSyspAlias(MI, STI, O)) {
95 printAnnotation(OS&: O, Annot);
96 return;
97 }
98
99 // RPRFM overlaps PRFM (reg), so try to print it as RPRFM here.
100 if ((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) {
101 if (printRangePrefetchAlias(MI, STI, O, Annot))
102 return;
103 }
104
105 // SBFM/UBFM should print to a nicer aliased form if possible.
106 if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
107 Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
108 const MCOperand &Op0 = MI->getOperand(i: 0);
109 const MCOperand &Op1 = MI->getOperand(i: 1);
110 const MCOperand &Op2 = MI->getOperand(i: 2);
111 const MCOperand &Op3 = MI->getOperand(i: 3);
112
113 bool IsSigned = (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri);
114 bool Is64Bit = (Opcode == AArch64::SBFMXri || Opcode == AArch64::UBFMXri);
115 if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
116 const char *AsmMnemonic = nullptr;
117
118 switch (Op3.getImm()) {
119 default:
120 break;
121 case 7:
122 if (IsSigned)
123 AsmMnemonic = "sxtb";
124 else if (!Is64Bit)
125 AsmMnemonic = "uxtb";
126 break;
127 case 15:
128 if (IsSigned)
129 AsmMnemonic = "sxth";
130 else if (!Is64Bit)
131 AsmMnemonic = "uxth";
132 break;
133 case 31:
134 // *xtw is only valid for signed 64-bit operations.
135 if (Is64Bit && IsSigned)
136 AsmMnemonic = "sxtw";
137 break;
138 }
139
140 if (AsmMnemonic) {
141 O << '\t' << AsmMnemonic << '\t';
142 printRegName(OS&: O, Reg: Op0.getReg());
143 O << ", ";
144 printRegName(OS&: O, Reg: getWRegFromXReg(Reg: Op1.getReg()));
145 printAnnotation(OS&: O, Annot);
146 return;
147 }
148 }
149
150 // All immediate shifts are aliases, implemented using the Bitfield
151 // instruction. In all cases the immediate shift amount shift must be in
152 // the range 0 to (reg.size -1).
153 if (Op2.isImm() && Op3.isImm()) {
154 const char *AsmMnemonic = nullptr;
155 int shift = 0;
156 int64_t immr = Op2.getImm();
157 int64_t imms = Op3.getImm();
158 if (Opcode == AArch64::UBFMWri && imms != 0x1F && ((imms + 1) == immr)) {
159 AsmMnemonic = "lsl";
160 shift = 31 - imms;
161 } else if (Opcode == AArch64::UBFMXri && imms != 0x3f &&
162 ((imms + 1 == immr))) {
163 AsmMnemonic = "lsl";
164 shift = 63 - imms;
165 } else if (Opcode == AArch64::UBFMWri && imms == 0x1f) {
166 AsmMnemonic = "lsr";
167 shift = immr;
168 } else if (Opcode == AArch64::UBFMXri && imms == 0x3f) {
169 AsmMnemonic = "lsr";
170 shift = immr;
171 } else if (Opcode == AArch64::SBFMWri && imms == 0x1f) {
172 AsmMnemonic = "asr";
173 shift = immr;
174 } else if (Opcode == AArch64::SBFMXri && imms == 0x3f) {
175 AsmMnemonic = "asr";
176 shift = immr;
177 }
178 if (AsmMnemonic) {
179 O << '\t' << AsmMnemonic << '\t';
180 printRegName(OS&: O, Reg: Op0.getReg());
181 O << ", ";
182 printRegName(OS&: O, Reg: Op1.getReg());
183 O << ", ";
184 markup(OS&: O, M: Markup::Immediate) << "#" << shift;
185 printAnnotation(OS&: O, Annot);
186 return;
187 }
188 }
189
190 // SBFIZ/UBFIZ aliases
191 if (Op2.getImm() > Op3.getImm()) {
192 O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t';
193 printRegName(OS&: O, Reg: Op0.getReg());
194 O << ", ";
195 printRegName(OS&: O, Reg: Op1.getReg());
196 O << ", ";
197 markup(OS&: O, M: Markup::Immediate) << "#" << (Is64Bit ? 64 : 32) - Op2.getImm();
198 O << ", ";
199 markup(OS&: O, M: Markup::Immediate) << "#" << Op3.getImm() + 1;
200 printAnnotation(OS&: O, Annot);
201 return;
202 }
203
204 // Otherwise SBFX/UBFX is the preferred form
205 O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t';
206 printRegName(OS&: O, Reg: Op0.getReg());
207 O << ", ";
208 printRegName(OS&: O, Reg: Op1.getReg());
209 O << ", ";
210 markup(OS&: O, M: Markup::Immediate) << "#" << Op2.getImm();
211 O << ", ";
212 markup(OS&: O, M: Markup::Immediate) << "#" << Op3.getImm() - Op2.getImm() + 1;
213 printAnnotation(OS&: O, Annot);
214 return;
215 }
216
217 if (Opcode == AArch64::BFMXri || Opcode == AArch64::BFMWri) {
218 const MCOperand &Op0 = MI->getOperand(i: 0); // Op1 == Op0
219 const MCOperand &Op2 = MI->getOperand(i: 2);
220 int ImmR = MI->getOperand(i: 3).getImm();
221 int ImmS = MI->getOperand(i: 4).getImm();
222
223 if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
224 (ImmR == 0 || ImmS < ImmR) && STI.hasFeature(Feature: AArch64::HasV8_2aOps)) {
225 // BFC takes precedence over its entire range, slightly differently to BFI.
226 int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
227 int LSB = (BitWidth - ImmR) % BitWidth;
228 int Width = ImmS + 1;
229
230 O << "\tbfc\t";
231 printRegName(OS&: O, Reg: Op0.getReg());
232 O << ", ";
233 markup(OS&: O, M: Markup::Immediate) << "#" << LSB;
234 O << ", ";
235 markup(OS&: O, M: Markup::Immediate) << "#" << Width;
236 printAnnotation(OS&: O, Annot);
237 return;
238 } else if (ImmS < ImmR) {
239 // BFI alias
240 int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
241 int LSB = (BitWidth - ImmR) % BitWidth;
242 int Width = ImmS + 1;
243
244 O << "\tbfi\t";
245 printRegName(OS&: O, Reg: Op0.getReg());
246 O << ", ";
247 printRegName(OS&: O, Reg: Op2.getReg());
248 O << ", ";
249 markup(OS&: O, M: Markup::Immediate) << "#" << LSB;
250 O << ", ";
251 markup(OS&: O, M: Markup::Immediate) << "#" << Width;
252 printAnnotation(OS&: O, Annot);
253 return;
254 }
255
256 int LSB = ImmR;
257 int Width = ImmS - ImmR + 1;
258 // Otherwise BFXIL the preferred form
259 O << "\tbfxil\t";
260 printRegName(OS&: O, Reg: Op0.getReg());
261 O << ", ";
262 printRegName(OS&: O, Reg: Op2.getReg());
263 O << ", ";
264 markup(OS&: O, M: Markup::Immediate) << "#" << LSB;
265 O << ", ";
266 markup(OS&: O, M: Markup::Immediate) << "#" << Width;
267 printAnnotation(OS&: O, Annot);
268 return;
269 }
270
271 // Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
272 // (e.g. :gottprel_g1: is always going to be "lsl #16") so it should not be
273 // printed.
274 if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi ||
275 Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
276 MI->getOperand(i: 1).isExpr()) {
277 if (Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi)
278 O << "\tmovz\t";
279 else
280 O << "\tmovn\t";
281
282 printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg());
283 O << ", ";
284 {
285 WithMarkup M = markup(OS&: O, M: Markup::Immediate);
286 O << "#";
287 MAI.printExpr(O, *MI->getOperand(i: 1).getExpr());
288 }
289 return;
290 }
291
292 if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
293 MI->getOperand(i: 2).isExpr()) {
294 O << "\tmovk\t";
295 printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg());
296 O << ", ";
297 {
298 WithMarkup M = markup(OS&: O, M: Markup::Immediate);
299 O << "#";
300 MAI.printExpr(O, *MI->getOperand(i: 2).getExpr());
301 }
302 return;
303 }
304
305 auto PrintMovImm = [&](uint64_t Value, int RegWidth) {
306 int64_t SExtVal = SignExtend64(X: Value, B: RegWidth);
307 O << "\tmov\t";
308 printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg());
309 O << ", ";
310 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: SExtVal);
311 if (CommentStream) {
312 // Do the opposite to that used for instruction operands.
313 if (getPrintImmHex())
314 *CommentStream << '=' << formatDec(Value: SExtVal) << '\n';
315 else {
316 uint64_t Mask = maskTrailingOnes<uint64_t>(N: RegWidth);
317 *CommentStream << '=' << formatHex(Value: SExtVal & Mask) << '\n';
318 }
319 }
320 };
321
322 // MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
323 // domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
324 // MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction
325 // that can represent the move is the MOV alias, and the rest get printed
326 // normally.
327 if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) &&
328 MI->getOperand(i: 1).isImm() && MI->getOperand(i: 2).isImm()) {
329 int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32;
330 int Shift = MI->getOperand(i: 2).getImm();
331 uint64_t Value = (uint64_t)MI->getOperand(i: 1).getImm() << Shift;
332
333 if (AArch64_AM::isMOVZMovAlias(Value, Shift,
334 RegWidth: Opcode == AArch64::MOVZXi ? 64 : 32)) {
335 PrintMovImm(Value, RegWidth);
336 return;
337 }
338 }
339
340 if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
341 MI->getOperand(i: 1).isImm() && MI->getOperand(i: 2).isImm()) {
342 int RegWidth = Opcode == AArch64::MOVNXi ? 64 : 32;
343 int Shift = MI->getOperand(i: 2).getImm();
344 uint64_t Value = ~((uint64_t)MI->getOperand(i: 1).getImm() << Shift);
345 if (RegWidth == 32)
346 Value = Value & 0xffffffff;
347
348 if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
349 PrintMovImm(Value, RegWidth);
350 return;
351 }
352 }
353
354 if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) &&
355 (MI->getOperand(i: 1).getReg() == AArch64::XZR ||
356 MI->getOperand(i: 1).getReg() == AArch64::WZR) &&
357 MI->getOperand(i: 2).isImm()) {
358 int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32;
359 uint64_t Value = AArch64_AM::decodeLogicalImmediate(
360 val: MI->getOperand(i: 2).getImm(), regSize: RegWidth);
361 if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
362 PrintMovImm(Value, RegWidth);
363 return;
364 }
365 }
366
367 if (Opcode == AArch64::SPACE) {
368 O << '\t' << MAI.getCommentString() << " SPACE "
369 << MI->getOperand(i: 1).getImm();
370 printAnnotation(OS&: O, Annot);
371 return;
372 }
373
374 if (!PrintAliases || !printAliasInstr(MI, Address, STI, OS&: O))
375 printInstruction(MI, Address, STI, O);
376
377 printAnnotation(OS&: O, Annot);
378
379 if (atomicBarrierDroppedOnZero(Opcode) &&
380 (MI->getOperand(i: 0).getReg() == AArch64::XZR ||
381 MI->getOperand(i: 0).getReg() == AArch64::WZR)) {
382 printAnnotation(OS&: O, Annot: "acquire semantics dropped since destination is zero");
383 }
384}
385
386static bool isTblTbxInstruction(unsigned Opcode, StringRef &Layout,
387 bool &IsTbx) {
388 switch (Opcode) {
389 case AArch64::TBXv8i8One:
390 case AArch64::TBXv8i8Two:
391 case AArch64::TBXv8i8Three:
392 case AArch64::TBXv8i8Four:
393 IsTbx = true;
394 Layout = ".8b";
395 return true;
396 case AArch64::TBLv8i8One:
397 case AArch64::TBLv8i8Two:
398 case AArch64::TBLv8i8Three:
399 case AArch64::TBLv8i8Four:
400 IsTbx = false;
401 Layout = ".8b";
402 return true;
403 case AArch64::TBXv16i8One:
404 case AArch64::TBXv16i8Two:
405 case AArch64::TBXv16i8Three:
406 case AArch64::TBXv16i8Four:
407 IsTbx = true;
408 Layout = ".16b";
409 return true;
410 case AArch64::TBLv16i8One:
411 case AArch64::TBLv16i8Two:
412 case AArch64::TBLv16i8Three:
413 case AArch64::TBLv16i8Four:
414 IsTbx = false;
415 Layout = ".16b";
416 return true;
417 default:
418 return false;
419 }
420}
421
/// Table entry describing how to print one NEON structured load/store
/// opcode in its "ld1 { v0.16b }, [x0]"-style aliased form.
struct LdStNInstrDesc {
  unsigned Opcode;      // Target opcode this entry applies to.
  const char *Mnemonic; // Alias mnemonic, e.g. "ld1", "ld2r".
  const char *Layout;   // Vector arrangement suffix, e.g. ".16b" or ".d".
  int ListOperand;      // MCInst operand index of the vector register list.
  bool HasLane;         // True if a lane-index operand follows the list.
  int NaturalOffset;    // Post-increment (bytes) of the post-indexed form;
                        // 0 for non-writeback variants.
};
430
431static const LdStNInstrDesc LdStNInstInfo[] = {
432 { .Opcode: AArch64::LD1i8, .Mnemonic: "ld1", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
433 { .Opcode: AArch64::LD1i16, .Mnemonic: "ld1", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
434 { .Opcode: AArch64::LD1i32, .Mnemonic: "ld1", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
435 { .Opcode: AArch64::LD1i64, .Mnemonic: "ld1", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
436 { .Opcode: AArch64::LD1i8_POST, .Mnemonic: "ld1", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 1 },
437 { .Opcode: AArch64::LD1i16_POST, .Mnemonic: "ld1", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 2 },
438 { .Opcode: AArch64::LD1i32_POST, .Mnemonic: "ld1", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 4 },
439 { .Opcode: AArch64::LD1i64_POST, .Mnemonic: "ld1", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 8 },
440 { .Opcode: AArch64::LD1Rv16b, .Mnemonic: "ld1r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
441 { .Opcode: AArch64::LD1Rv8h, .Mnemonic: "ld1r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
442 { .Opcode: AArch64::LD1Rv4s, .Mnemonic: "ld1r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
443 { .Opcode: AArch64::LD1Rv2d, .Mnemonic: "ld1r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
444 { .Opcode: AArch64::LD1Rv8b, .Mnemonic: "ld1r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
445 { .Opcode: AArch64::LD1Rv4h, .Mnemonic: "ld1r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
446 { .Opcode: AArch64::LD1Rv2s, .Mnemonic: "ld1r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
447 { .Opcode: AArch64::LD1Rv1d, .Mnemonic: "ld1r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
448 { .Opcode: AArch64::LD1Rv16b_POST, .Mnemonic: "ld1r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 1 },
449 { .Opcode: AArch64::LD1Rv8h_POST, .Mnemonic: "ld1r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
450 { .Opcode: AArch64::LD1Rv4s_POST, .Mnemonic: "ld1r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
451 { .Opcode: AArch64::LD1Rv2d_POST, .Mnemonic: "ld1r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
452 { .Opcode: AArch64::LD1Rv8b_POST, .Mnemonic: "ld1r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 1 },
453 { .Opcode: AArch64::LD1Rv4h_POST, .Mnemonic: "ld1r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
454 { .Opcode: AArch64::LD1Rv2s_POST, .Mnemonic: "ld1r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
455 { .Opcode: AArch64::LD1Rv1d_POST, .Mnemonic: "ld1r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
456 { .Opcode: AArch64::LD1Onev16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
457 { .Opcode: AArch64::LD1Onev8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
458 { .Opcode: AArch64::LD1Onev4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
459 { .Opcode: AArch64::LD1Onev2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
460 { .Opcode: AArch64::LD1Onev8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
461 { .Opcode: AArch64::LD1Onev4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
462 { .Opcode: AArch64::LD1Onev2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
463 { .Opcode: AArch64::LD1Onev1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
464 { .Opcode: AArch64::LD1Onev16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
465 { .Opcode: AArch64::LD1Onev8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
466 { .Opcode: AArch64::LD1Onev4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
467 { .Opcode: AArch64::LD1Onev2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
468 { .Opcode: AArch64::LD1Onev8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
469 { .Opcode: AArch64::LD1Onev4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
470 { .Opcode: AArch64::LD1Onev2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
471 { .Opcode: AArch64::LD1Onev1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
472 { .Opcode: AArch64::LD1Twov16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
473 { .Opcode: AArch64::LD1Twov8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
474 { .Opcode: AArch64::LD1Twov4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
475 { .Opcode: AArch64::LD1Twov2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
476 { .Opcode: AArch64::LD1Twov8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
477 { .Opcode: AArch64::LD1Twov4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
478 { .Opcode: AArch64::LD1Twov2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
479 { .Opcode: AArch64::LD1Twov1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
480 { .Opcode: AArch64::LD1Twov16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
481 { .Opcode: AArch64::LD1Twov8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
482 { .Opcode: AArch64::LD1Twov4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
483 { .Opcode: AArch64::LD1Twov2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
484 { .Opcode: AArch64::LD1Twov8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
485 { .Opcode: AArch64::LD1Twov4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
486 { .Opcode: AArch64::LD1Twov2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
487 { .Opcode: AArch64::LD1Twov1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
488 { .Opcode: AArch64::LD1Threev16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
489 { .Opcode: AArch64::LD1Threev8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
490 { .Opcode: AArch64::LD1Threev4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
491 { .Opcode: AArch64::LD1Threev2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
492 { .Opcode: AArch64::LD1Threev8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
493 { .Opcode: AArch64::LD1Threev4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
494 { .Opcode: AArch64::LD1Threev2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
495 { .Opcode: AArch64::LD1Threev1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
496 { .Opcode: AArch64::LD1Threev16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
497 { .Opcode: AArch64::LD1Threev8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
498 { .Opcode: AArch64::LD1Threev4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
499 { .Opcode: AArch64::LD1Threev2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
500 { .Opcode: AArch64::LD1Threev8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
501 { .Opcode: AArch64::LD1Threev4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
502 { .Opcode: AArch64::LD1Threev2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
503 { .Opcode: AArch64::LD1Threev1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
504 { .Opcode: AArch64::LD1Fourv16b, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
505 { .Opcode: AArch64::LD1Fourv8h, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
506 { .Opcode: AArch64::LD1Fourv4s, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
507 { .Opcode: AArch64::LD1Fourv2d, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
508 { .Opcode: AArch64::LD1Fourv8b, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
509 { .Opcode: AArch64::LD1Fourv4h, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
510 { .Opcode: AArch64::LD1Fourv2s, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
511 { .Opcode: AArch64::LD1Fourv1d, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
512 { .Opcode: AArch64::LD1Fourv16b_POST, .Mnemonic: "ld1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
513 { .Opcode: AArch64::LD1Fourv8h_POST, .Mnemonic: "ld1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
514 { .Opcode: AArch64::LD1Fourv4s_POST, .Mnemonic: "ld1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
515 { .Opcode: AArch64::LD1Fourv2d_POST, .Mnemonic: "ld1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
516 { .Opcode: AArch64::LD1Fourv8b_POST, .Mnemonic: "ld1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
517 { .Opcode: AArch64::LD1Fourv4h_POST, .Mnemonic: "ld1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
518 { .Opcode: AArch64::LD1Fourv2s_POST, .Mnemonic: "ld1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
519 { .Opcode: AArch64::LD1Fourv1d_POST, .Mnemonic: "ld1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
520 { .Opcode: AArch64::LD2i8, .Mnemonic: "ld2", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
521 { .Opcode: AArch64::LD2i16, .Mnemonic: "ld2", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
522 { .Opcode: AArch64::LD2i32, .Mnemonic: "ld2", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
523 { .Opcode: AArch64::LD2i64, .Mnemonic: "ld2", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
524 { .Opcode: AArch64::LD2i8_POST, .Mnemonic: "ld2", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 2 },
525 { .Opcode: AArch64::LD2i16_POST, .Mnemonic: "ld2", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 4 },
526 { .Opcode: AArch64::LD2i32_POST, .Mnemonic: "ld2", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 8 },
527 { .Opcode: AArch64::LD2i64_POST, .Mnemonic: "ld2", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 16 },
528 { .Opcode: AArch64::LD2Rv16b, .Mnemonic: "ld2r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
529 { .Opcode: AArch64::LD2Rv8h, .Mnemonic: "ld2r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
530 { .Opcode: AArch64::LD2Rv4s, .Mnemonic: "ld2r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
531 { .Opcode: AArch64::LD2Rv2d, .Mnemonic: "ld2r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
532 { .Opcode: AArch64::LD2Rv8b, .Mnemonic: "ld2r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
533 { .Opcode: AArch64::LD2Rv4h, .Mnemonic: "ld2r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
534 { .Opcode: AArch64::LD2Rv2s, .Mnemonic: "ld2r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
535 { .Opcode: AArch64::LD2Rv1d, .Mnemonic: "ld2r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
536 { .Opcode: AArch64::LD2Rv16b_POST, .Mnemonic: "ld2r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
537 { .Opcode: AArch64::LD2Rv8h_POST, .Mnemonic: "ld2r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
538 { .Opcode: AArch64::LD2Rv4s_POST, .Mnemonic: "ld2r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
539 { .Opcode: AArch64::LD2Rv2d_POST, .Mnemonic: "ld2r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
540 { .Opcode: AArch64::LD2Rv8b_POST, .Mnemonic: "ld2r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 2 },
541 { .Opcode: AArch64::LD2Rv4h_POST, .Mnemonic: "ld2r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
542 { .Opcode: AArch64::LD2Rv2s_POST, .Mnemonic: "ld2r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
543 { .Opcode: AArch64::LD2Rv1d_POST, .Mnemonic: "ld2r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
544 { .Opcode: AArch64::LD2Twov16b, .Mnemonic: "ld2", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
545 { .Opcode: AArch64::LD2Twov8h, .Mnemonic: "ld2", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
546 { .Opcode: AArch64::LD2Twov4s, .Mnemonic: "ld2", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
547 { .Opcode: AArch64::LD2Twov2d, .Mnemonic: "ld2", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
548 { .Opcode: AArch64::LD2Twov8b, .Mnemonic: "ld2", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
549 { .Opcode: AArch64::LD2Twov4h, .Mnemonic: "ld2", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
550 { .Opcode: AArch64::LD2Twov2s, .Mnemonic: "ld2", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
551 { .Opcode: AArch64::LD2Twov16b_POST, .Mnemonic: "ld2", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
552 { .Opcode: AArch64::LD2Twov8h_POST, .Mnemonic: "ld2", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
553 { .Opcode: AArch64::LD2Twov4s_POST, .Mnemonic: "ld2", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
554 { .Opcode: AArch64::LD2Twov2d_POST, .Mnemonic: "ld2", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
555 { .Opcode: AArch64::LD2Twov8b_POST, .Mnemonic: "ld2", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
556 { .Opcode: AArch64::LD2Twov4h_POST, .Mnemonic: "ld2", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
557 { .Opcode: AArch64::LD2Twov2s_POST, .Mnemonic: "ld2", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
558 { .Opcode: AArch64::LD3i8, .Mnemonic: "ld3", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
559 { .Opcode: AArch64::LD3i16, .Mnemonic: "ld3", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
560 { .Opcode: AArch64::LD3i32, .Mnemonic: "ld3", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
561 { .Opcode: AArch64::LD3i64, .Mnemonic: "ld3", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
562 { .Opcode: AArch64::LD3i8_POST, .Mnemonic: "ld3", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 3 },
563 { .Opcode: AArch64::LD3i16_POST, .Mnemonic: "ld3", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 6 },
564 { .Opcode: AArch64::LD3i32_POST, .Mnemonic: "ld3", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 12 },
565 { .Opcode: AArch64::LD3i64_POST, .Mnemonic: "ld3", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 24 },
566 { .Opcode: AArch64::LD3Rv16b, .Mnemonic: "ld3r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
567 { .Opcode: AArch64::LD3Rv8h, .Mnemonic: "ld3r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
568 { .Opcode: AArch64::LD3Rv4s, .Mnemonic: "ld3r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
569 { .Opcode: AArch64::LD3Rv2d, .Mnemonic: "ld3r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
570 { .Opcode: AArch64::LD3Rv8b, .Mnemonic: "ld3r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
571 { .Opcode: AArch64::LD3Rv4h, .Mnemonic: "ld3r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
572 { .Opcode: AArch64::LD3Rv2s, .Mnemonic: "ld3r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
573 { .Opcode: AArch64::LD3Rv1d, .Mnemonic: "ld3r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
574 { .Opcode: AArch64::LD3Rv16b_POST, .Mnemonic: "ld3r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 3 },
575 { .Opcode: AArch64::LD3Rv8h_POST, .Mnemonic: "ld3r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 6 },
576 { .Opcode: AArch64::LD3Rv4s_POST, .Mnemonic: "ld3r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 12 },
577 { .Opcode: AArch64::LD3Rv2d_POST, .Mnemonic: "ld3r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
578 { .Opcode: AArch64::LD3Rv8b_POST, .Mnemonic: "ld3r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 3 },
579 { .Opcode: AArch64::LD3Rv4h_POST, .Mnemonic: "ld3r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 6 },
580 { .Opcode: AArch64::LD3Rv2s_POST, .Mnemonic: "ld3r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 12 },
581 { .Opcode: AArch64::LD3Rv1d_POST, .Mnemonic: "ld3r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
582 { .Opcode: AArch64::LD3Threev16b, .Mnemonic: "ld3", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
583 { .Opcode: AArch64::LD3Threev8h, .Mnemonic: "ld3", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
584 { .Opcode: AArch64::LD3Threev4s, .Mnemonic: "ld3", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
585 { .Opcode: AArch64::LD3Threev2d, .Mnemonic: "ld3", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
586 { .Opcode: AArch64::LD3Threev8b, .Mnemonic: "ld3", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
587 { .Opcode: AArch64::LD3Threev4h, .Mnemonic: "ld3", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
588 { .Opcode: AArch64::LD3Threev2s, .Mnemonic: "ld3", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
589 { .Opcode: AArch64::LD3Threev16b_POST, .Mnemonic: "ld3", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
590 { .Opcode: AArch64::LD3Threev8h_POST, .Mnemonic: "ld3", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
591 { .Opcode: AArch64::LD3Threev4s_POST, .Mnemonic: "ld3", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
592 { .Opcode: AArch64::LD3Threev2d_POST, .Mnemonic: "ld3", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
593 { .Opcode: AArch64::LD3Threev8b_POST, .Mnemonic: "ld3", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
594 { .Opcode: AArch64::LD3Threev4h_POST, .Mnemonic: "ld3", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
595 { .Opcode: AArch64::LD3Threev2s_POST, .Mnemonic: "ld3", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
596 { .Opcode: AArch64::LD4i8, .Mnemonic: "ld4", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
597 { .Opcode: AArch64::LD4i16, .Mnemonic: "ld4", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
598 { .Opcode: AArch64::LD4i32, .Mnemonic: "ld4", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
599 { .Opcode: AArch64::LD4i64, .Mnemonic: "ld4", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 0 },
600 { .Opcode: AArch64::LD4i8_POST, .Mnemonic: "ld4", .Layout: ".b", .ListOperand: 2, .HasLane: true, .NaturalOffset: 4 },
601 { .Opcode: AArch64::LD4i16_POST, .Mnemonic: "ld4", .Layout: ".h", .ListOperand: 2, .HasLane: true, .NaturalOffset: 8 },
602 { .Opcode: AArch64::LD4i32_POST, .Mnemonic: "ld4", .Layout: ".s", .ListOperand: 2, .HasLane: true, .NaturalOffset: 16 },
603 { .Opcode: AArch64::LD4i64_POST, .Mnemonic: "ld4", .Layout: ".d", .ListOperand: 2, .HasLane: true, .NaturalOffset: 32 },
604 { .Opcode: AArch64::LD4Rv16b, .Mnemonic: "ld4r", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
605 { .Opcode: AArch64::LD4Rv8h, .Mnemonic: "ld4r", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
606 { .Opcode: AArch64::LD4Rv4s, .Mnemonic: "ld4r", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
607 { .Opcode: AArch64::LD4Rv2d, .Mnemonic: "ld4r", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
608 { .Opcode: AArch64::LD4Rv8b, .Mnemonic: "ld4r", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
609 { .Opcode: AArch64::LD4Rv4h, .Mnemonic: "ld4r", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
610 { .Opcode: AArch64::LD4Rv2s, .Mnemonic: "ld4r", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
611 { .Opcode: AArch64::LD4Rv1d, .Mnemonic: "ld4r", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
612 { .Opcode: AArch64::LD4Rv16b_POST, .Mnemonic: "ld4r", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
613 { .Opcode: AArch64::LD4Rv8h_POST, .Mnemonic: "ld4r", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
614 { .Opcode: AArch64::LD4Rv4s_POST, .Mnemonic: "ld4r", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
615 { .Opcode: AArch64::LD4Rv2d_POST, .Mnemonic: "ld4r", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
616 { .Opcode: AArch64::LD4Rv8b_POST, .Mnemonic: "ld4r", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 4 },
617 { .Opcode: AArch64::LD4Rv4h_POST, .Mnemonic: "ld4r", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
618 { .Opcode: AArch64::LD4Rv2s_POST, .Mnemonic: "ld4r", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
619 { .Opcode: AArch64::LD4Rv1d_POST, .Mnemonic: "ld4r", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
620 { .Opcode: AArch64::LD4Fourv16b, .Mnemonic: "ld4", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
621 { .Opcode: AArch64::LD4Fourv8h, .Mnemonic: "ld4", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
622 { .Opcode: AArch64::LD4Fourv4s, .Mnemonic: "ld4", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
623 { .Opcode: AArch64::LD4Fourv2d, .Mnemonic: "ld4", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
624 { .Opcode: AArch64::LD4Fourv8b, .Mnemonic: "ld4", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
625 { .Opcode: AArch64::LD4Fourv4h, .Mnemonic: "ld4", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
626 { .Opcode: AArch64::LD4Fourv2s, .Mnemonic: "ld4", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
627 { .Opcode: AArch64::LD4Fourv16b_POST, .Mnemonic: "ld4", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
628 { .Opcode: AArch64::LD4Fourv8h_POST, .Mnemonic: "ld4", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
629 { .Opcode: AArch64::LD4Fourv4s_POST, .Mnemonic: "ld4", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
630 { .Opcode: AArch64::LD4Fourv2d_POST, .Mnemonic: "ld4", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
631 { .Opcode: AArch64::LD4Fourv8b_POST, .Mnemonic: "ld4", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
632 { .Opcode: AArch64::LD4Fourv4h_POST, .Mnemonic: "ld4", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
633 { .Opcode: AArch64::LD4Fourv2s_POST, .Mnemonic: "ld4", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
634 { .Opcode: AArch64::ST1i8, .Mnemonic: "st1", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
635 { .Opcode: AArch64::ST1i16, .Mnemonic: "st1", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
636 { .Opcode: AArch64::ST1i32, .Mnemonic: "st1", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
637 { .Opcode: AArch64::ST1i64, .Mnemonic: "st1", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
638 { .Opcode: AArch64::ST1i8_POST, .Mnemonic: "st1", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 1 },
639 { .Opcode: AArch64::ST1i16_POST, .Mnemonic: "st1", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 2 },
640 { .Opcode: AArch64::ST1i32_POST, .Mnemonic: "st1", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 4 },
641 { .Opcode: AArch64::ST1i64_POST, .Mnemonic: "st1", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 8 },
642 { .Opcode: AArch64::ST1Onev16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
643 { .Opcode: AArch64::ST1Onev8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
644 { .Opcode: AArch64::ST1Onev4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
645 { .Opcode: AArch64::ST1Onev2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
646 { .Opcode: AArch64::ST1Onev8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
647 { .Opcode: AArch64::ST1Onev4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
648 { .Opcode: AArch64::ST1Onev2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
649 { .Opcode: AArch64::ST1Onev1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
650 { .Opcode: AArch64::ST1Onev16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
651 { .Opcode: AArch64::ST1Onev8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
652 { .Opcode: AArch64::ST1Onev4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
653 { .Opcode: AArch64::ST1Onev2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
654 { .Opcode: AArch64::ST1Onev8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
655 { .Opcode: AArch64::ST1Onev4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
656 { .Opcode: AArch64::ST1Onev2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
657 { .Opcode: AArch64::ST1Onev1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 8 },
658 { .Opcode: AArch64::ST1Twov16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
659 { .Opcode: AArch64::ST1Twov8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
660 { .Opcode: AArch64::ST1Twov4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
661 { .Opcode: AArch64::ST1Twov2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
662 { .Opcode: AArch64::ST1Twov8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
663 { .Opcode: AArch64::ST1Twov4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
664 { .Opcode: AArch64::ST1Twov2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
665 { .Opcode: AArch64::ST1Twov1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
666 { .Opcode: AArch64::ST1Twov16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
667 { .Opcode: AArch64::ST1Twov8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
668 { .Opcode: AArch64::ST1Twov4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
669 { .Opcode: AArch64::ST1Twov2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
670 { .Opcode: AArch64::ST1Twov8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
671 { .Opcode: AArch64::ST1Twov4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
672 { .Opcode: AArch64::ST1Twov2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
673 { .Opcode: AArch64::ST1Twov1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
674 { .Opcode: AArch64::ST1Threev16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
675 { .Opcode: AArch64::ST1Threev8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
676 { .Opcode: AArch64::ST1Threev4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
677 { .Opcode: AArch64::ST1Threev2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
678 { .Opcode: AArch64::ST1Threev8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
679 { .Opcode: AArch64::ST1Threev4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
680 { .Opcode: AArch64::ST1Threev2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
681 { .Opcode: AArch64::ST1Threev1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
682 { .Opcode: AArch64::ST1Threev16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
683 { .Opcode: AArch64::ST1Threev8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
684 { .Opcode: AArch64::ST1Threev4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
685 { .Opcode: AArch64::ST1Threev2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
686 { .Opcode: AArch64::ST1Threev8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
687 { .Opcode: AArch64::ST1Threev4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
688 { .Opcode: AArch64::ST1Threev2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
689 { .Opcode: AArch64::ST1Threev1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
690 { .Opcode: AArch64::ST1Fourv16b, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
691 { .Opcode: AArch64::ST1Fourv8h, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
692 { .Opcode: AArch64::ST1Fourv4s, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
693 { .Opcode: AArch64::ST1Fourv2d, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
694 { .Opcode: AArch64::ST1Fourv8b, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
695 { .Opcode: AArch64::ST1Fourv4h, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
696 { .Opcode: AArch64::ST1Fourv2s, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
697 { .Opcode: AArch64::ST1Fourv1d, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
698 { .Opcode: AArch64::ST1Fourv16b_POST, .Mnemonic: "st1", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
699 { .Opcode: AArch64::ST1Fourv8h_POST, .Mnemonic: "st1", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
700 { .Opcode: AArch64::ST1Fourv4s_POST, .Mnemonic: "st1", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
701 { .Opcode: AArch64::ST1Fourv2d_POST, .Mnemonic: "st1", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
702 { .Opcode: AArch64::ST1Fourv8b_POST, .Mnemonic: "st1", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
703 { .Opcode: AArch64::ST1Fourv4h_POST, .Mnemonic: "st1", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
704 { .Opcode: AArch64::ST1Fourv2s_POST, .Mnemonic: "st1", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
705 { .Opcode: AArch64::ST1Fourv1d_POST, .Mnemonic: "st1", .Layout: ".1d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
706 { .Opcode: AArch64::ST2i8, .Mnemonic: "st2", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
707 { .Opcode: AArch64::ST2i16, .Mnemonic: "st2", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
708 { .Opcode: AArch64::ST2i32, .Mnemonic: "st2", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
709 { .Opcode: AArch64::ST2i64, .Mnemonic: "st2", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
710 { .Opcode: AArch64::ST2i8_POST, .Mnemonic: "st2", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 2 },
711 { .Opcode: AArch64::ST2i16_POST, .Mnemonic: "st2", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 4 },
712 { .Opcode: AArch64::ST2i32_POST, .Mnemonic: "st2", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 8 },
713 { .Opcode: AArch64::ST2i64_POST, .Mnemonic: "st2", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 16 },
714 { .Opcode: AArch64::ST2Twov16b, .Mnemonic: "st2", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
715 { .Opcode: AArch64::ST2Twov8h, .Mnemonic: "st2", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
716 { .Opcode: AArch64::ST2Twov4s, .Mnemonic: "st2", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
717 { .Opcode: AArch64::ST2Twov2d, .Mnemonic: "st2", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
718 { .Opcode: AArch64::ST2Twov8b, .Mnemonic: "st2", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
719 { .Opcode: AArch64::ST2Twov4h, .Mnemonic: "st2", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
720 { .Opcode: AArch64::ST2Twov2s, .Mnemonic: "st2", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
721 { .Opcode: AArch64::ST2Twov16b_POST, .Mnemonic: "st2", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
722 { .Opcode: AArch64::ST2Twov8h_POST, .Mnemonic: "st2", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
723 { .Opcode: AArch64::ST2Twov4s_POST, .Mnemonic: "st2", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
724 { .Opcode: AArch64::ST2Twov2d_POST, .Mnemonic: "st2", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
725 { .Opcode: AArch64::ST2Twov8b_POST, .Mnemonic: "st2", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
726 { .Opcode: AArch64::ST2Twov4h_POST, .Mnemonic: "st2", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
727 { .Opcode: AArch64::ST2Twov2s_POST, .Mnemonic: "st2", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 16 },
728 { .Opcode: AArch64::ST3i8, .Mnemonic: "st3", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
729 { .Opcode: AArch64::ST3i16, .Mnemonic: "st3", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
730 { .Opcode: AArch64::ST3i32, .Mnemonic: "st3", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
731 { .Opcode: AArch64::ST3i64, .Mnemonic: "st3", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
732 { .Opcode: AArch64::ST3i8_POST, .Mnemonic: "st3", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 3 },
733 { .Opcode: AArch64::ST3i16_POST, .Mnemonic: "st3", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 6 },
734 { .Opcode: AArch64::ST3i32_POST, .Mnemonic: "st3", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 12 },
735 { .Opcode: AArch64::ST3i64_POST, .Mnemonic: "st3", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 24 },
736 { .Opcode: AArch64::ST3Threev16b, .Mnemonic: "st3", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
737 { .Opcode: AArch64::ST3Threev8h, .Mnemonic: "st3", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
738 { .Opcode: AArch64::ST3Threev4s, .Mnemonic: "st3", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
739 { .Opcode: AArch64::ST3Threev2d, .Mnemonic: "st3", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
740 { .Opcode: AArch64::ST3Threev8b, .Mnemonic: "st3", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
741 { .Opcode: AArch64::ST3Threev4h, .Mnemonic: "st3", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
742 { .Opcode: AArch64::ST3Threev2s, .Mnemonic: "st3", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
743 { .Opcode: AArch64::ST3Threev16b_POST, .Mnemonic: "st3", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
744 { .Opcode: AArch64::ST3Threev8h_POST, .Mnemonic: "st3", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
745 { .Opcode: AArch64::ST3Threev4s_POST, .Mnemonic: "st3", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
746 { .Opcode: AArch64::ST3Threev2d_POST, .Mnemonic: "st3", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 48 },
747 { .Opcode: AArch64::ST3Threev8b_POST, .Mnemonic: "st3", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
748 { .Opcode: AArch64::ST3Threev4h_POST, .Mnemonic: "st3", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
749 { .Opcode: AArch64::ST3Threev2s_POST, .Mnemonic: "st3", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 24 },
750 { .Opcode: AArch64::ST4i8, .Mnemonic: "st4", .Layout: ".b", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
751 { .Opcode: AArch64::ST4i16, .Mnemonic: "st4", .Layout: ".h", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
752 { .Opcode: AArch64::ST4i32, .Mnemonic: "st4", .Layout: ".s", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
753 { .Opcode: AArch64::ST4i64, .Mnemonic: "st4", .Layout: ".d", .ListOperand: 0, .HasLane: true, .NaturalOffset: 0 },
754 { .Opcode: AArch64::ST4i8_POST, .Mnemonic: "st4", .Layout: ".b", .ListOperand: 1, .HasLane: true, .NaturalOffset: 4 },
755 { .Opcode: AArch64::ST4i16_POST, .Mnemonic: "st4", .Layout: ".h", .ListOperand: 1, .HasLane: true, .NaturalOffset: 8 },
756 { .Opcode: AArch64::ST4i32_POST, .Mnemonic: "st4", .Layout: ".s", .ListOperand: 1, .HasLane: true, .NaturalOffset: 16 },
757 { .Opcode: AArch64::ST4i64_POST, .Mnemonic: "st4", .Layout: ".d", .ListOperand: 1, .HasLane: true, .NaturalOffset: 32 },
758 { .Opcode: AArch64::ST4Fourv16b, .Mnemonic: "st4", .Layout: ".16b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
759 { .Opcode: AArch64::ST4Fourv8h, .Mnemonic: "st4", .Layout: ".8h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
760 { .Opcode: AArch64::ST4Fourv4s, .Mnemonic: "st4", .Layout: ".4s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
761 { .Opcode: AArch64::ST4Fourv2d, .Mnemonic: "st4", .Layout: ".2d", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
762 { .Opcode: AArch64::ST4Fourv8b, .Mnemonic: "st4", .Layout: ".8b", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
763 { .Opcode: AArch64::ST4Fourv4h, .Mnemonic: "st4", .Layout: ".4h", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
764 { .Opcode: AArch64::ST4Fourv2s, .Mnemonic: "st4", .Layout: ".2s", .ListOperand: 0, .HasLane: false, .NaturalOffset: 0 },
765 { .Opcode: AArch64::ST4Fourv16b_POST, .Mnemonic: "st4", .Layout: ".16b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
766 { .Opcode: AArch64::ST4Fourv8h_POST, .Mnemonic: "st4", .Layout: ".8h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
767 { .Opcode: AArch64::ST4Fourv4s_POST, .Mnemonic: "st4", .Layout: ".4s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
768 { .Opcode: AArch64::ST4Fourv2d_POST, .Mnemonic: "st4", .Layout: ".2d", .ListOperand: 1, .HasLane: false, .NaturalOffset: 64 },
769 { .Opcode: AArch64::ST4Fourv8b_POST, .Mnemonic: "st4", .Layout: ".8b", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
770 { .Opcode: AArch64::ST4Fourv4h_POST, .Mnemonic: "st4", .Layout: ".4h", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
771 { .Opcode: AArch64::ST4Fourv2s_POST, .Mnemonic: "st4", .Layout: ".2s", .ListOperand: 1, .HasLane: false, .NaturalOffset: 32 },
772};
773
774static const LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
775 for (const auto &Info : LdStNInstInfo)
776 if (Info.Opcode == Opcode)
777 return &Info;
778
779 return nullptr;
780}
781
782void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
783 StringRef Annot,
784 const MCSubtargetInfo &STI,
785 raw_ostream &O) {
786 unsigned Opcode = MI->getOpcode();
787 StringRef Layout;
788
789 bool IsTbx;
790 if (isTblTbxInstruction(Opcode: MI->getOpcode(), Layout, IsTbx)) {
791 O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t';
792 printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg(), AltIdx: AArch64::vreg);
793 O << ", ";
794
795 unsigned ListOpNum = IsTbx ? 2 : 1;
796 printVectorList(MI, OpNum: ListOpNum, STI, O, LayoutSuffix: "");
797
798 O << ", ";
799 printRegName(OS&: O, Reg: MI->getOperand(i: ListOpNum + 1).getReg(), AltIdx: AArch64::vreg);
800 printAnnotation(OS&: O, Annot);
801 return;
802 }
803
804 if (const LdStNInstrDesc *LdStDesc = getLdStNInstrDesc(Opcode)) {
805 O << "\t" << LdStDesc->Mnemonic << LdStDesc->Layout << '\t';
806
807 // Now onto the operands: first a vector list with possible lane
808 // specifier. E.g. { v0 }[2]
809 int OpNum = LdStDesc->ListOperand;
810 printVectorList(MI, OpNum: OpNum++, STI, O, LayoutSuffix: "");
811
812 if (LdStDesc->HasLane)
813 O << '[' << MI->getOperand(i: OpNum++).getImm() << ']';
814
815 // Next the address: [xN]
816 MCRegister AddrReg = MI->getOperand(i: OpNum++).getReg();
817 O << ", [";
818 printRegName(OS&: O, Reg: AddrReg);
819 O << ']';
820
821 // Finally, there might be a post-indexed offset.
822 if (LdStDesc->NaturalOffset != 0) {
823 MCRegister Reg = MI->getOperand(i: OpNum++).getReg();
824 if (Reg != AArch64::XZR) {
825 O << ", ";
826 printRegName(OS&: O, Reg);
827 } else {
828 assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
829 O << ", ";
830 markup(OS&: O, M: Markup::Immediate) << "#" << LdStDesc->NaturalOffset;
831 }
832 }
833
834 printAnnotation(OS&: O, Annot);
835 return;
836 }
837
838 AArch64InstPrinter::printInst(MI, Address, Annot, STI, O);
839}
840
// The Apple-syntax printer shares the generated register-name table with
// the generic AArch64 printer.
StringRef AArch64AppleInstPrinter::getRegName(MCRegister Reg) const {
  return getRegisterName(Reg);
}
844
845bool AArch64InstPrinter::printRangePrefetchAlias(const MCInst *MI,
846 const MCSubtargetInfo &STI,
847 raw_ostream &O,
848 StringRef Annot) {
849 unsigned Opcode = MI->getOpcode();
850
851#ifndef NDEBUG
852 assert(((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) &&
853 "Invalid opcode for RPRFM alias!");
854#endif
855
856 unsigned PRFOp = MI->getOperand(i: 0).getImm();
857 unsigned Mask = 0x18; // 0b11000
858 if ((PRFOp & Mask) != Mask)
859 return false; // Rt != '11xxx', it's a PRFM instruction.
860
861 MCRegister Rm = MI->getOperand(i: 2).getReg();
862
863 // "Rm" must be a 64-bit GPR for RPRFM.
864 if (MRI.getRegClass(i: AArch64::GPR32RegClassID).contains(Reg: Rm))
865 Rm = MRI.getMatchingSuperReg(Reg: Rm, SubIdx: AArch64::sub_32,
866 RC: &MRI.getRegClass(i: AArch64::GPR64RegClassID));
867
868 unsigned SignExtend = MI->getOperand(i: 3).getImm(); // encoded in "option<2>".
869 unsigned Shift = MI->getOperand(i: 4).getImm(); // encoded in "S".
870
871 assert((SignExtend <= 1) && "sign extend should be a single bit!");
872 assert((Shift <= 1) && "Shift should be a single bit!");
873
874 unsigned Option0 = (Opcode == AArch64::PRFMroX) ? 1 : 0;
875
876 // encoded in "option<2>:option<0>:S:Rt<2:0>".
877 unsigned RPRFOp =
878 (SignExtend << 5) | (Option0 << 4) | (Shift << 3) | (PRFOp & 0x7);
879
880 O << "\trprfm ";
881 if (auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: RPRFOp))
882 O << RPRFM->Name << ", ";
883 else
884 O << "#" << formatImm(Value: RPRFOp) << ", ";
885 O << getRegisterName(Reg: Rm);
886 O << ", [";
887 printOperand(MI, OpNo: 1, STI, O); // "Rn".
888 O << "]";
889
890 printAnnotation(OS&: O, Annot);
891
892 return true;
893}
894
895bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
896 const MCSubtargetInfo &STI,
897 raw_ostream &O) {
898#ifndef NDEBUG
899 unsigned Opcode = MI->getOpcode();
900 assert(Opcode == AArch64::SYSxt && "Invalid opcode for SYS alias!");
901#endif
902
903 const MCOperand &Op1 = MI->getOperand(i: 0);
904 const MCOperand &Cn = MI->getOperand(i: 1);
905 const MCOperand &Cm = MI->getOperand(i: 2);
906 const MCOperand &Op2 = MI->getOperand(i: 3);
907
908 unsigned Op1Val = Op1.getImm();
909 unsigned CnVal = Cn.getImm();
910 unsigned CmVal = Cm.getImm();
911 unsigned Op2Val = Op2.getImm();
912
913 uint16_t Encoding = Op2Val;
914 Encoding |= CmVal << 3;
915 Encoding |= CnVal << 7;
916 Encoding |= Op1Val << 11;
917
918 bool NeedsReg = false;
919 bool OptionalReg = false;
920 std::string Ins;
921 std::string Name;
922
923 if (CnVal == 7) {
924 switch (CmVal) {
925 default: return false;
926 // MLBI aliases
927 case 0: {
928 const AArch64MLBI::MLBI *MLBI =
929 AArch64MLBI::lookupMLBIByEncoding(Encoding);
930 if (!MLBI || !MLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
931 return false;
932
933 NeedsReg = MLBI->NeedsReg;
934 Ins = "mlbi\t";
935 Name = std::string(MLBI->Name);
936 } break;
937 // Maybe IC, maybe Prediction Restriction
938 case 1:
939 switch (Op1Val) {
940 default: return false;
941 case 0: goto Search_IC;
942 case 3: goto Search_PRCTX;
943 }
944 // Prediction Restriction aliases
945 case 3: {
946 Search_PRCTX:
947 if (Op1Val != 3 || CnVal != 7 || CmVal != 3)
948 return false;
949
950 const auto Requires =
951 Op2Val == 6 ? AArch64::FeatureSPECRES2 : AArch64::FeaturePredRes;
952 if (!(STI.hasFeature(Feature: AArch64::FeatureAll) || STI.hasFeature(Feature: Requires)))
953 return false;
954
955 NeedsReg = true;
956 switch (Op2Val) {
957 default: return false;
958 case 4: Ins = "cfp\t"; break;
959 case 5: Ins = "dvp\t"; break;
960 case 6: Ins = "cosp\t"; break;
961 case 7: Ins = "cpp\t"; break;
962 }
963 Name = "RCTX";
964 }
965 break;
966 // IC aliases
967 case 5: {
968 Search_IC:
969 const AArch64IC::IC *IC = AArch64IC::lookupICByEncoding(Encoding);
970 if (!IC || !IC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
971 return false;
972
973 NeedsReg = IC->NeedsReg;
974 Ins = "ic\t";
975 Name = std::string(IC->Name);
976 }
977 break;
978 // DC aliases
979 case 4: case 6: case 10: case 11: case 12: case 13: case 14:
980 {
981 const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
982 if (!DC || !DC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
983 return false;
984
985 NeedsReg = true;
986 Ins = "dc\t";
987 Name = std::string(DC->Name);
988 }
989 break;
990 // AT aliases
991 case 8: case 9: {
992 const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
993 if (!AT || !AT->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
994 return false;
995
996 NeedsReg = true;
997 Ins = "at\t";
998 Name = std::string(AT->Name);
999 }
1000 break;
1001 // Overlaps with AT and DC
1002 case 15: {
1003 const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
1004 const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
1005 if (AT && AT->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
1006 NeedsReg = true;
1007 Ins = "at\t";
1008 Name = std::string(AT->Name);
1009 } else if (DC && DC->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
1010 NeedsReg = true;
1011 Ins = "dc\t";
1012 Name = std::string(DC->Name);
1013 } else {
1014 return false;
1015 }
1016 } break;
1017 }
1018 } else if (CnVal == 8 || CnVal == 9) {
1019 // TLBI aliases
1020 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
1021 if (!TLBI || !TLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1022 return false;
1023
1024 NeedsReg = TLBI->NeedsReg;
1025 if (STI.hasFeature(Feature: AArch64::FeatureAll) ||
1026 STI.hasFeature(Feature: AArch64::FeatureTLBID))
1027 OptionalReg = TLBI->OptionalReg;
1028 Ins = "tlbi\t";
1029 Name = std::string(TLBI->Name);
1030 } else if (CnVal == 12) {
1031 if (CmVal != 0) {
1032 // GIC aliases
1033 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByEncoding(Encoding);
1034 if (!GIC || !GIC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1035 return false;
1036
1037 NeedsReg = GIC->NeedsReg;
1038 Ins = "gic\t";
1039 Name = std::string(GIC->Name);
1040 } else {
1041 // GSB aliases
1042 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByEncoding(Encoding);
1043 if (!GSB || !GSB->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1044 return false;
1045
1046 NeedsReg = false;
1047 Ins = "gsb\t";
1048 Name = std::string(GSB->Name);
1049 }
1050 } else if (CnVal == 10) {
1051 // PLBI aliases
1052 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByEncoding(Encoding);
1053 if (!PLBI || !PLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1054 return false;
1055
1056 NeedsReg = PLBI->NeedsReg;
1057 if (STI.hasFeature(Feature: AArch64::FeatureAll) ||
1058 STI.hasFeature(Feature: AArch64::FeatureTLBID))
1059 OptionalReg = PLBI->OptionalReg;
1060 Ins = "plbi\t";
1061 Name = std::string(PLBI->Name);
1062 } else
1063 return false;
1064
1065 StringRef Reg = getRegisterName(Reg: MI->getOperand(i: 4).getReg());
1066 bool NotXZR = Reg != "xzr";
1067
1068 // If a mandatory or optional register is not specified in the TableGen
1069 // (i.e. no register operand should be present), and the register value
1070 // is not xzr/x31, then disassemble to a SYS alias instead.
1071 if (NotXZR && !NeedsReg && !OptionalReg)
1072 return false;
1073
1074 std::string Str = Ins + Name;
1075 llvm::transform(Range&: Str, d_first: Str.begin(), F: ::tolower);
1076
1077 O << '\t' << Str;
1078
1079 // For optional registers, don't print the value if it's xzr/x31
1080 // since this defaults to xzr/x31 if register is not specified.
1081 if (NeedsReg || (OptionalReg && NotXZR))
1082 O << ", " << Reg;
1083
1084 return true;
1085}
1086
1087bool AArch64InstPrinter::printSyslAlias(const MCInst *MI,
1088 const MCSubtargetInfo &STI,
1089 raw_ostream &O) {
1090#ifndef NDEBUG
1091 unsigned Opcode = MI->getOpcode();
1092 assert(Opcode == AArch64::SYSLxt && "Invalid opcode for SYSL alias!");
1093#endif
1094
1095 StringRef Reg = getRegisterName(Reg: MI->getOperand(i: 0).getReg());
1096 const MCOperand &Op1 = MI->getOperand(i: 1);
1097 const MCOperand &Cn = MI->getOperand(i: 2);
1098 const MCOperand &Cm = MI->getOperand(i: 3);
1099 const MCOperand &Op2 = MI->getOperand(i: 4);
1100
1101 unsigned Op1Val = Op1.getImm();
1102 unsigned CnVal = Cn.getImm();
1103 unsigned CmVal = Cm.getImm();
1104 unsigned Op2Val = Op2.getImm();
1105
1106 uint16_t Encoding = Op2Val;
1107 Encoding |= CmVal << 3;
1108 Encoding |= CnVal << 7;
1109 Encoding |= Op1Val << 11;
1110
1111 std::string Ins;
1112 std::string Name;
1113
1114 if (CnVal == 12) {
1115 if (CmVal == 3) {
1116 // GICR aliases
1117 const AArch64GICR::GICR *GICR =
1118 AArch64GICR::lookupGICRByEncoding(Encoding);
1119 if (!GICR || !GICR->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1120 return false;
1121
1122 Ins = "gicr";
1123 Name = std::string(GICR->Name);
1124 } else
1125 return false;
1126 } else
1127 return false;
1128
1129 llvm::transform(Range&: Name, d_first: Name.begin(), F: ::tolower);
1130
1131 O << '\t' << Ins << '\t' << Reg.str() << ", " << Name;
1132
1133 return true;
1134}
1135
1136bool AArch64InstPrinter::printSyspAlias(const MCInst *MI,
1137 const MCSubtargetInfo &STI,
1138 raw_ostream &O) {
1139#ifndef NDEBUG
1140 unsigned Opcode = MI->getOpcode();
1141 assert((Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR) &&
1142 "Invalid opcode for SYSP alias!");
1143#endif
1144
1145 const MCOperand &Op1 = MI->getOperand(i: 0);
1146 const MCOperand &Cn = MI->getOperand(i: 1);
1147 const MCOperand &Cm = MI->getOperand(i: 2);
1148 const MCOperand &Op2 = MI->getOperand(i: 3);
1149
1150 unsigned Op1Val = Op1.getImm();
1151 unsigned CnVal = Cn.getImm();
1152 unsigned CmVal = Cm.getImm();
1153 unsigned Op2Val = Op2.getImm();
1154
1155 uint16_t Encoding = Op2Val;
1156 Encoding |= CmVal << 3;
1157 Encoding |= CnVal << 7;
1158 Encoding |= Op1Val << 11;
1159
1160 std::string Ins;
1161 std::string Name;
1162
1163 if (CnVal == 8 || CnVal == 9) {
1164 // TLBIP aliases
1165
1166 const AArch64TLBIP::TLBIP *TLBIP =
1167 AArch64TLBIP::lookupTLBIPByEncoding(Encoding);
1168 if (!TLBIP || !TLBIP->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1169 return false;
1170
1171 Ins = "tlbip\t";
1172 Name = std::string(TLBIP->Name);
1173 } else
1174 return false;
1175
1176 std::string Str = Ins + Name;
1177 llvm::transform(Range&: Str, d_first: Str.begin(), F: ::tolower);
1178
1179 O << '\t' << Str;
1180 O << ", ";
1181 if (MI->getOperand(i: 4).getReg() == AArch64::XZR)
1182 printSyspXzrPair(MI, OpNum: 4, STI, O);
1183 else
1184 printGPRSeqPairsClassOperand<64>(MI, OpNum: 4, STI, O);
1185
1186 return true;
1187}
1188
1189template <int EltSize>
1190void AArch64InstPrinter::printMatrix(const MCInst *MI, unsigned OpNum,
1191 const MCSubtargetInfo &STI,
1192 raw_ostream &O) {
1193 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1194 assert(RegOp.isReg() && "Unexpected operand type!");
1195
1196 printRegName(OS&: O, Reg: RegOp.getReg());
1197 switch (EltSize) {
1198 case 0:
1199 break;
1200 case 8:
1201 O << ".b";
1202 break;
1203 case 16:
1204 O << ".h";
1205 break;
1206 case 32:
1207 O << ".s";
1208 break;
1209 case 64:
1210 O << ".d";
1211 break;
1212 case 128:
1213 O << ".q";
1214 break;
1215 default:
1216 llvm_unreachable("Unsupported element size");
1217 }
1218}
1219
1220template <bool IsVertical>
1221void AArch64InstPrinter::printMatrixTileVector(const MCInst *MI, unsigned OpNum,
1222 const MCSubtargetInfo &STI,
1223 raw_ostream &O) {
1224 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1225 assert(RegOp.isReg() && "Unexpected operand type!");
1226 StringRef RegName = getRegisterName(Reg: RegOp.getReg());
1227
1228 // Insert the horizontal/vertical flag before the suffix.
1229 StringRef Base, Suffix;
1230 std::tie(args&: Base, args&: Suffix) = RegName.split(Separator: '.');
1231 O << Base << (IsVertical ? "v" : "h") << '.' << Suffix;
1232}
1233
1234void AArch64InstPrinter::printMatrixTile(const MCInst *MI, unsigned OpNum,
1235 const MCSubtargetInfo &STI,
1236 raw_ostream &O) {
1237 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1238 assert(RegOp.isReg() && "Unexpected operand type!");
1239 printRegName(OS&: O, Reg: RegOp.getReg());
1240}
1241
1242void AArch64InstPrinter::printSVCROp(const MCInst *MI, unsigned OpNum,
1243 const MCSubtargetInfo &STI,
1244 raw_ostream &O) {
1245 const MCOperand &MO = MI->getOperand(i: OpNum);
1246 assert(MO.isImm() && "Unexpected operand type!");
1247 unsigned svcrop = MO.getImm();
1248 const auto *SVCR = AArch64SVCR::lookupSVCRByEncoding(Encoding: svcrop);
1249 assert(SVCR && "Unexpected SVCR operand!");
1250 O << SVCR->Name;
1251}
1252
1253void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
1254 const MCSubtargetInfo &STI,
1255 raw_ostream &O) {
1256 const MCOperand &Op = MI->getOperand(i: OpNo);
1257 if (Op.isReg()) {
1258 printRegName(OS&: O, Reg: Op.getReg());
1259 } else if (Op.isImm()) {
1260 printImm(MI, OpNo, STI, O);
1261 } else {
1262 assert(Op.isExpr() && "unknown operand kind in printOperand");
1263 MAI.printExpr(O, *Op.getExpr());
1264 }
1265}
1266
1267void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
1268 const MCSubtargetInfo &STI,
1269 raw_ostream &O) {
1270 const MCOperand &Op = MI->getOperand(i: OpNo);
1271 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Op.getImm());
1272}
1273
1274void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
1275 const MCSubtargetInfo &STI,
1276 raw_ostream &O) {
1277 const MCOperand &Op = MI->getOperand(i: OpNo);
1278 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%#llx", Vals: Op.getImm());
1279}
1280
1281template<int Size>
1282void AArch64InstPrinter::printSImm(const MCInst *MI, unsigned OpNo,
1283 const MCSubtargetInfo &STI,
1284 raw_ostream &O) {
1285 const MCOperand &Op = MI->getOperand(i: OpNo);
1286 if (Size == 8)
1287 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: (signed char)Op.getImm());
1288 else if (Size == 16)
1289 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: (signed short)Op.getImm());
1290 else
1291 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Op.getImm());
1292}
1293
1294void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
1295 unsigned Imm, raw_ostream &O) {
1296 const MCOperand &Op = MI->getOperand(i: OpNo);
1297 if (Op.isReg()) {
1298 MCRegister Reg = Op.getReg();
1299 if (Reg == AArch64::XZR)
1300 markup(OS&: O, M: Markup::Immediate) << "#" << Imm;
1301 else
1302 printRegName(OS&: O, Reg);
1303 } else
1304 llvm_unreachable("unknown operand kind in printPostIncOperand64");
1305}
1306
1307void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
1308 const MCSubtargetInfo &STI,
1309 raw_ostream &O) {
1310 const MCOperand &Op = MI->getOperand(i: OpNo);
1311 assert(Op.isReg() && "Non-register vreg operand!");
1312 printRegName(OS&: O, Reg: Op.getReg(), AltIdx: AArch64::vreg);
1313}
1314
1315void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
1316 const MCSubtargetInfo &STI,
1317 raw_ostream &O) {
1318 const MCOperand &Op = MI->getOperand(i: OpNo);
1319 assert(Op.isImm() && "System instruction C[nm] operands must be immediates!");
1320 O << "c" << Op.getImm();
1321}
1322
void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  // Print the 12-bit add/sub immediate at OpNum followed by its optional
  // shift (operand OpNum+1); a not-yet-resolved expression operand is
  // printed symbolically instead.
  const MCOperand &MO = MI->getOperand(i: OpNum);
  if (MO.isImm()) {
    unsigned Val = (MO.getImm() & 0xfff);
    assert(Val == MO.getImm() && "Add/sub immediate out of range!");
    unsigned Shift =
        AArch64_AM::getShiftValue(Imm: MI->getOperand(i: OpNum + 1).getImm());
    markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Val);
    if (Shift != 0) {
      printShifter(MI, OpNum: OpNum + 1, STI, O);
      // Emit the effective (pre-shifted) value as an assembly comment.
      if (CommentStream)
        *CommentStream << '=' << formatImm(Value: Val << Shift) << '\n';
    }
  } else {
    assert(MO.isExpr() && "Unexpected operand type!");
    MAI.printExpr(O, *MO.getExpr());
    // printShifter omits the default "lsl #0", so this is safe to always call.
    printShifter(MI, OpNum: OpNum + 1, STI, O);
  }
}
1344
1345template <typename T>
1346void AArch64InstPrinter::printLogicalImm(const MCInst *MI, unsigned OpNum,
1347 const MCSubtargetInfo &STI,
1348 raw_ostream &O) {
1349 uint64_t Val = MI->getOperand(i: OpNum).getImm();
1350 WithMarkup M = markup(OS&: O, M: Markup::Immediate);
1351 O << "#0x";
1352 O.write_hex(N: AArch64_AM::decodeLogicalImmediate(val: Val, regSize: 8 * sizeof(T)));
1353}
1354
1355void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
1356 const MCSubtargetInfo &STI,
1357 raw_ostream &O) {
1358 unsigned Val = MI->getOperand(i: OpNum).getImm();
1359 // LSL #0 should not be printed.
1360 if (AArch64_AM::getShiftType(Imm: Val) == AArch64_AM::LSL &&
1361 AArch64_AM::getShiftValue(Imm: Val) == 0)
1362 return;
1363 O << ", " << AArch64_AM::getShiftExtendName(ST: AArch64_AM::getShiftType(Imm: Val))
1364 << " ";
1365 markup(OS&: O, M: Markup::Immediate) << "#" << AArch64_AM::getShiftValue(Imm: Val);
1366}
1367
1368void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
1369 const MCSubtargetInfo &STI,
1370 raw_ostream &O) {
1371 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1372 printShifter(MI, OpNum: OpNum + 1, STI, O);
1373}
1374
1375void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
1376 const MCSubtargetInfo &STI,
1377 raw_ostream &O) {
1378 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1379 printArithExtend(MI, OpNum: OpNum + 1, STI, O);
1380}
1381
void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  // Print the ", <extend> #<amount>" suffix of an extended-register
  // arithmetic operand; a zero amount is left off.
  unsigned Val = MI->getOperand(i: OpNum).getImm();
  AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getArithExtendType(Imm: Val);
  unsigned ShiftVal = AArch64_AM::getArithShiftValue(Imm: Val);

  // If the destination or first source register operand is [W]SP, print
  // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
  // all.
  if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
    MCRegister Dest = MI->getOperand(i: 0).getReg();
    MCRegister Src1 = MI->getOperand(i: 1).getReg();
    if ( ((Dest == AArch64::SP || Src1 == AArch64::SP) &&
          ExtType == AArch64_AM::UXTX) ||
        ((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
         ExtType == AArch64_AM::UXTW) ) {
      if (ShiftVal != 0) {
        O << ", lsl ";
        markup(OS&: O, M: Markup::Immediate) << "#" << ShiftVal;
      }
      return;
    }
  }
  // General case: named extend, with the amount only when non-zero.
  O << ", " << AArch64_AM::getShiftExtendName(ST: ExtType);
  if (ShiftVal != 0) {
    O << " ";
    markup(OS&: O, M: Markup::Immediate) << "#" << ShiftVal;
  }
}
1412
1413void AArch64InstPrinter::printMemExtendImpl(bool SignExtend, bool DoShift,
1414 unsigned Width, char SrcRegKind,
1415 raw_ostream &O) {
1416 // sxtw, sxtx, uxtw or lsl (== uxtx)
1417 bool IsLSL = !SignExtend && SrcRegKind == 'x';
1418 if (IsLSL)
1419 O << "lsl";
1420 else
1421 O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;
1422
1423 if (DoShift || IsLSL) {
1424 O << " ";
1425 markup(OS&: O, M: Markup::Immediate) << "#" << Log2_32(Value: Width / 8);
1426 }
1427}
1428
1429void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
1430 raw_ostream &O, char SrcRegKind,
1431 unsigned Width) {
1432 bool SignExtend = MI->getOperand(i: OpNum).getImm();
1433 bool DoShift = MI->getOperand(i: OpNum + 1).getImm();
1434 printMemExtendImpl(SignExtend, DoShift, Width, SrcRegKind, O);
1435}
1436
1437template <bool SignExtend, int ExtWidth, char SrcRegKind, char Suffix>
1438void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
1439 unsigned OpNum,
1440 const MCSubtargetInfo &STI,
1441 raw_ostream &O) {
1442 printOperand(MI, OpNo: OpNum, STI, O);
1443 if (Suffix == 's' || Suffix == 'd')
1444 O << '.' << Suffix;
1445 else
1446 assert(Suffix == 0 && "Unsupported suffix size");
1447
1448 bool DoShift = ExtWidth != 8;
1449 if (SignExtend || DoShift || SrcRegKind == 'w') {
1450 O << ", ";
1451 printMemExtendImpl(SignExtend, DoShift, Width: ExtWidth, SrcRegKind, O);
1452 }
1453}
1454
1455template <int EltSize>
1456void AArch64InstPrinter::printPredicateAsCounter(const MCInst *MI,
1457 unsigned OpNum,
1458 const MCSubtargetInfo &STI,
1459 raw_ostream &O) {
1460 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
1461 if (Reg < AArch64::PN0 || Reg > AArch64::PN15)
1462 llvm_unreachable("Unsupported predicate-as-counter register");
1463 O << "pn" << Reg - AArch64::PN0;
1464
1465 switch (EltSize) {
1466 case 0:
1467 break;
1468 case 8:
1469 O << ".b";
1470 break;
1471 case 16:
1472 O << ".h";
1473 break;
1474 case 32:
1475 O << ".s";
1476 break;
1477 case 64:
1478 O << ".d";
1479 break;
1480 default:
1481 llvm_unreachable("Unsupported element size");
1482 }
1483}
1484
1485void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
1486 const MCSubtargetInfo &STI,
1487 raw_ostream &O) {
1488 AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(i: OpNum).getImm();
1489 O << AArch64CC::getCondCodeName(Code: CC);
1490}
1491
1492void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
1493 const MCSubtargetInfo &STI,
1494 raw_ostream &O) {
1495 AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(i: OpNum).getImm();
1496 O << AArch64CC::getCondCodeName(Code: AArch64CC::getInvertedCondCode(Code: CC));
1497}
1498
1499void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
1500 const MCSubtargetInfo &STI,
1501 raw_ostream &O) {
1502 O << '[';
1503 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1504 O << ']';
1505}
1506
1507template <int Scale>
1508void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
1509 const MCSubtargetInfo &STI,
1510 raw_ostream &O) {
1511 markup(OS&: O, M: Markup::Immediate)
1512 << '#' << formatImm(Value: Scale * MI->getOperand(i: OpNum).getImm());
1513}
1514
1515template <int Scale, int Offset>
1516void AArch64InstPrinter::printImmRangeScale(const MCInst *MI, unsigned OpNum,
1517 const MCSubtargetInfo &STI,
1518 raw_ostream &O) {
1519 unsigned FirstImm = Scale * MI->getOperand(i: OpNum).getImm();
1520 O << formatImm(Value: FirstImm);
1521 O << ":" << formatImm(Value: FirstImm + Offset);
1522}
1523
1524void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
1525 unsigned Scale, raw_ostream &O) {
1526 const MCOperand MO = MI->getOperand(i: OpNum);
1527 if (MO.isImm()) {
1528 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: MO.getImm() * Scale);
1529 } else {
1530 assert(MO.isExpr() && "Unexpected operand type!");
1531 MAI.printExpr(O, *MO.getExpr());
1532 }
1533}
1534
1535void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
1536 unsigned Scale, raw_ostream &O) {
1537 const MCOperand MO1 = MI->getOperand(i: OpNum + 1);
1538 O << '[';
1539 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1540 if (MO1.isImm()) {
1541 O << ", ";
1542 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: MO1.getImm() * Scale);
1543 } else {
1544 assert(MO1.isExpr() && "Unexpected operand type!");
1545 O << ", ";
1546 MAI.printExpr(O, *MO1.getExpr());
1547 }
1548 O << ']';
1549}
1550
1551void AArch64InstPrinter::printRPRFMOperand(const MCInst *MI, unsigned OpNum,
1552 const MCSubtargetInfo &STI,
1553 raw_ostream &O) {
1554 unsigned prfop = MI->getOperand(i: OpNum).getImm();
1555 if (auto PRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: prfop)) {
1556 O << PRFM->Name;
1557 return;
1558 }
1559
1560 O << '#' << formatImm(Value: prfop);
1561}
1562
1563template <bool IsSVEPrefetch>
1564void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
1565 const MCSubtargetInfo &STI,
1566 raw_ostream &O) {
1567 unsigned prfop = MI->getOperand(i: OpNum).getImm();
1568 if (IsSVEPrefetch) {
1569 if (auto PRFM = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: prfop)) {
1570 O << PRFM->Name;
1571 return;
1572 }
1573 } else {
1574 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(Encoding: prfop);
1575 if (PRFM && PRFM->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
1576 O << PRFM->Name;
1577 return;
1578 }
1579 }
1580
1581 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: prfop);
1582}
1583
1584void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
1585 const MCSubtargetInfo &STI,
1586 raw_ostream &O) {
1587 unsigned psbhintop = MI->getOperand(i: OpNum).getImm();
1588 auto PSB = AArch64PSBHint::lookupPSBByEncoding(Encoding: psbhintop);
1589 if (PSB)
1590 O << PSB->Name;
1591 else
1592 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: psbhintop);
1593}
1594
1595void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
1596 const MCSubtargetInfo &STI,
1597 raw_ostream &O) {
1598 unsigned btihintop = MI->getOperand(i: OpNum).getImm() ^ 32;
1599 auto BTI = AArch64BTIHint::lookupBTIByEncoding(Encoding: btihintop);
1600 if (BTI)
1601 O << BTI->Name;
1602 else
1603 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: btihintop);
1604}
1605
1606void AArch64InstPrinter::printCMHPriorityHintOp(const MCInst *MI,
1607 unsigned OpNum,
1608 const MCSubtargetInfo &STI,
1609 raw_ostream &O) {
1610 unsigned priorityhint_op = MI->getOperand(i: OpNum).getImm();
1611 auto PHint =
1612 AArch64CMHPriorityHint::lookupCMHPriorityHintByEncoding(Encoding: priorityhint_op);
1613 if (PHint)
1614 O << PHint->Name;
1615 else
1616 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: priorityhint_op);
1617}
1618
1619void AArch64InstPrinter::printTIndexHintOp(const MCInst *MI, unsigned OpNum,
1620 const MCSubtargetInfo &STI,
1621 raw_ostream &O) {
1622 unsigned tindexhintop = MI->getOperand(i: OpNum).getImm();
1623 auto TIndex = AArch64TIndexHint::lookupTIndexByEncoding(Encoding: tindexhintop);
1624 if (TIndex)
1625 O << TIndex->Name;
1626 else
1627 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: tindexhintop);
1628}
1629
1630void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
1631 const MCSubtargetInfo &STI,
1632 raw_ostream &O) {
1633 const MCOperand &MO = MI->getOperand(i: OpNum);
1634 float FPImm = MO.isDFPImm() ? bit_cast<double>(from: MO.getDFPImm())
1635 : AArch64_AM::getFPImmFloat(Imm: MO.getImm());
1636
1637 // 8 decimal places are enough to perfectly represent permitted floats.
1638 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%.8f", Vals: FPImm);
1639}
1640
// Return the register Stride places after Reg within its own vector bank
// (Q0-Q31, Z0-Z31, or P0-P15), wrapping from the last register of the bank
// back to the first. Used when expanding vector register lists.
static MCRegister getNextVectorRegister(MCRegister Reg, unsigned Stride = 1) {
  while (Stride--) {
    switch (Reg.id()) {
    default:
      llvm_unreachable("Vector register expected!");
    // NEON Q registers.
    case AArch64::Q0: Reg = AArch64::Q1; break;
    case AArch64::Q1: Reg = AArch64::Q2; break;
    case AArch64::Q2: Reg = AArch64::Q3; break;
    case AArch64::Q3: Reg = AArch64::Q4; break;
    case AArch64::Q4: Reg = AArch64::Q5; break;
    case AArch64::Q5: Reg = AArch64::Q6; break;
    case AArch64::Q6: Reg = AArch64::Q7; break;
    case AArch64::Q7: Reg = AArch64::Q8; break;
    case AArch64::Q8: Reg = AArch64::Q9; break;
    case AArch64::Q9: Reg = AArch64::Q10; break;
    case AArch64::Q10: Reg = AArch64::Q11; break;
    case AArch64::Q11: Reg = AArch64::Q12; break;
    case AArch64::Q12: Reg = AArch64::Q13; break;
    case AArch64::Q13: Reg = AArch64::Q14; break;
    case AArch64::Q14: Reg = AArch64::Q15; break;
    case AArch64::Q15: Reg = AArch64::Q16; break;
    case AArch64::Q16: Reg = AArch64::Q17; break;
    case AArch64::Q17: Reg = AArch64::Q18; break;
    case AArch64::Q18: Reg = AArch64::Q19; break;
    case AArch64::Q19: Reg = AArch64::Q20; break;
    case AArch64::Q20: Reg = AArch64::Q21; break;
    case AArch64::Q21: Reg = AArch64::Q22; break;
    case AArch64::Q22: Reg = AArch64::Q23; break;
    case AArch64::Q23: Reg = AArch64::Q24; break;
    case AArch64::Q24: Reg = AArch64::Q25; break;
    case AArch64::Q25: Reg = AArch64::Q26; break;
    case AArch64::Q26: Reg = AArch64::Q27; break;
    case AArch64::Q27: Reg = AArch64::Q28; break;
    case AArch64::Q28: Reg = AArch64::Q29; break;
    case AArch64::Q29: Reg = AArch64::Q30; break;
    case AArch64::Q30: Reg = AArch64::Q31; break;
    // Vector lists can wrap around.
    case AArch64::Q31:
      Reg = AArch64::Q0;
      break;
    // SVE Z registers.
    case AArch64::Z0: Reg = AArch64::Z1; break;
    case AArch64::Z1: Reg = AArch64::Z2; break;
    case AArch64::Z2: Reg = AArch64::Z3; break;
    case AArch64::Z3: Reg = AArch64::Z4; break;
    case AArch64::Z4: Reg = AArch64::Z5; break;
    case AArch64::Z5: Reg = AArch64::Z6; break;
    case AArch64::Z6: Reg = AArch64::Z7; break;
    case AArch64::Z7: Reg = AArch64::Z8; break;
    case AArch64::Z8: Reg = AArch64::Z9; break;
    case AArch64::Z9: Reg = AArch64::Z10; break;
    case AArch64::Z10: Reg = AArch64::Z11; break;
    case AArch64::Z11: Reg = AArch64::Z12; break;
    case AArch64::Z12: Reg = AArch64::Z13; break;
    case AArch64::Z13: Reg = AArch64::Z14; break;
    case AArch64::Z14: Reg = AArch64::Z15; break;
    case AArch64::Z15: Reg = AArch64::Z16; break;
    case AArch64::Z16: Reg = AArch64::Z17; break;
    case AArch64::Z17: Reg = AArch64::Z18; break;
    case AArch64::Z18: Reg = AArch64::Z19; break;
    case AArch64::Z19: Reg = AArch64::Z20; break;
    case AArch64::Z20: Reg = AArch64::Z21; break;
    case AArch64::Z21: Reg = AArch64::Z22; break;
    case AArch64::Z22: Reg = AArch64::Z23; break;
    case AArch64::Z23: Reg = AArch64::Z24; break;
    case AArch64::Z24: Reg = AArch64::Z25; break;
    case AArch64::Z25: Reg = AArch64::Z26; break;
    case AArch64::Z26: Reg = AArch64::Z27; break;
    case AArch64::Z27: Reg = AArch64::Z28; break;
    case AArch64::Z28: Reg = AArch64::Z29; break;
    case AArch64::Z29: Reg = AArch64::Z30; break;
    case AArch64::Z30: Reg = AArch64::Z31; break;
    // Vector lists can wrap around.
    case AArch64::Z31:
      Reg = AArch64::Z0;
      break;
    // SVE predicate registers.
    case AArch64::P0: Reg = AArch64::P1; break;
    case AArch64::P1: Reg = AArch64::P2; break;
    case AArch64::P2: Reg = AArch64::P3; break;
    case AArch64::P3: Reg = AArch64::P4; break;
    case AArch64::P4: Reg = AArch64::P5; break;
    case AArch64::P5: Reg = AArch64::P6; break;
    case AArch64::P6: Reg = AArch64::P7; break;
    case AArch64::P7: Reg = AArch64::P8; break;
    case AArch64::P8: Reg = AArch64::P9; break;
    case AArch64::P9: Reg = AArch64::P10; break;
    case AArch64::P10: Reg = AArch64::P11; break;
    case AArch64::P11: Reg = AArch64::P12; break;
    case AArch64::P12: Reg = AArch64::P13; break;
    case AArch64::P13: Reg = AArch64::P14; break;
    case AArch64::P14: Reg = AArch64::P15; break;
    // Vector lists can wrap around.
    case AArch64::P15: Reg = AArch64::P0; break;
    }
  }
  return Reg;
}
1737
1738template<unsigned size>
1739void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
1740 unsigned OpNum,
1741 const MCSubtargetInfo &STI,
1742 raw_ostream &O) {
1743 static_assert(size == 64 || size == 32,
1744 "Template parameter must be either 32 or 64");
1745 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
1746
1747 unsigned Sube = (size == 32) ? AArch64::sube32 : AArch64::sube64;
1748 unsigned Subo = (size == 32) ? AArch64::subo32 : AArch64::subo64;
1749
1750 MCRegister Even = MRI.getSubReg(Reg, Idx: Sube);
1751 MCRegister Odd = MRI.getSubReg(Reg, Idx: Subo);
1752 printRegName(OS&: O, Reg: Even);
1753 O << ", ";
1754 printRegName(OS&: O, Reg: Odd);
1755}
1756
1757void AArch64InstPrinter::printMatrixTileList(const MCInst *MI, unsigned OpNum,
1758 const MCSubtargetInfo &STI,
1759 raw_ostream &O) {
1760 unsigned MaxRegs = 8;
1761 unsigned RegMask = MI->getOperand(i: OpNum).getImm();
1762
1763 unsigned NumRegs = 0;
1764 for (unsigned I = 0; I < MaxRegs; ++I)
1765 if ((RegMask & (1 << I)) != 0)
1766 ++NumRegs;
1767
1768 O << "{";
1769 unsigned Printed = 0;
1770 for (unsigned I = 0; I < MaxRegs; ++I) {
1771 unsigned Reg = RegMask & (1 << I);
1772 if (Reg == 0)
1773 continue;
1774 printRegName(OS&: O, Reg: AArch64::ZAD0 + I);
1775 if (Printed + 1 != NumRegs)
1776 O << ", ";
1777 ++Printed;
1778 }
1779 O << "}";
1780}
1781
// Print a register list "{ ... }", appending LayoutSuffix (possibly empty)
// to each printed register. The list length and stride are recovered from
// the operand's register class; contiguous SVE lists of two or more
// registers are printed as a range (e.g. "{ z0.d - z3.d }").
void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O,
                                         StringRef LayoutSuffix) {
  MCRegister Reg = MI->getOperand(i: OpNum).getReg();

  O << "{ ";

  // Work out how many registers there are in the list (if there is an actual
  // list).
  unsigned NumRegs = 1;
  if (MRI.getRegClass(i: AArch64::DDRegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::ZPR2RegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::QQRegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::PPR2RegClassID).contains(Reg) ||
      MRI.getRegClass(i: AArch64::ZPR2StridedRegClassID).contains(Reg))
    NumRegs = 2;
  else if (MRI.getRegClass(i: AArch64::DDDRegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::ZPR3RegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::QQQRegClassID).contains(Reg))
    NumRegs = 3;
  else if (MRI.getRegClass(i: AArch64::DDDDRegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::ZPR4RegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::QQQQRegClassID).contains(Reg) ||
           MRI.getRegClass(i: AArch64::ZPR4StridedRegClassID).contains(Reg))
    NumRegs = 4;

  // Strided multi-vector lists step by 8 (two-register) or 4 (four-register).
  unsigned Stride = 1;
  if (MRI.getRegClass(i: AArch64::ZPR2StridedRegClassID).contains(Reg))
    Stride = 8;
  else if (MRI.getRegClass(i: AArch64::ZPR4StridedRegClassID).contains(Reg))
    Stride = 4;

  // Now forget about the list and find out what the first register is.
  if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::dsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::qsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::zsub0))
    Reg = FirstReg;
  else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::psub0))
    Reg = FirstReg;

  // If it's a D-reg, we need to promote it to the equivalent Q-reg before
  // printing (otherwise getRegisterName fails).
  if (MRI.getRegClass(i: AArch64::FPR64RegClassID).contains(Reg)) {
    const MCRegisterClass &FPR128RC =
        MRI.getRegClass(i: AArch64::FPR128RegClassID);
    Reg = MRI.getMatchingSuperReg(Reg, SubIdx: AArch64::dsub, RC: &FPR128RC);
  }

  if ((MRI.getRegClass(i: AArch64::ZPRRegClassID).contains(Reg) ||
       MRI.getRegClass(i: AArch64::PPRRegClassID).contains(Reg)) &&
      NumRegs > 1 && Stride == 1 &&
      // Do not print the range when the last register is lower than the first.
      // Because it is a wrap-around register.
      Reg < getNextVectorRegister(Reg, Stride: NumRegs - 1)) {
    printRegName(OS&: O, Reg);
    O << LayoutSuffix;
    if (NumRegs > 1) {
      // Set of two sve registers should be separated by ','
      StringRef split_char = NumRegs == 2 ? ", " : " - ";
      O << split_char;
      printRegName(OS&: O, Reg: (getNextVectorRegister(Reg, Stride: NumRegs - 1)));
      O << LayoutSuffix;
    }
  } else {
    // General case: enumerate every register in the list explicitly.
    for (unsigned i = 0; i < NumRegs;
         ++i, Reg = getNextVectorRegister(Reg, Stride)) {
      // wrap-around sve register
      if (MRI.getRegClass(i: AArch64::ZPRRegClassID).contains(Reg) ||
          MRI.getRegClass(i: AArch64::PPRRegClassID).contains(Reg))
        printRegName(OS&: O, Reg);
      else
        printRegName(OS&: O, Reg, AltIdx: AArch64::vreg);
      O << LayoutSuffix;
      if (i + 1 != NumRegs)
        O << ", ";
    }
  }
  O << " }";
}
1864
1865void
1866AArch64InstPrinter::printImplicitlyTypedVectorList(const MCInst *MI,
1867 unsigned OpNum,
1868 const MCSubtargetInfo &STI,
1869 raw_ostream &O) {
1870 printVectorList(MI, OpNum, STI, O, LayoutSuffix: "");
1871}
1872
1873template <unsigned NumLanes, char LaneKind>
1874void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
1875 const MCSubtargetInfo &STI,
1876 raw_ostream &O) {
1877 if (LaneKind == 0) {
1878 printVectorList(MI, OpNum, STI, O, LayoutSuffix: "");
1879 return;
1880 }
1881 std::string Suffix(".");
1882 if (NumLanes)
1883 Suffix += itostr(X: NumLanes) + LaneKind;
1884 else
1885 Suffix += LaneKind;
1886
1887 printVectorList(MI, OpNum, STI, O, LayoutSuffix: Suffix);
1888}
1889
1890template <unsigned Scale>
1891void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
1892 const MCSubtargetInfo &STI,
1893 raw_ostream &O) {
1894 O << "[" << Scale * MI->getOperand(i: OpNum).getImm() << "]";
1895}
1896
1897template <unsigned Scale>
1898void AArch64InstPrinter::printMatrixIndex(const MCInst *MI, unsigned OpNum,
1899 const MCSubtargetInfo &STI,
1900 raw_ostream &O) {
1901 O << Scale * MI->getOperand(i: OpNum).getImm();
1902}
1903
// Print a word-aligned, PC-relative branch/label target. Resolved immediate
// offsets are scaled by 4 (instruction words) and printed either as an
// absolute address or as "#offset"; unresolved targets print their
// expression.
void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, uint64_t Address,
                                           unsigned OpNum,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  // Do not print the numeric target address when symbolizing.
  if (SymbolizeOperands)
    return;

  const MCOperand &Op = MI->getOperand(i: OpNum);

  // If the label has already been resolved to an immediate offset (say, when
  // we're running the disassembler), just print the immediate.
  if (Op.isImm()) {
    // The encoded offset counts instruction words; scale to bytes.
    int64_t Offset = Op.getImm() * 4;
    if (PrintBranchImmAsAddress)
      markup(OS&: O, M: Markup::Target) << formatHex(Value: Address + Offset);
    else
      markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Offset);
    return;
  }

  // If the branch target is simply an address then print it in hex.
  const MCConstantExpr *BranchTarget =
      dyn_cast<MCConstantExpr>(Val: MI->getOperand(i: OpNum).getExpr());
  int64_t TargetAddress;
  if (BranchTarget && BranchTarget->evaluateAsAbsolute(Res&: TargetAddress)) {
    markup(OS&: O, M: Markup::Target) << formatHex(Value: (uint64_t)TargetAddress);
  } else {
    // Otherwise, just print the expression.
    MAI.printExpr(O, *MI->getOperand(i: OpNum).getExpr());
  }
}
1936
// Print the label operand of an ADR or ADRP instruction.
void AArch64InstPrinter::printAdrAdrpLabel(const MCInst *MI, uint64_t Address,
                                           unsigned OpNum,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  // Do not print the numeric target address when symbolizing.
  // However, do print for ADRP, as this is typically used together with an ADD
  // or an immediate-offset ldr/str and the label is likely at the wrong point.
  if (SymbolizeOperands && MI->getOpcode() != AArch64::ADRP)
    return;

  const MCOperand &Op = MI->getOperand(i: OpNum);

  // If the label has already been resolved to an immediate offset (say, when
  // we're running the disassembler), just print the immediate.
  if (Op.isImm()) {
    int64_t Offset = Op.getImm();
    if (MI->getOpcode() == AArch64::ADRP) {
      // ADRP's immediate counts 4 KiB pages, and the result is relative to
      // the 4 KiB-aligned PC.
      Offset = Offset * 4096;
      Address = Address & -4096;
    }
    // NOTE(review): this Immediate markup scope stays open around the nested
    // Target/Immediate markup emitted below — confirm the double markup in
    // the else-branch is intentional.
    WithMarkup M = markup(OS&: O, M: Markup::Immediate);
    if (PrintBranchImmAsAddress)
      markup(OS&: O, M: Markup::Target) << formatHex(Value: Address + Offset);
    else
      markup(OS&: O, M: Markup::Immediate) << "#" << Offset;
    return;
  }

  // Otherwise, just print the expression.
  MAI.printExpr(O, *MI->getOperand(i: OpNum).getExpr());
}
1968
1969void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
1970 const MCSubtargetInfo &STI,
1971 raw_ostream &O) {
1972 unsigned Val = MI->getOperand(i: OpNo).getImm();
1973 unsigned Opcode = MI->getOpcode();
1974
1975 StringRef Name;
1976 if (Opcode == AArch64::ISB) {
1977 auto ISB = AArch64ISB::lookupISBByEncoding(Encoding: Val);
1978 Name = ISB ? ISB->Name : "";
1979 } else if (Opcode == AArch64::TSB) {
1980 auto TSB = AArch64TSB::lookupTSBByEncoding(Encoding: Val);
1981 Name = TSB ? TSB->Name : "";
1982 } else {
1983 auto DB = AArch64DB::lookupDBByEncoding(Encoding: Val);
1984 Name = DB ? DB->Name : "";
1985 }
1986 if (!Name.empty())
1987 O << Name;
1988 else
1989 markup(OS&: O, M: Markup::Immediate) << "#" << Val;
1990}
1991
1992void AArch64InstPrinter::printBarriernXSOption(const MCInst *MI, unsigned OpNo,
1993 const MCSubtargetInfo &STI,
1994 raw_ostream &O) {
1995 unsigned Val = MI->getOperand(i: OpNo).getImm();
1996 assert(MI->getOpcode() == AArch64::DSBnXS);
1997
1998 StringRef Name;
1999 auto DB = AArch64DBnXS::lookupDBnXSByEncoding(Encoding: Val);
2000 Name = DB ? DB->Name : "";
2001
2002 if (!Name.empty())
2003 O << Name;
2004 else
2005 markup(OS&: O, M: Markup::Immediate) << "#" << Val;
2006}
2007
2008static bool isValidSysReg(const AArch64SysReg::SysReg &Reg, bool Read,
2009 const MCSubtargetInfo &STI) {
2010 return (Read ? Reg.Readable : Reg.Writeable) &&
2011 Reg.haveFeatures(ActiveFeatures: STI.getFeatureBits());
2012}
2013
// Looks up a system register by encoding. Some system registers share the
// same encoding between different architectures; to work around this,
// tablegen returns a range of registers with the same encoding. We need to
// check each register in the range to see if it is valid.
2018static const AArch64SysReg::SysReg *lookupSysReg(unsigned Val, bool Read,
2019 const MCSubtargetInfo &STI) {
2020 auto Range = AArch64SysReg::lookupSysRegByEncoding(Encoding: Val);
2021 for (auto &Reg : Range) {
2022 if (isValidSysReg(Reg, Read, STI))
2023 return &Reg;
2024 }
2025
2026 return nullptr;
2027}
2028
2029void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
2030 const MCSubtargetInfo &STI,
2031 raw_ostream &O) {
2032 unsigned Val = MI->getOperand(i: OpNo).getImm();
2033
2034 // Horrible hack for the one register that has identical encodings but
2035 // different names in MSR and MRS. Because of this, one of MRS and MSR is
2036 // going to get the wrong entry
2037 if (Val == AArch64SysReg::DBGDTRRX_EL0) {
2038 O << "DBGDTRRX_EL0";
2039 return;
2040 }
2041
2042 // Horrible hack for two different registers having the same encoding.
2043 if (Val == AArch64SysReg::TRCEXTINSELR) {
2044 O << "TRCEXTINSELR";
2045 return;
2046 }
2047
2048 const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, Read: true /*Read*/, STI);
2049
2050 if (Reg)
2051 O << Reg->Name;
2052 else
2053 O << AArch64SysReg::genericRegisterString(Bits: Val);
2054}
2055
2056void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
2057 const MCSubtargetInfo &STI,
2058 raw_ostream &O) {
2059 unsigned Val = MI->getOperand(i: OpNo).getImm();
2060
2061 // Horrible hack for the one register that has identical encodings but
2062 // different names in MSR and MRS. Because of this, one of MRS and MSR is
2063 // going to get the wrong entry
2064 if (Val == AArch64SysReg::DBGDTRTX_EL0) {
2065 O << "DBGDTRTX_EL0";
2066 return;
2067 }
2068
2069 // Horrible hack for two different registers having the same encoding.
2070 if (Val == AArch64SysReg::TRCEXTINSELR) {
2071 O << "TRCEXTINSELR";
2072 return;
2073 }
2074
2075 const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, Read: false /*Read*/, STI);
2076
2077 if (Reg)
2078 O << Reg->Name;
2079 else
2080 O << AArch64SysReg::genericRegisterString(Bits: Val);
2081}
2082
2083void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
2084 const MCSubtargetInfo &STI,
2085 raw_ostream &O) {
2086 unsigned Val = MI->getOperand(i: OpNo).getImm();
2087
2088 auto PStateImm15 = AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: Val);
2089 auto PStateImm1 = AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: Val);
2090 if (PStateImm15 && PStateImm15->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
2091 O << PStateImm15->Name;
2092 else if (PStateImm1 && PStateImm1->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
2093 O << PStateImm1->Name;
2094 else
2095 O << "#" << formatImm(Value: Val);
2096}
2097
2098void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
2099 const MCSubtargetInfo &STI,
2100 raw_ostream &O) {
2101 unsigned RawVal = MI->getOperand(i: OpNo).getImm();
2102 uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(Imm: RawVal);
2103 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%#016llx", Vals: Val);
2104}
2105
2106template<int64_t Angle, int64_t Remainder>
2107void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
2108 const MCSubtargetInfo &STI,
2109 raw_ostream &O) {
2110 unsigned Val = MI->getOperand(i: OpNo).getImm();
2111 markup(OS&: O, M: Markup::Immediate) << "#" << (Val * Angle) + Remainder;
2112}
2113
2114void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
2115 const MCSubtargetInfo &STI,
2116 raw_ostream &O) {
2117 unsigned Val = MI->getOperand(i: OpNum).getImm();
2118 if (auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByEncoding(Encoding: Val))
2119 O << Pat->Name;
2120 else
2121 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Val);
2122}
2123
2124void AArch64InstPrinter::printSVEVecLenSpecifier(const MCInst *MI,
2125 unsigned OpNum,
2126 const MCSubtargetInfo &STI,
2127 raw_ostream &O) {
2128 unsigned Val = MI->getOperand(i: OpNum).getImm();
2129 // Pattern has only 1 bit
2130 if (Val > 1)
2131 llvm_unreachable("Invalid vector length specifier");
2132 if (auto Pat =
2133 AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByEncoding(Encoding: Val))
2134 O << Pat->Name;
2135 else
2136 llvm_unreachable("Invalid vector length specifier");
2137}
2138
2139template <char suffix>
2140void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
2141 const MCSubtargetInfo &STI,
2142 raw_ostream &O) {
2143 switch (suffix) {
2144 case 0:
2145 case 'b':
2146 case 'h':
2147 case 's':
2148 case 'd':
2149 case 'q':
2150 break;
2151 default: llvm_unreachable("Invalid kind specifier.");
2152 }
2153
2154 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2155 printRegName(OS&: O, Reg);
2156 if (suffix != 0)
2157 O << '.' << suffix;
2158}
2159
2160template <typename T>
2161void AArch64InstPrinter::printImmSVE(T Value, raw_ostream &O) {
2162 std::make_unsigned_t<T> HexValue = Value;
2163
2164 if (getPrintImmHex())
2165 markup(OS&: O, M: Markup::Immediate) << '#' << formatHex(Value: (uint64_t)HexValue);
2166 else
2167 markup(OS&: O, M: Markup::Immediate) << '#' << formatDec(Value);
2168
2169 if (CommentStream) {
2170 // Do the opposite to that used for instruction operands.
2171 if (getPrintImmHex())
2172 *CommentStream << '=' << formatDec(Value: HexValue) << '\n';
2173 else
2174 *CommentStream << '=' << formatHex(Value: (uint64_t)Value) << '\n';
2175 }
2176}
2177
// Print an 8-bit immediate with an optional LSL shifter, either as the
// explicit "#imm, lsl #n" form or folded into a single scaled value.
template <typename T>
void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  unsigned UnscaledVal = MI->getOperand(i: OpNum).getImm();
  // The shifter lives in the next operand.
  unsigned Shift = MI->getOperand(i: OpNum + 1).getImm();
  assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&
         "Unexpected shift type!");

  // #0 lsl #8 is never pretty printed
  // (a zero value with a non-zero shift keeps its explicit shifter).
  if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Imm: Shift) != 0)) {
    markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: UnscaledVal);
    printShifter(MI, OpNum: OpNum + 1, STI, O);
    return;
  }

  // Otherwise fold the shift into the printed value, first narrowing the raw
  // operand to 8 bits with the signedness implied by T.
  T Val;
  if (std::is_signed<T>())
    Val = (int8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Imm: Shift));
  else
    Val = (uint8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Imm: Shift));

  printImmSVE(Val, O);
}
2202
// Print an SVE logical (bitmask) immediate, decoding the raw encoding into
// its 64-bit value and choosing a readable radix for it.
template <typename T>
void AArch64InstPrinter::printSVELogicalImm(const MCInst *MI, unsigned OpNum,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  typedef std::make_signed_t<T> SignedT;
  typedef std::make_unsigned_t<T> UnsignedT;

  uint64_t Val = MI->getOperand(i: OpNum).getImm();
  // Decode the encoded bitmask into its 64-bit value, truncated to the
  // element type's width.
  UnsignedT PrintVal = AArch64_AM::decodeLogicalImmediate(val: Val, regSize: 64);

  // Prefer the default format for 16bit values, hex otherwise.
  // First case: value is representable as a signed 16-bit number; second:
  // representable as an unsigned 16-bit number.
  if ((int16_t)PrintVal == (SignedT)PrintVal)
    printImmSVE((T)PrintVal, O);
  else if ((uint16_t)PrintVal == PrintVal)
    printImmSVE(PrintVal, O);
  else
    markup(OS&: O, M: Markup::Immediate) << '#' << formatHex(Value: (uint64_t)PrintVal);
}
2221
2222template <int Width>
2223void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
2224 const MCSubtargetInfo &STI,
2225 raw_ostream &O) {
2226 unsigned Base;
2227 switch (Width) {
2228 case 8: Base = AArch64::B0; break;
2229 case 16: Base = AArch64::H0; break;
2230 case 32: Base = AArch64::S0; break;
2231 case 64: Base = AArch64::D0; break;
2232 case 128: Base = AArch64::Q0; break;
2233 default:
2234 llvm_unreachable("Unsupported width");
2235 }
2236 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2237 printRegName(OS&: O, Reg: Reg - AArch64::Z0 + Base);
2238}
2239
2240template <unsigned ImmIs0, unsigned ImmIs1>
2241void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
2242 const MCSubtargetInfo &STI,
2243 raw_ostream &O) {
2244 auto *Imm0Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmIs0);
2245 auto *Imm1Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmIs1);
2246 unsigned Val = MI->getOperand(i: OpNum).getImm();
2247 markup(OS&: O, M: Markup::Immediate)
2248 << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
2249}
2250
2251void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
2252 const MCSubtargetInfo &STI,
2253 raw_ostream &O) {
2254 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2255 printRegName(OS&: O, Reg: getWRegFromXReg(Reg));
2256}
2257
2258void AArch64InstPrinter::printGPR64x8(const MCInst *MI, unsigned OpNum,
2259 const MCSubtargetInfo &STI,
2260 raw_ostream &O) {
2261 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2262 printRegName(OS&: O, Reg: MRI.getSubReg(Reg, Idx: AArch64::x8sub_0));
2263}
2264
2265void AArch64InstPrinter::printSyspXzrPair(const MCInst *MI, unsigned OpNum,
2266 const MCSubtargetInfo &STI,
2267 raw_ostream &O) {
2268 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2269 assert(Reg == AArch64::XZR &&
2270 "MC representation of SyspXzrPair should be XZR");
2271 O << getRegisterName(Reg) << ", " << getRegisterName(Reg);
2272}
2273
2274void AArch64InstPrinter::printPHintOp(const MCInst *MI, unsigned OpNum,
2275 const MCSubtargetInfo &STI,
2276 raw_ostream &O) {
2277 unsigned Op = MI->getOperand(i: OpNum).getImm();
2278 auto PH = AArch64PHint::lookupPHintByEncoding(Op);
2279 if (PH)
2280 O << PH->Name;
2281 else
2282 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Op);
2283}
2284