//==-- AArch64InstPrinter.cpp - Convert AArch64 MCInst to assembly syntax --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class prints an AArch64 MCInst to a .s file.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "asm-printer"

#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "AArch64GenAsmWriter.inc"
#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "AArch64GenAsmWriter1.inc"

AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
                                       const MCInstrInfo &MII,
                                       const MCRegisterInfo &MRI)
    : MCInstPrinter(MAI, MII, MRI) {}

AArch64AppleInstPrinter::AArch64AppleInstPrinter(const MCAsmInfo &MAI,
                                                 const MCInstrInfo &MII,
                                                 const MCRegisterInfo &MRI)
    : AArch64InstPrinter(MAI, MII, MRI) {}

bool AArch64InstPrinter::applyTargetSpecificCLOption(StringRef Opt) {
  if (Opt == "no-aliases") {
    PrintAliases = false;
    return true;
  }
  return false;
}

void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) {
  markup(OS, Markup::Register) << getRegisterName(Reg);
}

void AArch64InstPrinter::printRegName(raw_ostream &OS, MCRegister Reg,
                                      unsigned AltIdx) {
  markup(OS, Markup::Register) << getRegisterName(Reg, AltIdx);
}

StringRef AArch64InstPrinter::getRegName(MCRegister Reg) const {
  return getRegisterName(Reg);
}

void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
                                   StringRef Annot, const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  // Check for special encodings and print the canonical alias instead.

  unsigned Opcode = MI->getOpcode();

  if (Opcode == AArch64::SYSxt)
    if (printSysAlias(MI, STI, O)) {
      printAnnotation(O, Annot);
      return;
    }

  if (Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR)
    if (printSyspAlias(MI, STI, O)) {
      printAnnotation(O, Annot);
      return;
    }

  // RPRFM overlaps PRFM (reg), so try to print it as RPRFM here.
  if ((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) {
    if (printRangePrefetchAlias(MI, STI, O, Annot))
      return;
  }

  // SBFM/UBFM should print to a nicer aliased form if possible.
  if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
      Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
    const MCOperand &Op0 = MI->getOperand(0);
    const MCOperand &Op1 = MI->getOperand(1);
    const MCOperand &Op2 = MI->getOperand(2);
    const MCOperand &Op3 = MI->getOperand(3);

    bool IsSigned = (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri);
    bool Is64Bit = (Opcode == AArch64::SBFMXri || Opcode == AArch64::UBFMXri);
    if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
      const char *AsmMnemonic = nullptr;

      switch (Op3.getImm()) {
      default:
        break;
      case 7:
        if (IsSigned)
          AsmMnemonic = "sxtb";
        else if (!Is64Bit)
          AsmMnemonic = "uxtb";
        break;
      case 15:
        if (IsSigned)
          AsmMnemonic = "sxth";
        else if (!Is64Bit)
          AsmMnemonic = "uxth";
        break;
      case 31:
        // *xtw is only valid for signed 64-bit operations.
        if (Is64Bit && IsSigned)
          AsmMnemonic = "sxtw";
        break;
      }

      if (AsmMnemonic) {
        O << '\t' << AsmMnemonic << '\t';
        printRegName(O, Op0.getReg());
        O << ", ";
        printRegName(O, getWRegFromXReg(Op1.getReg()));
        printAnnotation(O, Annot);
        return;
      }
    }

    // All immediate shifts are aliases, implemented using the Bitfield
    // instructions. In all cases the immediate shift amount must be in the
    // range 0 to (reg.size - 1).
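    // For example, "ubfm w0, w1, #29, #28" (immr == imms + 1) is printed
    // below as the alias "lsl w0, w1, #3", with shift == 31 - imms.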
    if (Op2.isImm() && Op3.isImm()) {
      const char *AsmMnemonic = nullptr;
      int shift = 0;
      int64_t immr = Op2.getImm();
      int64_t imms = Op3.getImm();
      if (Opcode == AArch64::UBFMWri && imms != 0x1F && ((imms + 1) == immr)) {
        AsmMnemonic = "lsl";
        shift = 31 - imms;
      } else if (Opcode == AArch64::UBFMXri && imms != 0x3f &&
                 ((imms + 1 == immr))) {
        AsmMnemonic = "lsl";
        shift = 63 - imms;
      } else if (Opcode == AArch64::UBFMWri && imms == 0x1f) {
        AsmMnemonic = "lsr";
        shift = immr;
      } else if (Opcode == AArch64::UBFMXri && imms == 0x3f) {
        AsmMnemonic = "lsr";
        shift = immr;
      } else if (Opcode == AArch64::SBFMWri && imms == 0x1f) {
        AsmMnemonic = "asr";
        shift = immr;
      } else if (Opcode == AArch64::SBFMXri && imms == 0x3f) {
        AsmMnemonic = "asr";
        shift = immr;
      }
      if (AsmMnemonic) {
        O << '\t' << AsmMnemonic << '\t';
        printRegName(O, Op0.getReg());
        O << ", ";
        printRegName(O, Op1.getReg());
        O << ", ";
        markup(O, Markup::Immediate) << "#" << shift;
        printAnnotation(O, Annot);
        return;
      }
    }

    // SBFIZ/UBFIZ aliases.
    if (Op2.getImm() > Op3.getImm()) {
      O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t';
      printRegName(O, Op0.getReg());
      O << ", ";
      printRegName(O, Op1.getReg());
      O << ", ";
      markup(O, Markup::Immediate)
          << "#" << (Is64Bit ? 64 : 32) - Op2.getImm();
      O << ", ";
      markup(O, Markup::Immediate) << "#" << Op3.getImm() + 1;
      printAnnotation(O, Annot);
      return;
    }

    // Otherwise SBFX/UBFX is the preferred form.
    O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t';
    printRegName(O, Op0.getReg());
    O << ", ";
    printRegName(O, Op1.getReg());
    O << ", ";
    markup(O, Markup::Immediate) << "#" << Op2.getImm();
    O << ", ";
    markup(O, Markup::Immediate) << "#" << Op3.getImm() - Op2.getImm() + 1;
    printAnnotation(O, Annot);
    return;
  }

  if (Opcode == AArch64::BFMXri || Opcode == AArch64::BFMWri) {
    const MCOperand &Op0 = MI->getOperand(0); // Op1 == Op0
    const MCOperand &Op2 = MI->getOperand(2);
    int ImmR = MI->getOperand(3).getImm();
    int ImmS = MI->getOperand(4).getImm();

    if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
        (ImmR == 0 || ImmS < ImmR) && STI.hasFeature(AArch64::HasV8_2aOps)) {
      // BFC takes precedence over its entire range, slightly differently to
      // BFI.
      int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
      int LSB = (BitWidth - ImmR) % BitWidth;
      int Width = ImmS + 1;

      O << "\tbfc\t";
      printRegName(O, Op0.getReg());
      O << ", ";
      markup(O, Markup::Immediate) << "#" << LSB;
      O << ", ";
      markup(O, Markup::Immediate) << "#" << Width;
      printAnnotation(O, Annot);
      return;
    } else if (ImmS < ImmR) {
      // BFI alias.
      int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
      int LSB = (BitWidth - ImmR) % BitWidth;
      int Width = ImmS + 1;

      O << "\tbfi\t";
      printRegName(O, Op0.getReg());
      O << ", ";
      printRegName(O, Op2.getReg());
      O << ", ";
      markup(O, Markup::Immediate) << "#" << LSB;
      O << ", ";
      markup(O, Markup::Immediate) << "#" << Width;
      printAnnotation(O, Annot);
      return;
    }

    int LSB = ImmR;
    int Width = ImmS - ImmR + 1;
    // Otherwise BFXIL is the preferred form.
    O << "\tbfxil\t";
    printRegName(O, Op0.getReg());
    O << ", ";
    printRegName(O, Op2.getReg());
    O << ", ";
    markup(O, Markup::Immediate) << "#" << LSB;
    O << ", ";
    markup(O, Markup::Immediate) << "#" << Width;
    printAnnotation(O, Annot);
    return;
  }

  // Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
  // (e.g. :gottprel_g1: is always going to be "lsl #16") so it should not be
  // printed.
  if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi ||
       Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
      MI->getOperand(1).isExpr()) {
    if (Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi)
      O << "\tmovz\t";
    else
      O << "\tmovn\t";

    printRegName(O, MI->getOperand(0).getReg());
    O << ", ";
    {
      WithMarkup M = markup(O, Markup::Immediate);
      O << "#";
      MAI.printExpr(O, *MI->getOperand(1).getExpr());
    }
    return;
  }

  if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
      MI->getOperand(2).isExpr()) {
    O << "\tmovk\t";
    printRegName(O, MI->getOperand(0).getReg());
    O << ", ";
    {
      WithMarkup M = markup(O, Markup::Immediate);
      O << "#";
      MAI.printExpr(O, *MI->getOperand(2).getExpr());
    }
    return;
  }

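  // Prints the MOV immediate alias; when a comment stream is attached, the
  // value is also emitted as a comment in the opposite radix to the one used
  // for the operand itself.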
  auto PrintMovImm = [&](uint64_t Value, int RegWidth) {
    int64_t SExtVal = SignExtend64(Value, RegWidth);
    O << "\tmov\t";
    printRegName(O, MI->getOperand(0).getReg());
    O << ", ";
    markup(O, Markup::Immediate) << "#" << formatImm(SExtVal);
    if (CommentStream) {
      // Do the opposite to that used for instruction operands.
      if (getPrintImmHex())
        *CommentStream << '=' << formatDec(SExtVal) << '\n';
      else {
        uint64_t Mask = maskTrailingOnes<uint64_t>(RegWidth);
        *CommentStream << '=' << formatHex(SExtVal & Mask) << '\n';
      }
    }
  };

  // MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
  // domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
  // MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction
  // that can represent the move is the MOV alias, and the rest get printed
  // normally.
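  // For example, "movz w0, #42" (MOVZ, lsl #0) is the highest-priority form
  // for that value and is printed as "mov w0, #42"; encodings lower in the
  // chain that cannot claim the MOV alias keep their own mnemonic.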
  if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) &&
      MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
    int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32;
    int Shift = MI->getOperand(2).getImm();
    uint64_t Value = (uint64_t)MI->getOperand(1).getImm() << Shift;

    if (AArch64_AM::isMOVZMovAlias(Value, Shift,
                                   Opcode == AArch64::MOVZXi ? 64 : 32)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }

  if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
      MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
    int RegWidth = Opcode == AArch64::MOVNXi ? 64 : 32;
    int Shift = MI->getOperand(2).getImm();
    uint64_t Value = ~((uint64_t)MI->getOperand(1).getImm() << Shift);
    if (RegWidth == 32)
      Value = Value & 0xffffffff;

    if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }

  if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) &&
      (MI->getOperand(1).getReg() == AArch64::XZR ||
       MI->getOperand(1).getReg() == AArch64::WZR) &&
      MI->getOperand(2).isImm()) {
    int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32;
    uint64_t Value = AArch64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(), RegWidth);
    if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
      PrintMovImm(Value, RegWidth);
      return;
    }
  }

  if (Opcode == AArch64::SPACE) {
    O << '\t' << MAI.getCommentString() << " SPACE "
      << MI->getOperand(1).getImm();
    printAnnotation(O, Annot);
    return;
  }

  // Instruction TSB is specified as a one operand instruction, but 'csync' is
  // not encoded, so for printing it is treated as a special case here:
  if (Opcode == AArch64::TSB) {
    O << "\ttsb\tcsync";
    return;
  }

  if (!PrintAliases || !printAliasInstr(MI, Address, STI, O))
    printInstruction(MI, Address, STI, O);

  printAnnotation(O, Annot);

  if (atomicBarrierDroppedOnZero(Opcode) &&
      (MI->getOperand(0).getReg() == AArch64::XZR ||
       MI->getOperand(0).getReg() == AArch64::WZR)) {
    printAnnotation(O, "acquire semantics dropped since destination is zero");
  }
}

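// Returns true if Opcode is one of the NEON TBL/TBX table-lookup
// instructions, setting Layout to the vector arrangement (".8b" or ".16b")
// and IsTbx to distinguish TBX from TBL.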
static bool isTblTbxInstruction(unsigned Opcode, StringRef &Layout,
                                bool &IsTbx) {
  switch (Opcode) {
  case AArch64::TBXv8i8One:
  case AArch64::TBXv8i8Two:
  case AArch64::TBXv8i8Three:
  case AArch64::TBXv8i8Four:
    IsTbx = true;
    Layout = ".8b";
    return true;
  case AArch64::TBLv8i8One:
  case AArch64::TBLv8i8Two:
  case AArch64::TBLv8i8Three:
  case AArch64::TBLv8i8Four:
    IsTbx = false;
    Layout = ".8b";
    return true;
  case AArch64::TBXv16i8One:
  case AArch64::TBXv16i8Two:
  case AArch64::TBXv16i8Three:
  case AArch64::TBXv16i8Four:
    IsTbx = true;
    Layout = ".16b";
    return true;
  case AArch64::TBLv16i8One:
  case AArch64::TBLv16i8Two:
  case AArch64::TBLv16i8Three:
  case AArch64::TBLv16i8Four:
    IsTbx = false;
    Layout = ".16b";
    return true;
  default:
    return false;
  }
}

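// Describes how to print one of the NEON structured load/store instructions
// (LD1-LD4, ST1-ST4 and their replicating and post-indexed variants):
// ListOperand is the MCInst operand index of the vector list, HasLane
// indicates that a lane index follows the list, and NaturalOffset is the
// immediate post-increment implied when the offset register is XZR.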
struct LdStNInstrDesc {
  unsigned Opcode;
  const char *Mnemonic;
  const char *Layout;
  int ListOperand;
  bool HasLane;
  int NaturalOffset;
};

static const LdStNInstrDesc LdStNInstInfo[] = {
  { AArch64::LD1i8, "ld1", ".b", 1, true, 0 },
  { AArch64::LD1i16, "ld1", ".h", 1, true, 0 },
  { AArch64::LD1i32, "ld1", ".s", 1, true, 0 },
  { AArch64::LD1i64, "ld1", ".d", 1, true, 0 },
  { AArch64::LD1i8_POST, "ld1", ".b", 2, true, 1 },
  { AArch64::LD1i16_POST, "ld1", ".h", 2, true, 2 },
  { AArch64::LD1i32_POST, "ld1", ".s", 2, true, 4 },
  { AArch64::LD1i64_POST, "ld1", ".d", 2, true, 8 },
  { AArch64::LD1Rv16b, "ld1r", ".16b", 0, false, 0 },
  { AArch64::LD1Rv8h, "ld1r", ".8h", 0, false, 0 },
  { AArch64::LD1Rv4s, "ld1r", ".4s", 0, false, 0 },
  { AArch64::LD1Rv2d, "ld1r", ".2d", 0, false, 0 },
  { AArch64::LD1Rv8b, "ld1r", ".8b", 0, false, 0 },
  { AArch64::LD1Rv4h, "ld1r", ".4h", 0, false, 0 },
  { AArch64::LD1Rv2s, "ld1r", ".2s", 0, false, 0 },
  { AArch64::LD1Rv1d, "ld1r", ".1d", 0, false, 0 },
  { AArch64::LD1Rv16b_POST, "ld1r", ".16b", 1, false, 1 },
  { AArch64::LD1Rv8h_POST, "ld1r", ".8h", 1, false, 2 },
  { AArch64::LD1Rv4s_POST, "ld1r", ".4s", 1, false, 4 },
  { AArch64::LD1Rv2d_POST, "ld1r", ".2d", 1, false, 8 },
  { AArch64::LD1Rv8b_POST, "ld1r", ".8b", 1, false, 1 },
  { AArch64::LD1Rv4h_POST, "ld1r", ".4h", 1, false, 2 },
  { AArch64::LD1Rv2s_POST, "ld1r", ".2s", 1, false, 4 },
  { AArch64::LD1Rv1d_POST, "ld1r", ".1d", 1, false, 8 },
  { AArch64::LD1Onev16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Onev8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Onev4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Onev2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Onev8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Onev4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Onev2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Onev1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Onev16b_POST, "ld1", ".16b", 1, false, 16 },
  { AArch64::LD1Onev8h_POST, "ld1", ".8h", 1, false, 16 },
  { AArch64::LD1Onev4s_POST, "ld1", ".4s", 1, false, 16 },
  { AArch64::LD1Onev2d_POST, "ld1", ".2d", 1, false, 16 },
  { AArch64::LD1Onev8b_POST, "ld1", ".8b", 1, false, 8 },
  { AArch64::LD1Onev4h_POST, "ld1", ".4h", 1, false, 8 },
  { AArch64::LD1Onev2s_POST, "ld1", ".2s", 1, false, 8 },
  { AArch64::LD1Onev1d_POST, "ld1", ".1d", 1, false, 8 },
  { AArch64::LD1Twov16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Twov8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Twov4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Twov2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Twov8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Twov4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Twov2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Twov1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Twov16b_POST, "ld1", ".16b", 1, false, 32 },
  { AArch64::LD1Twov8h_POST, "ld1", ".8h", 1, false, 32 },
  { AArch64::LD1Twov4s_POST, "ld1", ".4s", 1, false, 32 },
  { AArch64::LD1Twov2d_POST, "ld1", ".2d", 1, false, 32 },
  { AArch64::LD1Twov8b_POST, "ld1", ".8b", 1, false, 16 },
  { AArch64::LD1Twov4h_POST, "ld1", ".4h", 1, false, 16 },
  { AArch64::LD1Twov2s_POST, "ld1", ".2s", 1, false, 16 },
  { AArch64::LD1Twov1d_POST, "ld1", ".1d", 1, false, 16 },
  { AArch64::LD1Threev16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Threev8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Threev4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Threev2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Threev8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Threev4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Threev2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Threev1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Threev16b_POST, "ld1", ".16b", 1, false, 48 },
  { AArch64::LD1Threev8h_POST, "ld1", ".8h", 1, false, 48 },
  { AArch64::LD1Threev4s_POST, "ld1", ".4s", 1, false, 48 },
  { AArch64::LD1Threev2d_POST, "ld1", ".2d", 1, false, 48 },
  { AArch64::LD1Threev8b_POST, "ld1", ".8b", 1, false, 24 },
  { AArch64::LD1Threev4h_POST, "ld1", ".4h", 1, false, 24 },
  { AArch64::LD1Threev2s_POST, "ld1", ".2s", 1, false, 24 },
  { AArch64::LD1Threev1d_POST, "ld1", ".1d", 1, false, 24 },
  { AArch64::LD1Fourv16b, "ld1", ".16b", 0, false, 0 },
  { AArch64::LD1Fourv8h, "ld1", ".8h", 0, false, 0 },
  { AArch64::LD1Fourv4s, "ld1", ".4s", 0, false, 0 },
  { AArch64::LD1Fourv2d, "ld1", ".2d", 0, false, 0 },
  { AArch64::LD1Fourv8b, "ld1", ".8b", 0, false, 0 },
  { AArch64::LD1Fourv4h, "ld1", ".4h", 0, false, 0 },
  { AArch64::LD1Fourv2s, "ld1", ".2s", 0, false, 0 },
  { AArch64::LD1Fourv1d, "ld1", ".1d", 0, false, 0 },
  { AArch64::LD1Fourv16b_POST, "ld1", ".16b", 1, false, 64 },
  { AArch64::LD1Fourv8h_POST, "ld1", ".8h", 1, false, 64 },
  { AArch64::LD1Fourv4s_POST, "ld1", ".4s", 1, false, 64 },
  { AArch64::LD1Fourv2d_POST, "ld1", ".2d", 1, false, 64 },
  { AArch64::LD1Fourv8b_POST, "ld1", ".8b", 1, false, 32 },
  { AArch64::LD1Fourv4h_POST, "ld1", ".4h", 1, false, 32 },
  { AArch64::LD1Fourv2s_POST, "ld1", ".2s", 1, false, 32 },
  { AArch64::LD1Fourv1d_POST, "ld1", ".1d", 1, false, 32 },
  { AArch64::LD2i8, "ld2", ".b", 1, true, 0 },
  { AArch64::LD2i16, "ld2", ".h", 1, true, 0 },
  { AArch64::LD2i32, "ld2", ".s", 1, true, 0 },
  { AArch64::LD2i64, "ld2", ".d", 1, true, 0 },
  { AArch64::LD2i8_POST, "ld2", ".b", 2, true, 2 },
  { AArch64::LD2i16_POST, "ld2", ".h", 2, true, 4 },
  { AArch64::LD2i32_POST, "ld2", ".s", 2, true, 8 },
  { AArch64::LD2i64_POST, "ld2", ".d", 2, true, 16 },
  { AArch64::LD2Rv16b, "ld2r", ".16b", 0, false, 0 },
  { AArch64::LD2Rv8h, "ld2r", ".8h", 0, false, 0 },
  { AArch64::LD2Rv4s, "ld2r", ".4s", 0, false, 0 },
  { AArch64::LD2Rv2d, "ld2r", ".2d", 0, false, 0 },
  { AArch64::LD2Rv8b, "ld2r", ".8b", 0, false, 0 },
  { AArch64::LD2Rv4h, "ld2r", ".4h", 0, false, 0 },
  { AArch64::LD2Rv2s, "ld2r", ".2s", 0, false, 0 },
  { AArch64::LD2Rv1d, "ld2r", ".1d", 0, false, 0 },
  { AArch64::LD2Rv16b_POST, "ld2r", ".16b", 1, false, 2 },
  { AArch64::LD2Rv8h_POST, "ld2r", ".8h", 1, false, 4 },
  { AArch64::LD2Rv4s_POST, "ld2r", ".4s", 1, false, 8 },
  { AArch64::LD2Rv2d_POST, "ld2r", ".2d", 1, false, 16 },
  { AArch64::LD2Rv8b_POST, "ld2r", ".8b", 1, false, 2 },
  { AArch64::LD2Rv4h_POST, "ld2r", ".4h", 1, false, 4 },
  { AArch64::LD2Rv2s_POST, "ld2r", ".2s", 1, false, 8 },
  { AArch64::LD2Rv1d_POST, "ld2r", ".1d", 1, false, 16 },
  { AArch64::LD2Twov16b, "ld2", ".16b", 0, false, 0 },
  { AArch64::LD2Twov8h, "ld2", ".8h", 0, false, 0 },
  { AArch64::LD2Twov4s, "ld2", ".4s", 0, false, 0 },
  { AArch64::LD2Twov2d, "ld2", ".2d", 0, false, 0 },
  { AArch64::LD2Twov8b, "ld2", ".8b", 0, false, 0 },
  { AArch64::LD2Twov4h, "ld2", ".4h", 0, false, 0 },
  { AArch64::LD2Twov2s, "ld2", ".2s", 0, false, 0 },
  { AArch64::LD2Twov16b_POST, "ld2", ".16b", 1, false, 32 },
  { AArch64::LD2Twov8h_POST, "ld2", ".8h", 1, false, 32 },
  { AArch64::LD2Twov4s_POST, "ld2", ".4s", 1, false, 32 },
  { AArch64::LD2Twov2d_POST, "ld2", ".2d", 1, false, 32 },
  { AArch64::LD2Twov8b_POST, "ld2", ".8b", 1, false, 16 },
  { AArch64::LD2Twov4h_POST, "ld2", ".4h", 1, false, 16 },
  { AArch64::LD2Twov2s_POST, "ld2", ".2s", 1, false, 16 },
  { AArch64::LD3i8, "ld3", ".b", 1, true, 0 },
  { AArch64::LD3i16, "ld3", ".h", 1, true, 0 },
  { AArch64::LD3i32, "ld3", ".s", 1, true, 0 },
  { AArch64::LD3i64, "ld3", ".d", 1, true, 0 },
  { AArch64::LD3i8_POST, "ld3", ".b", 2, true, 3 },
  { AArch64::LD3i16_POST, "ld3", ".h", 2, true, 6 },
  { AArch64::LD3i32_POST, "ld3", ".s", 2, true, 12 },
  { AArch64::LD3i64_POST, "ld3", ".d", 2, true, 24 },
  { AArch64::LD3Rv16b, "ld3r", ".16b", 0, false, 0 },
  { AArch64::LD3Rv8h, "ld3r", ".8h", 0, false, 0 },
  { AArch64::LD3Rv4s, "ld3r", ".4s", 0, false, 0 },
  { AArch64::LD3Rv2d, "ld3r", ".2d", 0, false, 0 },
  { AArch64::LD3Rv8b, "ld3r", ".8b", 0, false, 0 },
  { AArch64::LD3Rv4h, "ld3r", ".4h", 0, false, 0 },
  { AArch64::LD3Rv2s, "ld3r", ".2s", 0, false, 0 },
  { AArch64::LD3Rv1d, "ld3r", ".1d", 0, false, 0 },
  { AArch64::LD3Rv16b_POST, "ld3r", ".16b", 1, false, 3 },
  { AArch64::LD3Rv8h_POST, "ld3r", ".8h", 1, false, 6 },
  { AArch64::LD3Rv4s_POST, "ld3r", ".4s", 1, false, 12 },
  { AArch64::LD3Rv2d_POST, "ld3r", ".2d", 1, false, 24 },
  { AArch64::LD3Rv8b_POST, "ld3r", ".8b", 1, false, 3 },
  { AArch64::LD3Rv4h_POST, "ld3r", ".4h", 1, false, 6 },
  { AArch64::LD3Rv2s_POST, "ld3r", ".2s", 1, false, 12 },
  { AArch64::LD3Rv1d_POST, "ld3r", ".1d", 1, false, 24 },
  { AArch64::LD3Threev16b, "ld3", ".16b", 0, false, 0 },
  { AArch64::LD3Threev8h, "ld3", ".8h", 0, false, 0 },
  { AArch64::LD3Threev4s, "ld3", ".4s", 0, false, 0 },
  { AArch64::LD3Threev2d, "ld3", ".2d", 0, false, 0 },
  { AArch64::LD3Threev8b, "ld3", ".8b", 0, false, 0 },
  { AArch64::LD3Threev4h, "ld3", ".4h", 0, false, 0 },
  { AArch64::LD3Threev2s, "ld3", ".2s", 0, false, 0 },
  { AArch64::LD3Threev16b_POST, "ld3", ".16b", 1, false, 48 },
  { AArch64::LD3Threev8h_POST, "ld3", ".8h", 1, false, 48 },
  { AArch64::LD3Threev4s_POST, "ld3", ".4s", 1, false, 48 },
  { AArch64::LD3Threev2d_POST, "ld3", ".2d", 1, false, 48 },
  { AArch64::LD3Threev8b_POST, "ld3", ".8b", 1, false, 24 },
  { AArch64::LD3Threev4h_POST, "ld3", ".4h", 1, false, 24 },
  { AArch64::LD3Threev2s_POST, "ld3", ".2s", 1, false, 24 },
  { AArch64::LD4i8, "ld4", ".b", 1, true, 0 },
  { AArch64::LD4i16, "ld4", ".h", 1, true, 0 },
  { AArch64::LD4i32, "ld4", ".s", 1, true, 0 },
  { AArch64::LD4i64, "ld4", ".d", 1, true, 0 },
  { AArch64::LD4i8_POST, "ld4", ".b", 2, true, 4 },
  { AArch64::LD4i16_POST, "ld4", ".h", 2, true, 8 },
  { AArch64::LD4i32_POST, "ld4", ".s", 2, true, 16 },
  { AArch64::LD4i64_POST, "ld4", ".d", 2, true, 32 },
  { AArch64::LD4Rv16b, "ld4r", ".16b", 0, false, 0 },
  { AArch64::LD4Rv8h, "ld4r", ".8h", 0, false, 0 },
  { AArch64::LD4Rv4s, "ld4r", ".4s", 0, false, 0 },
  { AArch64::LD4Rv2d, "ld4r", ".2d", 0, false, 0 },
  { AArch64::LD4Rv8b, "ld4r", ".8b", 0, false, 0 },
  { AArch64::LD4Rv4h, "ld4r", ".4h", 0, false, 0 },
  { AArch64::LD4Rv2s, "ld4r", ".2s", 0, false, 0 },
  { AArch64::LD4Rv1d, "ld4r", ".1d", 0, false, 0 },
  { AArch64::LD4Rv16b_POST, "ld4r", ".16b", 1, false, 4 },
  { AArch64::LD4Rv8h_POST, "ld4r", ".8h", 1, false, 8 },
  { AArch64::LD4Rv4s_POST, "ld4r", ".4s", 1, false, 16 },
  { AArch64::LD4Rv2d_POST, "ld4r", ".2d", 1, false, 32 },
  { AArch64::LD4Rv8b_POST, "ld4r", ".8b", 1, false, 4 },
  { AArch64::LD4Rv4h_POST, "ld4r", ".4h", 1, false, 8 },
  { AArch64::LD4Rv2s_POST, "ld4r", ".2s", 1, false, 16 },
  { AArch64::LD4Rv1d_POST, "ld4r", ".1d", 1, false, 32 },
  { AArch64::LD4Fourv16b, "ld4", ".16b", 0, false, 0 },
  { AArch64::LD4Fourv8h, "ld4", ".8h", 0, false, 0 },
  { AArch64::LD4Fourv4s, "ld4", ".4s", 0, false, 0 },
  { AArch64::LD4Fourv2d, "ld4", ".2d", 0, false, 0 },
  { AArch64::LD4Fourv8b, "ld4", ".8b", 0, false, 0 },
  { AArch64::LD4Fourv4h, "ld4", ".4h", 0, false, 0 },
  { AArch64::LD4Fourv2s, "ld4", ".2s", 0, false, 0 },
  { AArch64::LD4Fourv16b_POST, "ld4", ".16b", 1, false, 64 },
  { AArch64::LD4Fourv8h_POST, "ld4", ".8h", 1, false, 64 },
  { AArch64::LD4Fourv4s_POST, "ld4", ".4s", 1, false, 64 },
  { AArch64::LD4Fourv2d_POST, "ld4", ".2d", 1, false, 64 },
  { AArch64::LD4Fourv8b_POST, "ld4", ".8b", 1, false, 32 },
  { AArch64::LD4Fourv4h_POST, "ld4", ".4h", 1, false, 32 },
  { AArch64::LD4Fourv2s_POST, "ld4", ".2s", 1, false, 32 },
  { AArch64::ST1i8, "st1", ".b", 0, true, 0 },
  { AArch64::ST1i16, "st1", ".h", 0, true, 0 },
  { AArch64::ST1i32, "st1", ".s", 0, true, 0 },
  { AArch64::ST1i64, "st1", ".d", 0, true, 0 },
  { AArch64::ST1i8_POST, "st1", ".b", 1, true, 1 },
  { AArch64::ST1i16_POST, "st1", ".h", 1, true, 2 },
  { AArch64::ST1i32_POST, "st1", ".s", 1, true, 4 },
  { AArch64::ST1i64_POST, "st1", ".d", 1, true, 8 },
  { AArch64::ST1Onev16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Onev8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Onev4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Onev2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Onev8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Onev4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Onev2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Onev1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Onev16b_POST, "st1", ".16b", 1, false, 16 },
  { AArch64::ST1Onev8h_POST, "st1", ".8h", 1, false, 16 },
  { AArch64::ST1Onev4s_POST, "st1", ".4s", 1, false, 16 },
  { AArch64::ST1Onev2d_POST, "st1", ".2d", 1, false, 16 },
  { AArch64::ST1Onev8b_POST, "st1", ".8b", 1, false, 8 },
  { AArch64::ST1Onev4h_POST, "st1", ".4h", 1, false, 8 },
  { AArch64::ST1Onev2s_POST, "st1", ".2s", 1, false, 8 },
  { AArch64::ST1Onev1d_POST, "st1", ".1d", 1, false, 8 },
  { AArch64::ST1Twov16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Twov8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Twov4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Twov2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Twov8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Twov4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Twov2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Twov1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Twov16b_POST, "st1", ".16b", 1, false, 32 },
  { AArch64::ST1Twov8h_POST, "st1", ".8h", 1, false, 32 },
  { AArch64::ST1Twov4s_POST, "st1", ".4s", 1, false, 32 },
  { AArch64::ST1Twov2d_POST, "st1", ".2d", 1, false, 32 },
  { AArch64::ST1Twov8b_POST, "st1", ".8b", 1, false, 16 },
  { AArch64::ST1Twov4h_POST, "st1", ".4h", 1, false, 16 },
  { AArch64::ST1Twov2s_POST, "st1", ".2s", 1, false, 16 },
  { AArch64::ST1Twov1d_POST, "st1", ".1d", 1, false, 16 },
  { AArch64::ST1Threev16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Threev8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Threev4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Threev2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Threev8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Threev4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Threev2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Threev1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Threev16b_POST, "st1", ".16b", 1, false, 48 },
  { AArch64::ST1Threev8h_POST, "st1", ".8h", 1, false, 48 },
  { AArch64::ST1Threev4s_POST, "st1", ".4s", 1, false, 48 },
  { AArch64::ST1Threev2d_POST, "st1", ".2d", 1, false, 48 },
  { AArch64::ST1Threev8b_POST, "st1", ".8b", 1, false, 24 },
  { AArch64::ST1Threev4h_POST, "st1", ".4h", 1, false, 24 },
  { AArch64::ST1Threev2s_POST, "st1", ".2s", 1, false, 24 },
  { AArch64::ST1Threev1d_POST, "st1", ".1d", 1, false, 24 },
  { AArch64::ST1Fourv16b, "st1", ".16b", 0, false, 0 },
  { AArch64::ST1Fourv8h, "st1", ".8h", 0, false, 0 },
  { AArch64::ST1Fourv4s, "st1", ".4s", 0, false, 0 },
  { AArch64::ST1Fourv2d, "st1", ".2d", 0, false, 0 },
  { AArch64::ST1Fourv8b, "st1", ".8b", 0, false, 0 },
  { AArch64::ST1Fourv4h, "st1", ".4h", 0, false, 0 },
  { AArch64::ST1Fourv2s, "st1", ".2s", 0, false, 0 },
  { AArch64::ST1Fourv1d, "st1", ".1d", 0, false, 0 },
  { AArch64::ST1Fourv16b_POST, "st1", ".16b", 1, false, 64 },
  { AArch64::ST1Fourv8h_POST, "st1", ".8h", 1, false, 64 },
  { AArch64::ST1Fourv4s_POST, "st1", ".4s", 1, false, 64 },
  { AArch64::ST1Fourv2d_POST, "st1", ".2d", 1, false, 64 },
  { AArch64::ST1Fourv8b_POST, "st1", ".8b", 1, false, 32 },
  { AArch64::ST1Fourv4h_POST, "st1", ".4h", 1, false, 32 },
  { AArch64::ST1Fourv2s_POST, "st1", ".2s", 1, false, 32 },
  { AArch64::ST1Fourv1d_POST, "st1", ".1d", 1, false, 32 },
  { AArch64::ST2i8, "st2", ".b", 0, true, 0 },
  { AArch64::ST2i16, "st2", ".h", 0, true, 0 },
  { AArch64::ST2i32, "st2", ".s", 0, true, 0 },
  { AArch64::ST2i64, "st2", ".d", 0, true, 0 },
  { AArch64::ST2i8_POST, "st2", ".b", 1, true, 2 },
  { AArch64::ST2i16_POST, "st2", ".h", 1, true, 4 },
  { AArch64::ST2i32_POST, "st2", ".s", 1, true, 8 },
  { AArch64::ST2i64_POST, "st2", ".d", 1, true, 16 },
  { AArch64::ST2Twov16b, "st2", ".16b", 0, false, 0 },
  { AArch64::ST2Twov8h, "st2", ".8h", 0, false, 0 },
  { AArch64::ST2Twov4s, "st2", ".4s", 0, false, 0 },
  { AArch64::ST2Twov2d, "st2", ".2d", 0, false, 0 },
  { AArch64::ST2Twov8b, "st2", ".8b", 0, false, 0 },
  { AArch64::ST2Twov4h, "st2", ".4h", 0, false, 0 },
  { AArch64::ST2Twov2s, "st2", ".2s", 0, false, 0 },
  { AArch64::ST2Twov16b_POST, "st2", ".16b", 1, false, 32 },
  { AArch64::ST2Twov8h_POST, "st2", ".8h", 1, false, 32 },
  { AArch64::ST2Twov4s_POST, "st2", ".4s", 1, false, 32 },
  { AArch64::ST2Twov2d_POST, "st2", ".2d", 1, false, 32 },
  { AArch64::ST2Twov8b_POST, "st2", ".8b", 1, false, 16 },
  { AArch64::ST2Twov4h_POST, "st2", ".4h", 1, false, 16 },
  { AArch64::ST2Twov2s_POST, "st2", ".2s", 1, false, 16 },
  { AArch64::ST3i8, "st3", ".b", 0, true, 0 },
  { AArch64::ST3i16, "st3", ".h", 0, true, 0 },
  { AArch64::ST3i32, "st3", ".s", 0, true, 0 },
  { AArch64::ST3i64, "st3", ".d", 0, true, 0 },
  { AArch64::ST3i8_POST, "st3", ".b", 1, true, 3 },
  { AArch64::ST3i16_POST, "st3", ".h", 1, true, 6 },
  { AArch64::ST3i32_POST, "st3", ".s", 1, true, 12 },
  { AArch64::ST3i64_POST, "st3", ".d", 1, true, 24 },
  { AArch64::ST3Threev16b, "st3", ".16b", 0, false, 0 },
  { AArch64::ST3Threev8h, "st3", ".8h", 0, false, 0 },
  { AArch64::ST3Threev4s, "st3", ".4s", 0, false, 0 },
  { AArch64::ST3Threev2d, "st3", ".2d", 0, false, 0 },
  { AArch64::ST3Threev8b, "st3", ".8b", 0, false, 0 },
  { AArch64::ST3Threev4h, "st3", ".4h", 0, false, 0 },
  { AArch64::ST3Threev2s, "st3", ".2s", 0, false, 0 },
  { AArch64::ST3Threev16b_POST, "st3", ".16b", 1, false, 48 },
  { AArch64::ST3Threev8h_POST, "st3", ".8h", 1, false, 48 },
  { AArch64::ST3Threev4s_POST, "st3", ".4s", 1, false, 48 },
  { AArch64::ST3Threev2d_POST, "st3", ".2d", 1, false, 48 },
  { AArch64::ST3Threev8b_POST, "st3", ".8b", 1, false, 24 },
  { AArch64::ST3Threev4h_POST, "st3", ".4h", 1, false, 24 },
  { AArch64::ST3Threev2s_POST, "st3", ".2s", 1, false, 24 },
  { AArch64::ST4i8, "st4", ".b", 0, true, 0 },
  { AArch64::ST4i16, "st4", ".h", 0, true, 0 },
  { AArch64::ST4i32, "st4", ".s", 0, true, 0 },
  { AArch64::ST4i64, "st4", ".d", 0, true, 0 },
  { AArch64::ST4i8_POST, "st4", ".b", 1, true, 4 },
  { AArch64::ST4i16_POST, "st4", ".h", 1, true, 8 },
  { AArch64::ST4i32_POST, "st4", ".s", 1, true, 16 },
  { AArch64::ST4i64_POST, "st4", ".d", 1, true, 32 },
  { AArch64::ST4Fourv16b, "st4", ".16b", 0, false, 0 },
  { AArch64::ST4Fourv8h, "st4", ".8h", 0, false, 0 },
  { AArch64::ST4Fourv4s, "st4", ".4s", 0, false, 0 },
  { AArch64::ST4Fourv2d, "st4", ".2d", 0, false, 0 },
  { AArch64::ST4Fourv8b, "st4", ".8b", 0, false, 0 },
  { AArch64::ST4Fourv4h, "st4", ".4h", 0, false, 0 },
  { AArch64::ST4Fourv2s, "st4", ".2s", 0, false, 0 },
  { AArch64::ST4Fourv16b_POST, "st4", ".16b", 1, false, 64 },
  { AArch64::ST4Fourv8h_POST, "st4", ".8h", 1, false, 64 },
  { AArch64::ST4Fourv4s_POST, "st4", ".4s", 1, false, 64 },
  { AArch64::ST4Fourv2d_POST, "st4", ".2d", 1, false, 64 },
  { AArch64::ST4Fourv8b_POST, "st4", ".8b", 1, false, 32 },
  { AArch64::ST4Fourv4h_POST, "st4", ".4h", 1, false, 32 },
  { AArch64::ST4Fourv2s_POST, "st4", ".2s", 1, false, 32 },
};
774
775static const LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
776 for (const auto &Info : LdStNInstInfo)
777 if (Info.Opcode == Opcode)
778 return &Info;
779
780 return nullptr;
781}
782
783void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
784 StringRef Annot,
785 const MCSubtargetInfo &STI,
786 raw_ostream &O) {
787 unsigned Opcode = MI->getOpcode();
788 StringRef Layout;
789
790 bool IsTbx;
791 if (isTblTbxInstruction(Opcode: MI->getOpcode(), Layout, IsTbx)) {
792 O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t';
793 printRegName(OS&: O, Reg: MI->getOperand(i: 0).getReg(), AltIdx: AArch64::vreg);
794 O << ", ";
795
796 unsigned ListOpNum = IsTbx ? 2 : 1;
797 printVectorList(MI, OpNum: ListOpNum, STI, O, LayoutSuffix: "");
798
799 O << ", ";
800 printRegName(OS&: O, Reg: MI->getOperand(i: ListOpNum + 1).getReg(), AltIdx: AArch64::vreg);
801 printAnnotation(OS&: O, Annot);
802 return;
803 }
804
805 if (const LdStNInstrDesc *LdStDesc = getLdStNInstrDesc(Opcode)) {
806 O << "\t" << LdStDesc->Mnemonic << LdStDesc->Layout << '\t';
807
808 // Now onto the operands: first a vector list with possible lane
809 // specifier. E.g. { v0 }[2]
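    // For a post-indexed store, for instance, the complete line comes out as
    //   st4.16b  { v0, v1, v2, v3 }, [x0], #64
    // (illustrative values).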
810 int OpNum = LdStDesc->ListOperand;
811 printVectorList(MI, OpNum: OpNum++, STI, O, LayoutSuffix: "");
812
813 if (LdStDesc->HasLane)
814 O << '[' << MI->getOperand(i: OpNum++).getImm() << ']';
815
816 // Next the address: [xN]
817 MCRegister AddrReg = MI->getOperand(i: OpNum++).getReg();
818 O << ", [";
819 printRegName(OS&: O, Reg: AddrReg);
820 O << ']';
821
822 // Finally, there might be a post-indexed offset.
823 if (LdStDesc->NaturalOffset != 0) {
824 MCRegister Reg = MI->getOperand(i: OpNum++).getReg();
825 if (Reg != AArch64::XZR) {
826 O << ", ";
827 printRegName(OS&: O, Reg);
828 } else {
829 assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
830 O << ", ";
831 markup(OS&: O, M: Markup::Immediate) << "#" << LdStDesc->NaturalOffset;
832 }
833 }
834
835 printAnnotation(OS&: O, Annot);
836 return;
837 }
838
839 AArch64InstPrinter::printInst(MI, Address, Annot, STI, O);
840}
841
842StringRef AArch64AppleInstPrinter::getRegName(MCRegister Reg) const {
843 return getRegisterName(Reg);
844}
845
846bool AArch64InstPrinter::printRangePrefetchAlias(const MCInst *MI,
847 const MCSubtargetInfo &STI,
848 raw_ostream &O,
849 StringRef Annot) {
850 unsigned Opcode = MI->getOpcode();
851
852#ifndef NDEBUG
853 assert(((Opcode == AArch64::PRFMroX) || (Opcode == AArch64::PRFMroW)) &&
854 "Invalid opcode for RPRFM alias!");
855#endif
856
857 unsigned PRFOp = MI->getOperand(i: 0).getImm();
858 unsigned Mask = 0x18; // 0b11000
859 if ((PRFOp & Mask) != Mask)
860 return false; // Rt != '11xxx', it's a PRFM instruction.
861
862 MCRegister Rm = MI->getOperand(i: 2).getReg();
863
864 // "Rm" must be a 64-bit GPR for RPRFM.
865 if (MRI.getRegClass(i: AArch64::GPR32RegClassID).contains(Reg: Rm))
866 Rm = MRI.getMatchingSuperReg(Reg: Rm, SubIdx: AArch64::sub_32,
867 RC: &MRI.getRegClass(i: AArch64::GPR64RegClassID));
868
869 unsigned SignExtend = MI->getOperand(i: 3).getImm(); // encoded in "option<2>".
870 unsigned Shift = MI->getOperand(i: 4).getImm(); // encoded in "S".
871
872 assert((SignExtend <= 1) && "sign extend should be a single bit!");
873 assert((Shift <= 1) && "Shift should be a single bit!");
874
875 unsigned Option0 = (Opcode == AArch64::PRFMroX) ? 1 : 0;
876
877 // encoded in "option<2>:option<0>:S:Rt<2:0>".
878 unsigned RPRFOp =
879 (SignExtend << 5) | (Option0 << 4) | (Shift << 3) | (PRFOp & 0x7);
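  // Worked example (illustrative values): PRFMroX with SignExtend = 0,
  // Shift = 0 and PRFOp = 0b11000 gives
  // RPRFOp = (0 << 5) | (1 << 4) | (0 << 3) | 0b000 = 16.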
880
881 O << "\trprfm ";
882 if (auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: RPRFOp))
883 O << RPRFM->Name << ", ";
884 else
885 O << "#" << formatImm(Value: RPRFOp) << ", ";
886 O << getRegisterName(Reg: Rm);
887 O << ", [";
888 printOperand(MI, OpNo: 1, STI, O); // "Rn".
889 O << "]";
890
891 printAnnotation(OS&: O, Annot);
892
893 return true;
894}
895
896bool AArch64InstPrinter::printSysAlias(const MCInst *MI,
897 const MCSubtargetInfo &STI,
898 raw_ostream &O) {
899#ifndef NDEBUG
900 unsigned Opcode = MI->getOpcode();
901 assert(Opcode == AArch64::SYSxt && "Invalid opcode for SYS alias!");
902#endif
903
904 const MCOperand &Op1 = MI->getOperand(i: 0);
905 const MCOperand &Cn = MI->getOperand(i: 1);
906 const MCOperand &Cm = MI->getOperand(i: 2);
907 const MCOperand &Op2 = MI->getOperand(i: 3);
908
909 unsigned Op1Val = Op1.getImm();
910 unsigned CnVal = Cn.getImm();
911 unsigned CmVal = Cm.getImm();
912 unsigned Op2Val = Op2.getImm();
913
914 uint16_t Encoding = Op2Val;
915 Encoding |= CmVal << 3;
916 Encoding |= CnVal << 7;
917 Encoding |= Op1Val << 11;
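  // For example, op1 = 0, CRn = 7, CRm = 1, op2 = 0 (an IC alias handled in
  // the CnVal == 7 case below) packs to (1 << 3) | (7 << 7) = 0x388.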
918
919 bool NeedsReg;
920 std::string Ins;
921 std::string Name;
922
923 if (CnVal == 7) {
924 switch (CmVal) {
925 default: return false;
926 // Maybe IC, maybe Prediction Restriction
927 case 1:
928 switch (Op1Val) {
929 default: return false;
930 case 0: goto Search_IC;
931 case 3: goto Search_PRCTX;
932 }
933 // Prediction Restriction aliases
934 case 3: {
935 Search_PRCTX:
936 if (Op1Val != 3 || CnVal != 7 || CmVal != 3)
937 return false;
938
939 const auto Requires =
940 Op2Val == 6 ? AArch64::FeatureSPECRES2 : AArch64::FeaturePredRes;
941 if (!(STI.hasFeature(Feature: AArch64::FeatureAll) || STI.hasFeature(Feature: Requires)))
942 return false;
943
944 NeedsReg = true;
945 switch (Op2Val) {
946 default: return false;
947 case 4: Ins = "cfp\t"; break;
948 case 5: Ins = "dvp\t"; break;
949 case 6: Ins = "cosp\t"; break;
950 case 7: Ins = "cpp\t"; break;
951 }
952 Name = "RCTX";
953 }
954 break;
955 // IC aliases
956 case 5: {
957 Search_IC:
958 const AArch64IC::IC *IC = AArch64IC::lookupICByEncoding(Encoding);
959 if (!IC || !IC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
960 return false;
961
962 NeedsReg = IC->NeedsReg;
963 Ins = "ic\t";
964 Name = std::string(IC->Name);
965 }
966 break;
967 // DC aliases
968 case 4: case 6: case 10: case 11: case 12: case 13: case 14:
969 {
970 const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
971 if (!DC || !DC->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
972 return false;
973
974 NeedsReg = true;
975 Ins = "dc\t";
976 Name = std::string(DC->Name);
977 }
978 break;
979 // AT aliases
980 case 8: case 9: {
981 const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
982 if (!AT || !AT->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
983 return false;
984
985 NeedsReg = true;
986 Ins = "at\t";
987 Name = std::string(AT->Name);
988 }
989 break;
990 // Overlaps with AT and DC
991 case 15: {
992 const AArch64AT::AT *AT = AArch64AT::lookupATByEncoding(Encoding);
993 const AArch64DC::DC *DC = AArch64DC::lookupDCByEncoding(Encoding);
994 if (AT && AT->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
995 NeedsReg = true;
996 Ins = "at\t";
997 Name = std::string(AT->Name);
998 } else if (DC && DC->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
999 NeedsReg = true;
1000 Ins = "dc\t";
1001 Name = std::string(DC->Name);
1002 } else {
1003 return false;
1004 }
1005 } break;
1006 }
1007 } else if (CnVal == 8 || CnVal == 9) {
1008 // TLBI aliases
1009 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
1010 if (!TLBI || !TLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1011 return false;
1012
1013 NeedsReg = TLBI->NeedsReg;
1014 Ins = "tlbi\t";
1015 Name = std::string(TLBI->Name);
  } else
    return false;
1019
1020 std::string Str = Ins + Name;
1021 llvm::transform(Range&: Str, d_first: Str.begin(), F: ::tolower);
1022
1023 O << '\t' << Str;
1024 if (NeedsReg) {
1025 O << ", ";
1026 printRegName(OS&: O, Reg: MI->getOperand(i: 4).getReg());
1027 }
1028
1029 return true;
1030}
1031
1032bool AArch64InstPrinter::printSyspAlias(const MCInst *MI,
1033 const MCSubtargetInfo &STI,
1034 raw_ostream &O) {
1035#ifndef NDEBUG
1036 unsigned Opcode = MI->getOpcode();
1037 assert((Opcode == AArch64::SYSPxt || Opcode == AArch64::SYSPxt_XZR) &&
1038 "Invalid opcode for SYSP alias!");
1039#endif
1040
1041 const MCOperand &Op1 = MI->getOperand(i: 0);
1042 const MCOperand &Cn = MI->getOperand(i: 1);
1043 const MCOperand &Cm = MI->getOperand(i: 2);
1044 const MCOperand &Op2 = MI->getOperand(i: 3);
1045
1046 unsigned Op1Val = Op1.getImm();
1047 unsigned CnVal = Cn.getImm();
1048 unsigned CmVal = Cm.getImm();
1049 unsigned Op2Val = Op2.getImm();
1050
1051 uint16_t Encoding = Op2Val;
1052 Encoding |= CmVal << 3;
1053 Encoding |= CnVal << 7;
1054 Encoding |= Op1Val << 11;
1055
1056 std::string Ins;
1057 std::string Name;
1058
1059 if (CnVal == 8 || CnVal == 9) {
1060 // TLBIP aliases
1061
1062 if (CnVal == 9) {
1063 if (!STI.hasFeature(Feature: AArch64::FeatureXS))
1064 return false;
1065 Encoding &= ~(1 << 7);
1066 }
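    // Clearing bit 7 turns CRn == 9 back into CRn == 8, so the nXS form is
    // looked up through the same table entry as the base TLBI operation; the
    // "nXS" suffix is appended to the name below.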
1067
1068 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByEncoding(Encoding);
1069 if (!TLBI || !TLBI->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1070 return false;
1071
1072 Ins = "tlbip\t";
1073 Name = std::string(TLBI->Name);
1074 if (CnVal == 9)
1075 Name += "nXS";
1076 } else
1077 return false;
1078
1079 std::string Str = Ins + Name;
1080 llvm::transform(Range&: Str, d_first: Str.begin(), F: ::tolower);
1081
1082 O << '\t' << Str;
1083 O << ", ";
1084 if (MI->getOperand(i: 4).getReg() == AArch64::XZR)
1085 printSyspXzrPair(MI, OpNum: 4, STI, O);
1086 else
1087 printGPRSeqPairsClassOperand<64>(MI, OpNum: 4, STI, O);
1088
1089 return true;
1090}
1091
1092template <int EltSize>
1093void AArch64InstPrinter::printMatrix(const MCInst *MI, unsigned OpNum,
1094 const MCSubtargetInfo &STI,
1095 raw_ostream &O) {
1096 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1097 assert(RegOp.isReg() && "Unexpected operand type!");
1098
1099 printRegName(OS&: O, Reg: RegOp.getReg());
1100 switch (EltSize) {
1101 case 0:
1102 break;
1103 case 8:
1104 O << ".b";
1105 break;
1106 case 16:
1107 O << ".h";
1108 break;
1109 case 32:
1110 O << ".s";
1111 break;
1112 case 64:
1113 O << ".d";
1114 break;
1115 case 128:
1116 O << ".q";
1117 break;
1118 default:
1119 llvm_unreachable("Unsupported element size");
1120 }
1121}
1122
1123template <bool IsVertical>
1124void AArch64InstPrinter::printMatrixTileVector(const MCInst *MI, unsigned OpNum,
1125 const MCSubtargetInfo &STI,
1126 raw_ostream &O) {
1127 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1128 assert(RegOp.isReg() && "Unexpected operand type!");
1129 StringRef RegName = getRegisterName(Reg: RegOp.getReg());
1130
1131 // Insert the horizontal/vertical flag before the suffix.
1132 StringRef Base, Suffix;
1133 std::tie(args&: Base, args&: Suffix) = RegName.split(Separator: '.');
1134 O << Base << (IsVertical ? "v" : "h") << '.' << Suffix;
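  // For instance, the tile register printed as "za0.s" becomes "za0h.s", or
  // "za0v.s" when IsVertical is set (illustrative name).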
1135}
1136
1137void AArch64InstPrinter::printMatrixTile(const MCInst *MI, unsigned OpNum,
1138 const MCSubtargetInfo &STI,
1139 raw_ostream &O) {
1140 const MCOperand &RegOp = MI->getOperand(i: OpNum);
1141 assert(RegOp.isReg() && "Unexpected operand type!");
1142 printRegName(OS&: O, Reg: RegOp.getReg());
1143}
1144
1145void AArch64InstPrinter::printSVCROp(const MCInst *MI, unsigned OpNum,
1146 const MCSubtargetInfo &STI,
1147 raw_ostream &O) {
1148 const MCOperand &MO = MI->getOperand(i: OpNum);
1149 assert(MO.isImm() && "Unexpected operand type!");
1150 unsigned svcrop = MO.getImm();
1151 const auto *SVCR = AArch64SVCR::lookupSVCRByEncoding(Encoding: svcrop);
1152 assert(SVCR && "Unexpected SVCR operand!");
1153 O << SVCR->Name;
1154}
1155
1156void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
1157 const MCSubtargetInfo &STI,
1158 raw_ostream &O) {
1159 const MCOperand &Op = MI->getOperand(i: OpNo);
1160 if (Op.isReg()) {
1161 printRegName(OS&: O, Reg: Op.getReg());
1162 } else if (Op.isImm()) {
1163 printImm(MI, OpNo, STI, O);
1164 } else {
1165 assert(Op.isExpr() && "unknown operand kind in printOperand");
1166 MAI.printExpr(O, *Op.getExpr());
1167 }
1168}
1169
1170void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
1171 const MCSubtargetInfo &STI,
1172 raw_ostream &O) {
1173 const MCOperand &Op = MI->getOperand(i: OpNo);
1174 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Op.getImm());
1175}
1176
1177void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
1178 const MCSubtargetInfo &STI,
1179 raw_ostream &O) {
1180 const MCOperand &Op = MI->getOperand(i: OpNo);
1181 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%#llx", Vals: Op.getImm());
1182}
1183
1184template<int Size>
1185void AArch64InstPrinter::printSImm(const MCInst *MI, unsigned OpNo,
1186 const MCSubtargetInfo &STI,
1187 raw_ostream &O) {
1188 const MCOperand &Op = MI->getOperand(i: OpNo);
1189 if (Size == 8)
1190 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: (signed char)Op.getImm());
1191 else if (Size == 16)
1192 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: (signed short)Op.getImm());
1193 else
1194 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Op.getImm());
1195}
1196
1197void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
1198 unsigned Imm, raw_ostream &O) {
1199 const MCOperand &Op = MI->getOperand(i: OpNo);
1200 if (Op.isReg()) {
1201 MCRegister Reg = Op.getReg();
1202 if (Reg == AArch64::XZR)
1203 markup(OS&: O, M: Markup::Immediate) << "#" << Imm;
1204 else
1205 printRegName(OS&: O, Reg);
1206 } else
1207 llvm_unreachable("unknown operand kind in printPostIncOperand64");
1208}
1209
1210void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
1211 const MCSubtargetInfo &STI,
1212 raw_ostream &O) {
1213 const MCOperand &Op = MI->getOperand(i: OpNo);
1214 assert(Op.isReg() && "Non-register vreg operand!");
1215 printRegName(OS&: O, Reg: Op.getReg(), AltIdx: AArch64::vreg);
1216}
1217
1218void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
1219 const MCSubtargetInfo &STI,
1220 raw_ostream &O) {
1221 const MCOperand &Op = MI->getOperand(i: OpNo);
1222 assert(Op.isImm() && "System instruction C[nm] operands must be immediates!");
1223 O << "c" << Op.getImm();
1224}
1225
1226void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
1227 const MCSubtargetInfo &STI,
1228 raw_ostream &O) {
1229 const MCOperand &MO = MI->getOperand(i: OpNum);
1230 if (MO.isImm()) {
1231 unsigned Val = (MO.getImm() & 0xfff);
1232 assert(Val == MO.getImm() && "Add/sub immediate out of range!");
1233 unsigned Shift =
1234 AArch64_AM::getShiftValue(Imm: MI->getOperand(i: OpNum + 1).getImm());
1235 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Val);
1236 if (Shift != 0) {
1237 printShifter(MI, OpNum: OpNum + 1, STI, O);
1238 if (CommentStream)
1239 *CommentStream << '=' << formatImm(Value: Val << Shift) << '\n';
1240 }
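    // For example, a value of 1 with a "lsl #12" shift prints as
    // "#1, lsl #12", and the comment stream (if any) gets "=4096".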
1241 } else {
1242 assert(MO.isExpr() && "Unexpected operand type!");
1243 MAI.printExpr(O, *MO.getExpr());
1244 printShifter(MI, OpNum: OpNum + 1, STI, O);
1245 }
1246}
1247
1248template <typename T>
1249void AArch64InstPrinter::printLogicalImm(const MCInst *MI, unsigned OpNum,
1250 const MCSubtargetInfo &STI,
1251 raw_ostream &O) {
1252 uint64_t Val = MI->getOperand(i: OpNum).getImm();
1253 WithMarkup M = markup(OS&: O, M: Markup::Immediate);
1254 O << "#0x";
1255 O.write_hex(N: AArch64_AM::decodeLogicalImmediate(val: Val, regSize: 8 * sizeof(T)));
1256}
1257
1258void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
1259 const MCSubtargetInfo &STI,
1260 raw_ostream &O) {
1261 unsigned Val = MI->getOperand(i: OpNum).getImm();
1262 // LSL #0 should not be printed.
1263 if (AArch64_AM::getShiftType(Imm: Val) == AArch64_AM::LSL &&
1264 AArch64_AM::getShiftValue(Imm: Val) == 0)
1265 return;
1266 O << ", " << AArch64_AM::getShiftExtendName(ST: AArch64_AM::getShiftType(Imm: Val))
1267 << " ";
1268 markup(OS&: O, M: Markup::Immediate) << "#" << AArch64_AM::getShiftValue(Imm: Val);
1269}
1270
1271void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
1272 const MCSubtargetInfo &STI,
1273 raw_ostream &O) {
1274 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1275 printShifter(MI, OpNum: OpNum + 1, STI, O);
1276}
1277
1278void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
1279 const MCSubtargetInfo &STI,
1280 raw_ostream &O) {
1281 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1282 printArithExtend(MI, OpNum: OpNum + 1, STI, O);
1283}
1284
1285void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
1286 const MCSubtargetInfo &STI,
1287 raw_ostream &O) {
1288 unsigned Val = MI->getOperand(i: OpNum).getImm();
1289 AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getArithExtendType(Imm: Val);
1290 unsigned ShiftVal = AArch64_AM::getArithShiftValue(Imm: Val);
1291
1292 // If the destination or first source register operand is [W]SP, print
1293 // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
1294 // all.
1295 if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
1296 MCRegister Dest = MI->getOperand(i: 0).getReg();
1297 MCRegister Src1 = MI->getOperand(i: 1).getReg();
    if (((Dest == AArch64::SP || Src1 == AArch64::SP) &&
         ExtType == AArch64_AM::UXTX) ||
        ((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
         ExtType == AArch64_AM::UXTW)) {
1302 if (ShiftVal != 0) {
1303 O << ", lsl ";
1304 markup(OS&: O, M: Markup::Immediate) << "#" << ShiftVal;
1305 }
1306 return;
1307 }
1308 }
1309 O << ", " << AArch64_AM::getShiftExtendName(ST: ExtType);
1310 if (ShiftVal != 0) {
1311 O << " ";
1312 markup(OS&: O, M: Markup::Immediate) << "#" << ShiftVal;
1313 }
1314}
1315
1316void AArch64InstPrinter::printMemExtendImpl(bool SignExtend, bool DoShift,
1317 unsigned Width, char SrcRegKind,
1318 raw_ostream &O) {
1319 // sxtw, sxtx, uxtw or lsl (== uxtx)
1320 bool IsLSL = !SignExtend && SrcRegKind == 'x';
1321 if (IsLSL)
1322 O << "lsl";
1323 else
1324 O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;
1325
1326 if (DoShift || IsLSL) {
1327 O << " ";
1328 markup(OS&: O, M: Markup::Immediate) << "#" << Log2_32(Value: Width / 8);
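    // The amount is log2 of the access size in bytes, e.g. #3 for a 64-bit
    // (Width == 64) access.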
1329 }
1330}
1331
1332void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
1333 raw_ostream &O, char SrcRegKind,
1334 unsigned Width) {
1335 bool SignExtend = MI->getOperand(i: OpNum).getImm();
1336 bool DoShift = MI->getOperand(i: OpNum + 1).getImm();
1337 printMemExtendImpl(SignExtend, DoShift, Width, SrcRegKind, O);
1338}
1339
1340template <bool SignExtend, int ExtWidth, char SrcRegKind, char Suffix>
1341void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
1342 unsigned OpNum,
1343 const MCSubtargetInfo &STI,
1344 raw_ostream &O) {
1345 printOperand(MI, OpNo: OpNum, STI, O);
1346 if (Suffix == 's' || Suffix == 'd')
1347 O << '.' << Suffix;
1348 else
1349 assert(Suffix == 0 && "Unsupported suffix size");
1350
1351 bool DoShift = ExtWidth != 8;
1352 if (SignExtend || DoShift || SrcRegKind == 'w') {
1353 O << ", ";
1354 printMemExtendImpl(SignExtend, DoShift, Width: ExtWidth, SrcRegKind, O);
1355 }
1356}
1357
1358template <int EltSize>
1359void AArch64InstPrinter::printPredicateAsCounter(const MCInst *MI,
1360 unsigned OpNum,
1361 const MCSubtargetInfo &STI,
1362 raw_ostream &O) {
1363 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
1364 if (Reg < AArch64::PN0 || Reg > AArch64::PN15)
1365 llvm_unreachable("Unsupported predicate-as-counter register");
1366 O << "pn" << Reg - AArch64::PN0;
1367
1368 switch (EltSize) {
1369 case 0:
1370 break;
1371 case 8:
1372 O << ".b";
1373 break;
1374 case 16:
1375 O << ".h";
1376 break;
1377 case 32:
1378 O << ".s";
1379 break;
1380 case 64:
1381 O << ".d";
1382 break;
1383 default:
1384 llvm_unreachable("Unsupported element size");
1385 }
1386}
1387
1388void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
1389 const MCSubtargetInfo &STI,
1390 raw_ostream &O) {
1391 AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(i: OpNum).getImm();
1392 O << AArch64CC::getCondCodeName(Code: CC);
1393}
1394
1395void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
1396 const MCSubtargetInfo &STI,
1397 raw_ostream &O) {
1398 AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(i: OpNum).getImm();
1399 O << AArch64CC::getCondCodeName(Code: AArch64CC::getInvertedCondCode(Code: CC));
1400}
1401
1402void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
1403 const MCSubtargetInfo &STI,
1404 raw_ostream &O) {
1405 O << '[';
1406 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1407 O << ']';
1408}
1409
1410template <int Scale>
1411void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
1412 const MCSubtargetInfo &STI,
1413 raw_ostream &O) {
1414 markup(OS&: O, M: Markup::Immediate)
1415 << '#' << formatImm(Value: Scale * MI->getOperand(i: OpNum).getImm());
1416}
1417
1418template <int Scale, int Offset>
1419void AArch64InstPrinter::printImmRangeScale(const MCInst *MI, unsigned OpNum,
1420 const MCSubtargetInfo &STI,
1421 raw_ostream &O) {
1422 unsigned FirstImm = Scale * MI->getOperand(i: OpNum).getImm();
1423 O << formatImm(Value: FirstImm);
1424 O << ":" << formatImm(Value: FirstImm + Offset);
1425}
1426
1427void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
1428 unsigned Scale, raw_ostream &O) {
1429 const MCOperand MO = MI->getOperand(i: OpNum);
1430 if (MO.isImm()) {
1431 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: MO.getImm() * Scale);
1432 } else {
1433 assert(MO.isExpr() && "Unexpected operand type!");
1434 MAI.printExpr(O, *MO.getExpr());
1435 }
1436}
1437
1438void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
1439 unsigned Scale, raw_ostream &O) {
1440 const MCOperand MO1 = MI->getOperand(i: OpNum + 1);
1441 O << '[';
1442 printRegName(OS&: O, Reg: MI->getOperand(i: OpNum).getReg());
1443 if (MO1.isImm()) {
1444 O << ", ";
1445 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: MO1.getImm() * Scale);
1446 } else {
1447 assert(MO1.isExpr() && "Unexpected operand type!");
1448 O << ", ";
1449 MAI.printExpr(O, *MO1.getExpr());
1450 }
1451 O << ']';
1452}
1453
1454void AArch64InstPrinter::printRPRFMOperand(const MCInst *MI, unsigned OpNum,
1455 const MCSubtargetInfo &STI,
1456 raw_ostream &O) {
1457 unsigned prfop = MI->getOperand(i: OpNum).getImm();
1458 if (auto PRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: prfop)) {
1459 O << PRFM->Name;
1460 return;
1461 }
1462
1463 O << '#' << formatImm(Value: prfop);
1464}
1465
1466template <bool IsSVEPrefetch>
1467void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
1468 const MCSubtargetInfo &STI,
1469 raw_ostream &O) {
1470 unsigned prfop = MI->getOperand(i: OpNum).getImm();
1471 if (IsSVEPrefetch) {
1472 if (auto PRFM = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: prfop)) {
1473 O << PRFM->Name;
1474 return;
1475 }
1476 } else {
1477 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(Encoding: prfop);
1478 if (PRFM && PRFM->haveFeatures(ActiveFeatures: STI.getFeatureBits())) {
1479 O << PRFM->Name;
1480 return;
1481 }
1482 }
1483
1484 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: prfop);
1485}
1486
1487void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
1488 const MCSubtargetInfo &STI,
1489 raw_ostream &O) {
1490 unsigned psbhintop = MI->getOperand(i: OpNum).getImm();
1491 auto PSB = AArch64PSBHint::lookupPSBByEncoding(Encoding: psbhintop);
1492 if (PSB)
1493 O << PSB->Name;
1494 else
1495 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: psbhintop);
1496}
1497
1498void AArch64InstPrinter::printBTIHintOp(const MCInst *MI, unsigned OpNum,
1499 const MCSubtargetInfo &STI,
1500 raw_ostream &O) {
1501 unsigned btihintop = MI->getOperand(i: OpNum).getImm() ^ 32;
1502 auto BTI = AArch64BTIHint::lookupBTIByEncoding(Encoding: btihintop);
1503 if (BTI)
1504 O << BTI->Name;
1505 else
1506 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: btihintop);
1507}
1508
1509void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
1510 const MCSubtargetInfo &STI,
1511 raw_ostream &O) {
1512 const MCOperand &MO = MI->getOperand(i: OpNum);
1513 float FPImm = MO.isDFPImm() ? bit_cast<double>(from: MO.getDFPImm())
1514 : AArch64_AM::getFPImmFloat(Imm: MO.getImm());
1515
1516 // 8 decimal places are enough to perfectly represent permitted floats.
1517 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%.8f", Vals: FPImm);
1518}
1519
1520static MCRegister getNextVectorRegister(MCRegister Reg, unsigned Stride = 1) {
1521 while (Stride--) {
1522 switch (Reg.id()) {
1523 default:
1524 llvm_unreachable("Vector register expected!");
1525 case AArch64::Q0: Reg = AArch64::Q1; break;
1526 case AArch64::Q1: Reg = AArch64::Q2; break;
1527 case AArch64::Q2: Reg = AArch64::Q3; break;
1528 case AArch64::Q3: Reg = AArch64::Q4; break;
1529 case AArch64::Q4: Reg = AArch64::Q5; break;
1530 case AArch64::Q5: Reg = AArch64::Q6; break;
1531 case AArch64::Q6: Reg = AArch64::Q7; break;
1532 case AArch64::Q7: Reg = AArch64::Q8; break;
1533 case AArch64::Q8: Reg = AArch64::Q9; break;
1534 case AArch64::Q9: Reg = AArch64::Q10; break;
1535 case AArch64::Q10: Reg = AArch64::Q11; break;
1536 case AArch64::Q11: Reg = AArch64::Q12; break;
1537 case AArch64::Q12: Reg = AArch64::Q13; break;
1538 case AArch64::Q13: Reg = AArch64::Q14; break;
1539 case AArch64::Q14: Reg = AArch64::Q15; break;
1540 case AArch64::Q15: Reg = AArch64::Q16; break;
1541 case AArch64::Q16: Reg = AArch64::Q17; break;
1542 case AArch64::Q17: Reg = AArch64::Q18; break;
1543 case AArch64::Q18: Reg = AArch64::Q19; break;
1544 case AArch64::Q19: Reg = AArch64::Q20; break;
1545 case AArch64::Q20: Reg = AArch64::Q21; break;
1546 case AArch64::Q21: Reg = AArch64::Q22; break;
1547 case AArch64::Q22: Reg = AArch64::Q23; break;
1548 case AArch64::Q23: Reg = AArch64::Q24; break;
1549 case AArch64::Q24: Reg = AArch64::Q25; break;
1550 case AArch64::Q25: Reg = AArch64::Q26; break;
1551 case AArch64::Q26: Reg = AArch64::Q27; break;
1552 case AArch64::Q27: Reg = AArch64::Q28; break;
1553 case AArch64::Q28: Reg = AArch64::Q29; break;
1554 case AArch64::Q29: Reg = AArch64::Q30; break;
1555 case AArch64::Q30: Reg = AArch64::Q31; break;
1556 // Vector lists can wrap around.
1557 case AArch64::Q31:
1558 Reg = AArch64::Q0;
1559 break;
1560 case AArch64::Z0: Reg = AArch64::Z1; break;
1561 case AArch64::Z1: Reg = AArch64::Z2; break;
1562 case AArch64::Z2: Reg = AArch64::Z3; break;
1563 case AArch64::Z3: Reg = AArch64::Z4; break;
1564 case AArch64::Z4: Reg = AArch64::Z5; break;
1565 case AArch64::Z5: Reg = AArch64::Z6; break;
1566 case AArch64::Z6: Reg = AArch64::Z7; break;
1567 case AArch64::Z7: Reg = AArch64::Z8; break;
1568 case AArch64::Z8: Reg = AArch64::Z9; break;
1569 case AArch64::Z9: Reg = AArch64::Z10; break;
1570 case AArch64::Z10: Reg = AArch64::Z11; break;
1571 case AArch64::Z11: Reg = AArch64::Z12; break;
1572 case AArch64::Z12: Reg = AArch64::Z13; break;
1573 case AArch64::Z13: Reg = AArch64::Z14; break;
1574 case AArch64::Z14: Reg = AArch64::Z15; break;
1575 case AArch64::Z15: Reg = AArch64::Z16; break;
1576 case AArch64::Z16: Reg = AArch64::Z17; break;
1577 case AArch64::Z17: Reg = AArch64::Z18; break;
1578 case AArch64::Z18: Reg = AArch64::Z19; break;
1579 case AArch64::Z19: Reg = AArch64::Z20; break;
1580 case AArch64::Z20: Reg = AArch64::Z21; break;
1581 case AArch64::Z21: Reg = AArch64::Z22; break;
1582 case AArch64::Z22: Reg = AArch64::Z23; break;
1583 case AArch64::Z23: Reg = AArch64::Z24; break;
1584 case AArch64::Z24: Reg = AArch64::Z25; break;
1585 case AArch64::Z25: Reg = AArch64::Z26; break;
1586 case AArch64::Z26: Reg = AArch64::Z27; break;
1587 case AArch64::Z27: Reg = AArch64::Z28; break;
1588 case AArch64::Z28: Reg = AArch64::Z29; break;
1589 case AArch64::Z29: Reg = AArch64::Z30; break;
1590 case AArch64::Z30: Reg = AArch64::Z31; break;
1591 // Vector lists can wrap around.
1592 case AArch64::Z31:
1593 Reg = AArch64::Z0;
1594 break;
1595 case AArch64::P0: Reg = AArch64::P1; break;
1596 case AArch64::P1: Reg = AArch64::P2; break;
1597 case AArch64::P2: Reg = AArch64::P3; break;
1598 case AArch64::P3: Reg = AArch64::P4; break;
1599 case AArch64::P4: Reg = AArch64::P5; break;
1600 case AArch64::P5: Reg = AArch64::P6; break;
1601 case AArch64::P6: Reg = AArch64::P7; break;
1602 case AArch64::P7: Reg = AArch64::P8; break;
1603 case AArch64::P8: Reg = AArch64::P9; break;
1604 case AArch64::P9: Reg = AArch64::P10; break;
1605 case AArch64::P10: Reg = AArch64::P11; break;
1606 case AArch64::P11: Reg = AArch64::P12; break;
1607 case AArch64::P12: Reg = AArch64::P13; break;
1608 case AArch64::P13: Reg = AArch64::P14; break;
1609 case AArch64::P14: Reg = AArch64::P15; break;
1610 // Vector lists can wrap around.
1611 case AArch64::P15: Reg = AArch64::P0; break;
1612 }
1613 }
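  // Illustrative wrap-around: starting from Q30 with Stride == 3, this walks
  // Q30 -> Q31 -> Q0 -> Q1 and returns Q1.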
1614 return Reg;
1615}
1616
1617template<unsigned size>
1618void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
1619 unsigned OpNum,
1620 const MCSubtargetInfo &STI,
1621 raw_ostream &O) {
1622 static_assert(size == 64 || size == 32,
1623 "Template parameter must be either 32 or 64");
1624 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
1625
1626 unsigned Sube = (size == 32) ? AArch64::sube32 : AArch64::sube64;
1627 unsigned Subo = (size == 32) ? AArch64::subo32 : AArch64::subo64;
1628
1629 MCRegister Even = MRI.getSubReg(Reg, Idx: Sube);
1630 MCRegister Odd = MRI.getSubReg(Reg, Idx: Subo);
1631 printRegName(OS&: O, Reg: Even);
1632 O << ", ";
1633 printRegName(OS&: O, Reg: Odd);
1634}
1635
1636void AArch64InstPrinter::printMatrixTileList(const MCInst *MI, unsigned OpNum,
1637 const MCSubtargetInfo &STI,
1638 raw_ostream &O) {
1639 unsigned MaxRegs = 8;
1640 unsigned RegMask = MI->getOperand(i: OpNum).getImm();
1641
1642 unsigned NumRegs = 0;
1643 for (unsigned I = 0; I < MaxRegs; ++I)
1644 if ((RegMask & (1 << I)) != 0)
1645 ++NumRegs;
1646
1647 O << "{";
1648 unsigned Printed = 0;
1649 for (unsigned I = 0; I < MaxRegs; ++I) {
1650 unsigned Reg = RegMask & (1 << I);
1651 if (Reg == 0)
1652 continue;
1653 printRegName(OS&: O, Reg: AArch64::ZAD0 + I);
1654 if (Printed + 1 != NumRegs)
1655 O << ", ";
1656 ++Printed;
1657 }
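  // A mask of 0b101, for instance, yields "{za0.d, za2.d}" (illustrative tile
  // names).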
1658 O << "}";
1659}
1660
1661void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
1662 const MCSubtargetInfo &STI,
1663 raw_ostream &O,
1664 StringRef LayoutSuffix) {
1665 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
1666
1667 O << "{ ";
1668
1669 // Work out how many registers there are in the list (if there is an actual
1670 // list).
1671 unsigned NumRegs = 1;
1672 if (MRI.getRegClass(i: AArch64::DDRegClassID).contains(Reg) ||
1673 MRI.getRegClass(i: AArch64::ZPR2RegClassID).contains(Reg) ||
1674 MRI.getRegClass(i: AArch64::QQRegClassID).contains(Reg) ||
1675 MRI.getRegClass(i: AArch64::PPR2RegClassID).contains(Reg) ||
1676 MRI.getRegClass(i: AArch64::ZPR2StridedRegClassID).contains(Reg))
1677 NumRegs = 2;
1678 else if (MRI.getRegClass(i: AArch64::DDDRegClassID).contains(Reg) ||
1679 MRI.getRegClass(i: AArch64::ZPR3RegClassID).contains(Reg) ||
1680 MRI.getRegClass(i: AArch64::QQQRegClassID).contains(Reg))
1681 NumRegs = 3;
1682 else if (MRI.getRegClass(i: AArch64::DDDDRegClassID).contains(Reg) ||
1683 MRI.getRegClass(i: AArch64::ZPR4RegClassID).contains(Reg) ||
1684 MRI.getRegClass(i: AArch64::QQQQRegClassID).contains(Reg) ||
1685 MRI.getRegClass(i: AArch64::ZPR4StridedRegClassID).contains(Reg))
1686 NumRegs = 4;
1687
1688 unsigned Stride = 1;
1689 if (MRI.getRegClass(i: AArch64::ZPR2StridedRegClassID).contains(Reg))
1690 Stride = 8;
1691 else if (MRI.getRegClass(i: AArch64::ZPR4StridedRegClassID).contains(Reg))
1692 Stride = 4;
1693
1694 // Now forget about the list and find out what the first register is.
1695 if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::dsub0))
1696 Reg = FirstReg;
1697 else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::qsub0))
1698 Reg = FirstReg;
1699 else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::zsub0))
1700 Reg = FirstReg;
1701 else if (MCRegister FirstReg = MRI.getSubReg(Reg, Idx: AArch64::psub0))
1702 Reg = FirstReg;
1703
1704 // If it's a D-reg, we need to promote it to the equivalent Q-reg before
1705 // printing (otherwise getRegisterName fails).
1706 if (MRI.getRegClass(i: AArch64::FPR64RegClassID).contains(Reg)) {
1707 const MCRegisterClass &FPR128RC =
1708 MRI.getRegClass(i: AArch64::FPR128RegClassID);
1709 Reg = MRI.getMatchingSuperReg(Reg, SubIdx: AArch64::dsub, RC: &FPR128RC);
1710 }
1711
1712 if ((MRI.getRegClass(i: AArch64::ZPRRegClassID).contains(Reg) ||
1713 MRI.getRegClass(i: AArch64::PPRRegClassID).contains(Reg)) &&
1714 NumRegs > 1 && Stride == 1 &&
      // Do not print the range form when the last register is lower than the
      // first, because the list wraps around.
1717 Reg < getNextVectorRegister(Reg, Stride: NumRegs - 1)) {
1718 printRegName(OS&: O, Reg);
1719 O << LayoutSuffix;
1720 if (NumRegs > 1) {
      // A pair of SVE registers is separated by ','; longer lists use '-' to
      // print a range.
1722 StringRef split_char = NumRegs == 2 ? ", " : " - ";
1723 O << split_char;
1724 printRegName(OS&: O, Reg: (getNextVectorRegister(Reg, Stride: NumRegs - 1)));
1725 O << LayoutSuffix;
1726 }
1727 } else {
1728 for (unsigned i = 0; i < NumRegs;
1729 ++i, Reg = getNextVectorRegister(Reg, Stride)) {
      // This path also covers wrap-around and strided lists. SVE and
      // predicate registers print under their own names; other vector
      // registers use the v-form alias.
1731 if (MRI.getRegClass(i: AArch64::ZPRRegClassID).contains(Reg) ||
1732 MRI.getRegClass(i: AArch64::PPRRegClassID).contains(Reg))
1733 printRegName(OS&: O, Reg);
1734 else
1735 printRegName(OS&: O, Reg, AltIdx: AArch64::vreg);
1736 O << LayoutSuffix;
1737 if (i + 1 != NumRegs)
1738 O << ", ";
1739 }
1740 }
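  // Illustrative results: "{ z1.s - z3.s }" for a contiguous three-register
  // SVE list, "{ v0.4s, v1.4s }" for a NEON pair, and "{ z0.s, z8.s }" for a
  // two-register strided list.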
1741 O << " }";
1742}
1743
1744void
1745AArch64InstPrinter::printImplicitlyTypedVectorList(const MCInst *MI,
1746 unsigned OpNum,
1747 const MCSubtargetInfo &STI,
1748 raw_ostream &O) {
1749 printVectorList(MI, OpNum, STI, O, LayoutSuffix: "");
1750}
1751
1752template <unsigned NumLanes, char LaneKind>
1753void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
1754 const MCSubtargetInfo &STI,
1755 raw_ostream &O) {
1756 if (LaneKind == 0) {
1757 printVectorList(MI, OpNum, STI, O, LayoutSuffix: "");
1758 return;
1759 }
1760 std::string Suffix(".");
1761 if (NumLanes)
1762 Suffix += itostr(X: NumLanes) + LaneKind;
1763 else
1764 Suffix += LaneKind;
1765
1766 printVectorList(MI, OpNum, STI, O, LayoutSuffix: Suffix);
1767}
1768
1769template <unsigned Scale>
1770void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
1771 const MCSubtargetInfo &STI,
1772 raw_ostream &O) {
1773 O << "[" << Scale * MI->getOperand(i: OpNum).getImm() << "]";
1774}
1775
1776template <unsigned Scale>
1777void AArch64InstPrinter::printMatrixIndex(const MCInst *MI, unsigned OpNum,
1778 const MCSubtargetInfo &STI,
1779 raw_ostream &O) {
1780 O << Scale * MI->getOperand(i: OpNum).getImm();
1781}
1782
1783void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, uint64_t Address,
1784 unsigned OpNum,
1785 const MCSubtargetInfo &STI,
1786 raw_ostream &O) {
1787 // Do not print the numeric target address when symbolizing.
1788 if (SymbolizeOperands)
1789 return;
1790
1791 const MCOperand &Op = MI->getOperand(i: OpNum);
1792
1793 // If the label has already been resolved to an immediate offset (say, when
1794 // we're running the disassembler), just print the immediate.
1795 if (Op.isImm()) {
1796 int64_t Offset = Op.getImm() * 4;
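    // Branch immediates are word offsets; e.g. an immediate of 3 on an
    // instruction at 0x1000 prints as "#12", or as 0x100c when printing
    // addresses.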
1797 if (PrintBranchImmAsAddress)
1798 markup(OS&: O, M: Markup::Target) << formatHex(Value: Address + Offset);
1799 else
1800 markup(OS&: O, M: Markup::Immediate) << "#" << formatImm(Value: Offset);
1801 return;
1802 }
1803
1804 // If the branch target is simply an address then print it in hex.
1805 const MCConstantExpr *BranchTarget =
1806 dyn_cast<MCConstantExpr>(Val: MI->getOperand(i: OpNum).getExpr());
1807 int64_t TargetAddress;
1808 if (BranchTarget && BranchTarget->evaluateAsAbsolute(Res&: TargetAddress)) {
1809 markup(OS&: O, M: Markup::Target) << formatHex(Value: (uint64_t)TargetAddress);
1810 } else {
1811 // Otherwise, just print the expression.
1812 MAI.printExpr(O, *MI->getOperand(i: OpNum).getExpr());
1813 }
1814}
1815
1816void AArch64InstPrinter::printAdrAdrpLabel(const MCInst *MI, uint64_t Address,
1817 unsigned OpNum,
1818 const MCSubtargetInfo &STI,
1819 raw_ostream &O) {
1820 // Do not print the numeric target address when symbolizing.
1821 // However, do print for ADRP, as this is typically used together with an ADD
1822 // or an immediate-offset ldr/str and the label is likely at the wrong point.
1823 if (SymbolizeOperands && MI->getOpcode() != AArch64::ADRP)
1824 return;
1825
1826 const MCOperand &Op = MI->getOperand(i: OpNum);
1827
1828 // If the label has already been resolved to an immediate offset (say, when
1829 // we're running the disassembler), just print the immediate.
1830 if (Op.isImm()) {
1831 int64_t Offset = Op.getImm();
1832 if (MI->getOpcode() == AArch64::ADRP) {
1833 Offset = Offset * 4096;
1834 Address = Address & -4096;
1835 }
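    // E.g. ADRP with an immediate of 2 at address 0x1234 gives Offset 0x2000
    // and a page base of 0x1000, so the target prints as 0x3000 when printing
    // addresses.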
1836 WithMarkup M = markup(OS&: O, M: Markup::Immediate);
1837 if (PrintBranchImmAsAddress)
1838 markup(OS&: O, M: Markup::Target) << formatHex(Value: Address + Offset);
1839 else
1840 markup(OS&: O, M: Markup::Immediate) << "#" << Offset;
1841 return;
1842 }
1843
1844 // Otherwise, just print the expression.
1845 MAI.printExpr(O, *MI->getOperand(i: OpNum).getExpr());
1846}
1847
1848void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
1849 const MCSubtargetInfo &STI,
1850 raw_ostream &O) {
1851 unsigned Val = MI->getOperand(i: OpNo).getImm();
1852 unsigned Opcode = MI->getOpcode();
1853
1854 StringRef Name;
1855 if (Opcode == AArch64::ISB) {
1856 auto ISB = AArch64ISB::lookupISBByEncoding(Encoding: Val);
1857 Name = ISB ? ISB->Name : "";
1858 } else if (Opcode == AArch64::TSB) {
1859 auto TSB = AArch64TSB::lookupTSBByEncoding(Encoding: Val);
1860 Name = TSB ? TSB->Name : "";
1861 } else {
1862 auto DB = AArch64DB::lookupDBByEncoding(Encoding: Val);
1863 Name = DB ? DB->Name : "";
1864 }
1865 if (!Name.empty())
1866 O << Name;
1867 else
1868 markup(OS&: O, M: Markup::Immediate) << "#" << Val;
1869}
1870
1871void AArch64InstPrinter::printBarriernXSOption(const MCInst *MI, unsigned OpNo,
1872 const MCSubtargetInfo &STI,
1873 raw_ostream &O) {
1874 unsigned Val = MI->getOperand(i: OpNo).getImm();
1875 assert(MI->getOpcode() == AArch64::DSBnXS);
1876
1877 StringRef Name;
1878 auto DB = AArch64DBnXS::lookupDBnXSByEncoding(Encoding: Val);
1879 Name = DB ? DB->Name : "";
1880
1881 if (!Name.empty())
1882 O << Name;
1883 else
1884 markup(OS&: O, M: Markup::Immediate) << "#" << Val;
1885}
1886
1887static bool isValidSysReg(const AArch64SysReg::SysReg &Reg, bool Read,
1888 const MCSubtargetInfo &STI) {
1889 return (Read ? Reg.Readable : Reg.Writeable) &&
1890 Reg.haveFeatures(ActiveFeatures: STI.getFeatureBits());
1891}
1892
// Looks up a system register by encoding. Some system registers share the
// same encoding between different architectures; to work around this,
// TableGen returns a range of registers with the same encoding. We need to
// check each register in the range to see if it is valid.
1897static const AArch64SysReg::SysReg *lookupSysReg(unsigned Val, bool Read,
1898 const MCSubtargetInfo &STI) {
1899 auto Range = AArch64SysReg::lookupSysRegByEncoding(Encoding: Val);
1900 for (auto &Reg : Range) {
1901 if (isValidSysReg(Reg, Read, STI))
1902 return &Reg;
1903 }
1904
1905 return nullptr;
1906}
1907
1908void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
1909 const MCSubtargetInfo &STI,
1910 raw_ostream &O) {
1911 unsigned Val = MI->getOperand(i: OpNo).getImm();
1912
  // Horrible hack for the one register that has identical encodings but
  // different names in MSR and MRS. Because of this, one of MRS and MSR is
  // going to get the wrong entry.
1916 if (Val == AArch64SysReg::DBGDTRRX_EL0) {
1917 O << "DBGDTRRX_EL0";
1918 return;
1919 }
1920
1921 // Horrible hack for two different registers having the same encoding.
1922 if (Val == AArch64SysReg::TRCEXTINSELR) {
1923 O << "TRCEXTINSELR";
1924 return;
1925 }
1926
1927 const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, Read: true /*Read*/, STI);
1928
1929 if (Reg)
1930 O << Reg->Name;
1931 else
1932 O << AArch64SysReg::genericRegisterString(Bits: Val);
1933}
1934
1935void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
1936 const MCSubtargetInfo &STI,
1937 raw_ostream &O) {
1938 unsigned Val = MI->getOperand(i: OpNo).getImm();
1939
  // Horrible hack for the one register that has identical encodings but
  // different names in MSR and MRS. Because of this, one of MRS and MSR is
  // going to get the wrong entry.
1943 if (Val == AArch64SysReg::DBGDTRTX_EL0) {
1944 O << "DBGDTRTX_EL0";
1945 return;
1946 }
1947
1948 // Horrible hack for two different registers having the same encoding.
1949 if (Val == AArch64SysReg::TRCEXTINSELR) {
1950 O << "TRCEXTINSELR";
1951 return;
1952 }
1953
1954 const AArch64SysReg::SysReg *Reg = lookupSysReg(Val, Read: false /*Read*/, STI);
1955
1956 if (Reg)
1957 O << Reg->Name;
1958 else
1959 O << AArch64SysReg::genericRegisterString(Bits: Val);
1960}
1961
1962void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
1963 const MCSubtargetInfo &STI,
1964 raw_ostream &O) {
1965 unsigned Val = MI->getOperand(i: OpNo).getImm();
1966
1967 auto PStateImm15 = AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: Val);
1968 auto PStateImm1 = AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: Val);
1969 if (PStateImm15 && PStateImm15->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1970 O << PStateImm15->Name;
1971 else if (PStateImm1 && PStateImm1->haveFeatures(ActiveFeatures: STI.getFeatureBits()))
1972 O << PStateImm1->Name;
1973 else
1974 O << "#" << formatImm(Value: Val);
1975}
1976
1977void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
1978 const MCSubtargetInfo &STI,
1979 raw_ostream &O) {
1980 unsigned RawVal = MI->getOperand(i: OpNo).getImm();
1981 uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(Imm: RawVal);
1982 markup(OS&: O, M: Markup::Immediate) << format(Fmt: "#%#016llx", Vals: Val);
1983}
1984
1985template<int64_t Angle, int64_t Remainder>
1986void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
1987 const MCSubtargetInfo &STI,
1988 raw_ostream &O) {
1989 unsigned Val = MI->getOperand(i: OpNo).getImm();
1990 markup(OS&: O, M: Markup::Immediate) << "#" << (Val * Angle) + Remainder;
1991}
1992
1993void AArch64InstPrinter::printSVEPattern(const MCInst *MI, unsigned OpNum,
1994 const MCSubtargetInfo &STI,
1995 raw_ostream &O) {
1996 unsigned Val = MI->getOperand(i: OpNum).getImm();
1997 if (auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByEncoding(Encoding: Val))
1998 O << Pat->Name;
1999 else
2000 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Val);
2001}
2002
2003void AArch64InstPrinter::printSVEVecLenSpecifier(const MCInst *MI,
2004 unsigned OpNum,
2005 const MCSubtargetInfo &STI,
2006 raw_ostream &O) {
2007 unsigned Val = MI->getOperand(i: OpNum).getImm();
2008 // Pattern has only 1 bit
2009 if (Val > 1)
2010 llvm_unreachable("Invalid vector length specifier");
2011 if (auto Pat =
2012 AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByEncoding(Encoding: Val))
2013 O << Pat->Name;
2014 else
2015 llvm_unreachable("Invalid vector length specifier");
2016}
2017
2018template <char suffix>
2019void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
2020 const MCSubtargetInfo &STI,
2021 raw_ostream &O) {
2022 switch (suffix) {
2023 case 0:
2024 case 'b':
2025 case 'h':
2026 case 's':
2027 case 'd':
2028 case 'q':
2029 break;
2030 default: llvm_unreachable("Invalid kind specifier.");
2031 }
2032
2033 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2034 printRegName(OS&: O, Reg);
2035 if (suffix != 0)
2036 O << '.' << suffix;
2037}
2038
2039template <typename T>
2040void AArch64InstPrinter::printImmSVE(T Value, raw_ostream &O) {
2041 std::make_unsigned_t<T> HexValue = Value;
2042
2043 if (getPrintImmHex())
2044 markup(OS&: O, M: Markup::Immediate) << '#' << formatHex(Value: (uint64_t)HexValue);
2045 else
2046 markup(OS&: O, M: Markup::Immediate) << '#' << formatDec(Value);
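  // For example, with hex printing enabled a value of 16 prints as "#0x10",
  // and the comment stream (below) gets "=16".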
2047
2048 if (CommentStream) {
    // Print the comment in the opposite base to the operand itself.
2050 if (getPrintImmHex())
2051 *CommentStream << '=' << formatDec(Value: HexValue) << '\n';
2052 else
2053 *CommentStream << '=' << formatHex(Value: (uint64_t)Value) << '\n';
2054 }
2055}
2056
2057template <typename T>
2058void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
2059 const MCSubtargetInfo &STI,
2060 raw_ostream &O) {
2061 unsigned UnscaledVal = MI->getOperand(i: OpNum).getImm();
2062 unsigned Shift = MI->getOperand(i: OpNum + 1).getImm();
2063 assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&
2064 "Unexpected shift type!");
2065
2066 // #0 lsl #8 is never pretty printed
2067 if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Imm: Shift) != 0)) {
2068 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: UnscaledVal);
2069 printShifter(MI, OpNum: OpNum + 1, STI, O);
2070 return;
2071 }
2072
2073 T Val;
2074 if (std::is_signed<T>())
2075 Val = (int8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Imm: Shift));
2076 else
2077 Val = (uint8_t)UnscaledVal * (1 << AArch64_AM::getShiftValue(Imm: Shift));
2078
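  // E.g. an unscaled value of 1 with "lsl #8" prints as the scaled immediate
  // 256, while 0 with "lsl #8" keeps the explicit "#0, lsl #8" form above.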
2079 printImmSVE(Val, O);
2080}
2081
2082template <typename T>
2083void AArch64InstPrinter::printSVELogicalImm(const MCInst *MI, unsigned OpNum,
2084 const MCSubtargetInfo &STI,
2085 raw_ostream &O) {
2086 typedef std::make_signed_t<T> SignedT;
2087 typedef std::make_unsigned_t<T> UnsignedT;
2088
2089 uint64_t Val = MI->getOperand(i: OpNum).getImm();
2090 UnsignedT PrintVal = AArch64_AM::decodeLogicalImmediate(val: Val, regSize: 64);
2091
  // Prefer the default format for 16-bit values, hex otherwise.
2093 if ((int16_t)PrintVal == (SignedT)PrintVal)
2094 printImmSVE((T)PrintVal, O);
2095 else if ((uint16_t)PrintVal == PrintVal)
2096 printImmSVE(PrintVal, O);
2097 else
2098 markup(OS&: O, M: Markup::Immediate) << '#' << formatHex(Value: (uint64_t)PrintVal);
2099}
2100
2101template <int Width>
2102void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
2103 const MCSubtargetInfo &STI,
2104 raw_ostream &O) {
2105 unsigned Base;
2106 switch (Width) {
2107 case 8: Base = AArch64::B0; break;
2108 case 16: Base = AArch64::H0; break;
2109 case 32: Base = AArch64::S0; break;
2110 case 64: Base = AArch64::D0; break;
2111 case 128: Base = AArch64::Q0; break;
2112 default:
2113 llvm_unreachable("Unsupported width");
2114 }
2115 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
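  // E.g. with Width == 64, z3 prints as its FPR counterpart "d3".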
2116 printRegName(OS&: O, Reg: Reg - AArch64::Z0 + Base);
2117}
2118
2119template <unsigned ImmIs0, unsigned ImmIs1>
2120void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
2121 const MCSubtargetInfo &STI,
2122 raw_ostream &O) {
2123 auto *Imm0Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmIs0);
2124 auto *Imm1Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmIs1);
2125 unsigned Val = MI->getOperand(i: OpNum).getImm();
2126 markup(OS&: O, M: Markup::Immediate)
2127 << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
2128}
2129
2130void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
2131 const MCSubtargetInfo &STI,
2132 raw_ostream &O) {
2133 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2134 printRegName(OS&: O, Reg: getWRegFromXReg(Reg));
2135}
2136
2137void AArch64InstPrinter::printGPR64x8(const MCInst *MI, unsigned OpNum,
2138 const MCSubtargetInfo &STI,
2139 raw_ostream &O) {
2140 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2141 printRegName(OS&: O, Reg: MRI.getSubReg(Reg, Idx: AArch64::x8sub_0));
2142}
2143
2144void AArch64InstPrinter::printSyspXzrPair(const MCInst *MI, unsigned OpNum,
2145 const MCSubtargetInfo &STI,
2146 raw_ostream &O) {
2147 MCRegister Reg = MI->getOperand(i: OpNum).getReg();
2148 assert(Reg == AArch64::XZR &&
2149 "MC representation of SyspXzrPair should be XZR");
2150 O << getRegisterName(Reg) << ", " << getRegisterName(Reg);
2151}
2152
2153void AArch64InstPrinter::printPHintOp(const MCInst *MI, unsigned OpNum,
2154 const MCSubtargetInfo &STI,
2155 raw_ostream &O) {
2156 unsigned Op = MI->getOperand(i: OpNum).getImm();
2157 auto PH = AArch64PHint::lookupPHintByEncoding(Op);
2158 if (PH)
2159 O << PH->Name;
2160 else
2161 markup(OS&: O, M: Markup::Immediate) << '#' << formatImm(Value: Op);
2162}
2163