//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64MCAsmInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/AArch64BuildAttributes.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {
67
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
92
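  // Captures a preceding MOVPRFX instruction (destination, governing
  // predicate and element size) so the instruction that follows it can be
  // checked against the MOVPRFX constraints.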
  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
164 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
165 std::string &Suggestion);
166 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
167 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
168 bool parseRegister(OperandVector &Operands);
169 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
170 bool parseNeonVectorList(OperandVector &Operands);
171 bool parseOptionalMulOperand(OperandVector &Operands);
172 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
173 bool parseKeywordOperand(OperandVector &Operands);
174 bool parseOperand(OperandVector &Operands, bool isCondCode,
175 bool invertCondCode);
176 bool parseImmExpr(int64_t &Out);
177 bool parseComma();
178 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
179 unsigned Last);
180
181 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
182 OperandVector &Operands);
183
184 bool parseDataExpr(const MCExpr *&Res) override;
185 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
186
187 bool parseDirectiveArch(SMLoc L);
188 bool parseDirectiveArchExtension(SMLoc L);
189 bool parseDirectiveCPU(SMLoc L);
190 bool parseDirectiveInst(SMLoc L);
191
192 bool parseDirectiveTLSDescCall(SMLoc L);
193
194 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
195 bool parseDirectiveLtorg(SMLoc L);
196
197 bool parseDirectiveReq(StringRef Name, SMLoc L);
198 bool parseDirectiveUnreq(SMLoc L);
199 bool parseDirectiveCFINegateRAState();
200 bool parseDirectiveCFINegateRAStateWithPC();
201 bool parseDirectiveCFIBKeyFrame();
202 bool parseDirectiveCFIMTETaggedFrame();
203
204 bool parseDirectiveVariantPCS(SMLoc L);
205
206 bool parseDirectiveSEHAllocStack(SMLoc L);
207 bool parseDirectiveSEHPrologEnd(SMLoc L);
208 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
209 bool parseDirectiveSEHSaveFPLR(SMLoc L);
210 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
211 bool parseDirectiveSEHSaveReg(SMLoc L);
212 bool parseDirectiveSEHSaveRegX(SMLoc L);
213 bool parseDirectiveSEHSaveRegP(SMLoc L);
214 bool parseDirectiveSEHSaveRegPX(SMLoc L);
215 bool parseDirectiveSEHSaveLRPair(SMLoc L);
216 bool parseDirectiveSEHSaveFReg(SMLoc L);
217 bool parseDirectiveSEHSaveFRegX(SMLoc L);
218 bool parseDirectiveSEHSaveFRegP(SMLoc L);
219 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
220 bool parseDirectiveSEHSetFP(SMLoc L);
221 bool parseDirectiveSEHAddFP(SMLoc L);
222 bool parseDirectiveSEHNop(SMLoc L);
223 bool parseDirectiveSEHSaveNext(SMLoc L);
224 bool parseDirectiveSEHEpilogStart(SMLoc L);
225 bool parseDirectiveSEHEpilogEnd(SMLoc L);
226 bool parseDirectiveSEHTrapFrame(SMLoc L);
227 bool parseDirectiveSEHMachineFrame(SMLoc L);
228 bool parseDirectiveSEHContext(SMLoc L);
229 bool parseDirectiveSEHECContext(SMLoc L);
230 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
231 bool parseDirectiveSEHPACSignLR(SMLoc L);
232 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
233 bool parseDirectiveSEHAllocZ(SMLoc L);
234 bool parseDirectiveSEHSaveZReg(SMLoc L);
235 bool parseDirectiveSEHSavePReg(SMLoc L);
236 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
237 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
238
239 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
240 SmallVectorImpl<SMLoc> &Loc);
241 unsigned getNumRegsForRegKind(RegKind K);
242 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
243 OperandVector &Operands, MCStreamer &Out,
244 uint64_t &ErrorInfo,
245 bool MatchingInlineAsm) override;
246 /// @name Auto-generated Match Functions
247 /// {
248
249#define GET_ASSEMBLER_HEADER
250#include "AArch64GenAsmMatcher.inc"
251
252 /// }
253
254 ParseStatus tryParseScalarRegister(MCRegister &Reg);
255 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
256 RegKind MatchKind);
257 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
258 ParseStatus tryParseSVCR(OperandVector &Operands);
259 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
260 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
261 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
262 ParseStatus tryParseSysReg(OperandVector &Operands);
263 ParseStatus tryParseSysCROperand(OperandVector &Operands);
264 template <bool IsSVEPrefetch = false>
265 ParseStatus tryParsePrefetch(OperandVector &Operands);
266 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
267 ParseStatus tryParsePSBHint(OperandVector &Operands);
268 ParseStatus tryParseBTIHint(OperandVector &Operands);
269 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
270 ParseStatus tryParseAdrLabel(OperandVector &Operands);
271 template <bool AddFPZeroAsLiteral>
272 ParseStatus tryParseFPImm(OperandVector &Operands);
273 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
274 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
275 bool tryParseNeonVectorRegister(OperandVector &Operands);
276 ParseStatus tryParseVectorIndex(OperandVector &Operands);
277 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
278 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
279 template <bool ParseShiftExtend,
280 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
281 ParseStatus tryParseGPROperand(OperandVector &Operands);
282 ParseStatus tryParseZTOperand(OperandVector &Operands);
283 template <bool ParseShiftExtend, bool ParseSuffix>
284 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
285 template <RegKind RK>
286 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
287 ParseStatus
288 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
289 template <RegKind VectorKind>
290 ParseStatus tryParseVectorList(OperandVector &Operands,
291 bool ExpectMatch = false);
292 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
293 ParseStatus tryParseSVEPattern(OperandVector &Operands);
294 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
295 ParseStatus tryParseGPR64x8(OperandVector &Operands);
296 ParseStatus tryParseImmRange(OperandVector &Operands);
297 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
298 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
299
300public:
301 enum AArch64MatchResultTy {
302 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
303#define GET_OPERAND_DIAGNOSTIC_TYPES
304#include "AArch64GenAsmMatcher.inc"
305 };
306 bool IsILP32;
307 bool IsWindowsArm64EC;
308
309 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
310 const MCInstrInfo &MII, const MCTargetOptions &Options)
311 : MCTargetAsmParser(Options, STI, MII) {
312 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
313 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
314 MCAsmParserExtension::Initialize(Parser);
315 MCStreamer &S = getParser().getStreamer();
316 if (S.getTargetStreamer() == nullptr)
317 new AArch64TargetStreamer(S);
318
    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
330 }
331
332 bool areEqualRegs(const MCParsedAsmOperand &Op1,
333 const MCParsedAsmOperand &Op2) const override;
334 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
335 SMLoc NameLoc, OperandVector &Operands) override;
336 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
337 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
338 SMLoc &EndLoc) override;
339 bool ParseDirective(AsmToken DirectiveID) override;
340 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
341 unsigned Kind) override;
342
343 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
344 AArch64::Specifier &DarwinSpec,
345 int64_t &Addend);
346};
347
348/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
349/// instruction.
350class AArch64Operand : public MCParsedAsmOperand {
351private:
352 enum KindTy {
353 k_Immediate,
354 k_ShiftedImm,
355 k_ImmRange,
356 k_CondCode,
357 k_Register,
358 k_MatrixRegister,
359 k_MatrixTileList,
360 k_SVCR,
361 k_VectorList,
362 k_VectorIndex,
363 k_Token,
364 k_SysReg,
365 k_SysCR,
366 k_Prefetch,
367 k_ShiftExtend,
368 k_FPImm,
369 k_Barrier,
370 k_PSBHint,
371 k_PHint,
372 k_BTIHint,
373 } Kind;
374
375 SMLoc StartLoc, EndLoc;
376
377 struct TokOp {
378 const char *Data;
379 unsigned Length;
380 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
381 };
382
383 // Separate shift/extend operand.
384 struct ShiftExtendOp {
385 AArch64_AM::ShiftExtendType Type;
386 unsigned Amount;
387 bool HasExplicitAmount;
388 };
389
390 struct RegOp {
391 unsigned RegNum;
392 RegKind Kind;
393 int ElementWidth;
394
395 // The register may be allowed as a different register class,
396 // e.g. for GPR64as32 or GPR32as64.
397 RegConstraintEqualityTy EqualityTy;
398
399 // In some cases the shift/extend needs to be explicitly parsed together
400 // with the register, rather than as a separate operand. This is needed
401 // for addressing modes where the instruction as a whole dictates the
402 // scaling/extend, rather than specific bits in the instruction.
403 // By parsing them as a single operand, we avoid the need to pass an
404 // extra operand in all CodeGen patterns (because all operands need to
405 // have an associated value), and we avoid the need to update TableGen to
406 // accept operands that have no associated bits in the instruction.
407 //
408 // An added benefit of parsing them together is that the assembler
409 // can give a sensible diagnostic if the scaling is not correct.
410 //
411 // The default is 'lsl #0' (HasExplicitAmount = false) if no
412 // ShiftExtend is specified.
413 ShiftExtendOp ShiftExtend;
414 };
415
416 struct MatrixRegOp {
417 unsigned RegNum;
418 unsigned ElementWidth;
419 MatrixKind Kind;
420 };
421
422 struct MatrixTileListOp {
423 unsigned RegMask = 0;
424 };
425
426 struct VectorListOp {
427 unsigned RegNum;
428 unsigned Count;
429 unsigned Stride;
430 unsigned NumElements;
431 unsigned ElementWidth;
432 RegKind RegisterKind;
433 };
434
435 struct VectorIndexOp {
436 int Val;
437 };
438
439 struct ImmOp {
440 const MCExpr *Val;
441 };
442
443 struct ShiftedImmOp {
444 const MCExpr *Val;
445 unsigned ShiftAmount;
446 };
447
448 struct ImmRangeOp {
449 unsigned First;
450 unsigned Last;
451 };
452
453 struct CondCodeOp {
454 AArch64CC::CondCode Code;
455 };
456
457 struct FPImmOp {
458 uint64_t Val; // APFloat value bitcasted to uint64_t.
459 bool IsExact; // describes whether parsed value was exact.
460 };
461
462 struct BarrierOp {
463 const char *Data;
464 unsigned Length;
465 unsigned Val; // Not the enum since not all values have names.
466 bool HasnXSModifier;
467 };
468
469 struct SysRegOp {
470 const char *Data;
471 unsigned Length;
472 uint32_t MRSReg;
473 uint32_t MSRReg;
474 uint32_t PStateField;
475 };
476
477 struct SysCRImmOp {
478 unsigned Val;
479 };
480
481 struct PrefetchOp {
482 const char *Data;
483 unsigned Length;
484 unsigned Val;
485 };
486
487 struct PSBHintOp {
488 const char *Data;
489 unsigned Length;
490 unsigned Val;
491 };
492 struct PHintOp {
493 const char *Data;
494 unsigned Length;
495 unsigned Val;
496 };
497 struct BTIHintOp {
498 const char *Data;
499 unsigned Length;
500 unsigned Val;
501 };
502
503 struct SVCROp {
504 const char *Data;
505 unsigned Length;
506 unsigned PStateField;
507 };
508
509 union {
510 struct TokOp Tok;
511 struct RegOp Reg;
512 struct MatrixRegOp MatrixReg;
513 struct MatrixTileListOp MatrixTileList;
514 struct VectorListOp VectorList;
515 struct VectorIndexOp VectorIndex;
516 struct ImmOp Imm;
517 struct ShiftedImmOp ShiftedImm;
518 struct ImmRangeOp ImmRange;
519 struct CondCodeOp CondCode;
520 struct FPImmOp FPImm;
521 struct BarrierOp Barrier;
522 struct SysRegOp SysReg;
523 struct SysCRImmOp SysCRImm;
524 struct PrefetchOp Prefetch;
525 struct PSBHintOp PSBHint;
526 struct PHintOp PHint;
527 struct BTIHintOp BTIHint;
528 struct ShiftExtendOp ShiftExtend;
529 struct SVCROp SVCR;
530 };
531
  // Keep the MCContext around as the MCExprs may need to be manipulated during
  // the add<>Operands() calls.
534 MCContext &Ctx;
535
536public:
537 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
538
539 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
540 Kind = o.Kind;
541 StartLoc = o.StartLoc;
542 EndLoc = o.EndLoc;
543 switch (Kind) {
544 case k_Token:
545 Tok = o.Tok;
546 break;
547 case k_Immediate:
548 Imm = o.Imm;
549 break;
550 case k_ShiftedImm:
551 ShiftedImm = o.ShiftedImm;
552 break;
553 case k_ImmRange:
554 ImmRange = o.ImmRange;
555 break;
556 case k_CondCode:
557 CondCode = o.CondCode;
558 break;
559 case k_FPImm:
560 FPImm = o.FPImm;
561 break;
562 case k_Barrier:
563 Barrier = o.Barrier;
564 break;
565 case k_Register:
566 Reg = o.Reg;
567 break;
568 case k_MatrixRegister:
569 MatrixReg = o.MatrixReg;
570 break;
571 case k_MatrixTileList:
572 MatrixTileList = o.MatrixTileList;
573 break;
574 case k_VectorList:
575 VectorList = o.VectorList;
576 break;
577 case k_VectorIndex:
578 VectorIndex = o.VectorIndex;
579 break;
580 case k_SysReg:
581 SysReg = o.SysReg;
582 break;
583 case k_SysCR:
584 SysCRImm = o.SysCRImm;
585 break;
586 case k_Prefetch:
587 Prefetch = o.Prefetch;
588 break;
589 case k_PSBHint:
590 PSBHint = o.PSBHint;
591 break;
592 case k_PHint:
593 PHint = o.PHint;
594 break;
595 case k_BTIHint:
596 BTIHint = o.BTIHint;
597 break;
598 case k_ShiftExtend:
599 ShiftExtend = o.ShiftExtend;
600 break;
601 case k_SVCR:
602 SVCR = o.SVCR;
603 break;
604 }
605 }
606
607 /// getStartLoc - Get the location of the first token of this operand.
608 SMLoc getStartLoc() const override { return StartLoc; }
609 /// getEndLoc - Get the location of the last token of this operand.
610 SMLoc getEndLoc() const override { return EndLoc; }
611
612 StringRef getToken() const {
613 assert(Kind == k_Token && "Invalid access!");
614 return StringRef(Tok.Data, Tok.Length);
615 }
616
617 bool isTokenSuffix() const {
618 assert(Kind == k_Token && "Invalid access!");
619 return Tok.IsSuffix;
620 }
621
622 const MCExpr *getImm() const {
623 assert(Kind == k_Immediate && "Invalid access!");
624 return Imm.Val;
625 }
626
627 const MCExpr *getShiftedImmVal() const {
628 assert(Kind == k_ShiftedImm && "Invalid access!");
629 return ShiftedImm.Val;
630 }
631
632 unsigned getShiftedImmShift() const {
633 assert(Kind == k_ShiftedImm && "Invalid access!");
634 return ShiftedImm.ShiftAmount;
635 }
636
637 unsigned getFirstImmVal() const {
638 assert(Kind == k_ImmRange && "Invalid access!");
639 return ImmRange.First;
640 }
641
642 unsigned getLastImmVal() const {
643 assert(Kind == k_ImmRange && "Invalid access!");
644 return ImmRange.Last;
645 }
646
647 AArch64CC::CondCode getCondCode() const {
648 assert(Kind == k_CondCode && "Invalid access!");
649 return CondCode.Code;
650 }
651
652 APFloat getFPImm() const {
653 assert (Kind == k_FPImm && "Invalid access!");
654 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
655 }
656
657 bool getFPImmIsExact() const {
658 assert (Kind == k_FPImm && "Invalid access!");
659 return FPImm.IsExact;
660 }
661
662 unsigned getBarrier() const {
663 assert(Kind == k_Barrier && "Invalid access!");
664 return Barrier.Val;
665 }
666
667 StringRef getBarrierName() const {
668 assert(Kind == k_Barrier && "Invalid access!");
669 return StringRef(Barrier.Data, Barrier.Length);
670 }
671
672 bool getBarriernXSModifier() const {
673 assert(Kind == k_Barrier && "Invalid access!");
674 return Barrier.HasnXSModifier;
675 }
676
677 MCRegister getReg() const override {
678 assert(Kind == k_Register && "Invalid access!");
679 return Reg.RegNum;
680 }
681
682 unsigned getMatrixReg() const {
683 assert(Kind == k_MatrixRegister && "Invalid access!");
684 return MatrixReg.RegNum;
685 }
686
687 unsigned getMatrixElementWidth() const {
688 assert(Kind == k_MatrixRegister && "Invalid access!");
689 return MatrixReg.ElementWidth;
690 }
691
692 MatrixKind getMatrixKind() const {
693 assert(Kind == k_MatrixRegister && "Invalid access!");
694 return MatrixReg.Kind;
695 }
696
697 unsigned getMatrixTileListRegMask() const {
698 assert(isMatrixTileList() && "Invalid access!");
699 return MatrixTileList.RegMask;
700 }
701
702 RegConstraintEqualityTy getRegEqualityTy() const {
703 assert(Kind == k_Register && "Invalid access!");
704 return Reg.EqualityTy;
705 }
706
707 unsigned getVectorListStart() const {
708 assert(Kind == k_VectorList && "Invalid access!");
709 return VectorList.RegNum;
710 }
711
712 unsigned getVectorListCount() const {
713 assert(Kind == k_VectorList && "Invalid access!");
714 return VectorList.Count;
715 }
716
717 unsigned getVectorListStride() const {
718 assert(Kind == k_VectorList && "Invalid access!");
719 return VectorList.Stride;
720 }
721
722 int getVectorIndex() const {
723 assert(Kind == k_VectorIndex && "Invalid access!");
724 return VectorIndex.Val;
725 }
726
727 StringRef getSysReg() const {
728 assert(Kind == k_SysReg && "Invalid access!");
729 return StringRef(SysReg.Data, SysReg.Length);
730 }
731
732 unsigned getSysCR() const {
733 assert(Kind == k_SysCR && "Invalid access!");
734 return SysCRImm.Val;
735 }
736
737 unsigned getPrefetch() const {
738 assert(Kind == k_Prefetch && "Invalid access!");
739 return Prefetch.Val;
740 }
741
742 unsigned getPSBHint() const {
743 assert(Kind == k_PSBHint && "Invalid access!");
744 return PSBHint.Val;
745 }
746
747 unsigned getPHint() const {
748 assert(Kind == k_PHint && "Invalid access!");
749 return PHint.Val;
750 }
751
752 StringRef getPSBHintName() const {
753 assert(Kind == k_PSBHint && "Invalid access!");
754 return StringRef(PSBHint.Data, PSBHint.Length);
755 }
756
757 StringRef getPHintName() const {
758 assert(Kind == k_PHint && "Invalid access!");
759 return StringRef(PHint.Data, PHint.Length);
760 }
761
762 unsigned getBTIHint() const {
763 assert(Kind == k_BTIHint && "Invalid access!");
764 return BTIHint.Val;
765 }
766
767 StringRef getBTIHintName() const {
768 assert(Kind == k_BTIHint && "Invalid access!");
769 return StringRef(BTIHint.Data, BTIHint.Length);
770 }
771
772 StringRef getSVCR() const {
773 assert(Kind == k_SVCR && "Invalid access!");
774 return StringRef(SVCR.Data, SVCR.Length);
775 }
776
777 StringRef getPrefetchName() const {
778 assert(Kind == k_Prefetch && "Invalid access!");
779 return StringRef(Prefetch.Data, Prefetch.Length);
780 }
781
782 AArch64_AM::ShiftExtendType getShiftExtendType() const {
783 if (Kind == k_ShiftExtend)
784 return ShiftExtend.Type;
785 if (Kind == k_Register)
786 return Reg.ShiftExtend.Type;
787 llvm_unreachable("Invalid access!");
788 }
789
790 unsigned getShiftExtendAmount() const {
791 if (Kind == k_ShiftExtend)
792 return ShiftExtend.Amount;
793 if (Kind == k_Register)
794 return Reg.ShiftExtend.Amount;
795 llvm_unreachable("Invalid access!");
796 }
797
798 bool hasShiftExtendAmount() const {
799 if (Kind == k_ShiftExtend)
800 return ShiftExtend.HasExplicitAmount;
801 if (Kind == k_Register)
802 return Reg.ShiftExtend.HasExplicitAmount;
803 llvm_unreachable("Invalid access!");
804 }
805
806 bool isImm() const override { return Kind == k_Immediate; }
807 bool isMem() const override { return false; }
808
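  // True only for a constant immediate in the range [0, 63].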
809 bool isUImm6() const {
810 if (!isImm())
811 return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
813 if (!MCE)
814 return false;
815 int64_t Val = MCE->getValue();
816 return (Val >= 0 && Val < 64);
817 }
818
819 template <int Width> bool isSImm() const {
820 return bool(isSImmScaled<Width, 1>());
821 }
822
823 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
824 return isImmScaled<Bits, Scale>(true);
825 }
826
827 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
828 DiagnosticPredicate isUImmScaled() const {
829 if (IsRange && isImmRange() &&
830 (getLastImmVal() != getFirstImmVal() + Offset))
831 return DiagnosticPredicate::NoMatch;
832
833 return isImmScaled<Bits, Scale, IsRange>(false);
834 }
835
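  // Check that the immediate (or, for ranges, its first value) is a multiple
  // of Scale and lies within the signed or unsigned Bits-bit range.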
836 template <int Bits, int Scale, bool IsRange = false>
837 DiagnosticPredicate isImmScaled(bool Signed) const {
838 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
839 (isImmRange() && !IsRange))
840 return DiagnosticPredicate::NoMatch;
841
842 int64_t Val;
843 if (isImmRange())
844 Val = getFirstImmVal();
845 else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
847 if (!MCE)
848 return DiagnosticPredicate::NoMatch;
849 Val = MCE->getValue();
850 }
851
852 int64_t MinVal, MaxVal;
853 if (Signed) {
854 int64_t Shift = Bits - 1;
855 MinVal = (int64_t(1) << Shift) * -Scale;
856 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
857 } else {
858 MinVal = 0;
859 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
860 }
861
862 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
863 return DiagnosticPredicate::Match;
864
865 return DiagnosticPredicate::NearMatch;
866 }
867
868 DiagnosticPredicate isSVEPattern() const {
869 if (!isImm())
870 return DiagnosticPredicate::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
872 if (!MCE)
873 return DiagnosticPredicate::NoMatch;
874 int64_t Val = MCE->getValue();
875 if (Val >= 0 && Val < 32)
876 return DiagnosticPredicate::Match;
877 return DiagnosticPredicate::NearMatch;
878 }
879
880 DiagnosticPredicate isSVEVecLenSpecifier() const {
881 if (!isImm())
882 return DiagnosticPredicate::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
884 if (!MCE)
885 return DiagnosticPredicate::NoMatch;
886 int64_t Val = MCE->getValue();
887 if (Val >= 0 && Val <= 1)
888 return DiagnosticPredicate::Match;
889 return DiagnosticPredicate::NearMatch;
890 }
891
892 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
893 AArch64::Specifier ELFSpec;
894 AArch64::Specifier DarwinSpec;
895 int64_t Addend;
896 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
897 Addend)) {
898 // If we don't understand the expression, assume the best and
899 // let the fixup and relocation code deal with it.
900 return true;
901 }
902
    if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
        llvm::is_contained(
            {AArch64::S_LO12, AArch64::S_GOT_LO12, AArch64::S_GOT_AUTH_LO12,
             AArch64::S_DTPREL_LO12, AArch64::S_DTPREL_LO12_NC,
             AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
             AArch64::S_GOTTPREL_LO12_NC, AArch64::S_TLSDESC_LO12,
             AArch64::S_TLSDESC_AUTH_LO12, AArch64::S_SECREL_LO12,
             AArch64::S_SECREL_HI12, AArch64::S_GOT_PAGE_LO15},
            ELFSpec)) {
912 // Note that we don't range-check the addend. It's adjusted modulo page
913 // size when converted, so there is no "out of range" condition when using
914 // @pageoff.
915 return true;
916 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
917 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
918 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
919 return Addend == 0;
920 }
921
922 return false;
923 }
924
925 template <int Scale> bool isUImm12Offset() const {
926 if (!isImm())
927 return false;
928
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());
932
933 int64_t Val = MCE->getValue();
934 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
935 }
936
937 template <int N, int M>
938 bool isImmInRange() const {
939 if (!isImm())
940 return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
942 if (!MCE)
943 return false;
944 int64_t Val = MCE->getValue();
945 return (Val >= N && Val <= M);
946 }
947
948 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
949 // a logical immediate can always be represented when inverted.
950 template <typename T>
951 bool isLogicalImm() const {
952 if (!isImm())
953 return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
955 if (!MCE)
956 return false;
957
958 int64_t Val = MCE->getValue();
959 // Avoid left shift by 64 directly.
960 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
961 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
962 if ((Val & Upper) && (Val & Upper) != Upper)
963 return false;
964
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
966 }
967
968 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
969
970 bool isImmRange() const { return Kind == k_ImmRange; }
971
972 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
973 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
974 /// immediate that can be shifted by 'Shift'.
975 template <unsigned Width>
976 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
988 }
989
990 return {};
991 }
992
993 bool isAddSubImm() const {
994 if (!isShiftedImm() && !isImm())
995 return false;
996
997 const MCExpr *Expr;
998
999 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
1000 if (isShiftedImm()) {
1001 unsigned Shift = ShiftedImm.ShiftAmount;
1002 Expr = ShiftedImm.Val;
1003 if (Shift != 0 && Shift != 12)
1004 return false;
1005 } else {
1006 Expr = getImm();
1007 }
1008
1009 AArch64::Specifier ELFSpec;
1010 AArch64::Specifier DarwinSpec;
1011 int64_t Addend;
1012 if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
1013 Addend)) {
1014 return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
1015 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
1016 (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
             llvm::is_contained(
                 {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
                  AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
                  AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
                  AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
                  AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
                  AArch64::S_SECREL_HI12, AArch64::S_SECREL_LO12},
                 ELFSpec);
1025 }
1026
1027 // If it's a constant, it should be a real immediate in range.
1028 if (auto ShiftedVal = getShiftedVal<12>())
1029 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1030
1031 // If it's an expression, we hope for the best and let the fixup/relocation
1032 // code deal with it.
1033 return true;
1034 }
1035
1036 bool isAddSubImmNeg() const {
1037 if (!isShiftedImm() && !isImm())
1038 return false;
1039
1040 // Otherwise it should be a real negative immediate in range.
1041 if (auto ShiftedVal = getShiftedVal<12>())
1042 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1043
1044 return false;
1045 }
1046
1047 // Signed value in the range -128 to +127. For element widths of
1048 // 16 bits or higher it may also be a signed multiple of 256 in the
1049 // range -32768 to +32512.
1050 // For element-width of 8 bits a range of -128 to 255 is accepted,
1051 // since a copy of a byte can be either signed/unsigned.
1052 template <typename T>
1053 DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1055 return DiagnosticPredicate::NoMatch;
1056
1057 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1058 std::is_same<int8_t, T>::value;
1059 if (auto ShiftedImm = getShiftedVal<8>())
1060 if (!(IsByte && ShiftedImm->second) &&
1061 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1062 << ShiftedImm->second))
1063 return DiagnosticPredicate::Match;
1064
1065 return DiagnosticPredicate::NearMatch;
1066 }
1067
1068 // Unsigned value in the range 0 to 255. For element widths of
1069 // 16 bits or higher it may also be a signed multiple of 256 in the
1070 // range 0 to 65280.
1071 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1073 return DiagnosticPredicate::NoMatch;
1074
1075 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1076 std::is_same<int8_t, T>::value;
1077 if (auto ShiftedImm = getShiftedVal<8>())
1078 if (!(IsByte && ShiftedImm->second) &&
1079 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1080 << ShiftedImm->second))
1081 return DiagnosticPredicate::Match;
1082
1083 return DiagnosticPredicate::NearMatch;
1084 }
1085
1086 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1087 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1088 return DiagnosticPredicate::Match;
1089 return DiagnosticPredicate::NoMatch;
1090 }
1091
1092 bool isCondCode() const { return Kind == k_CondCode; }
1093
1094 bool isSIMDImmType10() const {
1095 if (!isImm())
1096 return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1098 if (!MCE)
1099 return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1101 }
1102
1103 template<int N>
1104 bool isBranchTarget() const {
1105 if (!isImm())
1106 return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1108 if (!MCE)
1109 return true;
1110 int64_t Val = MCE->getValue();
1111 if (Val & 0x3)
1112 return false;
1113 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1114 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1115 }
1116
1117 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1118 if (!isImm())
1119 return false;
1120
1121 AArch64::Specifier ELFSpec;
1122 AArch64::Specifier DarwinSpec;
1123 int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1125 Addend)) {
1126 return false;
1127 }
1128 if (DarwinSpec != AArch64::S_None)
1129 return false;
1130
    return llvm::is_contained(AllowedModifiers, ELFSpec);
1132 }
1133
1134 bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1136 }
1137
1138 bool isMovWSymbolG2() const {
    return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
                         AArch64::S_ABS_G2_NC, AArch64::S_PREL_G2,
                         AArch64::S_PREL_G2_NC, AArch64::S_TPREL_G2,
                         AArch64::S_DTPREL_G2});
1143 }
1144
1145 bool isMovWSymbolG1() const {
    return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
                         AArch64::S_ABS_G1_NC, AArch64::S_PREL_G1,
                         AArch64::S_PREL_G1_NC, AArch64::S_GOTTPREL_G1,
                         AArch64::S_TPREL_G1, AArch64::S_TPREL_G1_NC,
                         AArch64::S_DTPREL_G1, AArch64::S_DTPREL_G1_NC});
1151 }
1152
1153 bool isMovWSymbolG0() const {
    return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
                         AArch64::S_ABS_G0_NC, AArch64::S_PREL_G0,
                         AArch64::S_PREL_G0_NC, AArch64::S_GOTTPREL_G0_NC,
                         AArch64::S_TPREL_G0, AArch64::S_TPREL_G0_NC,
                         AArch64::S_DTPREL_G0, AArch64::S_DTPREL_G0_NC});
1159 }
1160
1161 template<int RegWidth, int Shift>
1162 bool isMOVZMovAlias() const {
1163 if (!isImm()) return false;
1164
1165 const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1167 uint64_t Value = CE->getValue();
1168
1169 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1170 }
    // When a non-constant expression is used as the operand, only the
    // Shift == 0 form is supported.
    return !Shift && E;
1174 }
1175
1176 template<int RegWidth, int Shift>
1177 bool isMOVNMovAlias() const {
1178 if (!isImm()) return false;
1179
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1181 if (!CE) return false;
1182 uint64_t Value = CE->getValue();
1183
1184 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1185 }
1186
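  // An FP immediate operand is only valid if its value can be encoded in the
  // 8-bit floating-point immediate format (FMOV-style imm8).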
1187 bool isFPImm() const {
1188 return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1190 }
1191
1192 bool isBarrier() const {
1193 return Kind == k_Barrier && !getBarriernXSModifier();
1194 }
1195 bool isBarriernXS() const {
1196 return Kind == k_Barrier && getBarriernXSModifier();
1197 }
1198 bool isSysReg() const { return Kind == k_SysReg; }
1199
1200 bool isMRSSystemRegister() const {
1201 if (!isSysReg()) return false;
1202
1203 return SysReg.MRSReg != -1U;
1204 }
1205
1206 bool isMSRSystemRegister() const {
1207 if (!isSysReg()) return false;
1208 return SysReg.MSRReg != -1U;
1209 }
1210
1211 bool isSystemPStateFieldWithImm0_1() const {
1212 if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1214 }
1215
1216 bool isSystemPStateFieldWithImm0_15() const {
1217 if (!isSysReg())
1218 return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1220 }
1221
1222 bool isSVCR() const {
1223 if (Kind != k_SVCR)
1224 return false;
1225 return SVCR.PStateField != -1U;
1226 }
1227
1228 bool isReg() const override {
1229 return Kind == k_Register;
1230 }
1231
1232 bool isVectorList() const { return Kind == k_VectorList; }
1233
1234 bool isScalarReg() const {
1235 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1236 }
1237
1238 bool isNeonVectorReg() const {
1239 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1240 }
1241
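  // Restricted to the lower half of the NEON register file (V0-V15), as
  // required by indexed-element instruction forms.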
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }
1249
1250 bool isNeonVectorReg0to7() const {
1251 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
1254 }
1255
1256 bool isMatrix() const { return Kind == k_MatrixRegister; }
1257 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1258
1259 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1260 RegKind RK;
1261 switch (Class) {
1262 case AArch64::PPRRegClassID:
1263 case AArch64::PPR_3bRegClassID:
1264 case AArch64::PPR_p8to15RegClassID:
1265 case AArch64::PNRRegClassID:
1266 case AArch64::PNR_p8to15RegClassID:
1267 case AArch64::PPRorPNRRegClassID:
1268 RK = RegKind::SVEPredicateAsCounter;
1269 break;
1270 default:
1271 llvm_unreachable("Unsupported register class");
1272 }
1273
1274 return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
1276 }
1277
1278 template <unsigned Class> bool isSVEVectorReg() const {
1279 RegKind RK;
1280 switch (Class) {
1281 case AArch64::ZPRRegClassID:
1282 case AArch64::ZPR_3bRegClassID:
1283 case AArch64::ZPR_4bRegClassID:
1284 case AArch64::ZPRMul2_LoRegClassID:
1285 case AArch64::ZPRMul2_HiRegClassID:
1286 case AArch64::ZPR_KRegClassID:
1287 RK = RegKind::SVEDataVector;
1288 break;
1289 case AArch64::PPRRegClassID:
1290 case AArch64::PPR_3bRegClassID:
1291 case AArch64::PPR_p8to15RegClassID:
1292 case AArch64::PNRRegClassID:
1293 case AArch64::PNR_p8to15RegClassID:
1294 case AArch64::PPRorPNRRegClassID:
1295 RK = RegKind::SVEPredicateVector;
1296 break;
1297 default:
1298 llvm_unreachable("Unsupported register class");
1299 }
1300
1301 return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
1303 }
1304
1305 template <unsigned Class> bool isFPRasZPR() const {
1306 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
1308 }
1309
1310 template <int ElementWidth, unsigned Class>
1311 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1312 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1313 return DiagnosticPredicate::NoMatch;
1314
1315 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1316 return DiagnosticPredicate::Match;
1317
1318 return DiagnosticPredicate::NearMatch;
1319 }
1320
1321 template <int ElementWidth, unsigned Class>
1322 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1323 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1324 Reg.Kind != RegKind::SVEPredicateVector))
1325 return DiagnosticPredicate::NoMatch;
1326
1327 if ((isSVEPredicateAsCounterReg<Class>() ||
1328 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1329 Reg.ElementWidth == ElementWidth)
1330 return DiagnosticPredicate::Match;
1331
1332 return DiagnosticPredicate::NearMatch;
1333 }
1334
1335 template <int ElementWidth, unsigned Class>
1336 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1337 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1338 return DiagnosticPredicate::NoMatch;
1339
1340 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1341 return DiagnosticPredicate::Match;
1342
1343 return DiagnosticPredicate::NearMatch;
1344 }
1345
1346 template <int ElementWidth, unsigned Class>
1347 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1348 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1349 return DiagnosticPredicate::NoMatch;
1350
1351 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1352 return DiagnosticPredicate::Match;
1353
1354 return DiagnosticPredicate::NearMatch;
1355 }
1356
1357 template <int ElementWidth, unsigned Class,
1358 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1359 bool ShiftWidthAlwaysSame>
1360 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1361 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1362 if (!VectorMatch.isMatch())
1363 return DiagnosticPredicate::NoMatch;
1364
1365 // Give a more specific diagnostic when the user has explicitly typed in
1366 // a shift-amount that does not match what is expected, but for which
1367 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1369 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1370 ShiftExtendTy == AArch64_AM::SXTW) &&
1371 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1372 return DiagnosticPredicate::NoMatch;
1373
1374 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1375 return DiagnosticPredicate::Match;
1376
1377 return DiagnosticPredicate::NearMatch;
1378 }
1379
1380 bool isGPR32as64() const {
1381 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1383 }
1384
1385 bool isGPR64as32() const {
1386 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1388 }
1389
1390 bool isGPR64x8() const {
1391 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
1394 }
1395
1396 bool isWSeqPair() const {
1397 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
1400 }
1401
1402 bool isXSeqPair() const {
1403 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
1406 }
1407
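  // Matches only XZR; used for the XZR register pair accepted by SYSP (see
  // tryParseSyspXzrPair).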
1408 bool isSyspXzrPair() const {
1409 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1410 }
1411
1412 template<int64_t Angle, int64_t Remainder>
1413 DiagnosticPredicate isComplexRotation() const {
1414 if (!isImm())
1415 return DiagnosticPredicate::NoMatch;
1416
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1418 if (!CE)
1419 return DiagnosticPredicate::NoMatch;
1420 uint64_t Value = CE->getValue();
1421
1422 if (Value % Angle == Remainder && Value <= 270)
1423 return DiagnosticPredicate::Match;
1424 return DiagnosticPredicate::NearMatch;
1425 }
1426
1427 template <unsigned RegClassID> bool isGPR64() const {
1428 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
1430 }
1431
1432 template <unsigned RegClassID, int ExtWidth>
1433 DiagnosticPredicate isGPR64WithShiftExtend() const {
1434 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1435 return DiagnosticPredicate::NoMatch;
1436
1437 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1439 return DiagnosticPredicate::Match;
1440 return DiagnosticPredicate::NearMatch;
1441 }
1442
1443 /// Is this a vector list with the type implicit (presumably attached to the
1444 /// instruction itself)?
1445 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1446 bool isImplicitlyTypedVectorList() const {
1447 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1448 VectorList.NumElements == 0 &&
1449 VectorList.RegisterKind == VectorKind &&
1450 (!IsConsecutive || (VectorList.Stride == 1));
1451 }
1452
1453 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1454 unsigned ElementWidth, unsigned Stride = 1>
1455 bool isTypedVectorList() const {
1456 if (Kind != k_VectorList)
1457 return false;
1458 if (VectorList.Count != NumRegs)
1459 return false;
1460 if (VectorList.RegisterKind != VectorKind)
1461 return false;
1462 if (VectorList.ElementWidth != ElementWidth)
1463 return false;
1464 if (VectorList.Stride != Stride)
1465 return false;
1466 return VectorList.NumElements == NumElements;
1467 }
1468
1469 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1470 unsigned ElementWidth, unsigned RegClass>
1471 DiagnosticPredicate isTypedVectorListMultiple() const {
1472 bool Res =
1473 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1474 if (!Res)
1475 return DiagnosticPredicate::NoMatch;
    if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1477 return DiagnosticPredicate::NearMatch;
1478 return DiagnosticPredicate::Match;
1479 }
1480
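  // A strided vector list is only accepted if its first register lies in
  // Z0..Z<Stride-1> or Z16..Z<16+Stride-1>.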
1481 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1482 unsigned ElementWidth>
1483 DiagnosticPredicate isTypedVectorListStrided() const {
1484 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1485 ElementWidth, Stride>();
1486 if (!Res)
1487 return DiagnosticPredicate::NoMatch;
1488 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1489 ((VectorList.RegNum >= AArch64::Z16) &&
1490 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1491 return DiagnosticPredicate::Match;
1492 return DiagnosticPredicate::NoMatch;
1493 }
1494
1495 template <int Min, int Max>
1496 DiagnosticPredicate isVectorIndex() const {
1497 if (Kind != k_VectorIndex)
1498 return DiagnosticPredicate::NoMatch;
1499 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1500 return DiagnosticPredicate::Match;
1501 return DiagnosticPredicate::NearMatch;
1502 }
1503
1504 bool isToken() const override { return Kind == k_Token; }
1505
1506 bool isTokenEqual(StringRef Str) const {
1507 return Kind == k_Token && getToken() == Str;
1508 }
1509 bool isSysCR() const { return Kind == k_SysCR; }
1510 bool isPrefetch() const { return Kind == k_Prefetch; }
1511 bool isPSBHint() const { return Kind == k_PSBHint; }
1512 bool isPHint() const { return Kind == k_PHint; }
1513 bool isBTIHint() const { return Kind == k_BTIHint; }
1514 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
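  // A "shifter" is a shift/extend operand whose type is LSL, LSR, ASR, ROR or
  // MSL.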
1515 bool isShifter() const {
1516 if (!isShiftExtend())
1517 return false;
1518
1519 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1520 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1521 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1522 ST == AArch64_AM::MSL);
1523 }
1524
1525 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1526 if (Kind != k_FPImm)
1527 return DiagnosticPredicate::NoMatch;
1528
1529 if (getFPImmIsExact()) {
1530 // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1532 assert(Desc && "Unknown enum value");
1533
1534 // Calculate its FP value.
1535 APFloat RealVal(APFloat::IEEEdouble());
1536 auto StatusOrErr =
1537 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1539 llvm_unreachable("FP immediate is not exact");
1540
      if (getFPImm().bitwiseIsEqual(RealVal))
1542 return DiagnosticPredicate::Match;
1543 }
1544
1545 return DiagnosticPredicate::NearMatch;
1546 }
1547
1548 template <unsigned ImmA, unsigned ImmB>
1549 DiagnosticPredicate isExactFPImm() const {
1550 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1551 if ((Res = isExactFPImm<ImmA>()))
1552 return DiagnosticPredicate::Match;
1553 if ((Res = isExactFPImm<ImmB>()))
1554 return DiagnosticPredicate::Match;
1555 return Res;
1556 }
1557
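  // An extend operand is one of the UXT*/SXT* extensions, or LSL, with a
  // shift amount of at most 4.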
1558 bool isExtend() const {
1559 if (!isShiftExtend())
1560 return false;
1561
1562 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1563 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1564 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1565 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1566 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1567 ET == AArch64_AM::LSL) &&
1568 getShiftExtendAmount() <= 4;
1569 }
1570
1571 bool isExtend64() const {
1572 if (!isExtend())
1573 return false;
1574 // Make sure the extend expects a 32-bit source register.
1575 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1576 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1577 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1578 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1579 }
1580
1581 bool isExtendLSL64() const {
1582 if (!isExtend())
1583 return false;
1584 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1585 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1586 ET == AArch64_AM::LSL) &&
1587 getShiftExtendAmount() <= 4;
1588 }
1589
1590 bool isLSLImm3Shift() const {
1591 if (!isShiftExtend())
1592 return false;
1593 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1594 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1595 }
1596
1597 template<int Width> bool isMemXExtend() const {
1598 if (!isExtend())
1599 return false;
1600 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1601 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
1603 getShiftExtendAmount() == 0);
1604 }
1605
1606 template<int Width> bool isMemWExtend() const {
1607 if (!isExtend())
1608 return false;
1609 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1610 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
1612 getShiftExtendAmount() == 0);
1613 }
1614
1615 template <unsigned width>
1616 bool isArithmeticShifter() const {
1617 if (!isShifter())
1618 return false;
1619
1620 // An arithmetic shifter is LSL, LSR, or ASR.
1621 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1622 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1623 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1624 }
1625
1626 template <unsigned width>
1627 bool isLogicalShifter() const {
1628 if (!isShifter())
1629 return false;
1630
1631 // A logical shifter is LSL, LSR, ASR or ROR.
1632 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1633 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1634 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1635 getShiftExtendAmount() < width;
1636 }
1637
1638 bool isMovImm32Shifter() const {
1639 if (!isShifter())
1640 return false;
1641
    // A MOVi shifter for a 32-bit register is LSL of 0 or 16.
1643 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1644 if (ST != AArch64_AM::LSL)
1645 return false;
1646 uint64_t Val = getShiftExtendAmount();
1647 return (Val == 0 || Val == 16);
1648 }
1649
1650 bool isMovImm64Shifter() const {
1651 if (!isShifter())
1652 return false;
1653
    // A MOVi shifter for a 64-bit register is LSL of 0, 16, 32, or 48.
1655 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1656 if (ST != AArch64_AM::LSL)
1657 return false;
1658 uint64_t Val = getShiftExtendAmount();
1659 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1660 }
1661
1662 bool isLogicalVecShifter() const {
1663 if (!isShifter())
1664 return false;
1665
1666 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1667 unsigned Shift = getShiftExtendAmount();
1668 return getShiftExtendType() == AArch64_AM::LSL &&
1669 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1670 }
1671
1672 bool isLogicalVecHalfWordShifter() const {
1673 if (!isLogicalVecShifter())
1674 return false;
1675
    // A logical vector half-word shifter is a left shift by 0 or 8.
1677 unsigned Shift = getShiftExtendAmount();
1678 return getShiftExtendType() == AArch64_AM::LSL &&
1679 (Shift == 0 || Shift == 8);
1680 }
1681
1682 bool isMoveVecShifter() const {
1683 if (!isShiftExtend())
1684 return false;
1685
    // A move vector shifter is an MSL shift of 8 or 16.
1687 unsigned Shift = getShiftExtendAmount();
1688 return getShiftExtendType() == AArch64_AM::MSL &&
1689 (Shift == 8 || Shift == 16);
1690 }
1691
1692 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1693 // to LDUR/STUR when the offset is not legal for the former but is for
1694 // the latter. As such, in addition to checking for being a legal unscaled
1695 // address, also check that it is not a legal scaled address. This avoids
1696 // ambiguity in the matcher.
1697 template<int Width>
1698 bool isSImm9OffsetFB() const {
1699 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1700 }
1701
1702 bool isAdrpLabel() const {
1703 // Validation was handled during parsing, so we just verify that
1704 // something didn't go haywire.
1705 if (!isImm())
1706 return false;
1707
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1709 int64_t Val = CE->getValue();
1710 int64_t Min = - (4096 * (1LL << (21 - 1)));
1711 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1712 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1713 }
1714
1715 return true;
1716 }
1717
1718 bool isAdrLabel() const {
1719 // Validation was handled during parsing, so we just verify that
1720 // something didn't go haywire.
1721 if (!isImm())
1722 return false;
1723
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1725 int64_t Val = CE->getValue();
1726 int64_t Min = - (1LL << (21 - 1));
1727 int64_t Max = ((1LL << (21 - 1)) - 1);
1728 return Val >= Min && Val <= Max;
1729 }
1730
1731 return true;
1732 }
1733
1734 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1735 DiagnosticPredicate isMatrixRegOperand() const {
1736 if (!isMatrix())
1737 return DiagnosticPredicate::NoMatch;
1738 if (getMatrixKind() != Kind ||
1739 !AArch64MCRegisterClasses[RegClass].contains(Reg: getMatrixReg()) ||
1740 EltSize != getMatrixElementWidth())
1741 return DiagnosticPredicate::NearMatch;
1742 return DiagnosticPredicate::Match;
1743 }
1744
1745 bool isPAuthPCRelLabel16Operand() const {
1746 // PAuth PCRel16 operands are similar to regular branch targets, but only
1747 // negative values are allowed for concrete immediates, since the signing
1748 // instruction is expected to be at a lower address.
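  // The value is later encoded as a negated, word-scaled 16-bit field, hence
  // the word-alignment and (-(1 << 18), 0] range checks below.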
1749 if (!isImm())
1750 return false;
1751 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
1752 if (!MCE)
1753 return true;
1754 int64_t Val = MCE->getValue();
1755 if (Val & 0b11)
1756 return false;
1757 return (Val <= 0) && (Val > -(1 << 18));
1758 }
1759
1760 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1761 // Add as immediates when possible. Null MCExpr = 0.
1762 if (!Expr)
1763 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
1764 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr))
1765 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
1766 else
1767 Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
1768 }
1769
1770 void addRegOperands(MCInst &Inst, unsigned N) const {
1771 assert(N == 1 && "Invalid number of operands!");
1772 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1773 }
1774
1775 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1776 assert(N == 1 && "Invalid number of operands!");
1777 Inst.addOperand(Op: MCOperand::createReg(Reg: getMatrixReg()));
1778 }
1779
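  // The operand was written as a 64-bit register (Xn) but the instruction
  // encodes the corresponding 32-bit register (Wn); map it via the shared
  // encoding value.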
1780 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1781 assert(N == 1 && "Invalid number of operands!");
1782 assert(
1783 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1784
1785 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1786 MCRegister Reg = RI->getRegClass(i: AArch64::GPR32RegClassID)
1787 .getRegister(i: RI->getEncodingValue(Reg: getReg()));
1788
1789 Inst.addOperand(Op: MCOperand::createReg(Reg));
1790 }
1791
1792 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1793 assert(N == 1 && "Invalid number of operands!");
1794 assert(
1795 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1796
1797 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1798 MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
1799 .getRegister(i: RI->getEncodingValue(Reg: getReg()));
1800
1801 Inst.addOperand(Op: MCOperand::createReg(Reg));
1802 }
1803
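  // A scalar FP register (Bn/Hn/Sn/Dn/Qn, selected by Width) was written
  // where the instruction actually encodes the SVE Z register with the same
  // index; map it by register number.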
1804 template <int Width>
1805 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1806 unsigned Base;
1807 switch (Width) {
1808 case 8: Base = AArch64::B0; break;
1809 case 16: Base = AArch64::H0; break;
1810 case 32: Base = AArch64::S0; break;
1811 case 64: Base = AArch64::D0; break;
1812 case 128: Base = AArch64::Q0; break;
1813 default:
1814 llvm_unreachable("Unsupported width");
1815 }
1816 Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::Z0 + getReg() - Base));
1817 }
1818
1819 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1820 assert(N == 1 && "Invalid number of operands!");
1821 unsigned Reg = getReg();
1822 // Normalise to PPR
1823 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1824 Reg = Reg - AArch64::PN0 + AArch64::P0;
1825 Inst.addOperand(Op: MCOperand::createReg(Reg));
1826 }
1827
1828 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1829 assert(N == 1 && "Invalid number of operands!");
1830 Inst.addOperand(
1831 Op: MCOperand::createReg(Reg: (getReg() - AArch64::PN0) + AArch64::P0));
1832 }
1833
1834 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1835 assert(N == 1 && "Invalid number of operands!");
1836 assert(
1837 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1838 Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::D0 + getReg() - AArch64::Q0));
1839 }
1840
1841 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!");
1843 assert(
1844 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1845 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1846 }
1847
1848 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1849 assert(N == 1 && "Invalid number of operands!");
1850 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1851 }
1852
1853 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1854 assert(N == 1 && "Invalid number of operands!");
1855 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1856 }
1857
1858 enum VecListIndexType {
1859 VecListIdx_DReg = 0,
1860 VecListIdx_QReg = 1,
1861 VecListIdx_ZReg = 2,
1862 VecListIdx_PReg = 3,
1863 };
1864
1865 template <VecListIndexType RegTy, unsigned NumRegs,
1866 bool IsConsecutive = false>
1867 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1868 assert(N == 1 && "Invalid number of operands!");
1869 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1870 "Expected consecutive registers");
1871 static const unsigned FirstRegs[][5] = {
1872 /* DReg */ { AArch64::Q0,
1873 AArch64::D0, AArch64::D0_D1,
1874 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1875 /* QReg */ { AArch64::Q0,
1876 AArch64::Q0, AArch64::Q0_Q1,
1877 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1878 /* ZReg */ { AArch64::Z0,
1879 AArch64::Z0, AArch64::Z0_Z1,
1880 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1881 /* PReg */ { AArch64::P0,
1882 AArch64::P0, AArch64::P0_P1 }
1883 };
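  // Column 0 of each row is the base register that getVectorListStart() is
  // relative to; column N (N >= 1) is the first register of the N-element
  // tuple class, so the operand register is that tuple register offset by the
  // list's starting index.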
1884
1885 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1886 " NumRegs must be <= 4 for ZRegs");
1887
1888 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1889 " NumRegs must be <= 2 for PRegs");
1890
1891 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1892 Inst.addOperand(Op: MCOperand::createReg(Reg: FirstReg + getVectorListStart() -
1893 FirstRegs[(unsigned)RegTy][0]));
1894 }
1895
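  // SME2 strided multi-vector lists: a two-register list has a stride of 8
  // (e.g. { z0.d, z8.d }) and a four-register list a stride of 4
  // (e.g. { z0.d, z4.d, z8.d, z12.d }); only the first register is encoded.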
1896 template <unsigned NumRegs>
1897 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1898 assert(N == 1 && "Invalid number of operands!");
1899 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1900
1901 switch (NumRegs) {
1902 case 2:
1903 if (getVectorListStart() < AArch64::Z16) {
1904 assert((getVectorListStart() < AArch64::Z8) &&
1905 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1906 Inst.addOperand(Op: MCOperand::createReg(
1907 Reg: AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1908 } else {
1909 assert((getVectorListStart() < AArch64::Z24) &&
1910 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1911 Inst.addOperand(Op: MCOperand::createReg(
1912 Reg: AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1913 }
1914 break;
1915 case 4:
1916 if (getVectorListStart() < AArch64::Z16) {
1917 assert((getVectorListStart() < AArch64::Z4) &&
1918 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1919 Inst.addOperand(Op: MCOperand::createReg(
1920 Reg: AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1921 } else {
1922 assert((getVectorListStart() < AArch64::Z20) &&
1923 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1924 Inst.addOperand(Op: MCOperand::createReg(
1925 Reg: AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1926 }
1927 break;
1928 default:
1929 llvm_unreachable("Unsupported number of registers for strided vec list");
1930 }
1931 }
1932
1933 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1934 assert(N == 1 && "Invalid number of operands!");
1935 unsigned RegMask = getMatrixTileListRegMask();
1936 assert(RegMask <= 0xFF && "Invalid mask!");
1937 Inst.addOperand(Op: MCOperand::createImm(Val: RegMask));
1938 }
1939
1940 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1941 assert(N == 1 && "Invalid number of operands!");
1942 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
1943 }
1944
1945 template <unsigned ImmIs0, unsigned ImmIs1>
1946 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1947 assert(N == 1 && "Invalid number of operands!");
1948 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1949 Inst.addOperand(Op: MCOperand::createImm(Val: bool(isExactFPImm<ImmIs1>())));
1950 }
1951
1952 void addImmOperands(MCInst &Inst, unsigned N) const {
1953 assert(N == 1 && "Invalid number of operands!");
1954 // If this is a pageoff symrefexpr with an addend, adjust the addend
1955 // to be only the page-offset portion. Otherwise, just add the expr
1956 // as-is.
1957 addExpr(Inst, Expr: getImm());
1958 }
1959
1960 template <int Shift>
1961 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1962 assert(N == 2 && "Invalid number of operands!");
1963 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1964 Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->first));
1965 Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
1966 } else if (isShiftedImm()) {
1967 addExpr(Inst, Expr: getShiftedImmVal());
1968 Inst.addOperand(Op: MCOperand::createImm(Val: getShiftedImmShift()));
1969 } else {
1970 addExpr(Inst, Expr: getImm());
1971 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
1972 }
1973 }
1974
1975 template <int Shift>
1976 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1977 assert(N == 2 && "Invalid number of operands!");
1978 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1979 Inst.addOperand(Op: MCOperand::createImm(Val: -ShiftedVal->first));
1980 Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
1981 } else
1982 llvm_unreachable("Not a shifted negative immediate");
1983 }
1984
1985 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1986 assert(N == 1 && "Invalid number of operands!");
1987 Inst.addOperand(Op: MCOperand::createImm(Val: getCondCode()));
1988 }
1989
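  // For ADRP, a constant target value is in bytes; the instruction encodes a
  // 4KB page index, hence the shift right by 12 below.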
1990 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1991 assert(N == 1 && "Invalid number of operands!");
1992 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
1993 if (!MCE)
1994 addExpr(Inst, Expr: getImm());
1995 else
1996 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 12));
1997 }
1998
1999 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2000 addImmOperands(Inst, N);
2001 }
2002
2003 template<int Scale>
2004 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2005 assert(N == 1 && "Invalid number of operands!");
2006 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2007
2008 if (!MCE) {
2009 Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
2010 return;
2011 }
2012 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
2013 }
2014
2015 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2016 assert(N == 1 && "Invalid number of operands!");
2017 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2018 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue()));
2019 }
2020
2021 template <int Scale>
2022 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2023 assert(N == 1 && "Invalid number of operands!");
2024 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2025 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
2026 }
2027
2028 template <int Scale>
2029 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2030 assert(N == 1 && "Invalid number of operands!");
2031 Inst.addOperand(Op: MCOperand::createImm(Val: getFirstImmVal() / Scale));
2032 }
2033
2034 template <typename T>
2035 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2036 assert(N == 1 && "Invalid number of operands!");
2037 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2038 std::make_unsigned_t<T> Val = MCE->getValue();
2039 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2040 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2041 }
2042
2043 template <typename T>
2044 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2045 assert(N == 1 && "Invalid number of operands!");
2046 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2047 std::make_unsigned_t<T> Val = ~MCE->getValue();
2048 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2049 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2050 }
2051
2052 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2053 assert(N == 1 && "Invalid number of operands!");
2054 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2055 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(Imm: MCE->getValue());
2056 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2057 }
2058
2059 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2060 // Branch operands don't encode the low bits, so shift them off
2061 // here. If it's a label, however, just put it on directly as there's
2062 // not enough information now to do anything.
2063 assert(N == 1 && "Invalid number of operands!");
2064 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2065 if (!MCE) {
2066 addExpr(Inst, Expr: getImm());
2067 return;
2068 }
2069 assert(MCE && "Invalid constant immediate operand!");
2070 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2071 }
2072
2073 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2074 // PC-relative operands don't encode the low bits, so shift them off
2075 // here. If it's a label, however, just put it on directly as there's
2076 // not enough information now to do anything.
2077 assert(N == 1 && "Invalid number of operands!");
2078 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2079 if (!MCE) {
2080 addExpr(Inst, Expr: getImm());
2081 return;
2082 }
2083 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2084 }
2085
2086 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2087 // Branch operands don't encode the low bits, so shift them off
2088 // here. If it's a label, however, just put it on directly as there's
2089 // not enough information now to do anything.
2090 assert(N == 1 && "Invalid number of operands!");
2091 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2092 if (!MCE) {
2093 addExpr(Inst, Expr: getImm());
2094 return;
2095 }
2096 assert(MCE && "Invalid constant immediate operand!");
2097 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2098 }
2099
2100 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2101 // Branch operands don't encode the low bits, so shift them off
2102 // here. If it's a label, however, just put it on directly as there's
2103 // not enough information now to do anything.
2104 assert(N == 1 && "Invalid number of operands!");
2105 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2106 if (!MCE) {
2107 addExpr(Inst, Expr: getImm());
2108 return;
2109 }
2110 assert(MCE && "Invalid constant immediate operand!");
2111 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2112 }
2113
2114 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2115 // Branch operands don't encode the low bits, so shift them off
2116 // here. If it's a label, however, just put it on directly as there's
2117 // not enough information now to do anything.
2118 assert(N == 1 && "Invalid number of operands!");
2119 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2120 if (!MCE) {
2121 addExpr(Inst, Expr: getImm());
2122 return;
2123 }
2124 assert(MCE && "Invalid constant immediate operand!");
2125 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2126 }
2127
2128 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2129 assert(N == 1 && "Invalid number of operands!");
2130 Inst.addOperand(Op: MCOperand::createImm(
2131 Val: AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt())));
2132 }
2133
2134 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2135 assert(N == 1 && "Invalid number of operands!");
2136 Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
2137 }
2138
2139 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2140 assert(N == 1 && "Invalid number of operands!");
2141 Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
2142 }
2143
2144 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!");
2146
2147 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MRSReg));
2148 }
2149
2150 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2151 assert(N == 1 && "Invalid number of operands!");
2152
2153 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MSRReg));
2154 }
2155
2156 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2157 assert(N == 1 && "Invalid number of operands!");
2158
2159 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
2160 }
2161
2162 void addSVCROperands(MCInst &Inst, unsigned N) const {
2163 assert(N == 1 && "Invalid number of operands!");
2164
2165 Inst.addOperand(Op: MCOperand::createImm(Val: SVCR.PStateField));
2166 }
2167
2168 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2169 assert(N == 1 && "Invalid number of operands!");
2170
2171 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
2172 }
2173
2174 void addSysCROperands(MCInst &Inst, unsigned N) const {
2175 assert(N == 1 && "Invalid number of operands!");
2176 Inst.addOperand(Op: MCOperand::createImm(Val: getSysCR()));
2177 }
2178
2179 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(Op: MCOperand::createImm(Val: getPrefetch()));
2182 }
2183
2184 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(Op: MCOperand::createImm(Val: getPSBHint()));
2187 }
2188
2189 void addPHintOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191 Inst.addOperand(Op: MCOperand::createImm(Val: getPHint()));
2192 }
2193
2194 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2195 assert(N == 1 && "Invalid number of operands!");
2196 Inst.addOperand(Op: MCOperand::createImm(Val: getBTIHint()));
2197 }
2198
2199 void addShifterOperands(MCInst &Inst, unsigned N) const {
2200 assert(N == 1 && "Invalid number of operands!");
2201 unsigned Imm =
2202 AArch64_AM::getShifterImm(ST: getShiftExtendType(), Imm: getShiftExtendAmount());
2203 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2204 }
2205
2206 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2207 assert(N == 1 && "Invalid number of operands!");
2208 unsigned Imm = getShiftExtendAmount();
2209 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2210 }
2211
2212 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2213 assert(N == 1 && "Invalid number of operands!");
2214
2215 if (!isScalarReg())
2216 return;
2217
2218 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2219 MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
2220 .getRegister(i: RI->getEncodingValue(Reg: getReg()));
2221 if (Reg != AArch64::XZR)
2222 llvm_unreachable("wrong register");
2223
2224 Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::XZR));
2225 }
2226
2227 void addExtendOperands(MCInst &Inst, unsigned N) const {
2228 assert(N == 1 && "Invalid number of operands!");
2229 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2230 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2231 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2232 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2233 }
2234
2235 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2238 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2239 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2240 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2241 }
2242
2243 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2244 assert(N == 2 && "Invalid number of operands!");
2245 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2246 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2247 Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
2248 Inst.addOperand(Op: MCOperand::createImm(Val: getShiftExtendAmount() != 0));
2249 }
2250
2251 // For 8-bit load/store instructions with a register offset, both the
2252 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2253 // they're disambiguated by whether the shift was explicit or implicit rather
2254 // than its size.
2255 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2256 assert(N == 2 && "Invalid number of operands!");
2257 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2258 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2259 Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
2260 Inst.addOperand(Op: MCOperand::createImm(Val: hasShiftExtendAmount()));
2261 }
2262
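  // MOVZ/MOVN "mov" immediate aliases: the assembler accepts a full-width
  // constant and extracts the 16-bit payload itself. For example,
  // "mov x0, #0x2a0000" uses the MOVZ form with Shift == 16 and a payload of
  // 0x2a.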
2263 template<int Shift>
2264 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2265 assert(N == 1 && "Invalid number of operands!");
2266
2267 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2268 if (CE) {
2269 uint64_t Value = CE->getValue();
2270 Inst.addOperand(Op: MCOperand::createImm(Val: (Value >> Shift) & 0xffff));
2271 } else {
2272 addExpr(Inst, Expr: getImm());
2273 }
2274 }
2275
2276 template<int Shift>
2277 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2278 assert(N == 1 && "Invalid number of operands!");
2279
2280 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2281 uint64_t Value = CE->getValue();
2282 Inst.addOperand(Op: MCOperand::createImm(Val: (~Value >> Shift) & 0xffff));
2283 }
2284
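  // Complex rotation operands: FCMLA takes rotations 0/90/180/270, encoded as
  // rot/90; FCADD takes 90/270, encoded as (rot - 90)/180.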
2285 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2286 assert(N == 1 && "Invalid number of operands!");
2287 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2288 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / 90));
2289 }
2290
2291 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2294 Inst.addOperand(Op: MCOperand::createImm(Val: (MCE->getValue() - 90) / 180));
2295 }
2296
2297 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2298
2299 static std::unique_ptr<AArch64Operand>
2300 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2301 auto Op = std::make_unique<AArch64Operand>(args: k_Token, args&: Ctx);
2302 Op->Tok.Data = Str.data();
2303 Op->Tok.Length = Str.size();
2304 Op->Tok.IsSuffix = IsSuffix;
2305 Op->StartLoc = S;
2306 Op->EndLoc = S;
2307 return Op;
2308 }
2309
2310 static std::unique_ptr<AArch64Operand>
2311 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2312 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2313 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2314 unsigned ShiftAmount = 0,
2315 unsigned HasExplicitAmount = false) {
2316 auto Op = std::make_unique<AArch64Operand>(args: k_Register, args&: Ctx);
2317 Op->Reg.RegNum = RegNum;
2318 Op->Reg.Kind = Kind;
2319 Op->Reg.ElementWidth = 0;
2320 Op->Reg.EqualityTy = EqTy;
2321 Op->Reg.ShiftExtend.Type = ExtTy;
2322 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2323 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2324 Op->StartLoc = S;
2325 Op->EndLoc = E;
2326 return Op;
2327 }
2328
2329 static std::unique_ptr<AArch64Operand>
2330 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2331 SMLoc S, SMLoc E, MCContext &Ctx,
2332 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2333 unsigned ShiftAmount = 0,
2334 unsigned HasExplicitAmount = false) {
2335 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2336 Kind == RegKind::SVEPredicateVector ||
2337 Kind == RegKind::SVEPredicateAsCounter) &&
2338 "Invalid vector kind");
2339 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqTy: EqualsReg, ExtTy, ShiftAmount,
2340 HasExplicitAmount);
2341 Op->Reg.ElementWidth = ElementWidth;
2342 return Op;
2343 }
2344
2345 static std::unique_ptr<AArch64Operand>
2346 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2347 unsigned NumElements, unsigned ElementWidth,
2348 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2349 auto Op = std::make_unique<AArch64Operand>(args: k_VectorList, args&: Ctx);
2350 Op->VectorList.RegNum = RegNum;
2351 Op->VectorList.Count = Count;
2352 Op->VectorList.Stride = Stride;
2353 Op->VectorList.NumElements = NumElements;
2354 Op->VectorList.ElementWidth = ElementWidth;
2355 Op->VectorList.RegisterKind = RegisterKind;
2356 Op->StartLoc = S;
2357 Op->EndLoc = E;
2358 return Op;
2359 }
2360
2361 static std::unique_ptr<AArch64Operand>
2362 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2363 auto Op = std::make_unique<AArch64Operand>(args: k_VectorIndex, args&: Ctx);
2364 Op->VectorIndex.Val = Idx;
2365 Op->StartLoc = S;
2366 Op->EndLoc = E;
2367 return Op;
2368 }
2369
2370 static std::unique_ptr<AArch64Operand>
2371 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2372 auto Op = std::make_unique<AArch64Operand>(args: k_MatrixTileList, args&: Ctx);
2373 Op->MatrixTileList.RegMask = RegMask;
2374 Op->StartLoc = S;
2375 Op->EndLoc = E;
2376 return Op;
2377 }
2378
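  // ZA tiles of different element sizes overlap: a tile with element width W
  // aliases every (W/8)'th 64-bit tile starting at its own index (e.g. ZAS1
  // covers ZAD1 and ZAD5). The map below records which 64-bit tiles each
  // (element width, tile) pair expands to.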
2379 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2380 const unsigned ElementWidth) {
2381 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2382 RegMap = {
2383 {{0, AArch64::ZAB0},
2384 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2385 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2386 {{8, AArch64::ZAB0},
2387 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2388 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2389 {{16, AArch64::ZAH0},
2390 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2391 {{16, AArch64::ZAH1},
2392 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2393 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2394 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2395 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2396 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2397 };
2398
2399 if (ElementWidth == 64)
2400 OutRegs.insert(V: Reg);
2401 else {
2402 std::vector<unsigned> Regs = RegMap[std::make_pair(x: ElementWidth, y&: Reg)];
2403 assert(!Regs.empty() && "Invalid tile or element width!");
2404 OutRegs.insert_range(R&: Regs);
2405 }
2406 }
2407
2408 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2409 SMLoc E, MCContext &Ctx) {
2410 auto Op = std::make_unique<AArch64Operand>(args: k_Immediate, args&: Ctx);
2411 Op->Imm.Val = Val;
2412 Op->StartLoc = S;
2413 Op->EndLoc = E;
2414 return Op;
2415 }
2416
2417 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2418 unsigned ShiftAmount,
2419 SMLoc S, SMLoc E,
2420 MCContext &Ctx) {
2421 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftedImm, args&: Ctx);
2422 Op->ShiftedImm.Val = Val;
2423 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2424 Op->StartLoc = S;
2425 Op->EndLoc = E;
2426 return Op;
2427 }
2428
2429 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2430 unsigned Last, SMLoc S,
2431 SMLoc E,
2432 MCContext &Ctx) {
2433 auto Op = std::make_unique<AArch64Operand>(args: k_ImmRange, args&: Ctx);
2434 Op->ImmRange.First = First;
2435 Op->ImmRange.Last = Last;
 Op->StartLoc = S;
2436 Op->EndLoc = E;
2437 return Op;
2438 }
2439
2440 static std::unique_ptr<AArch64Operand>
2441 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2442 auto Op = std::make_unique<AArch64Operand>(args: k_CondCode, args&: Ctx);
2443 Op->CondCode.Code = Code;
2444 Op->StartLoc = S;
2445 Op->EndLoc = E;
2446 return Op;
2447 }
2448
2449 static std::unique_ptr<AArch64Operand>
2450 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2451 auto Op = std::make_unique<AArch64Operand>(args: k_FPImm, args&: Ctx);
2452 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2453 Op->FPImm.IsExact = IsExact;
2454 Op->StartLoc = S;
2455 Op->EndLoc = S;
2456 return Op;
2457 }
2458
2459 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2460 StringRef Str,
2461 SMLoc S,
2462 MCContext &Ctx,
2463 bool HasnXSModifier) {
2464 auto Op = std::make_unique<AArch64Operand>(args: k_Barrier, args&: Ctx);
2465 Op->Barrier.Val = Val;
2466 Op->Barrier.Data = Str.data();
2467 Op->Barrier.Length = Str.size();
2468 Op->Barrier.HasnXSModifier = HasnXSModifier;
2469 Op->StartLoc = S;
2470 Op->EndLoc = S;
2471 return Op;
2472 }
2473
2474 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2475 uint32_t MRSReg,
2476 uint32_t MSRReg,
2477 uint32_t PStateField,
2478 MCContext &Ctx) {
2479 auto Op = std::make_unique<AArch64Operand>(args: k_SysReg, args&: Ctx);
2480 Op->SysReg.Data = Str.data();
2481 Op->SysReg.Length = Str.size();
2482 Op->SysReg.MRSReg = MRSReg;
2483 Op->SysReg.MSRReg = MSRReg;
2484 Op->SysReg.PStateField = PStateField;
2485 Op->StartLoc = S;
2486 Op->EndLoc = S;
2487 return Op;
2488 }
2489
2490 static std::unique_ptr<AArch64Operand>
2491 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2492 auto Op = std::make_unique<AArch64Operand>(args: k_PHint, args&: Ctx);
2493 Op->PHint.Val = Val;
2494 Op->PHint.Data = Str.data();
2495 Op->PHint.Length = Str.size();
2496 Op->StartLoc = S;
2497 Op->EndLoc = S;
2498 return Op;
2499 }
2500
2501 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2502 SMLoc E, MCContext &Ctx) {
2503 auto Op = std::make_unique<AArch64Operand>(args: k_SysCR, args&: Ctx);
2504 Op->SysCRImm.Val = Val;
2505 Op->StartLoc = S;
2506 Op->EndLoc = E;
2507 return Op;
2508 }
2509
2510 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2511 StringRef Str,
2512 SMLoc S,
2513 MCContext &Ctx) {
2514 auto Op = std::make_unique<AArch64Operand>(args: k_Prefetch, args&: Ctx);
2515 Op->Prefetch.Val = Val;
2516 Op->Prefetch.Data = Str.data();
2517 Op->Prefetch.Length = Str.size();
2518 Op->StartLoc = S;
2519 Op->EndLoc = S;
2520 return Op;
2521 }
2522
2523 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2524 StringRef Str,
2525 SMLoc S,
2526 MCContext &Ctx) {
2527 auto Op = std::make_unique<AArch64Operand>(args: k_PSBHint, args&: Ctx);
2528 Op->PSBHint.Val = Val;
2529 Op->PSBHint.Data = Str.data();
2530 Op->PSBHint.Length = Str.size();
2531 Op->StartLoc = S;
2532 Op->EndLoc = S;
2533 return Op;
2534 }
2535
2536 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2537 StringRef Str,
2538 SMLoc S,
2539 MCContext &Ctx) {
2540 auto Op = std::make_unique<AArch64Operand>(args: k_BTIHint, args&: Ctx);
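    // BTI lives in the HINT space (BTI == HINT #32, with the c/j/jc variants
    // at #34/#36/#38); OR'ing in 32 presumably maps the target-kind encoding
    // onto the full HINT immediate.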
2541 Op->BTIHint.Val = Val | 32;
2542 Op->BTIHint.Data = Str.data();
2543 Op->BTIHint.Length = Str.size();
2544 Op->StartLoc = S;
2545 Op->EndLoc = S;
2546 return Op;
2547 }
2548
2549 static std::unique_ptr<AArch64Operand>
2550 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2551 SMLoc S, SMLoc E, MCContext &Ctx) {
2552 auto Op = std::make_unique<AArch64Operand>(args: k_MatrixRegister, args&: Ctx);
2553 Op->MatrixReg.RegNum = RegNum;
2554 Op->MatrixReg.ElementWidth = ElementWidth;
2555 Op->MatrixReg.Kind = Kind;
2556 Op->StartLoc = S;
2557 Op->EndLoc = E;
2558 return Op;
2559 }
2560
2561 static std::unique_ptr<AArch64Operand>
2562 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2563 auto Op = std::make_unique<AArch64Operand>(args: k_SVCR, args&: Ctx);
2564 Op->SVCR.PStateField = PStateField;
2565 Op->SVCR.Data = Str.data();
2566 Op->SVCR.Length = Str.size();
2567 Op->StartLoc = S;
2568 Op->EndLoc = S;
2569 return Op;
2570 }
2571
2572 static std::unique_ptr<AArch64Operand>
2573 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2574 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2575 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftExtend, args&: Ctx);
2576 Op->ShiftExtend.Type = ShOp;
2577 Op->ShiftExtend.Amount = Val;
2578 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2579 Op->StartLoc = S;
2580 Op->EndLoc = E;
2581 return Op;
2582 }
2583};
2584
2585} // end anonymous namespace.
2586
2587void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
2588 switch (Kind) {
2589 case k_FPImm:
2590 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2591 if (!getFPImmIsExact())
2592 OS << " (inexact)";
2593 OS << ">";
2594 break;
2595 case k_Barrier: {
2596 StringRef Name = getBarrierName();
2597 if (!Name.empty())
2598 OS << "<barrier " << Name << ">";
2599 else
2600 OS << "<barrier invalid #" << getBarrier() << ">";
2601 break;
2602 }
2603 case k_Immediate:
2604 MAI.printExpr(OS, *getImm());
2605 break;
2606 case k_ShiftedImm: {
2607 unsigned Shift = getShiftedImmShift();
2608 OS << "<shiftedimm ";
2609 MAI.printExpr(OS, *getShiftedImmVal());
2610 OS << ", lsl #" << AArch64_AM::getShiftValue(Imm: Shift) << ">";
2611 break;
2612 }
2613 case k_ImmRange: {
2614 OS << "<immrange ";
2615 OS << getFirstImmVal();
2616 OS << ":" << getLastImmVal() << ">";
2617 break;
2618 }
2619 case k_CondCode:
2620 OS << "<condcode " << getCondCode() << ">";
2621 break;
2622 case k_VectorList: {
2623 OS << "<vectorlist ";
2624 unsigned Reg = getVectorListStart();
2625 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2626 OS << Reg + i * getVectorListStride() << " ";
2627 OS << ">";
2628 break;
2629 }
2630 case k_VectorIndex:
2631 OS << "<vectorindex " << getVectorIndex() << ">";
2632 break;
2633 case k_SysReg:
2634 OS << "<sysreg: " << getSysReg() << '>';
2635 break;
2636 case k_Token:
2637 OS << "'" << getToken() << "'";
2638 break;
2639 case k_SysCR:
2640 OS << "c" << getSysCR();
2641 break;
2642 case k_Prefetch: {
2643 StringRef Name = getPrefetchName();
2644 if (!Name.empty())
2645 OS << "<prfop " << Name << ">";
2646 else
2647 OS << "<prfop invalid #" << getPrefetch() << ">";
2648 break;
2649 }
2650 case k_PSBHint:
2651 OS << getPSBHintName();
2652 break;
2653 case k_PHint:
2654 OS << getPHintName();
2655 break;
2656 case k_BTIHint:
2657 OS << getBTIHintName();
2658 break;
2659 case k_MatrixRegister:
2660 OS << "<matrix " << getMatrixReg() << ">";
2661 break;
2662 case k_MatrixTileList: {
2663 OS << "<matrixlist ";
2664 unsigned RegMask = getMatrixTileListRegMask();
2665 unsigned MaxBits = 8;
2666 for (unsigned I = MaxBits; I > 0; --I)
2667 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2668 OS << '>';
2669 break;
2670 }
2671 case k_SVCR: {
2672 OS << getSVCR();
2673 break;
2674 }
2675 case k_Register:
2676 OS << "<register " << getReg() << ">";
2677 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2678 break;
2679 [[fallthrough]];
2680 case k_ShiftExtend:
2681 OS << "<" << AArch64_AM::getShiftExtendName(ST: getShiftExtendType()) << " #"
2682 << getShiftExtendAmount();
2683 if (!hasShiftExtendAmount())
2684 OS << "<imp>";
2685 OS << '>';
2686 break;
2687 }
2688}
2689
2690/// @name Auto-generated Match Functions
2691/// {
2692
2693static MCRegister MatchRegisterName(StringRef Name);
2694
2695/// }
2696
2697static unsigned MatchNeonVectorRegName(StringRef Name) {
2698 return StringSwitch<unsigned>(Name.lower())
2699 .Case(S: "v0", Value: AArch64::Q0)
2700 .Case(S: "v1", Value: AArch64::Q1)
2701 .Case(S: "v2", Value: AArch64::Q2)
2702 .Case(S: "v3", Value: AArch64::Q3)
2703 .Case(S: "v4", Value: AArch64::Q4)
2704 .Case(S: "v5", Value: AArch64::Q5)
2705 .Case(S: "v6", Value: AArch64::Q6)
2706 .Case(S: "v7", Value: AArch64::Q7)
2707 .Case(S: "v8", Value: AArch64::Q8)
2708 .Case(S: "v9", Value: AArch64::Q9)
2709 .Case(S: "v10", Value: AArch64::Q10)
2710 .Case(S: "v11", Value: AArch64::Q11)
2711 .Case(S: "v12", Value: AArch64::Q12)
2712 .Case(S: "v13", Value: AArch64::Q13)
2713 .Case(S: "v14", Value: AArch64::Q14)
2714 .Case(S: "v15", Value: AArch64::Q15)
2715 .Case(S: "v16", Value: AArch64::Q16)
2716 .Case(S: "v17", Value: AArch64::Q17)
2717 .Case(S: "v18", Value: AArch64::Q18)
2718 .Case(S: "v19", Value: AArch64::Q19)
2719 .Case(S: "v20", Value: AArch64::Q20)
2720 .Case(S: "v21", Value: AArch64::Q21)
2721 .Case(S: "v22", Value: AArch64::Q22)
2722 .Case(S: "v23", Value: AArch64::Q23)
2723 .Case(S: "v24", Value: AArch64::Q24)
2724 .Case(S: "v25", Value: AArch64::Q25)
2725 .Case(S: "v26", Value: AArch64::Q26)
2726 .Case(S: "v27", Value: AArch64::Q27)
2727 .Case(S: "v28", Value: AArch64::Q28)
2728 .Case(S: "v29", Value: AArch64::Q29)
2729 .Case(S: "v30", Value: AArch64::Q30)
2730 .Case(S: "v31", Value: AArch64::Q31)
2731 .Default(Value: 0);
2732}
2733
2734/// Returns an optional pair of (#elements, element-width) if Suffix
2735/// is a valid vector kind. Where the number of elements in a vector
2736/// or the vector width is implicit or explicitly unknown (but still a
2737/// valid suffix kind), 0 is used.
2738static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2739 RegKind VectorKind) {
2740 std::pair<int, int> Res = {-1, -1};
2741
2742 switch (VectorKind) {
2743 case RegKind::NeonVector:
2744 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2745 .Case(S: "", Value: {0, 0})
2746 .Case(S: ".1d", Value: {1, 64})
2747 .Case(S: ".1q", Value: {1, 128})
2748 // '.2h' needed for fp16 scalar pairwise reductions
2749 .Case(S: ".2h", Value: {2, 16})
2750 .Case(S: ".2b", Value: {2, 8})
2751 .Case(S: ".2s", Value: {2, 32})
2752 .Case(S: ".2d", Value: {2, 64})
2753 // '.4b' is another special case for the ARMv8.2a dot product
2754 // operand
2755 .Case(S: ".4b", Value: {4, 8})
2756 .Case(S: ".4h", Value: {4, 16})
2757 .Case(S: ".4s", Value: {4, 32})
2758 .Case(S: ".8b", Value: {8, 8})
2759 .Case(S: ".8h", Value: {8, 16})
2760 .Case(S: ".16b", Value: {16, 8})
2761 // Accept the width neutral ones, too, for verbose syntax. If
2762 // those aren't used in the right places, the token operand won't
2763 // match so all will work out.
2764 .Case(S: ".b", Value: {0, 8})
2765 .Case(S: ".h", Value: {0, 16})
2766 .Case(S: ".s", Value: {0, 32})
2767 .Case(S: ".d", Value: {0, 64})
2768 .Default(Value: {-1, -1});
2769 break;
2770 case RegKind::SVEPredicateAsCounter:
2771 case RegKind::SVEPredicateVector:
2772 case RegKind::SVEDataVector:
2773 case RegKind::Matrix:
2774 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2775 .Case(S: "", Value: {0, 0})
2776 .Case(S: ".b", Value: {0, 8})
2777 .Case(S: ".h", Value: {0, 16})
2778 .Case(S: ".s", Value: {0, 32})
2779 .Case(S: ".d", Value: {0, 64})
2780 .Case(S: ".q", Value: {0, 128})
2781 .Default(Value: {-1, -1});
2782 break;
2783 default:
2784 llvm_unreachable("Unsupported RegKind");
2785 }
2786
2787 if (Res == std::make_pair(x: -1, y: -1))
2788 return std::nullopt;
2789
2790 return std::optional<std::pair<int, int>>(Res);
2791}
2792
2793static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2794 return parseVectorKind(Suffix, VectorKind).has_value();
2795}
2796
2797static unsigned matchSVEDataVectorRegName(StringRef Name) {
2798 return StringSwitch<unsigned>(Name.lower())
2799 .Case(S: "z0", Value: AArch64::Z0)
2800 .Case(S: "z1", Value: AArch64::Z1)
2801 .Case(S: "z2", Value: AArch64::Z2)
2802 .Case(S: "z3", Value: AArch64::Z3)
2803 .Case(S: "z4", Value: AArch64::Z4)
2804 .Case(S: "z5", Value: AArch64::Z5)
2805 .Case(S: "z6", Value: AArch64::Z6)
2806 .Case(S: "z7", Value: AArch64::Z7)
2807 .Case(S: "z8", Value: AArch64::Z8)
2808 .Case(S: "z9", Value: AArch64::Z9)
2809 .Case(S: "z10", Value: AArch64::Z10)
2810 .Case(S: "z11", Value: AArch64::Z11)
2811 .Case(S: "z12", Value: AArch64::Z12)
2812 .Case(S: "z13", Value: AArch64::Z13)
2813 .Case(S: "z14", Value: AArch64::Z14)
2814 .Case(S: "z15", Value: AArch64::Z15)
2815 .Case(S: "z16", Value: AArch64::Z16)
2816 .Case(S: "z17", Value: AArch64::Z17)
2817 .Case(S: "z18", Value: AArch64::Z18)
2818 .Case(S: "z19", Value: AArch64::Z19)
2819 .Case(S: "z20", Value: AArch64::Z20)
2820 .Case(S: "z21", Value: AArch64::Z21)
2821 .Case(S: "z22", Value: AArch64::Z22)
2822 .Case(S: "z23", Value: AArch64::Z23)
2823 .Case(S: "z24", Value: AArch64::Z24)
2824 .Case(S: "z25", Value: AArch64::Z25)
2825 .Case(S: "z26", Value: AArch64::Z26)
2826 .Case(S: "z27", Value: AArch64::Z27)
2827 .Case(S: "z28", Value: AArch64::Z28)
2828 .Case(S: "z29", Value: AArch64::Z29)
2829 .Case(S: "z30", Value: AArch64::Z30)
2830 .Case(S: "z31", Value: AArch64::Z31)
2831 .Default(Value: 0);
2832}
2833
2834static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2835 return StringSwitch<unsigned>(Name.lower())
2836 .Case(S: "p0", Value: AArch64::P0)
2837 .Case(S: "p1", Value: AArch64::P1)
2838 .Case(S: "p2", Value: AArch64::P2)
2839 .Case(S: "p3", Value: AArch64::P3)
2840 .Case(S: "p4", Value: AArch64::P4)
2841 .Case(S: "p5", Value: AArch64::P5)
2842 .Case(S: "p6", Value: AArch64::P6)
2843 .Case(S: "p7", Value: AArch64::P7)
2844 .Case(S: "p8", Value: AArch64::P8)
2845 .Case(S: "p9", Value: AArch64::P9)
2846 .Case(S: "p10", Value: AArch64::P10)
2847 .Case(S: "p11", Value: AArch64::P11)
2848 .Case(S: "p12", Value: AArch64::P12)
2849 .Case(S: "p13", Value: AArch64::P13)
2850 .Case(S: "p14", Value: AArch64::P14)
2851 .Case(S: "p15", Value: AArch64::P15)
2852 .Default(Value: 0);
2853}
2854
2855static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2856 return StringSwitch<unsigned>(Name.lower())
2857 .Case(S: "pn0", Value: AArch64::PN0)
2858 .Case(S: "pn1", Value: AArch64::PN1)
2859 .Case(S: "pn2", Value: AArch64::PN2)
2860 .Case(S: "pn3", Value: AArch64::PN3)
2861 .Case(S: "pn4", Value: AArch64::PN4)
2862 .Case(S: "pn5", Value: AArch64::PN5)
2863 .Case(S: "pn6", Value: AArch64::PN6)
2864 .Case(S: "pn7", Value: AArch64::PN7)
2865 .Case(S: "pn8", Value: AArch64::PN8)
2866 .Case(S: "pn9", Value: AArch64::PN9)
2867 .Case(S: "pn10", Value: AArch64::PN10)
2868 .Case(S: "pn11", Value: AArch64::PN11)
2869 .Case(S: "pn12", Value: AArch64::PN12)
2870 .Case(S: "pn13", Value: AArch64::PN13)
2871 .Case(S: "pn14", Value: AArch64::PN14)
2872 .Case(S: "pn15", Value: AArch64::PN15)
2873 .Default(Value: 0);
2874}
2875
2876static unsigned matchMatrixTileListRegName(StringRef Name) {
2877 return StringSwitch<unsigned>(Name.lower())
2878 .Case(S: "za0.d", Value: AArch64::ZAD0)
2879 .Case(S: "za1.d", Value: AArch64::ZAD1)
2880 .Case(S: "za2.d", Value: AArch64::ZAD2)
2881 .Case(S: "za3.d", Value: AArch64::ZAD3)
2882 .Case(S: "za4.d", Value: AArch64::ZAD4)
2883 .Case(S: "za5.d", Value: AArch64::ZAD5)
2884 .Case(S: "za6.d", Value: AArch64::ZAD6)
2885 .Case(S: "za7.d", Value: AArch64::ZAD7)
2886 .Case(S: "za0.s", Value: AArch64::ZAS0)
2887 .Case(S: "za1.s", Value: AArch64::ZAS1)
2888 .Case(S: "za2.s", Value: AArch64::ZAS2)
2889 .Case(S: "za3.s", Value: AArch64::ZAS3)
2890 .Case(S: "za0.h", Value: AArch64::ZAH0)
2891 .Case(S: "za1.h", Value: AArch64::ZAH1)
2892 .Case(S: "za0.b", Value: AArch64::ZAB0)
2893 .Default(Value: 0);
2894}
2895
2896static unsigned matchMatrixRegName(StringRef Name) {
2897 return StringSwitch<unsigned>(Name.lower())
2898 .Case(S: "za", Value: AArch64::ZA)
2899 .Case(S: "za0.q", Value: AArch64::ZAQ0)
2900 .Case(S: "za1.q", Value: AArch64::ZAQ1)
2901 .Case(S: "za2.q", Value: AArch64::ZAQ2)
2902 .Case(S: "za3.q", Value: AArch64::ZAQ3)
2903 .Case(S: "za4.q", Value: AArch64::ZAQ4)
2904 .Case(S: "za5.q", Value: AArch64::ZAQ5)
2905 .Case(S: "za6.q", Value: AArch64::ZAQ6)
2906 .Case(S: "za7.q", Value: AArch64::ZAQ7)
2907 .Case(S: "za8.q", Value: AArch64::ZAQ8)
2908 .Case(S: "za9.q", Value: AArch64::ZAQ9)
2909 .Case(S: "za10.q", Value: AArch64::ZAQ10)
2910 .Case(S: "za11.q", Value: AArch64::ZAQ11)
2911 .Case(S: "za12.q", Value: AArch64::ZAQ12)
2912 .Case(S: "za13.q", Value: AArch64::ZAQ13)
2913 .Case(S: "za14.q", Value: AArch64::ZAQ14)
2914 .Case(S: "za15.q", Value: AArch64::ZAQ15)
2915 .Case(S: "za0.d", Value: AArch64::ZAD0)
2916 .Case(S: "za1.d", Value: AArch64::ZAD1)
2917 .Case(S: "za2.d", Value: AArch64::ZAD2)
2918 .Case(S: "za3.d", Value: AArch64::ZAD3)
2919 .Case(S: "za4.d", Value: AArch64::ZAD4)
2920 .Case(S: "za5.d", Value: AArch64::ZAD5)
2921 .Case(S: "za6.d", Value: AArch64::ZAD6)
2922 .Case(S: "za7.d", Value: AArch64::ZAD7)
2923 .Case(S: "za0.s", Value: AArch64::ZAS0)
2924 .Case(S: "za1.s", Value: AArch64::ZAS1)
2925 .Case(S: "za2.s", Value: AArch64::ZAS2)
2926 .Case(S: "za3.s", Value: AArch64::ZAS3)
2927 .Case(S: "za0.h", Value: AArch64::ZAH0)
2928 .Case(S: "za1.h", Value: AArch64::ZAH1)
2929 .Case(S: "za0.b", Value: AArch64::ZAB0)
2930 .Case(S: "za0h.q", Value: AArch64::ZAQ0)
2931 .Case(S: "za1h.q", Value: AArch64::ZAQ1)
2932 .Case(S: "za2h.q", Value: AArch64::ZAQ2)
2933 .Case(S: "za3h.q", Value: AArch64::ZAQ3)
2934 .Case(S: "za4h.q", Value: AArch64::ZAQ4)
2935 .Case(S: "za5h.q", Value: AArch64::ZAQ5)
2936 .Case(S: "za6h.q", Value: AArch64::ZAQ6)
2937 .Case(S: "za7h.q", Value: AArch64::ZAQ7)
2938 .Case(S: "za8h.q", Value: AArch64::ZAQ8)
2939 .Case(S: "za9h.q", Value: AArch64::ZAQ9)
2940 .Case(S: "za10h.q", Value: AArch64::ZAQ10)
2941 .Case(S: "za11h.q", Value: AArch64::ZAQ11)
2942 .Case(S: "za12h.q", Value: AArch64::ZAQ12)
2943 .Case(S: "za13h.q", Value: AArch64::ZAQ13)
2944 .Case(S: "za14h.q", Value: AArch64::ZAQ14)
2945 .Case(S: "za15h.q", Value: AArch64::ZAQ15)
2946 .Case(S: "za0h.d", Value: AArch64::ZAD0)
2947 .Case(S: "za1h.d", Value: AArch64::ZAD1)
2948 .Case(S: "za2h.d", Value: AArch64::ZAD2)
2949 .Case(S: "za3h.d", Value: AArch64::ZAD3)
2950 .Case(S: "za4h.d", Value: AArch64::ZAD4)
2951 .Case(S: "za5h.d", Value: AArch64::ZAD5)
2952 .Case(S: "za6h.d", Value: AArch64::ZAD6)
2953 .Case(S: "za7h.d", Value: AArch64::ZAD7)
2954 .Case(S: "za0h.s", Value: AArch64::ZAS0)
2955 .Case(S: "za1h.s", Value: AArch64::ZAS1)
2956 .Case(S: "za2h.s", Value: AArch64::ZAS2)
2957 .Case(S: "za3h.s", Value: AArch64::ZAS3)
2958 .Case(S: "za0h.h", Value: AArch64::ZAH0)
2959 .Case(S: "za1h.h", Value: AArch64::ZAH1)
2960 .Case(S: "za0h.b", Value: AArch64::ZAB0)
2961 .Case(S: "za0v.q", Value: AArch64::ZAQ0)
2962 .Case(S: "za1v.q", Value: AArch64::ZAQ1)
2963 .Case(S: "za2v.q", Value: AArch64::ZAQ2)
2964 .Case(S: "za3v.q", Value: AArch64::ZAQ3)
2965 .Case(S: "za4v.q", Value: AArch64::ZAQ4)
2966 .Case(S: "za5v.q", Value: AArch64::ZAQ5)
2967 .Case(S: "za6v.q", Value: AArch64::ZAQ6)
2968 .Case(S: "za7v.q", Value: AArch64::ZAQ7)
2969 .Case(S: "za8v.q", Value: AArch64::ZAQ8)
2970 .Case(S: "za9v.q", Value: AArch64::ZAQ9)
2971 .Case(S: "za10v.q", Value: AArch64::ZAQ10)
2972 .Case(S: "za11v.q", Value: AArch64::ZAQ11)
2973 .Case(S: "za12v.q", Value: AArch64::ZAQ12)
2974 .Case(S: "za13v.q", Value: AArch64::ZAQ13)
2975 .Case(S: "za14v.q", Value: AArch64::ZAQ14)
2976 .Case(S: "za15v.q", Value: AArch64::ZAQ15)
2977 .Case(S: "za0v.d", Value: AArch64::ZAD0)
2978 .Case(S: "za1v.d", Value: AArch64::ZAD1)
2979 .Case(S: "za2v.d", Value: AArch64::ZAD2)
2980 .Case(S: "za3v.d", Value: AArch64::ZAD3)
2981 .Case(S: "za4v.d", Value: AArch64::ZAD4)
2982 .Case(S: "za5v.d", Value: AArch64::ZAD5)
2983 .Case(S: "za6v.d", Value: AArch64::ZAD6)
2984 .Case(S: "za7v.d", Value: AArch64::ZAD7)
2985 .Case(S: "za0v.s", Value: AArch64::ZAS0)
2986 .Case(S: "za1v.s", Value: AArch64::ZAS1)
2987 .Case(S: "za2v.s", Value: AArch64::ZAS2)
2988 .Case(S: "za3v.s", Value: AArch64::ZAS3)
2989 .Case(S: "za0v.h", Value: AArch64::ZAH0)
2990 .Case(S: "za1v.h", Value: AArch64::ZAH1)
2991 .Case(S: "za0v.b", Value: AArch64::ZAB0)
2992 .Default(Value: 0);
2993}
2994
2995bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2996 SMLoc &EndLoc) {
2997 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2998}
2999
3000ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3001 SMLoc &EndLoc) {
3002 StartLoc = getLoc();
3003 ParseStatus Res = tryParseScalarRegister(Reg);
3004 EndLoc = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3005 return Res;
3006}
3007
3008// Matches a register name or register alias previously defined by '.req'
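// (e.g. after a "foo .req x5" directive, "foo" resolves to X5 when a scalar
// register is expected, and to no register for other kinds).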
3009unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3010 RegKind Kind) {
3011 unsigned RegNum = 0;
3012 if ((RegNum = matchSVEDataVectorRegName(Name)))
3013 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3014
3015 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3016 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3017
3018 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3019 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3020
3021 if ((RegNum = MatchNeonVectorRegName(Name)))
3022 return Kind == RegKind::NeonVector ? RegNum : 0;
3023
3024 if ((RegNum = matchMatrixRegName(Name)))
3025 return Kind == RegKind::Matrix ? RegNum : 0;
3026
3027 if (Name.equals_insensitive(RHS: "zt0"))
3028 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3029
3030 // The parsed register must be of RegKind Scalar
3031 if ((RegNum = MatchRegisterName(Name)))
3032 return (Kind == RegKind::Scalar) ? RegNum : 0;
3033
3034 if (!RegNum) {
3035 // Handle a few common aliases of registers.
3036 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3037 .Case(S: "fp", Value: AArch64::FP)
3038 .Case(S: "lr", Value: AArch64::LR)
3039 .Case(S: "x31", Value: AArch64::XZR)
3040 .Case(S: "w31", Value: AArch64::WZR)
3041 .Default(Value: 0))
3042 return Kind == RegKind::Scalar ? RegNum : 0;
3043
3044 // Check for aliases registered via .req. Canonicalize to lower case.
3045 // That's more consistent since register names are case insensitive, and
3046 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3047 auto Entry = RegisterReqs.find(Key: Name.lower());
3048 if (Entry == RegisterReqs.end())
3049 return 0;
3050
3051 // set RegNum if the match is the right kind of register
3052 if (Kind == Entry->getValue().first)
3053 RegNum = Entry->getValue().second;
3054 }
3055 return RegNum;
3056}
3057
3058unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3059 switch (K) {
3060 case RegKind::Scalar:
3061 case RegKind::NeonVector:
3062 case RegKind::SVEDataVector:
3063 return 32;
3064 case RegKind::Matrix:
3065 case RegKind::SVEPredicateVector:
3066 case RegKind::SVEPredicateAsCounter:
3067 return 16;
3068 case RegKind::LookupTable:
3069 return 1;
3070 }
3071 llvm_unreachable("Unsupported RegKind");
3072}
3073
3074/// tryParseScalarRegister - Try to parse a register name. The token must be an
3075/// Identifier when called, and if it is a register name the token is eaten and
3076/// the register number is returned via \p RegNum.
3077ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3078 const AsmToken &Tok = getTok();
3079 if (Tok.isNot(K: AsmToken::Identifier))
3080 return ParseStatus::NoMatch;
3081
3082 std::string lowerCase = Tok.getString().lower();
3083 unsigned Reg = matchRegisterNameAlias(Name: lowerCase, Kind: RegKind::Scalar);
3084 if (Reg == 0)
3085 return ParseStatus::NoMatch;
3086
3087 RegNum = Reg;
3088 Lex(); // Eat identifier token.
3089 return ParseStatus::Success;
3090}
3091
3092/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3093ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3094 SMLoc S = getLoc();
3095
3096 if (getTok().isNot(K: AsmToken::Identifier))
3097 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3098
3099 StringRef Tok = getTok().getIdentifier();
3100 if (Tok[0] != 'c' && Tok[0] != 'C')
3101 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3102
3103 uint32_t CRNum;
3104 bool BadNum = Tok.drop_front().getAsInteger(Radix: 10, Result&: CRNum);
3105 if (BadNum || CRNum > 15)
3106 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3107
3108 Lex(); // Eat identifier token.
3109 Operands.push_back(
3110 Elt: AArch64Operand::CreateSysCR(Val: CRNum, S, E: getLoc(), Ctx&: getContext()));
3111 return ParseStatus::Success;
3112}
3113
3114// Either an identifier for named values or a 6-bit immediate.
3115ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3116 SMLoc S = getLoc();
3117 const AsmToken &Tok = getTok();
3118
3119 unsigned MaxVal = 63;
3120
3121 // Immediate case, with optional leading hash:
3122 if (parseOptionalToken(T: AsmToken::Hash) ||
3123 Tok.is(K: AsmToken::Integer)) {
3124 const MCExpr *ImmVal;
3125 if (getParser().parseExpression(Res&: ImmVal))
3126 return ParseStatus::Failure;
3127
3128 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
3129 if (!MCE)
3130 return TokError(Msg: "immediate value expected for prefetch operand");
3131 unsigned prfop = MCE->getValue();
3132 if (prfop > MaxVal)
3133 return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
3134 "] expected");
3135
3136 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: MCE->getValue());
3137 Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
3138 Val: prfop, Str: RPRFM ? RPRFM->Name : "", S, Ctx&: getContext()));
3139 return ParseStatus::Success;
3140 }
3141
3142 if (Tok.isNot(K: AsmToken::Identifier))
3143 return TokError(Msg: "prefetch hint expected");
3144
3145 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Name: Tok.getString());
3146 if (!RPRFM)
3147 return TokError(Msg: "prefetch hint expected");
3148
3149 Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
3150 Val: RPRFM->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3151 Lex(); // Eat identifier token.
3152 return ParseStatus::Success;
3153}
3154
3155/// tryParsePrefetch - Try to parse a prefetch operand.
3156template <bool IsSVEPrefetch>
3157ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3158 SMLoc S = getLoc();
3159 const AsmToken &Tok = getTok();
3160
3161 auto LookupByName = [](StringRef N) {
3162 if (IsSVEPrefetch) {
3163 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(Name: N))
3164 return std::optional<unsigned>(Res->Encoding);
3165 } else if (auto Res = AArch64PRFM::lookupPRFMByName(Name: N))
3166 return std::optional<unsigned>(Res->Encoding);
3167 return std::optional<unsigned>();
3168 };
3169
3170 auto LookupByEncoding = [](unsigned E) {
3171 if (IsSVEPrefetch) {
3172 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: E))
3173 return std::optional<StringRef>(Res->Name);
3174 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(Encoding: E))
3175 return std::optional<StringRef>(Res->Name);
3176 return std::optional<StringRef>();
3177 };
3178 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3179
3180 // Either an identifier for named values or a 5-bit immediate.
3181 // Eat optional hash.
3182 if (parseOptionalToken(T: AsmToken::Hash) ||
3183 Tok.is(K: AsmToken::Integer)) {
3184 const MCExpr *ImmVal;
3185 if (getParser().parseExpression(Res&: ImmVal))
3186 return ParseStatus::Failure;
3187
3188 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
3189 if (!MCE)
3190 return TokError(Msg: "immediate value expected for prefetch operand");
3191 unsigned prfop = MCE->getValue();
3192 if (prfop > MaxVal)
3193 return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
3194 "] expected");
3195
3196 auto PRFM = LookupByEncoding(MCE->getValue());
3197 Operands.push_back(AArch64Operand::CreatePrefetch(Val: prfop, Str: PRFM.value_or(""),
3198 S, Ctx&: getContext()));
3199 return ParseStatus::Success;
3200 }
3201
3202 if (Tok.isNot(K: AsmToken::Identifier))
3203 return TokError(Msg: "prefetch hint expected");
3204
3205 auto PRFM = LookupByName(Tok.getString());
3206 if (!PRFM)
3207 return TokError(Msg: "prefetch hint expected");
3208
3209 Operands.push_back(AArch64Operand::CreatePrefetch(
3210 Val: *PRFM, Str: Tok.getString(), S, Ctx&: getContext()));
3211 Lex(); // Eat identifier token.
3212 return ParseStatus::Success;
3213}
3214
3215/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3216ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3217 SMLoc S = getLoc();
3218 const AsmToken &Tok = getTok();
3219 if (Tok.isNot(K: AsmToken::Identifier))
3220 return TokError(Msg: "invalid operand for instruction");
3221
3222 auto PSB = AArch64PSBHint::lookupPSBByName(Name: Tok.getString());
3223 if (!PSB)
3224 return TokError(Msg: "invalid operand for instruction");
3225
3226 Operands.push_back(Elt: AArch64Operand::CreatePSBHint(
3227 Val: PSB->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3228 Lex(); // Eat identifier token.
3229 return ParseStatus::Success;
3230}
3231
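/// tryParseSyspXzrPair - Parse an explicit 'xzr, xzr' register pair operand
/// for the SYSP/TLBIP instructions.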
3232ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3233 SMLoc StartLoc = getLoc();
3234
3235 MCRegister RegNum;
3236
3237 // The case where xzr, xzr is not present is handled by an InstAlias.
3238
3239 auto RegTok = getTok(); // in case we need to backtrack
3240 if (!tryParseScalarRegister(RegNum).isSuccess())
3241 return ParseStatus::NoMatch;
3242
3243 if (RegNum != AArch64::XZR) {
3244 getLexer().UnLex(Token: RegTok);
3245 return ParseStatus::NoMatch;
3246 }
3247
3248 if (parseComma())
3249 return ParseStatus::Failure;
3250
3251 if (!tryParseScalarRegister(RegNum).isSuccess())
3252 return TokError(Msg: "expected register operand");
3253
3254 if (RegNum != AArch64::XZR)
3255 return TokError(Msg: "xzr must be followed by xzr");
3256
3257 // We need to push something, since we claim this is an operand in .td.
3258 // See also AArch64AsmParser::parseKeywordOperand.
3259 Operands.push_back(Elt: AArch64Operand::CreateReg(
3260 RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
3261
3262 return ParseStatus::Success;
3263}
3264
/// tryParseBTIHint - Try to parse a BTI operand, mapped to a HINT instruction.
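/// For example: 'bti c', 'bti j', or 'bti jc'.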
3266ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3267 SMLoc S = getLoc();
3268 const AsmToken &Tok = getTok();
3269 if (Tok.isNot(K: AsmToken::Identifier))
3270 return TokError(Msg: "invalid operand for instruction");
3271
3272 auto BTI = AArch64BTIHint::lookupBTIByName(Name: Tok.getString());
3273 if (!BTI)
3274 return TokError(Msg: "invalid operand for instruction");
3275
3276 Operands.push_back(Elt: AArch64Operand::CreateBTIHint(
3277 Val: BTI->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3278 Lex(); // Eat identifier token.
3279 return ParseStatus::Success;
3280}
3281
3282/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3283/// instruction.
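/// For example: 'adrp x0, symbol' or 'adrp x0, :got:symbol'.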
3284ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3285 SMLoc S = getLoc();
3286 const MCExpr *Expr = nullptr;
3287
3288 if (getTok().is(K: AsmToken::Hash)) {
3289 Lex(); // Eat hash token.
3290 }
3291
3292 if (parseSymbolicImmVal(ImmVal&: Expr))
3293 return ParseStatus::Failure;
3294
3295 AArch64::Specifier ELFSpec;
3296 AArch64::Specifier DarwinSpec;
3297 int64_t Addend;
3298 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3299 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3300 // No modifier was specified at all; this is the syntax for an ELF basic
3301 // ADRP relocation (unfortunately).
3302 Expr =
3303 MCSpecifierExpr::create(Expr, S: AArch64::S_ABS_PAGE, Ctx&: getContext(), Loc: S);
3304 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3305 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3306 Addend != 0) {
3307 return Error(L: S, Msg: "gotpage label reference not allowed an addend");
3308 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3309 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3310 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3311 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3312 ELFSpec != AArch64::S_GOT_PAGE &&
3313 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3314 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3315 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3316 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3317 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3318 // The operand must be an @page or @gotpage qualified symbolref.
3319 return Error(L: S, Msg: "page or gotpage label reference expected");
3320 }
3321 }
3322
  // We have either a label reference, possibly with an addend, or an
  // immediate. The addend is a raw value here; the linker will adjust it to
  // reference only the page.
3326 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3327 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
3328
3329 return ParseStatus::Success;
3330}
3331
3332/// tryParseAdrLabel - Parse and validate a source label for the ADR
3333/// instruction.
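/// For example: 'adr x0, label'.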
3334ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3335 SMLoc S = getLoc();
3336 const MCExpr *Expr = nullptr;
3337
3338 // Leave anything with a bracket to the default for SVE
3339 if (getTok().is(K: AsmToken::LBrac))
3340 return ParseStatus::NoMatch;
3341
3342 if (getTok().is(K: AsmToken::Hash))
3343 Lex(); // Eat hash token.
3344
3345 if (parseSymbolicImmVal(ImmVal&: Expr))
3346 return ParseStatus::Failure;
3347
3348 AArch64::Specifier ELFSpec;
3349 AArch64::Specifier DarwinSpec;
3350 int64_t Addend;
3351 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3352 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3353 // No modifier was specified at all; this is the syntax for an ELF basic
3354 // ADR relocation (unfortunately).
3355 Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_ABS, Ctx&: getContext(), Loc: S);
3356 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3357 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3358 // adr. It's not actually GOT entry page address but the GOT address
3359 // itself - we just share the same variant kind with :got_auth: operator
3360 // applied for adrp.
3361 // TODO: can we somehow get current TargetMachine object to call
3362 // getCodeModel() on it to ensure we are using tiny code model?
3363 return Error(L: S, Msg: "unexpected adr label");
3364 }
3365 }
3366
3367 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3368 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
3369 return ParseStatus::Success;
3370}
3371
3372/// tryParseFPImm - A floating point immediate expression operand.
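/// For example: '#1.0' or '#-0.5'. A hexadecimal value such as '#0x70' is
/// treated as the 8-bit encoded form of the immediate.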
3373template <bool AddFPZeroAsLiteral>
3374ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3375 SMLoc S = getLoc();
3376
3377 bool Hash = parseOptionalToken(T: AsmToken::Hash);
3378
3379 // Handle negation, as that still comes through as a separate token.
3380 bool isNegative = parseOptionalToken(T: AsmToken::Minus);
3381
3382 const AsmToken &Tok = getTok();
3383 if (!Tok.is(K: AsmToken::Real) && !Tok.is(K: AsmToken::Integer)) {
3384 if (!Hash)
3385 return ParseStatus::NoMatch;
3386 return TokError(Msg: "invalid floating point immediate");
3387 }
3388
3389 // Parse hexadecimal representation.
3390 if (Tok.is(K: AsmToken::Integer) && Tok.getString().starts_with(Prefix: "0x")) {
3391 if (Tok.getIntVal() > 255 || isNegative)
3392 return TokError(Msg: "encoded floating point value out of range");
3393
3394 APFloat F((double)AArch64_AM::getFPImmFloat(Imm: Tok.getIntVal()));
3395 Operands.push_back(
3396 Elt: AArch64Operand::CreateFPImm(Val: F, IsExact: true, S, Ctx&: getContext()));
3397 } else {
3398 // Parse FP representation.
3399 APFloat RealVal(APFloat::IEEEdouble());
3400 auto StatusOrErr =
3401 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3402 if (errorToBool(Err: StatusOrErr.takeError()))
3403 return TokError(Msg: "invalid floating point representation");
3404
3405 if (isNegative)
3406 RealVal.changeSign();
3407
3408 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3409 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
3410 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
3411 } else
3412 Operands.push_back(Elt: AArch64Operand::CreateFPImm(
3413 Val: RealVal, IsExact: *StatusOrErr == APFloat::opOK, S, Ctx&: getContext()));
3414 }
3415
3416 Lex(); // Eat the token.
3417
3418 return ParseStatus::Success;
3419}
3420
3421/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3422/// a shift suffix, for example '#1, lsl #12'.
3423ParseStatus
3424AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3425 SMLoc S = getLoc();
3426
3427 if (getTok().is(K: AsmToken::Hash))
3428 Lex(); // Eat '#'
3429 else if (getTok().isNot(K: AsmToken::Integer))
    // Operand should start with '#' or be an integer; otherwise it is not a
    // match.
3431 return ParseStatus::NoMatch;
3432
3433 if (getTok().is(K: AsmToken::Integer) &&
3434 getLexer().peekTok().is(K: AsmToken::Colon))
3435 return tryParseImmRange(Operands);
3436
3437 const MCExpr *Imm = nullptr;
3438 if (parseSymbolicImmVal(ImmVal&: Imm))
3439 return ParseStatus::Failure;
3440 else if (getTok().isNot(K: AsmToken::Comma)) {
3441 Operands.push_back(
3442 Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
3443 return ParseStatus::Success;
3444 }
3445
3446 // Eat ','
3447 Lex();
3448 StringRef VecGroup;
3449 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3450 Operands.push_back(
3451 Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
3452 Operands.push_back(
3453 Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
3454 return ParseStatus::Success;
3455 }
3456
3457 // The optional operand must be "lsl #N" where N is non-negative.
3458 if (!getTok().is(K: AsmToken::Identifier) ||
3459 !getTok().getIdentifier().equals_insensitive(RHS: "lsl"))
3460 return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");
3461
3462 // Eat 'lsl'
3463 Lex();
3464
3465 parseOptionalToken(T: AsmToken::Hash);
3466
3467 if (getTok().isNot(K: AsmToken::Integer))
3468 return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");
3469
3470 int64_t ShiftAmount = getTok().getIntVal();
3471
3472 if (ShiftAmount < 0)
3473 return Error(L: getLoc(), Msg: "positive shift amount required");
3474 Lex(); // Eat the number
3475
3476 // Just in case the optional lsl #0 is used for immediates other than zero.
3477 if (ShiftAmount == 0 && Imm != nullptr) {
3478 Operands.push_back(
3479 Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
3480 return ParseStatus::Success;
3481 }
3482
3483 Operands.push_back(Elt: AArch64Operand::CreateShiftedImm(Val: Imm, ShiftAmount, S,
3484 E: getLoc(), Ctx&: getContext()));
3485 return ParseStatus::Success;
3486}
3487
/// parseCondCodeString - Parse a Condition Code string, optionally returning
/// a suggestion to help with common typos.
3490AArch64CC::CondCode
3491AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3492 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3493 .Case(S: "eq", Value: AArch64CC::EQ)
3494 .Case(S: "ne", Value: AArch64CC::NE)
3495 .Case(S: "cs", Value: AArch64CC::HS)
3496 .Case(S: "hs", Value: AArch64CC::HS)
3497 .Case(S: "cc", Value: AArch64CC::LO)
3498 .Case(S: "lo", Value: AArch64CC::LO)
3499 .Case(S: "mi", Value: AArch64CC::MI)
3500 .Case(S: "pl", Value: AArch64CC::PL)
3501 .Case(S: "vs", Value: AArch64CC::VS)
3502 .Case(S: "vc", Value: AArch64CC::VC)
3503 .Case(S: "hi", Value: AArch64CC::HI)
3504 .Case(S: "ls", Value: AArch64CC::LS)
3505 .Case(S: "ge", Value: AArch64CC::GE)
3506 .Case(S: "lt", Value: AArch64CC::LT)
3507 .Case(S: "gt", Value: AArch64CC::GT)
3508 .Case(S: "le", Value: AArch64CC::LE)
3509 .Case(S: "al", Value: AArch64CC::AL)
3510 .Case(S: "nv", Value: AArch64CC::NV)
3511 // SVE condition code aliases:
3512 .Case(S: "none", Value: AArch64CC::EQ)
3513 .Case(S: "any", Value: AArch64CC::NE)
3514 .Case(S: "nlast", Value: AArch64CC::HS)
3515 .Case(S: "last", Value: AArch64CC::LO)
3516 .Case(S: "first", Value: AArch64CC::MI)
3517 .Case(S: "nfrst", Value: AArch64CC::PL)
3518 .Case(S: "pmore", Value: AArch64CC::HI)
3519 .Case(S: "plast", Value: AArch64CC::LS)
3520 .Case(S: "tcont", Value: AArch64CC::GE)
3521 .Case(S: "tstop", Value: AArch64CC::LT)
3522 .Default(Value: AArch64CC::Invalid);
3523
3524 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3525 Suggestion = "nfrst";
3526
3527 return CC;
3528}
3529
3530/// parseCondCode - Parse a Condition Code operand.
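/// For example, the 'eq' in 'csel x0, x1, x2, eq' or 'b.eq label'.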
3531bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3532 bool invertCondCode) {
3533 SMLoc S = getLoc();
3534 const AsmToken &Tok = getTok();
3535 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3536
3537 StringRef Cond = Tok.getString();
3538 std::string Suggestion;
3539 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3540 if (CC == AArch64CC::Invalid) {
3541 std::string Msg = "invalid condition code";
3542 if (!Suggestion.empty())
3543 Msg += ", did you mean " + Suggestion + "?";
3544 return TokError(Msg);
3545 }
3546 Lex(); // Eat identifier token.
3547
3548 if (invertCondCode) {
3549 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3550 return TokError(Msg: "condition codes AL and NV are invalid for this instruction");
3551 CC = AArch64CC::getInvertedCondCode(Code: AArch64CC::CondCode(CC));
3552 }
3553
3554 Operands.push_back(
3555 Elt: AArch64Operand::CreateCondCode(Code: CC, S, E: getLoc(), Ctx&: getContext()));
3556 return false;
3557}
3558
3559ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3560 const AsmToken &Tok = getTok();
3561 SMLoc S = getLoc();
3562
3563 if (Tok.isNot(K: AsmToken::Identifier))
3564 return TokError(Msg: "invalid operand for instruction");
3565
3566 unsigned PStateImm = -1;
3567 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Name: Tok.getString());
3568 if (!SVCR)
3569 return ParseStatus::NoMatch;
3570 if (SVCR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
3571 PStateImm = SVCR->Encoding;
3572
3573 Operands.push_back(
3574 Elt: AArch64Operand::CreateSVCR(PStateField: PStateImm, Str: Tok.getString(), S, Ctx&: getContext()));
3575 Lex(); // Eat identifier token.
3576 return ParseStatus::Success;
3577}
3578
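/// tryParseMatrixRegister - Parse an SME matrix operand: the 'za' array
/// (optionally with an element size suffix such as 'za.d'), a tile such as
/// 'za0.s', or a tile slice such as 'za0h.s' or 'za1v.d'.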
3579ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3580 const AsmToken &Tok = getTok();
3581 SMLoc S = getLoc();
3582
3583 StringRef Name = Tok.getString();
3584
3585 if (Name.equals_insensitive(RHS: "za") || Name.starts_with_insensitive(Prefix: "za.")) {
3586 Lex(); // eat "za[.(b|h|s|d)]"
3587 unsigned ElementWidth = 0;
3588 auto DotPosition = Name.find(C: '.');
3589 if (DotPosition != StringRef::npos) {
3590 const auto &KindRes =
3591 parseVectorKind(Suffix: Name.drop_front(N: DotPosition), VectorKind: RegKind::Matrix);
3592 if (!KindRes)
3593 return TokError(
3594 Msg: "Expected the register to be followed by element width suffix");
3595 ElementWidth = KindRes->second;
3596 }
3597 Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
3598 RegNum: AArch64::ZA, ElementWidth, Kind: MatrixKind::Array, S, E: getLoc(),
3599 Ctx&: getContext()));
3600 if (getLexer().is(K: AsmToken::LBrac)) {
3601 // There's no comma after matrix operand, so we can parse the next operand
3602 // immediately.
3603 if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
3604 return ParseStatus::NoMatch;
3605 }
3606 return ParseStatus::Success;
3607 }
3608
  // Try to parse a matrix register.
3610 unsigned Reg = matchRegisterNameAlias(Name, Kind: RegKind::Matrix);
3611 if (!Reg)
3612 return ParseStatus::NoMatch;
3613
3614 size_t DotPosition = Name.find(C: '.');
3615 assert(DotPosition != StringRef::npos && "Unexpected register");
3616
3617 StringRef Head = Name.take_front(N: DotPosition);
3618 StringRef Tail = Name.drop_front(N: DotPosition);
3619 StringRef RowOrColumn = Head.take_back();
3620
3621 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3622 .Case(S: "h", Value: MatrixKind::Row)
3623 .Case(S: "v", Value: MatrixKind::Col)
3624 .Default(Value: MatrixKind::Tile);
3625
  // Next, parse the element width suffix.
3627 const auto &KindRes = parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
3628 if (!KindRes)
3629 return TokError(
3630 Msg: "Expected the register to be followed by element width suffix");
3631 unsigned ElementWidth = KindRes->second;
3632
3633 Lex();
3634
3635 Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
3636 RegNum: Reg, ElementWidth, Kind, S, E: getLoc(), Ctx&: getContext()));
3637
3638 if (getLexer().is(K: AsmToken::LBrac)) {
3639 // There's no comma after matrix operand, so we can parse the next operand
3640 // immediately.
3641 if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
3642 return ParseStatus::NoMatch;
3643 }
3644 return ParseStatus::Success;
3645}
3646
/// tryParseOptionalShiftExtend - Some operands take an optional shift or
/// extend argument. Parse it if present.
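/// For example, the 'lsl #2' in 'add x0, x1, x2, lsl #2' or the 'uxtw #2' in
/// 'ldr w0, [x1, w2, uxtw #2]'.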
3649ParseStatus
3650AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3651 const AsmToken &Tok = getTok();
3652 std::string LowerID = Tok.getString().lower();
3653 AArch64_AM::ShiftExtendType ShOp =
3654 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3655 .Case(S: "lsl", Value: AArch64_AM::LSL)
3656 .Case(S: "lsr", Value: AArch64_AM::LSR)
3657 .Case(S: "asr", Value: AArch64_AM::ASR)
3658 .Case(S: "ror", Value: AArch64_AM::ROR)
3659 .Case(S: "msl", Value: AArch64_AM::MSL)
3660 .Case(S: "uxtb", Value: AArch64_AM::UXTB)
3661 .Case(S: "uxth", Value: AArch64_AM::UXTH)
3662 .Case(S: "uxtw", Value: AArch64_AM::UXTW)
3663 .Case(S: "uxtx", Value: AArch64_AM::UXTX)
3664 .Case(S: "sxtb", Value: AArch64_AM::SXTB)
3665 .Case(S: "sxth", Value: AArch64_AM::SXTH)
3666 .Case(S: "sxtw", Value: AArch64_AM::SXTW)
3667 .Case(S: "sxtx", Value: AArch64_AM::SXTX)
3668 .Default(Value: AArch64_AM::InvalidShiftExtend);
3669
3670 if (ShOp == AArch64_AM::InvalidShiftExtend)
3671 return ParseStatus::NoMatch;
3672
3673 SMLoc S = Tok.getLoc();
3674 Lex();
3675
3676 bool Hash = parseOptionalToken(T: AsmToken::Hash);
3677
3678 if (!Hash && getLexer().isNot(K: AsmToken::Integer)) {
3679 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3680 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3681 ShOp == AArch64_AM::MSL) {
3682 // We expect a number here.
3683 return TokError(Msg: "expected #imm after shift specifier");
3684 }
3685
3686 // "extend" type operations don't need an immediate, #0 is implicit.
3687 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3688 Operands.push_back(
3689 Elt: AArch64Operand::CreateShiftExtend(ShOp, Val: 0, HasExplicitAmount: false, S, E, Ctx&: getContext()));
3690 return ParseStatus::Success;
3691 }
3692
3693 // Make sure we do actually have a number, identifier or a parenthesized
3694 // expression.
3695 SMLoc E = getLoc();
3696 if (!getTok().is(K: AsmToken::Integer) && !getTok().is(K: AsmToken::LParen) &&
3697 !getTok().is(K: AsmToken::Identifier))
3698 return Error(L: E, Msg: "expected integer shift amount");
3699
3700 const MCExpr *ImmVal;
3701 if (getParser().parseExpression(Res&: ImmVal))
3702 return ParseStatus::Failure;
3703
3704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
3705 if (!MCE)
3706 return Error(L: E, Msg: "expected constant '#imm' after shift specifier");
3707
3708 E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3709 Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(
3710 ShOp, Val: MCE->getValue(), HasExplicitAmount: true, S, E, Ctx&: getContext()));
3711 return ParseStatus::Success;
3712}
3713
3714static const struct Extension {
3715 const char *Name;
3716 const FeatureBitset Features;
3717} ExtensionMap[] = {
3718 {.Name: "crc", .Features: {AArch64::FeatureCRC}},
3719 {.Name: "sm4", .Features: {AArch64::FeatureSM4}},
3720 {.Name: "sha3", .Features: {AArch64::FeatureSHA3}},
3721 {.Name: "sha2", .Features: {AArch64::FeatureSHA2}},
3722 {.Name: "aes", .Features: {AArch64::FeatureAES}},
3723 {.Name: "crypto", .Features: {AArch64::FeatureCrypto}},
3724 {.Name: "fp", .Features: {AArch64::FeatureFPARMv8}},
3725 {.Name: "simd", .Features: {AArch64::FeatureNEON}},
3726 {.Name: "ras", .Features: {AArch64::FeatureRAS}},
3727 {.Name: "rasv2", .Features: {AArch64::FeatureRASv2}},
3728 {.Name: "lse", .Features: {AArch64::FeatureLSE}},
3729 {.Name: "predres", .Features: {AArch64::FeaturePredRes}},
3730 {.Name: "predres2", .Features: {AArch64::FeatureSPECRES2}},
3731 {.Name: "ccdp", .Features: {AArch64::FeatureCacheDeepPersist}},
3732 {.Name: "mte", .Features: {AArch64::FeatureMTE}},
3733 {.Name: "memtag", .Features: {AArch64::FeatureMTE}},
3734 {.Name: "tlb-rmi", .Features: {AArch64::FeatureTLB_RMI}},
3735 {.Name: "pan", .Features: {AArch64::FeaturePAN}},
3736 {.Name: "pan-rwv", .Features: {AArch64::FeaturePAN_RWV}},
3737 {.Name: "ccpp", .Features: {AArch64::FeatureCCPP}},
3738 {.Name: "rcpc", .Features: {AArch64::FeatureRCPC}},
3739 {.Name: "rng", .Features: {AArch64::FeatureRandGen}},
3740 {.Name: "sve", .Features: {AArch64::FeatureSVE}},
3741 {.Name: "sve-b16b16", .Features: {AArch64::FeatureSVEB16B16}},
3742 {.Name: "sve2", .Features: {AArch64::FeatureSVE2}},
3743 {.Name: "sve-aes", .Features: {AArch64::FeatureSVEAES}},
3744 {.Name: "sve2-aes", .Features: {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3745 {.Name: "sve2-sm4", .Features: {AArch64::FeatureSVE2SM4}},
3746 {.Name: "sve-sha3", .Features: {AArch64::FeatureSVESHA3}},
3747 {.Name: "sve2-sha3", .Features: {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3748 {.Name: "sve-bitperm", .Features: {AArch64::FeatureSVEBitPerm}},
3749 {.Name: "sve2-bitperm",
3750 .Features: {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3751 AArch64::FeatureSVE2}},
3752 {.Name: "sve2p1", .Features: {AArch64::FeatureSVE2p1}},
3753 {.Name: "ls64", .Features: {AArch64::FeatureLS64}},
3754 {.Name: "xs", .Features: {AArch64::FeatureXS}},
3755 {.Name: "pauth", .Features: {AArch64::FeaturePAuth}},
3756 {.Name: "flagm", .Features: {AArch64::FeatureFlagM}},
3757 {.Name: "rme", .Features: {AArch64::FeatureRME}},
3758 {.Name: "sme", .Features: {AArch64::FeatureSME}},
3759 {.Name: "sme-f64f64", .Features: {AArch64::FeatureSMEF64F64}},
3760 {.Name: "sme-f16f16", .Features: {AArch64::FeatureSMEF16F16}},
3761 {.Name: "sme-i16i64", .Features: {AArch64::FeatureSMEI16I64}},
3762 {.Name: "sme2", .Features: {AArch64::FeatureSME2}},
3763 {.Name: "sme2p1", .Features: {AArch64::FeatureSME2p1}},
3764 {.Name: "sme-b16b16", .Features: {AArch64::FeatureSMEB16B16}},
3765 {.Name: "hbc", .Features: {AArch64::FeatureHBC}},
3766 {.Name: "mops", .Features: {AArch64::FeatureMOPS}},
3767 {.Name: "mec", .Features: {AArch64::FeatureMEC}},
3768 {.Name: "the", .Features: {AArch64::FeatureTHE}},
3769 {.Name: "d128", .Features: {AArch64::FeatureD128}},
3770 {.Name: "lse128", .Features: {AArch64::FeatureLSE128}},
3771 {.Name: "ite", .Features: {AArch64::FeatureITE}},
3772 {.Name: "cssc", .Features: {AArch64::FeatureCSSC}},
3773 {.Name: "rcpc3", .Features: {AArch64::FeatureRCPC3}},
3774 {.Name: "gcs", .Features: {AArch64::FeatureGCS}},
3775 {.Name: "bf16", .Features: {AArch64::FeatureBF16}},
3776 {.Name: "compnum", .Features: {AArch64::FeatureComplxNum}},
3777 {.Name: "dotprod", .Features: {AArch64::FeatureDotProd}},
3778 {.Name: "f32mm", .Features: {AArch64::FeatureMatMulFP32}},
3779 {.Name: "f64mm", .Features: {AArch64::FeatureMatMulFP64}},
3780 {.Name: "fp16", .Features: {AArch64::FeatureFullFP16}},
3781 {.Name: "fp16fml", .Features: {AArch64::FeatureFP16FML}},
3782 {.Name: "i8mm", .Features: {AArch64::FeatureMatMulInt8}},
3783 {.Name: "lor", .Features: {AArch64::FeatureLOR}},
3784 {.Name: "profile", .Features: {AArch64::FeatureSPE}},
3785 // "rdma" is the name documented by binutils for the feature, but
3786 // binutils also accepts incomplete prefixes of features, so "rdm"
3787 // works too. Support both spellings here.
3788 {.Name: "rdm", .Features: {AArch64::FeatureRDM}},
3789 {.Name: "rdma", .Features: {AArch64::FeatureRDM}},
3790 {.Name: "sb", .Features: {AArch64::FeatureSB}},
3791 {.Name: "ssbs", .Features: {AArch64::FeatureSSBS}},
3792 {.Name: "tme", .Features: {AArch64::FeatureTME}},
3793 {.Name: "fp8", .Features: {AArch64::FeatureFP8}},
3794 {.Name: "faminmax", .Features: {AArch64::FeatureFAMINMAX}},
3795 {.Name: "fp8fma", .Features: {AArch64::FeatureFP8FMA}},
3796 {.Name: "ssve-fp8fma", .Features: {AArch64::FeatureSSVE_FP8FMA}},
3797 {.Name: "fp8dot2", .Features: {AArch64::FeatureFP8DOT2}},
3798 {.Name: "ssve-fp8dot2", .Features: {AArch64::FeatureSSVE_FP8DOT2}},
3799 {.Name: "fp8dot4", .Features: {AArch64::FeatureFP8DOT4}},
3800 {.Name: "ssve-fp8dot4", .Features: {AArch64::FeatureSSVE_FP8DOT4}},
3801 {.Name: "lut", .Features: {AArch64::FeatureLUT}},
3802 {.Name: "sme-lutv2", .Features: {AArch64::FeatureSME_LUTv2}},
3803 {.Name: "sme-f8f16", .Features: {AArch64::FeatureSMEF8F16}},
3804 {.Name: "sme-f8f32", .Features: {AArch64::FeatureSMEF8F32}},
3805 {.Name: "sme-fa64", .Features: {AArch64::FeatureSMEFA64}},
3806 {.Name: "cpa", .Features: {AArch64::FeatureCPA}},
3807 {.Name: "tlbiw", .Features: {AArch64::FeatureTLBIW}},
3808 {.Name: "pops", .Features: {AArch64::FeaturePoPS}},
3809 {.Name: "cmpbr", .Features: {AArch64::FeatureCMPBR}},
3810 {.Name: "f8f32mm", .Features: {AArch64::FeatureF8F32MM}},
3811 {.Name: "f8f16mm", .Features: {AArch64::FeatureF8F16MM}},
3812 {.Name: "fprcvt", .Features: {AArch64::FeatureFPRCVT}},
3813 {.Name: "lsfe", .Features: {AArch64::FeatureLSFE}},
3814 {.Name: "sme2p2", .Features: {AArch64::FeatureSME2p2}},
3815 {.Name: "ssve-aes", .Features: {AArch64::FeatureSSVE_AES}},
3816 {.Name: "sve2p2", .Features: {AArch64::FeatureSVE2p2}},
3817 {.Name: "sve-aes2", .Features: {AArch64::FeatureSVEAES2}},
3818 {.Name: "sve-bfscale", .Features: {AArch64::FeatureSVEBFSCALE}},
3819 {.Name: "sve-f16f32mm", .Features: {AArch64::FeatureSVE_F16F32MM}},
3820 {.Name: "lsui", .Features: {AArch64::FeatureLSUI}},
3821 {.Name: "occmo", .Features: {AArch64::FeatureOCCMO}},
3822 {.Name: "pcdphint", .Features: {AArch64::FeaturePCDPHINT}},
3823 {.Name: "ssve-bitperm", .Features: {AArch64::FeatureSSVE_BitPerm}},
3824 {.Name: "sme-mop4", .Features: {AArch64::FeatureSME_MOP4}},
3825 {.Name: "sme-tmop", .Features: {AArch64::FeatureSME_TMOP}},
3826};
3827
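/// setRequiredFeatureString - Append a human-readable description of the
/// architecture version or extensions in \p FBS to \p Str, for use in
/// diagnostics.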
3828static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3829 if (FBS[AArch64::HasV8_0aOps])
3830 Str += "ARMv8a";
3831 if (FBS[AArch64::HasV8_1aOps])
3832 Str += "ARMv8.1a";
3833 else if (FBS[AArch64::HasV8_2aOps])
3834 Str += "ARMv8.2a";
3835 else if (FBS[AArch64::HasV8_3aOps])
3836 Str += "ARMv8.3a";
3837 else if (FBS[AArch64::HasV8_4aOps])
3838 Str += "ARMv8.4a";
3839 else if (FBS[AArch64::HasV8_5aOps])
3840 Str += "ARMv8.5a";
3841 else if (FBS[AArch64::HasV8_6aOps])
3842 Str += "ARMv8.6a";
3843 else if (FBS[AArch64::HasV8_7aOps])
3844 Str += "ARMv8.7a";
3845 else if (FBS[AArch64::HasV8_8aOps])
3846 Str += "ARMv8.8a";
3847 else if (FBS[AArch64::HasV8_9aOps])
3848 Str += "ARMv8.9a";
3849 else if (FBS[AArch64::HasV9_0aOps])
3850 Str += "ARMv9-a";
3851 else if (FBS[AArch64::HasV9_1aOps])
3852 Str += "ARMv9.1a";
3853 else if (FBS[AArch64::HasV9_2aOps])
3854 Str += "ARMv9.2a";
3855 else if (FBS[AArch64::HasV9_3aOps])
3856 Str += "ARMv9.3a";
3857 else if (FBS[AArch64::HasV9_4aOps])
3858 Str += "ARMv9.4a";
3859 else if (FBS[AArch64::HasV9_5aOps])
3860 Str += "ARMv9.5a";
3861 else if (FBS[AArch64::HasV9_6aOps])
3862 Str += "ARMv9.6a";
3863 else if (FBS[AArch64::HasV8_0rOps])
3864 Str += "ARMv8r";
3865 else {
3866 SmallVector<std::string, 2> ExtMatches;
3867 for (const auto& Ext : ExtensionMap) {
3868 // Use & in case multiple features are enabled
3869 if ((FBS & Ext.Features) != FeatureBitset())
3870 ExtMatches.push_back(Elt: Ext.Name);
3871 }
3872 Str += !ExtMatches.empty() ? llvm::join(R&: ExtMatches, Separator: ", ") : "(unknown)";
3873 }
3874}
3875
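/// createSysAlias - Build the SYS operand list (op1, Cn, Cm, op2) from a
/// 14-bit encoding laid out as op1[13:11], CRn[10:7], CRm[6:3], op2[2:0].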
3876void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3877 SMLoc S) {
3878 const uint16_t Op2 = Encoding & 7;
3879 const uint16_t Cm = (Encoding & 0x78) >> 3;
3880 const uint16_t Cn = (Encoding & 0x780) >> 7;
3881 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3882
3883 const MCExpr *Expr = MCConstantExpr::create(Value: Op1, Ctx&: getContext());
3884
3885 Operands.push_back(
3886 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
3887 Operands.push_back(
3888 Elt: AArch64Operand::CreateSysCR(Val: Cn, S, E: getLoc(), Ctx&: getContext()));
3889 Operands.push_back(
3890 Elt: AArch64Operand::CreateSysCR(Val: Cm, S, E: getLoc(), Ctx&: getContext()));
3891 Expr = MCConstantExpr::create(Value: Op2, Ctx&: getContext());
3892 Operands.push_back(
3893 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
3894}
3895
3896/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3897/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
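/// For example, 'dc civac, x0' is equivalent to 'sys #3, c7, c14, #1, x0'.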
3898bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3899 OperandVector &Operands) {
3900 if (Name.contains(C: '.'))
3901 return TokError(Msg: "invalid operand");
3902
3903 Mnemonic = Name;
3904 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "sys", S: NameLoc, Ctx&: getContext()));
3905
3906 const AsmToken &Tok = getTok();
3907 StringRef Op = Tok.getString();
3908 SMLoc S = Tok.getLoc();
3909 bool ExpectRegister = true;
3910
3911 if (Mnemonic == "ic") {
3912 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Name: Op);
3913 if (!IC)
3914 return TokError(Msg: "invalid operand for IC instruction");
3915 else if (!IC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
3916 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3917 setRequiredFeatureString(FBS: IC->getRequiredFeatures(), Str);
3918 return TokError(Msg: Str);
3919 }
3920 ExpectRegister = IC->NeedsReg;
3921 createSysAlias(Encoding: IC->Encoding, Operands, S);
3922 } else if (Mnemonic == "dc") {
3923 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Name: Op);
3924 if (!DC)
3925 return TokError(Msg: "invalid operand for DC instruction");
3926 else if (!DC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
3927 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3928 setRequiredFeatureString(FBS: DC->getRequiredFeatures(), Str);
3929 return TokError(Msg: Str);
3930 }
3931 createSysAlias(Encoding: DC->Encoding, Operands, S);
3932 } else if (Mnemonic == "at") {
3933 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Name: Op);
3934 if (!AT)
3935 return TokError(Msg: "invalid operand for AT instruction");
3936 else if (!AT->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
3937 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3938 setRequiredFeatureString(FBS: AT->getRequiredFeatures(), Str);
3939 return TokError(Msg: Str);
3940 }
3941 createSysAlias(Encoding: AT->Encoding, Operands, S);
3942 } else if (Mnemonic == "tlbi") {
3943 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Name: Op);
3944 if (!TLBI)
3945 return TokError(Msg: "invalid operand for TLBI instruction");
3946 else if (!TLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
3947 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3948 setRequiredFeatureString(FBS: TLBI->getRequiredFeatures(), Str);
3949 return TokError(Msg: Str);
3950 }
3951 ExpectRegister = TLBI->NeedsReg;
3952 createSysAlias(Encoding: TLBI->Encoding, Operands, S);
3953 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3954
3955 if (Op.lower() != "rctx")
3956 return TokError(Msg: "invalid operand for prediction restriction instruction");
3957
3958 bool hasAll = getSTI().hasFeature(Feature: AArch64::FeatureAll);
3959 bool hasPredres = hasAll || getSTI().hasFeature(Feature: AArch64::FeaturePredRes);
3960 bool hasSpecres2 = hasAll || getSTI().hasFeature(Feature: AArch64::FeatureSPECRES2);
3961
3962 if (Mnemonic == "cosp" && !hasSpecres2)
3963 return TokError(Msg: "COSP requires: predres2");
3964 if (!hasPredres)
3965 return TokError(Msg: Mnemonic.upper() + "RCTX requires: predres");
3966
3967 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3968 : Mnemonic == "dvp" ? 0b101
3969 : Mnemonic == "cosp" ? 0b110
3970 : Mnemonic == "cpp" ? 0b111
3971 : 0;
3972 assert(PRCTX_Op2 &&
3973 "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op1=3, CRn=7, CRm=3
3975 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3976
3977 createSysAlias(Encoding, Operands, S);
3978 }
3979
3980 Lex(); // Eat operand.
3981
3982 bool HasRegister = false;
3983
3984 // Check for the optional register operand.
3985 if (parseOptionalToken(T: AsmToken::Comma)) {
3986 if (Tok.isNot(K: AsmToken::Identifier) || parseRegister(Operands))
3987 return TokError(Msg: "expected register operand");
3988 HasRegister = true;
3989 }
3990
3991 if (ExpectRegister && !HasRegister)
3992 return TokError(Msg: "specified " + Mnemonic + " op requires a register");
3993 else if (!ExpectRegister && HasRegister)
3994 return TokError(Msg: "specified " + Mnemonic + " op does not use a register");
3995
3996 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
3997 return true;
3998
3999 return false;
4000}
4001
4002/// parseSyspAlias - The TLBIP instructions are simple aliases for
4003/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
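/// For example: 'tlbip vae1is, x0, x1'.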
4004bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4005 OperandVector &Operands) {
4006 if (Name.contains(C: '.'))
4007 return TokError(Msg: "invalid operand");
4008
4009 Mnemonic = Name;
4010 Operands.push_back(
4011 Elt: AArch64Operand::CreateToken(Str: "sysp", S: NameLoc, Ctx&: getContext()));
4012
4013 const AsmToken &Tok = getTok();
4014 StringRef Op = Tok.getString();
4015 SMLoc S = Tok.getLoc();
4016
4017 if (Mnemonic == "tlbip") {
4018 bool HasnXSQualifier = Op.ends_with_insensitive(Suffix: "nXS");
4019 if (HasnXSQualifier) {
4020 Op = Op.drop_back(N: 3);
4021 }
4022 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Name: Op);
4023 if (!TLBIorig)
4024 return TokError(Msg: "invalid operand for TLBIP instruction");
4025 const AArch64TLBI::TLBI TLBI(
4026 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4027 TLBIorig->NeedsReg,
4028 HasnXSQualifier
4029 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4030 : TLBIorig->FeaturesRequired);
4031 if (!TLBI.haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4032 std::string Name =
4033 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4034 std::string Str("TLBIP " + Name + " requires: ");
4035 setRequiredFeatureString(FBS: TLBI.getRequiredFeatures(), Str);
4036 return TokError(Msg: Str);
4037 }
4038 createSysAlias(Encoding: TLBI.Encoding, Operands, S);
4039 }
4040
4041 Lex(); // Eat operand.
4042
4043 if (parseComma())
4044 return true;
4045
4046 if (Tok.isNot(K: AsmToken::Identifier))
4047 return TokError(Msg: "expected register identifier");
4048 auto Result = tryParseSyspXzrPair(Operands);
4049 if (Result.isNoMatch())
4050 Result = tryParseGPRSeqPair(Operands);
4051 if (!Result.isSuccess())
4052 return TokError(Msg: "specified " + Mnemonic +
4053 " op requires a pair of registers");
4054
4055 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4056 return true;
4057
4058 return false;
4059}
4060
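/// tryParseBarrierOperand - Parse a barrier operand for DMB/DSB/ISB/TSB:
/// either a named option such as 'sy' or 'ish', or an immediate in [0, 15].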
4061ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4062 MCAsmParser &Parser = getParser();
4063 const AsmToken &Tok = getTok();
4064
4065 if (Mnemonic == "tsb" && Tok.isNot(K: AsmToken::Identifier))
4066 return TokError(Msg: "'csync' operand expected");
4067 if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
4068 // Immediate operand.
4069 const MCExpr *ImmVal;
4070 SMLoc ExprLoc = getLoc();
4071 AsmToken IntTok = Tok;
4072 if (getParser().parseExpression(Res&: ImmVal))
4073 return ParseStatus::Failure;
4074 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4075 if (!MCE)
4076 return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
4077 int64_t Value = MCE->getValue();
4078 if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no-match here, but it might be matched by the nXS
      // variant. We deliberately do not unlex the optional '#', since it is
      // not needed to recognize an integer immediate.
4082 Parser.getLexer().UnLex(Token: IntTok);
4083 return ParseStatus::NoMatch;
4084 }
4085 if (Value < 0 || Value > 15)
4086 return Error(L: ExprLoc, Msg: "barrier operand out of range");
4087 auto DB = AArch64DB::lookupDBByEncoding(Encoding: Value);
4088 Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: Value, Str: DB ? DB->Name : "",
4089 S: ExprLoc, Ctx&: getContext(),
4090 HasnXSModifier: false /*hasnXSModifier*/));
4091 return ParseStatus::Success;
4092 }
4093
4094 if (Tok.isNot(K: AsmToken::Identifier))
4095 return TokError(Msg: "invalid operand for instruction");
4096
4097 StringRef Operand = Tok.getString();
4098 auto TSB = AArch64TSB::lookupTSBByName(Name: Operand);
4099 auto DB = AArch64DB::lookupDBByName(Name: Operand);
4100 // The only valid named option for ISB is 'sy'
4101 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4102 return TokError(Msg: "'sy' or #imm operand expected");
4103 // The only valid named option for TSB is 'csync'
4104 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4105 return TokError(Msg: "'csync' operand expected");
4106 if (!DB && !TSB) {
4107 if (Mnemonic == "dsb") {
4108 // This case is a no match here, but it might be matched by the nXS
4109 // variant.
4110 return ParseStatus::NoMatch;
4111 }
4112 return TokError(Msg: "invalid barrier option name");
4113 }
4114
4115 Operands.push_back(Elt: AArch64Operand::CreateBarrier(
4116 Val: DB ? DB->Encoding : TSB->Encoding, Str: Tok.getString(), S: getLoc(),
4117 Ctx&: getContext(), HasnXSModifier: false /*hasnXSModifier*/));
4118 Lex(); // Consume the option
4119
4120 return ParseStatus::Success;
4121}
4122
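/// tryParseBarriernXSOperand - Parse the operand of a DSB nXS barrier
/// (FEAT_XS): either a named option or one of the immediates 16, 20, 24 or 28.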
4123ParseStatus
4124AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4125 const AsmToken &Tok = getTok();
4126
4127 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4128 if (Mnemonic != "dsb")
4129 return ParseStatus::Failure;
4130
4131 if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
4132 // Immediate operand.
4133 const MCExpr *ImmVal;
4134 SMLoc ExprLoc = getLoc();
4135 if (getParser().parseExpression(Res&: ImmVal))
4136 return ParseStatus::Failure;
4137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4138 if (!MCE)
4139 return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
4140 int64_t Value = MCE->getValue();
4141 // v8.7-A DSB in the nXS variant accepts only the following immediate
4142 // values: 16, 20, 24, 28.
4143 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4144 return Error(L: ExprLoc, Msg: "barrier operand out of range");
4145 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(ImmValue: Value);
4146 Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: DB->Name,
4147 S: ExprLoc, Ctx&: getContext(),
4148 HasnXSModifier: true /*hasnXSModifier*/));
4149 return ParseStatus::Success;
4150 }
4151
4152 if (Tok.isNot(K: AsmToken::Identifier))
4153 return TokError(Msg: "invalid operand for instruction");
4154
4155 StringRef Operand = Tok.getString();
4156 auto DB = AArch64DBnXS::lookupDBnXSByName(Name: Operand);
4157
4158 if (!DB)
4159 return TokError(Msg: "invalid barrier option name");
4160
4161 Operands.push_back(
4162 Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: Tok.getString(), S: getLoc(),
4163 Ctx&: getContext(), HasnXSModifier: true /*hasnXSModifier*/));
4164 Lex(); // Consume the option
4165
4166 return ParseStatus::Success;
4167}
4168
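/// tryParseSysReg - Parse a system register operand for MRS/MSR, for example
/// the 'ttbr0_el1' in 'mrs x0, ttbr0_el1'.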
4169ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4170 const AsmToken &Tok = getTok();
4171
4172 if (Tok.isNot(K: AsmToken::Identifier))
4173 return ParseStatus::NoMatch;
4174
4175 if (AArch64SVCR::lookupSVCRByName(Name: Tok.getString()))
4176 return ParseStatus::NoMatch;
4177
4178 int MRSReg, MSRReg;
4179 auto SysReg = AArch64SysReg::lookupSysRegByName(Name: Tok.getString());
4180 if (SysReg && SysReg->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4181 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4182 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4183 } else
4184 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Name: Tok.getString());
4185
4186 unsigned PStateImm = -1;
4187 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Name: Tok.getString());
4188 if (PState15 && PState15->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
4189 PStateImm = PState15->Encoding;
4190 if (!PState15) {
4191 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Name: Tok.getString());
4192 if (PState1 && PState1->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
4193 PStateImm = PState1->Encoding;
4194 }
4195
4196 Operands.push_back(
4197 Elt: AArch64Operand::CreateSysReg(Str: Tok.getString(), S: getLoc(), MRSReg, MSRReg,
4198 PStateField: PStateImm, Ctx&: getContext()));
4199 Lex(); // Eat identifier
4200
4201 return ParseStatus::Success;
4202}
4203
4204ParseStatus
4205AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4206 SMLoc S = getLoc();
4207 const AsmToken &Tok = getTok();
4208 if (Tok.isNot(K: AsmToken::Identifier))
4209 return TokError(Msg: "invalid operand for instruction");
4210
4211 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4212 if (!PH)
4213 return TokError(Msg: "invalid operand for instruction");
4214
4215 Operands.push_back(Elt: AArch64Operand::CreatePHintInst(
4216 Val: PH->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
4217 Lex(); // Eat identifier token.
4218 return ParseStatus::Success;
4219}
4220
4221/// tryParseNeonVectorRegister - Parse a vector register operand.
4222bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4223 if (getTok().isNot(K: AsmToken::Identifier))
4224 return true;
4225
4226 SMLoc S = getLoc();
4227 // Check for a vector register specifier first.
4228 StringRef Kind;
4229 MCRegister Reg;
4230 ParseStatus Res = tryParseVectorRegister(Reg, Kind, MatchKind: RegKind::NeonVector);
4231 if (!Res.isSuccess())
4232 return true;
4233
4234 const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::NeonVector);
4235 if (!KindRes)
4236 return true;
4237
4238 unsigned ElementWidth = KindRes->second;
4239 Operands.push_back(
4240 Elt: AArch64Operand::CreateVectorReg(RegNum: Reg, Kind: RegKind::NeonVector, ElementWidth,
4241 S, E: getLoc(), Ctx&: getContext()));
4242
4243 // If there was an explicit qualifier, that goes on as a literal text
4244 // operand.
4245 if (!Kind.empty())
4246 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Kind, S, Ctx&: getContext()));
4247
4248 return tryParseVectorIndex(Operands).isFailure();
4249}
4250
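/// tryParseVectorIndex - Parse an optional vector lane index, e.g. the '[1]'
/// in 'mov s0, v1.s[1]'.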
4251ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4252 SMLoc SIdx = getLoc();
4253 if (parseOptionalToken(T: AsmToken::LBrac)) {
4254 const MCExpr *ImmVal;
4255 if (getParser().parseExpression(Res&: ImmVal))
4256 return ParseStatus::NoMatch;
4257 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4258 if (!MCE)
4259 return TokError(Msg: "immediate value expected for vector index");
4260
4261 SMLoc E = getLoc();
4262
4263 if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
4264 return ParseStatus::Failure;
4265
4266 Operands.push_back(Elt: AArch64Operand::CreateVectorIndex(Idx: MCE->getValue(), S: SIdx,
4267 E, Ctx&: getContext()));
4268 return ParseStatus::Success;
4269 }
4270
4271 return ParseStatus::NoMatch;
4272}
4273
4274// tryParseVectorRegister - Try to parse a vector register name with
4275// optional kind specifier. If it is a register specifier, eat the token
4276// and return it.
4277ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4278 StringRef &Kind,
4279 RegKind MatchKind) {
4280 const AsmToken &Tok = getTok();
4281
4282 if (Tok.isNot(K: AsmToken::Identifier))
4283 return ParseStatus::NoMatch;
4284
4285 StringRef Name = Tok.getString();
4286 // If there is a kind specifier, it's separated from the register name by
4287 // a '.'.
4288 size_t Start = 0, Next = Name.find(C: '.');
4289 StringRef Head = Name.slice(Start, End: Next);
4290 unsigned RegNum = matchRegisterNameAlias(Name: Head, Kind: MatchKind);
4291
4292 if (RegNum) {
4293 if (Next != StringRef::npos) {
4294 Kind = Name.substr(Start: Next);
4295 if (!isValidVectorKind(Suffix: Kind, VectorKind: MatchKind))
4296 return TokError(Msg: "invalid vector kind qualifier");
4297 }
4298 Lex(); // Eat the register token.
4299
4300 Reg = RegNum;
4301 return ParseStatus::Success;
4302 }
4303
4304 return ParseStatus::NoMatch;
4305}
4306
4307ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4308 OperandVector &Operands) {
4309 ParseStatus Status =
4310 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4311 if (!Status.isSuccess())
4312 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4313 return Status;
4314}
4315
4316/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
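/// For example: 'p0.b', or with a predication qualifier, 'p0/z' or 'p0/m'.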
4317template <RegKind RK>
4318ParseStatus
4319AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4320 // Check for a SVE predicate register specifier first.
4321 const SMLoc S = getLoc();
4322 StringRef Kind;
4323 MCRegister RegNum;
4324 auto Res = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RK);
4325 if (!Res.isSuccess())
4326 return Res;
4327
4328 const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RK);
4329 if (!KindRes)
4330 return ParseStatus::NoMatch;
4331
4332 unsigned ElementWidth = KindRes->second;
4333 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
4334 RegNum, Kind: RK, ElementWidth, S,
4335 E: getLoc(), Ctx&: getContext()));
4336
4337 if (getLexer().is(K: AsmToken::LBrac)) {
4338 if (RK == RegKind::SVEPredicateAsCounter) {
4339 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4340 if (ResIndex.isSuccess())
4341 return ParseStatus::Success;
4342 } else {
      // Indexed predicate: there's no comma, so try to parse the next operand
      // immediately.
4345 if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
4346 return ParseStatus::NoMatch;
4347 }
4348 }
4349
4350 // Not all predicates are followed by a '/m' or '/z'.
4351 if (getTok().isNot(K: AsmToken::Slash))
4352 return ParseStatus::Success;
4353
  // But when they are, they shouldn't have an element type suffix.
4355 if (!Kind.empty())
4356 return Error(L: S, Msg: "not expecting size suffix");
4357
4358 // Add a literal slash as operand
4359 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "/", S: getLoc(), Ctx&: getContext()));
4360
4361 Lex(); // Eat the slash.
4362
4363 // Zeroing or merging?
4364 auto Pred = getTok().getString().lower();
4365 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4366 return Error(L: getLoc(), Msg: "expecting 'z' predication");
4367
4368 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4369 return Error(L: getLoc(), Msg: "expecting 'm' or 'z' predication");
4370
4371 // Add zero/merge token.
4372 const char *ZM = Pred == "z" ? "z" : "m";
4373 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ZM, S: getLoc(), Ctx&: getContext()));
4374
4375 Lex(); // Eat zero/merge token.
4376 return ParseStatus::Success;
4377}
4378
4379/// parseRegister - Parse a register operand.
4380bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4381 // Try for a Neon vector register.
4382 if (!tryParseNeonVectorRegister(Operands))
4383 return false;
4384
4385 if (tryParseZTOperand(Operands).isSuccess())
4386 return false;
4387
4388 // Otherwise try for a scalar register.
4389 if (tryParseGPROperand<false>(Operands).isSuccess())
4390 return false;
4391
4392 return true;
4393}
4394
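/// parseSymbolicImmVal - Parse an immediate expression that may be prefixed
/// by a relocation specifier, e.g. ':lo12:symbol' or ':got_lo12:symbol'.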
4395bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4396 bool HasELFModifier = false;
4397 AArch64::Specifier RefKind;
4398 SMLoc Loc = getLexer().getLoc();
4399 if (parseOptionalToken(T: AsmToken::Colon)) {
4400 HasELFModifier = true;
4401
4402 if (getTok().isNot(K: AsmToken::Identifier))
4403 return TokError(Msg: "expect relocation specifier in operand after ':'");
4404
4405 std::string LowerCase = getTok().getIdentifier().lower();
4406 RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
4407 .Case(S: "lo12", Value: AArch64::S_LO12)
4408 .Case(S: "abs_g3", Value: AArch64::S_ABS_G3)
4409 .Case(S: "abs_g2", Value: AArch64::S_ABS_G2)
4410 .Case(S: "abs_g2_s", Value: AArch64::S_ABS_G2_S)
4411 .Case(S: "abs_g2_nc", Value: AArch64::S_ABS_G2_NC)
4412 .Case(S: "abs_g1", Value: AArch64::S_ABS_G1)
4413 .Case(S: "abs_g1_s", Value: AArch64::S_ABS_G1_S)
4414 .Case(S: "abs_g1_nc", Value: AArch64::S_ABS_G1_NC)
4415 .Case(S: "abs_g0", Value: AArch64::S_ABS_G0)
4416 .Case(S: "abs_g0_s", Value: AArch64::S_ABS_G0_S)
4417 .Case(S: "abs_g0_nc", Value: AArch64::S_ABS_G0_NC)
4418 .Case(S: "prel_g3", Value: AArch64::S_PREL_G3)
4419 .Case(S: "prel_g2", Value: AArch64::S_PREL_G2)
4420 .Case(S: "prel_g2_nc", Value: AArch64::S_PREL_G2_NC)
4421 .Case(S: "prel_g1", Value: AArch64::S_PREL_G1)
4422 .Case(S: "prel_g1_nc", Value: AArch64::S_PREL_G1_NC)
4423 .Case(S: "prel_g0", Value: AArch64::S_PREL_G0)
4424 .Case(S: "prel_g0_nc", Value: AArch64::S_PREL_G0_NC)
4425 .Case(S: "dtprel_g2", Value: AArch64::S_DTPREL_G2)
4426 .Case(S: "dtprel_g1", Value: AArch64::S_DTPREL_G1)
4427 .Case(S: "dtprel_g1_nc", Value: AArch64::S_DTPREL_G1_NC)
4428 .Case(S: "dtprel_g0", Value: AArch64::S_DTPREL_G0)
4429 .Case(S: "dtprel_g0_nc", Value: AArch64::S_DTPREL_G0_NC)
4430 .Case(S: "dtprel_hi12", Value: AArch64::S_DTPREL_HI12)
4431 .Case(S: "dtprel_lo12", Value: AArch64::S_DTPREL_LO12)
4432 .Case(S: "dtprel_lo12_nc", Value: AArch64::S_DTPREL_LO12_NC)
4433 .Case(S: "pg_hi21_nc", Value: AArch64::S_ABS_PAGE_NC)
4434 .Case(S: "tprel_g2", Value: AArch64::S_TPREL_G2)
4435 .Case(S: "tprel_g1", Value: AArch64::S_TPREL_G1)
4436 .Case(S: "tprel_g1_nc", Value: AArch64::S_TPREL_G1_NC)
4437 .Case(S: "tprel_g0", Value: AArch64::S_TPREL_G0)
4438 .Case(S: "tprel_g0_nc", Value: AArch64::S_TPREL_G0_NC)
4439 .Case(S: "tprel_hi12", Value: AArch64::S_TPREL_HI12)
4440 .Case(S: "tprel_lo12", Value: AArch64::S_TPREL_LO12)
4441 .Case(S: "tprel_lo12_nc", Value: AArch64::S_TPREL_LO12_NC)
4442 .Case(S: "tlsdesc_lo12", Value: AArch64::S_TLSDESC_LO12)
4443 .Case(S: "tlsdesc_auth_lo12", Value: AArch64::S_TLSDESC_AUTH_LO12)
4444 .Case(S: "got", Value: AArch64::S_GOT_PAGE)
4445 .Case(S: "gotpage_lo15", Value: AArch64::S_GOT_PAGE_LO15)
4446 .Case(S: "got_lo12", Value: AArch64::S_GOT_LO12)
4447 .Case(S: "got_auth", Value: AArch64::S_GOT_AUTH_PAGE)
4448 .Case(S: "got_auth_lo12", Value: AArch64::S_GOT_AUTH_LO12)
4449 .Case(S: "gottprel", Value: AArch64::S_GOTTPREL_PAGE)
4450 .Case(S: "gottprel_lo12", Value: AArch64::S_GOTTPREL_LO12_NC)
4451 .Case(S: "gottprel_g1", Value: AArch64::S_GOTTPREL_G1)
4452 .Case(S: "gottprel_g0_nc", Value: AArch64::S_GOTTPREL_G0_NC)
4453 .Case(S: "tlsdesc", Value: AArch64::S_TLSDESC_PAGE)
4454 .Case(S: "tlsdesc_auth", Value: AArch64::S_TLSDESC_AUTH_PAGE)
4455 .Case(S: "secrel_lo12", Value: AArch64::S_SECREL_LO12)
4456 .Case(S: "secrel_hi12", Value: AArch64::S_SECREL_HI12)
4457 .Default(Value: AArch64::S_INVALID);
4458
4459 if (RefKind == AArch64::S_INVALID)
4460 return TokError(Msg: "expect relocation specifier in operand after ':'");
4461
4462 Lex(); // Eat identifier
4463
4464 if (parseToken(T: AsmToken::Colon, Msg: "expect ':' after relocation specifier"))
4465 return true;
4466 }
4467
4468 if (getParser().parseExpression(Res&: ImmVal))
4469 return true;
4470
4471 if (HasELFModifier)
4472 ImmVal = MCSpecifierExpr::create(Expr: ImmVal, S: RefKind, Ctx&: getContext(), Loc);
4473
4474 SMLoc EndLoc;
4475 if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
4476 if (getParser().parseAtSpecifier(Res&: ImmVal, EndLoc))
4477 return true;
4478 const MCExpr *Term;
4479 MCBinaryExpr::Opcode Opcode;
4480 if (parseOptionalToken(T: AsmToken::Plus))
4481 Opcode = MCBinaryExpr::Add;
4482 else if (parseOptionalToken(T: AsmToken::Minus))
4483 Opcode = MCBinaryExpr::Sub;
4484 else
4485 return false;
4486 if (getParser().parsePrimaryExpr(Res&: Term, EndLoc))
4487 return true;
4488 ImmVal = MCBinaryExpr::create(Op: Opcode, LHS: ImmVal, RHS: Term, Ctx&: getContext());
4489 }
4490
4491 return false;
4492}
4493
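/// tryParseMatrixTileList - Parse an SME matrix tile list such as
/// '{ za0.d, za1.d }', or the '{ za }' alias for all tiles.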
4494ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4495 if (getTok().isNot(K: AsmToken::LCurly))
4496 return ParseStatus::NoMatch;
4497
4498 auto ParseMatrixTile = [this](unsigned &Reg,
4499 unsigned &ElementWidth) -> ParseStatus {
4500 StringRef Name = getTok().getString();
4501 size_t DotPosition = Name.find(C: '.');
4502 if (DotPosition == StringRef::npos)
4503 return ParseStatus::NoMatch;
4504
4505 unsigned RegNum = matchMatrixTileListRegName(Name);
4506 if (!RegNum)
4507 return ParseStatus::NoMatch;
4508
4509 StringRef Tail = Name.drop_front(N: DotPosition);
4510 const std::optional<std::pair<int, int>> &KindRes =
4511 parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
4512 if (!KindRes)
4513 return TokError(
4514 Msg: "Expected the register to be followed by element width suffix");
4515 ElementWidth = KindRes->second;
4516 Reg = RegNum;
4517 Lex(); // Eat the register.
4518 return ParseStatus::Success;
4519 };
4520
4521 SMLoc S = getLoc();
4522 auto LCurly = getTok();
4523 Lex(); // Eat left bracket token.
4524
4525 // Empty matrix list
4526 if (parseOptionalToken(T: AsmToken::RCurly)) {
4527 Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
4528 /*RegMask=*/0, S, E: getLoc(), Ctx&: getContext()));
4529 return ParseStatus::Success;
4530 }
4531
  // Try to parse the {za} alias early.
4533 if (getTok().getString().equals_insensitive(RHS: "za")) {
4534 Lex(); // Eat 'za'
4535
4536 if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
4537 return ParseStatus::Failure;
4538
4539 Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
4540 /*RegMask=*/0xFF, S, E: getLoc(), Ctx&: getContext()));
4541 return ParseStatus::Success;
4542 }
4543
4544 SMLoc TileLoc = getLoc();
4545
4546 unsigned FirstReg, ElementWidth;
4547 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4548 if (!ParseRes.isSuccess()) {
4549 getLexer().UnLex(Token: LCurly);
4550 return ParseRes;
4551 }
4552
4553 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4554
4555 unsigned PrevReg = FirstReg;
4556
4557 SmallSet<unsigned, 8> DRegs;
4558 AArch64Operand::ComputeRegsForAlias(Reg: FirstReg, OutRegs&: DRegs, ElementWidth);
4559
4560 SmallSet<unsigned, 8> SeenRegs;
4561 SeenRegs.insert(V: FirstReg);
4562
4563 while (parseOptionalToken(T: AsmToken::Comma)) {
4564 TileLoc = getLoc();
4565 unsigned Reg, NextElementWidth;
4566 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4567 if (!ParseRes.isSuccess())
4568 return ParseRes;
4569
4570 // Element size must match on all regs in the list.
4571 if (ElementWidth != NextElementWidth)
4572 return Error(L: TileLoc, Msg: "mismatched register size suffix");
4573
4574 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(Reg: PrevReg)))
4575 Warning(L: TileLoc, Msg: "tile list not in ascending order");
4576
4577 if (SeenRegs.contains(V: Reg))
4578 Warning(L: TileLoc, Msg: "duplicate tile in list");
4579 else {
4580 SeenRegs.insert(V: Reg);
4581 AArch64Operand::ComputeRegsForAlias(Reg, OutRegs&: DRegs, ElementWidth);
4582 }
4583
4584 PrevReg = Reg;
4585 }
4586
4587 if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
4588 return ParseStatus::Failure;
4589
4590 unsigned RegMask = 0;
4591 for (auto Reg : DRegs)
4592 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4593 RI->getEncodingValue(Reg: AArch64::ZAD0));
4594 Operands.push_back(
4595 Elt: AArch64Operand::CreateMatrixTileList(RegMask, S, E: getLoc(), Ctx&: getContext()));
4596
4597 return ParseStatus::Success;
4598}
4599
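/// tryParseVectorList - Parse a vector register list such as '{ z0.s, z1.s }',
/// a range such as '{ z0.s - z3.s }', or an SME2 strided list such as
/// '{ z0.s, z4.s, z8.s, z12.s }'.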
4600template <RegKind VectorKind>
4601ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4602 bool ExpectMatch) {
4603 MCAsmParser &Parser = getParser();
4604 if (!getTok().is(K: AsmToken::LCurly))
4605 return ParseStatus::NoMatch;
4606
4607 // Wrapper around parse function
4608 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4609 bool NoMatchIsError) -> ParseStatus {
4610 auto RegTok = getTok();
4611 auto ParseRes = tryParseVectorRegister(Reg, Kind, MatchKind: VectorKind);
4612 if (ParseRes.isSuccess()) {
4613 if (parseVectorKind(Suffix: Kind, VectorKind))
4614 return ParseRes;
4615 llvm_unreachable("Expected a valid vector kind");
4616 }
4617
4618 if (RegTok.is(K: AsmToken::Identifier) && ParseRes.isNoMatch() &&
4619 RegTok.getString().equals_insensitive(RHS: "zt0"))
4620 return ParseStatus::NoMatch;
4621
4622 if (RegTok.isNot(K: AsmToken::Identifier) || ParseRes.isFailure() ||
4623 (ParseRes.isNoMatch() && NoMatchIsError &&
4624 !RegTok.getString().starts_with_insensitive(Prefix: "za")))
4625 return Error(L: Loc, Msg: "vector register expected");
4626
4627 return ParseStatus::NoMatch;
4628 };
4629
4630 unsigned NumRegs = getNumRegsForRegKind(K: VectorKind);
4631 SMLoc S = getLoc();
4632 auto LCurly = getTok();
4633 Lex(); // Eat the '{' token.
4634
4635 StringRef Kind;
4636 MCRegister FirstReg;
4637 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4638
4639 // Put back the original '{' token if there was no match, so that
4640 // different types of list-operands can be matched (e.g. SVE, Neon).
4641 if (ParseRes.isNoMatch())
4642 Parser.getLexer().UnLex(Token: LCurly);
4643
4644 if (!ParseRes.isSuccess())
4645 return ParseRes;
4646
4647 MCRegister PrevReg = FirstReg;
4648 unsigned Count = 1;
4649
4650 unsigned Stride = 1;
4651 if (parseOptionalToken(T: AsmToken::Minus)) {
4652 SMLoc Loc = getLoc();
4653 StringRef NextKind;
4654
4655 MCRegister Reg;
4656 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4657 if (!ParseRes.isSuccess())
4658 return ParseRes;
4659
4660 // The Kind suffix must match on all regs in the list.
4661 if (Kind != NextKind)
4662 return Error(L: Loc, Msg: "mismatched register size suffix");
4663
4664 unsigned Space =
4665 (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));
4666
4667 if (Space == 0 || Space > 3)
4668 return Error(L: Loc, Msg: "invalid number of vectors");
4669
4670 Count += Space;
4671 } else {
4673 bool HasCalculatedStride = false;
4674 while (parseOptionalToken(T: AsmToken::Comma)) {
4675 SMLoc Loc = getLoc();
4676 StringRef NextKind;
4677 MCRegister Reg;
4678 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4679 if (!ParseRes.isSuccess())
4680 return ParseRes;
4681
4682 // The Kind suffix must match on all regs in the list.
4683 if (Kind != NextKind)
4684 return Error(L: Loc, Msg: "mismatched register size suffix");
4685
4686 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4687 unsigned PrevRegVal =
4688 getContext().getRegisterInfo()->getEncodingValue(Reg: PrevReg);
4689 if (!HasCalculatedStride) {
4690 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4691 : (NumRegs - (PrevRegVal - RegVal));
4692 HasCalculatedStride = true;
4693 }
4694
4695 // Registers must be incremental (with a wraparound at the last register).
4696 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4697 return Error(L: Loc, Msg: "registers must have the same sequential stride");
4698
4699 PrevReg = Reg;
4700 ++Count;
4701 }
4702 }
4703
4704 if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
4705 return ParseStatus::Failure;
4706
4707 if (Count > 4)
4708 return Error(L: S, Msg: "invalid number of vectors");
4709
4710 unsigned NumElements = 0;
4711 unsigned ElementWidth = 0;
4712 if (!Kind.empty()) {
4713 if (const auto &VK = parseVectorKind(Suffix: Kind, VectorKind))
4714 std::tie(args&: NumElements, args&: ElementWidth) = *VK;
4715 }
4716
4717 Operands.push_back(Elt: AArch64Operand::CreateVectorList(
4718 RegNum: FirstReg, Count, Stride, NumElements, ElementWidth, RegisterKind: VectorKind, S,
4719 E: getLoc(), Ctx&: getContext()));
4720
4721 return ParseStatus::Success;
4722}
4723
4724/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
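/// e.g. "{ v0.8b, v1.8b }" as in "ld1 { v0.8b, v1.8b }, [x0]", optionally
/// followed by a lane index as in "ld1 { v0.b }[5], [x0]".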
4725bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4726 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, ExpectMatch: true);
4727 if (!ParseRes.isSuccess())
4728 return true;
4729
4730 return tryParseVectorIndex(Operands).isFailure();
4731}
4732
4733ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4734 SMLoc StartLoc = getLoc();
4735
4736 MCRegister RegNum;
4737 ParseStatus Res = tryParseScalarRegister(RegNum);
4738 if (!Res.isSuccess())
4739 return Res;
4740
4741 if (!parseOptionalToken(T: AsmToken::Comma)) {
4742 Operands.push_back(Elt: AArch64Operand::CreateReg(
4743 RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
4744 return ParseStatus::Success;
4745 }
4746
4747 parseOptionalToken(T: AsmToken::Hash);
4748
4749 if (getTok().isNot(K: AsmToken::Integer))
4750 return Error(L: getLoc(), Msg: "index must be absent or #0");
4751
4752 const MCExpr *ImmVal;
4753 if (getParser().parseExpression(Res&: ImmVal) || !isa<MCConstantExpr>(Val: ImmVal) ||
4754 cast<MCConstantExpr>(Val: ImmVal)->getValue() != 0)
4755 return Error(L: getLoc(), Msg: "index must be absent or #0");
4756
4757 Operands.push_back(Elt: AArch64Operand::CreateReg(
4758 RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
4759 return ParseStatus::Success;
4760}
4761
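// Parse the SME2 lookup-table register ZT0, either bare (e.g. "ldr zt0, [x0]")
// or followed by an immediate index such as "zt0[0]".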
4762ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4763 SMLoc StartLoc = getLoc();
4764 const AsmToken &Tok = getTok();
4765 std::string Name = Tok.getString().lower();
4766
4767 unsigned RegNum = matchRegisterNameAlias(Name, Kind: RegKind::LookupTable);
4768
4769 if (RegNum == 0)
4770 return ParseStatus::NoMatch;
4771
4772 Operands.push_back(Elt: AArch64Operand::CreateReg(
4773 RegNum, Kind: RegKind::LookupTable, S: StartLoc, E: getLoc(), Ctx&: getContext()));
4774 Lex(); // Eat register.
4775
4776 // Check if the register is followed by an index.
4777 if (parseOptionalToken(T: AsmToken::LBrac)) {
4778 Operands.push_back(
4779 Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
4780 const MCExpr *ImmVal;
4781 if (getParser().parseExpression(Res&: ImmVal))
4782 return ParseStatus::NoMatch;
4783 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4784 if (!MCE)
4785 return TokError(Msg: "immediate value expected for vector index");
4786 Operands.push_back(Elt: AArch64Operand::CreateImm(
4787 Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S: StartLoc,
4788 E: getLoc(), Ctx&: getContext()));
4789 if (parseOptionalToken(T: AsmToken::Comma))
4790 if (parseOptionalMulOperand(Operands))
4791 return ParseStatus::Failure;
4792 if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
4793 return ParseStatus::Failure;
4794 Operands.push_back(
4795 Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
4796 }
4797 return ParseStatus::Success;
4798}
4799
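// Parse a general-purpose register with an optional shift/extend modifier,
// e.g. "x1" alone, or "x1, lsl #3" / "w2, uxtw #2" as used by shifted- and
// extended-register operand forms.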
4800template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4801ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4802 SMLoc StartLoc = getLoc();
4803
4804 MCRegister RegNum;
4805 ParseStatus Res = tryParseScalarRegister(RegNum);
4806 if (!Res.isSuccess())
4807 return Res;
4808
4809 // No shift/extend is the default.
4810 if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
4811 Operands.push_back(Elt: AArch64Operand::CreateReg(
4812 RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext(), EqTy));
4813 return ParseStatus::Success;
4814 }
4815
4816 // Eat the comma
4817 Lex();
4818
4819 // Match the shift
4820 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4821 Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
4822 if (!Res.isSuccess())
4823 return Res;
4824
4825 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4826 Operands.push_back(Elt: AArch64Operand::CreateReg(
4827 RegNum, Kind: RegKind::Scalar, S: StartLoc, E: Ext->getEndLoc(), Ctx&: getContext(), EqTy,
4828 ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
4829 HasExplicitAmount: Ext->hasShiftExtendAmount()));
4830
4831 return ParseStatus::Success;
4832}
4833
4834bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4835 MCAsmParser &Parser = getParser();
4836
4837 // Some SVE instructions have a decoration after the immediate, e.g.
4838 // "mul vl" or "mul #<imm>". We parse it here and add tokens, which must be
4839 // present in the asm string in the tablegen instruction.
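// For example:
//   ldr z0, [x0, #2, mul vl]
//   cntd x0, all, mul #4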
4840 bool NextIsVL =
4841 Parser.getLexer().peekTok().getString().equals_insensitive(RHS: "vl");
4842 bool NextIsHash = Parser.getLexer().peekTok().is(K: AsmToken::Hash);
4843 if (!getTok().getString().equals_insensitive(RHS: "mul") ||
4844 !(NextIsVL || NextIsHash))
4845 return true;
4846
4847 Operands.push_back(
4848 Elt: AArch64Operand::CreateToken(Str: "mul", S: getLoc(), Ctx&: getContext()));
4849 Lex(); // Eat the "mul"
4850
4851 if (NextIsVL) {
4852 Operands.push_back(
4853 Elt: AArch64Operand::CreateToken(Str: "vl", S: getLoc(), Ctx&: getContext()));
4854 Lex(); // Eat the "vl"
4855 return false;
4856 }
4857
4858 if (NextIsHash) {
4859 Lex(); // Eat the #
4860 SMLoc S = getLoc();
4861
4862 // Parse immediate operand.
4863 const MCExpr *ImmVal;
4864 if (!Parser.parseExpression(Res&: ImmVal))
4865 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal)) {
4866 Operands.push_back(Elt: AArch64Operand::CreateImm(
4867 Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S, E: getLoc(),
4868 Ctx&: getContext()));
4869 return false;
4870 }
4871 }
4872
4873 return Error(L: getLoc(), Msg: "expected 'vl' or '#<imm>'");
4874}
4875
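// Parse the optional SME2 vector-group specifier "vgx2"/"vgx4" that appears
// inside ZA array indices, e.g. "za.s[w8, 0, vgx4]".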
4876bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4877 StringRef &VecGroup) {
4878 MCAsmParser &Parser = getParser();
4879 auto Tok = Parser.getTok();
4880 if (Tok.isNot(K: AsmToken::Identifier))
4881 return true;
4882
4883 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4884 .Case(S: "vgx2", Value: "vgx2")
4885 .Case(S: "vgx4", Value: "vgx4")
4886 .Default(Value: "");
4887
4888 if (VG.empty())
4889 return true;
4890
4891 VecGroup = VG;
4892 Parser.Lex(); // Eat vgx[2|4]
4893 return false;
4894}
4895
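// Parse the keyword operand of two-word mnemonics, e.g. "smstart sm",
// "smstop za" or "brb iall"; other identifiers are passed through as tokens.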
4896bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4897 auto Tok = getTok();
4898 if (Tok.isNot(K: AsmToken::Identifier))
4899 return true;
4900
4901 auto Keyword = Tok.getString();
4902 Keyword = StringSwitch<StringRef>(Keyword.lower())
4903 .Case(S: "sm", Value: "sm")
4904 .Case(S: "za", Value: "za")
4905 .Default(Value: Keyword);
4906 Operands.push_back(
4907 Elt: AArch64Operand::CreateToken(Str: Keyword, S: Tok.getLoc(), Ctx&: getContext()));
4908
4909 Lex();
4910 return false;
4911}
4912
4913 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
4914 /// the operand regardless of the mnemonic.
4915bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4916 bool invertCondCode) {
4917 MCAsmParser &Parser = getParser();
4918
4919 ParseStatus ResTy =
4920 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4921
4922 // Check if the current operand has a custom associated parser; if so, try to
4923 // custom parse the operand, or fall back to the general approach.
4924 if (ResTy.isSuccess())
4925 return false;
4926 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4927 // there was a match, but an error occurred, in which case, just return that
4928 // the operand parsing failed.
4929 if (ResTy.isFailure())
4930 return true;
4931
4932 // Nothing custom, so do general case parsing.
4933 SMLoc S, E;
4934 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4935 if (parseOptionalToken(T: AsmToken::Comma)) {
4936 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4937 if (!Res.isNoMatch())
4938 return Res.isFailure();
4939 getLexer().UnLex(Token: SavedTok);
4940 }
4941 return false;
4942 };
4943 switch (getLexer().getKind()) {
4944 default: {
4945 SMLoc S = getLoc();
4946 const MCExpr *Expr;
4947 if (parseSymbolicImmVal(ImmVal&: Expr))
4948 return Error(L: S, Msg: "invalid operand");
4949
4950 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
4951 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
4952 return parseOptionalShiftExtend(getTok());
4953 }
4954 case AsmToken::LBrac: {
4955 Operands.push_back(
4956 Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
4957 Lex(); // Eat '['
4958
4959 // There's no comma after a '[', so we can parse the next operand
4960 // immediately.
4961 return parseOperand(Operands, isCondCode: false, invertCondCode: false);
4962 }
4963 case AsmToken::LCurly: {
4964 if (!parseNeonVectorList(Operands))
4965 return false;
4966
4967 Operands.push_back(
4968 Elt: AArch64Operand::CreateToken(Str: "{", S: getLoc(), Ctx&: getContext()));
4969 Lex(); // Eat '{'
4970
4971 // There's no comma after a '{', so we can parse the next operand
4972 // immediately.
4973 return parseOperand(Operands, isCondCode: false, invertCondCode: false);
4974 }
4975 case AsmToken::Identifier: {
4976 // See if this is a "VG" decoration used by SME instructions.
4977 StringRef VecGroup;
4978 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4979 Operands.push_back(
4980 Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
4981 return false;
4982 }
4983 // If we're expecting a Condition Code operand, then just parse that.
4984 if (isCondCode)
4985 return parseCondCode(Operands, invertCondCode);
4986
4987 // If it's a register name, parse it.
4988 if (!parseRegister(Operands)) {
4989 // Parse an optional shift/extend modifier.
4990 AsmToken SavedTok = getTok();
4991 if (parseOptionalToken(T: AsmToken::Comma)) {
4992 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4993 // such cases and don't report an error when <label> happens to match a
4994 // shift/extend modifier.
4995 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4996 /*ParseForAllFeatures=*/true);
4997 if (!Res.isNoMatch())
4998 return Res.isFailure();
4999 Res = tryParseOptionalShiftExtend(Operands);
5000 if (!Res.isNoMatch())
5001 return Res.isFailure();
5002 getLexer().UnLex(Token: SavedTok);
5003 }
5004 return false;
5005 }
5006
5007 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5008 // by SVE instructions.
5009 if (!parseOptionalMulOperand(Operands))
5010 return false;
5011
5012 // If this is a two-word mnemonic, parse its special keyword
5013 // operand as an identifier.
5014 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5015 Mnemonic == "gcsb")
5016 return parseKeywordOperand(Operands);
5017
5018 // This was not a register so parse other operands that start with an
5019 // identifier (like labels) as expressions and create them as immediates.
5020 const MCExpr *IdVal, *Term;
5021 S = getLoc();
5022 if (getParser().parseExpression(Res&: IdVal))
5023 return true;
5024 if (getParser().parseAtSpecifier(Res&: IdVal, EndLoc&: E))
5025 return true;
5026 std::optional<MCBinaryExpr::Opcode> Opcode;
5027 if (parseOptionalToken(T: AsmToken::Plus))
5028 Opcode = MCBinaryExpr::Add;
5029 else if (parseOptionalToken(T: AsmToken::Minus))
5030 Opcode = MCBinaryExpr::Sub;
5031 if (Opcode) {
5032 if (getParser().parsePrimaryExpr(Res&: Term, EndLoc&: E))
5033 return true;
5034 IdVal = MCBinaryExpr::create(Op: *Opcode, LHS: IdVal, RHS: Term, Ctx&: getContext());
5035 }
5036 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: IdVal, S, E, Ctx&: getContext()));
5037
5038 // Parse an optional shift/extend modifier.
5039 return parseOptionalShiftExtend(getTok());
5040 }
5041 case AsmToken::Integer:
5042 case AsmToken::Real:
5043 case AsmToken::Hash: {
5044 // #42 -> immediate.
5045 S = getLoc();
5046
5047 parseOptionalToken(T: AsmToken::Hash);
5048
5049 // Parse a negative sign
5050 bool isNegative = false;
5051 if (getTok().is(K: AsmToken::Minus)) {
5052 isNegative = true;
5053 // We need to consume this token only when we have a Real, otherwise
5054 // we let parseSymbolicImmVal take care of it
5055 if (Parser.getLexer().peekTok().is(K: AsmToken::Real))
5056 Lex();
5057 }
5058
5059 // The only Real that should come through here is a literal #0.0 for
5060 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5061 // so convert the value.
5062 const AsmToken &Tok = getTok();
5063 if (Tok.is(K: AsmToken::Real)) {
5064 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5065 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5066 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5067 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5068 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5069 return TokError(Msg: "unexpected floating point literal");
5070 else if (IntVal != 0 || isNegative)
5071 return TokError(Msg: "expected floating-point constant #0.0");
5072 Lex(); // Eat the token.
5073
5074 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
5075 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
5076 return false;
5077 }
5078
5079 const MCExpr *ImmVal;
5080 if (parseSymbolicImmVal(ImmVal))
5081 return true;
5082
5083 E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
5084 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: ImmVal, S, E, Ctx&: getContext()));
5085
5086 // Parse an optional shift/extend modifier.
5087 return parseOptionalShiftExtend(Tok);
5088 }
5089 case AsmToken::Equal: {
5090 SMLoc Loc = getLoc();
5091 if (Mnemonic != "ldr") // only parse for the ldr pseudo (e.g. ldr x0, =val)
5092 return TokError(Msg: "unexpected token in operand");
5093 Lex(); // Eat '='
5094 const MCExpr *SubExprVal;
5095 if (getParser().parseExpression(Res&: SubExprVal))
5096 return true;
5097
5098 if (Operands.size() < 2 ||
5099 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5100 return Error(L: Loc, Msg: "Only valid when first operand is register");
5101
5102 bool IsXReg =
5103 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5104 Reg: Operands[1]->getReg());
5105
5106 MCContext& Ctx = getContext();
5107 E = SMLoc::getFromPointer(Ptr: Loc.getPointer() - 1);
5108 // If the operand is an immediate that fits in a mov, replace the ldr with a movz.
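// For example, "ldr x0, =0x10000" is assembled as "movz x0, #1, lsl #16";
// values that cannot be materialised this way fall through to the literal
// pool handling below.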
5109 if (isa<MCConstantExpr>(Val: SubExprVal)) {
5110 uint64_t Imm = (cast<MCConstantExpr>(Val: SubExprVal))->getValue();
5111 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5112 while (Imm > 0xFFFF && llvm::countr_zero(Val: Imm) >= 16) {
5113 ShiftAmt += 16;
5114 Imm >>= 16;
5115 }
5116 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5117 Operands[0] = AArch64Operand::CreateToken(Str: "movz", S: Loc, Ctx);
5118 Operands.push_back(Elt: AArch64Operand::CreateImm(
5119 Val: MCConstantExpr::create(Value: Imm, Ctx), S, E, Ctx));
5120 if (ShiftAmt)
5121 Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(ShOp: AArch64_AM::LSL,
5122 Val: ShiftAmt, HasExplicitAmount: true, S, E, Ctx));
5123 return false;
5124 }
5125 APInt Simm = APInt(64, Imm << ShiftAmt);
5126 // Check if the immediate is an unsigned or signed 32-bit int for W regs.
5127 if (!IsXReg && !(Simm.isIntN(N: 32) || Simm.isSignedIntN(N: 32)))
5128 return Error(L: Loc, Msg: "Immediate too large for register");
5129 }
5130 // If it is a label or an imm that cannot fit in a movz, put it into the constant pool.
5131 const MCExpr *CPLoc =
5132 getTargetStreamer().addConstantPoolEntry(SubExprVal, Size: IsXReg ? 8 : 4, Loc);
5133 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: CPLoc, S, E, Ctx));
5134 return false;
5135 }
5136 }
5137}
5138
5139bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5140 const MCExpr *Expr = nullptr;
5141 SMLoc L = getLoc();
5142 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
5143 return true;
5144 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
5145 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
5146 return true;
5147 Out = Value->getValue();
5148 return false;
5149}
5150
5151bool AArch64AsmParser::parseComma() {
5152 if (check(P: getTok().isNot(K: AsmToken::Comma), Loc: getLoc(), Msg: "expected comma"))
5153 return true;
5154 // Eat the comma
5155 Lex();
5156 return false;
5157}
5158
5159bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5160 unsigned First, unsigned Last) {
5161 MCRegister Reg;
5162 SMLoc Start, End;
5163 if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register"))
5164 return true;
5165
5166 // Special handling for FP and LR; they aren't linearly after x28 in
5167 // the registers enum.
5168 unsigned RangeEnd = Last;
5169 if (Base == AArch64::X0) {
5170 if (Last == AArch64::FP) {
5171 RangeEnd = AArch64::X28;
5172 if (Reg == AArch64::FP) {
5173 Out = 29;
5174 return false;
5175 }
5176 }
5177 if (Last == AArch64::LR) {
5178 RangeEnd = AArch64::X28;
5179 if (Reg == AArch64::FP) {
5180 Out = 29;
5181 return false;
5182 } else if (Reg == AArch64::LR) {
5183 Out = 30;
5184 return false;
5185 }
5186 }
5187 }
5188
5189 if (check(P: Reg < First || Reg > RangeEnd, Loc: Start,
5190 Msg: Twine("expected register in range ") +
5191 AArch64InstPrinter::getRegisterName(Reg: First) + " to " +
5192 AArch64InstPrinter::getRegisterName(Reg: Last)))
5193 return true;
5194 Out = Reg - Base;
5195 return false;
5196}
5197
5198bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5199 const MCParsedAsmOperand &Op2) const {
5200 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5201 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5202
5203 if (AOp1.isVectorList() && AOp2.isVectorList())
5204 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5205 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5206 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5207
5208 if (!AOp1.isReg() || !AOp2.isReg())
5209 return false;
5210
5211 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5212 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5213 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5214
5215 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5216 "Testing equality of non-scalar registers not supported");
5217
5218 // Check if the registers match via their sub/super registers.
5219 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5220 return getXRegFromWReg(Reg: Op1.getReg()) == Op2.getReg();
5221 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5222 return getWRegFromXReg(Reg: Op1.getReg()) == Op2.getReg();
5223 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5224 return getXRegFromWReg(Reg: Op2.getReg()) == Op1.getReg();
5225 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5226 return getWRegFromXReg(Reg: Op2.getReg()) == Op1.getReg();
5227
5228 return false;
5229}
5230
5231/// Parse an AArch64 instruction mnemonic followed by its operands.
5232bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5233 StringRef Name, SMLoc NameLoc,
5234 OperandVector &Operands) {
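// Canonicalise condition-suffixed branch mnemonics, e.g. "bne" is rewritten
// to "b.ne" before matching.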
5235 Name = StringSwitch<StringRef>(Name.lower())
5236 .Case(S: "beq", Value: "b.eq")
5237 .Case(S: "bne", Value: "b.ne")
5238 .Case(S: "bhs", Value: "b.hs")
5239 .Case(S: "bcs", Value: "b.cs")
5240 .Case(S: "blo", Value: "b.lo")
5241 .Case(S: "bcc", Value: "b.cc")
5242 .Case(S: "bmi", Value: "b.mi")
5243 .Case(S: "bpl", Value: "b.pl")
5244 .Case(S: "bvs", Value: "b.vs")
5245 .Case(S: "bvc", Value: "b.vc")
5246 .Case(S: "bhi", Value: "b.hi")
5247 .Case(S: "bls", Value: "b.ls")
5248 .Case(S: "bge", Value: "b.ge")
5249 .Case(S: "blt", Value: "b.lt")
5250 .Case(S: "bgt", Value: "b.gt")
5251 .Case(S: "ble", Value: "b.le")
5252 .Case(S: "bal", Value: "b.al")
5253 .Case(S: "bnv", Value: "b.nv")
5254 .Default(Value: Name);
5255
5256 // First check for the AArch64-specific .req directive.
5257 if (getTok().is(K: AsmToken::Identifier) &&
5258 getTok().getIdentifier().lower() == ".req") {
5259 parseDirectiveReq(Name, L: NameLoc);
5260 // We always return 'error' for this, as we're done with this
5261 // statement and don't need to match the instruction.
5262 return true;
5263 }
5264
5265 // Create the leading tokens for the mnemonic, split by '.' characters.
5266 size_t Start = 0, Next = Name.find(C: '.');
5267 StringRef Head = Name.slice(Start, End: Next);
5268
5269 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5270 // the SYS instruction.
5271 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5272 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5273 return parseSysAlias(Name: Head, NameLoc, Operands);
5274
5275 // TLBIP instructions are aliases for the SYSP instruction.
5276 if (Head == "tlbip")
5277 return parseSyspAlias(Name: Head, NameLoc, Operands);
5278
5279 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Head, S: NameLoc, Ctx&: getContext()));
5280 Mnemonic = Head;
5281
5282 // Handle condition codes for a branch mnemonic
5283 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5284 Start = Next;
5285 Next = Name.find(C: '.', From: Start + 1);
5286 Head = Name.slice(Start: Start + 1, End: Next);
5287
5288 SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
5289 (Head.data() - Name.data()));
5290 std::string Suggestion;
5291 AArch64CC::CondCode CC = parseCondCodeString(Cond: Head, Suggestion);
5292 if (CC == AArch64CC::Invalid) {
5293 std::string Msg = "invalid condition code";
5294 if (!Suggestion.empty())
5295 Msg += ", did you mean " + Suggestion + "?";
5296 return Error(L: SuffixLoc, Msg);
5297 }
5298 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".", S: SuffixLoc, Ctx&: getContext(),
5299 /*IsSuffix=*/true));
5300 Operands.push_back(
5301 Elt: AArch64Operand::CreateCondCode(Code: CC, S: NameLoc, E: NameLoc, Ctx&: getContext()));
5302 }
5303
5304 // Add the remaining tokens in the mnemonic.
5305 while (Next != StringRef::npos) {
5306 Start = Next;
5307 Next = Name.find(C: '.', From: Start + 1);
5308 Head = Name.slice(Start, End: Next);
5309 SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
5310 (Head.data() - Name.data()) + 1);
5311 Operands.push_back(Elt: AArch64Operand::CreateToken(
5312 Str: Head, S: SuffixLoc, Ctx&: getContext(), /*IsSuffix=*/true));
5313 }
5314
5315 // Conditional compare instructions have a Condition Code operand, which needs
5316 // to be parsed and an immediate operand created.
5317 bool condCodeFourthOperand =
5318 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5319 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5320 Head == "csinc" || Head == "csinv" || Head == "csneg");
5321
5322 // These instructions are aliases to some of the conditional select
5323 // instructions. However, the condition code is inverted in the aliased
5324 // instruction.
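// For example, "cset w0, eq" is an alias of "csinc w0, wzr, wzr, ne".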
5325 //
5326 // FIXME: Is this the correct way to handle these? Or should the parser
5327 // generate the aliased instructions directly?
5328 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5329 bool condCodeThirdOperand =
5330 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5331
5332 // Read the remaining operands.
5333 if (getLexer().isNot(K: AsmToken::EndOfStatement)) {
5334
5335 unsigned N = 1;
5336 do {
5337 // Parse and remember the operand.
5338 if (parseOperand(Operands, isCondCode: (N == 4 && condCodeFourthOperand) ||
5339 (N == 3 && condCodeThirdOperand) ||
5340 (N == 2 && condCodeSecondOperand),
5341 invertCondCode: condCodeSecondOperand || condCodeThirdOperand)) {
5342 return true;
5343 }
5344
5345 // After successfully parsing some operands there are three special cases
5346 // to consider (i.e. notional operands not separated by commas). Two are
5347 // due to memory specifiers:
5348 // + An RBrac will end an address for load/store/prefetch
5349 // + An '!' will indicate a pre-indexed operation.
5350 //
5351 // And a further case is '}', which ends a group of tokens specifying the
5352 // SME accumulator array 'ZA' or tile vector, i.e.
5353 //
5354 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5355 //
5356 // It's someone else's responsibility to make sure these tokens are sane
5357 // in the given context!
5358
5359 if (parseOptionalToken(T: AsmToken::RBrac))
5360 Operands.push_back(
5361 Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
5362 if (parseOptionalToken(T: AsmToken::Exclaim))
5363 Operands.push_back(
5364 Elt: AArch64Operand::CreateToken(Str: "!", S: getLoc(), Ctx&: getContext()));
5365 if (parseOptionalToken(T: AsmToken::RCurly))
5366 Operands.push_back(
5367 Elt: AArch64Operand::CreateToken(Str: "}", S: getLoc(), Ctx&: getContext()));
5368
5369 ++N;
5370 } while (parseOptionalToken(T: AsmToken::Comma));
5371 }
5372
5373 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
5374 return true;
5375
5376 return false;
5377}
5378
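// Return true if Reg names the same architectural register as ZReg under any
// of its scalar FP/vector views, e.g. B3, H3, S3, D3, Q3 and Z3 all alias Z3.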
5379static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5380 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5381 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5382 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5383 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5384 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5385 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5386 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5387}
5388
5389// FIXME: This entire function is a giant hack to provide us with decent
5390// operand range validation/diagnostics until TableGen/MC can be extended
5391// to support autogeneration of this kind of validation.
5392bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5393 SmallVectorImpl<SMLoc> &Loc) {
5394 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5395 const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
5396
5397 // A prefix only applies to the instruction following it. Here we extract
5398 // prefix information for the next instruction before validating the current
5399 // one so that in the case of failure we don't erroneously continue using the
5400 // current prefix.
5401 PrefixInfo Prefix = NextPrefix;
5402 NextPrefix = PrefixInfo::CreateFromInst(Inst, TSFlags: MCID.TSFlags);
5403
5404 // Before validating the instruction in isolation we run through the rules
5405 // applicable when it follows a prefix instruction.
5406 // NOTE: brk & hlt can be prefixed but require no additional validation.
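// For example, an unpredicated movprfx pairs with a destructive SVE operation:
//   movprfx z0, z7
//   add     z0.d, p0/m, z0.d, z1.d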
5407 if (Prefix.isActive() &&
5408 (Inst.getOpcode() != AArch64::BRK) &&
5409 (Inst.getOpcode() != AArch64::HLT)) {
5410
5411 // Prefixed instructions must have a destructive operand.
5412 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5413 AArch64::NotDestructive)
5414 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5415 " movprfx, suggest replacing movprfx with mov");
5416
5417 // Destination operands must match.
5418 if (Inst.getOperand(i: 0).getReg() != Prefix.getDstReg())
5419 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5420 " movprfx writing to a different destination");
5421
5422 // Destination operand must not be used in any other location.
5423 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5424 if (Inst.getOperand(i).isReg() &&
5425 (MCID.getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO) == -1) &&
5426 isMatchingOrAlias(ZReg: Prefix.getDstReg(), Reg: Inst.getOperand(i).getReg()))
5427 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5428 " movprfx and destination also used as non-destructive"
5429 " source");
5430 }
5431
5432 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5433 if (Prefix.isPredicated()) {
5434 int PgIdx = -1;
5435
5436 // Find the instruction's general predicate.
5437 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5438 if (Inst.getOperand(i).isReg() &&
5439 PPRRegClass.contains(Reg: Inst.getOperand(i).getReg())) {
5440 PgIdx = i;
5441 break;
5442 }
5443
5444 // Instruction must be predicated if the movprfx is predicated.
5445 if (PgIdx == -1 ||
5446 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5447 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5448 " predicated movprfx, suggest using unpredicated movprfx");
5449
5450 // Instruction must use same general predicate as the movprfx.
5451 if (Inst.getOperand(i: PgIdx).getReg() != Prefix.getPgReg())
5452 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5453 " predicated movprfx using a different general predicate");
5454
5455 // Instruction element type must match the movprfx.
5456 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5457 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5458 " predicated movprfx with a different element size");
5459 }
5460 }
5461
5462 // ARM64EC restricts the set of usable registers. Warn when an explicitly
5463 // disallowed register is used.
5464 if (IsWindowsArm64EC) {
5465 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5466 if (Inst.getOperand(i).isReg()) {
5467 MCRegister Reg = Inst.getOperand(i).getReg();
5468 // At this point, vector registers are matched to their
5469 // appropriately sized alias.
5470 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5471 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5472 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5473 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5474 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5475 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5476 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5477 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5478 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5479 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5480 Warning(L: IDLoc, Msg: "register " + Twine(RI->getName(RegNo: Reg)) +
5481 " is disallowed on ARM64EC.");
5482 }
5483 }
5484 }
5485 }
5486
5487 // Check for indexed addressing modes where the base register is the
5488 // same as a destination/source register, or for pair loads where
5489 // Rt == Rt2. All of those are unpredictable.
5490 switch (Inst.getOpcode()) {
5491 case AArch64::LDPSWpre:
5492 case AArch64::LDPWpost:
5493 case AArch64::LDPWpre:
5494 case AArch64::LDPXpost:
5495 case AArch64::LDPXpre: {
5496 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5497 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5498 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5499 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5500 return Error(L: Loc[0], Msg: "unpredictable LDP instruction, writeback base "
5501 "is also a destination");
5502 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5503 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, writeback base "
5504 "is also a destination");
5505 [[fallthrough]];
5506 }
5507 case AArch64::LDR_ZA:
5508 case AArch64::STR_ZA: {
5509 if (Inst.getOperand(i: 2).isImm() && Inst.getOperand(i: 4).isImm() &&
5510 Inst.getOperand(i: 2).getImm() != Inst.getOperand(i: 4).getImm())
5511 return Error(L: Loc[1],
5512 Msg: "unpredictable instruction, immediate and offset mismatch.");
5513 break;
5514 }
5515 case AArch64::LDPDi:
5516 case AArch64::LDPQi:
5517 case AArch64::LDPSi:
5518 case AArch64::LDPSWi:
5519 case AArch64::LDPWi:
5520 case AArch64::LDPXi: {
5521 MCRegister Rt = Inst.getOperand(i: 0).getReg();
5522 MCRegister Rt2 = Inst.getOperand(i: 1).getReg();
5523 if (Rt == Rt2)
5524 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5525 break;
5526 }
5527 case AArch64::LDPDpost:
5528 case AArch64::LDPDpre:
5529 case AArch64::LDPQpost:
5530 case AArch64::LDPQpre:
5531 case AArch64::LDPSpost:
5532 case AArch64::LDPSpre:
5533 case AArch64::LDPSWpost: {
5534 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5535 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5536 if (Rt == Rt2)
5537 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5538 break;
5539 }
5540 case AArch64::STPDpost:
5541 case AArch64::STPDpre:
5542 case AArch64::STPQpost:
5543 case AArch64::STPQpre:
5544 case AArch64::STPSpost:
5545 case AArch64::STPSpre:
5546 case AArch64::STPWpost:
5547 case AArch64::STPWpre:
5548 case AArch64::STPXpost:
5549 case AArch64::STPXpre: {
5550 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5551 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5552 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5553 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5554 return Error(L: Loc[0], Msg: "unpredictable STP instruction, writeback base "
5555 "is also a source");
5556 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5557 return Error(L: Loc[1], Msg: "unpredictable STP instruction, writeback base "
5558 "is also a source");
5559 break;
5560 }
5561 case AArch64::LDRBBpre:
5562 case AArch64::LDRBpre:
5563 case AArch64::LDRHHpre:
5564 case AArch64::LDRHpre:
5565 case AArch64::LDRSBWpre:
5566 case AArch64::LDRSBXpre:
5567 case AArch64::LDRSHWpre:
5568 case AArch64::LDRSHXpre:
5569 case AArch64::LDRSWpre:
5570 case AArch64::LDRWpre:
5571 case AArch64::LDRXpre:
5572 case AArch64::LDRBBpost:
5573 case AArch64::LDRBpost:
5574 case AArch64::LDRHHpost:
5575 case AArch64::LDRHpost:
5576 case AArch64::LDRSBWpost:
5577 case AArch64::LDRSBXpost:
5578 case AArch64::LDRSHWpost:
5579 case AArch64::LDRSHXpost:
5580 case AArch64::LDRSWpost:
5581 case AArch64::LDRWpost:
5582 case AArch64::LDRXpost: {
5583 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5584 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5585 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5586 return Error(L: Loc[0], Msg: "unpredictable LDR instruction, writeback base "
5587 "is also a source");
5588 break;
5589 }
5590 case AArch64::STRBBpost:
5591 case AArch64::STRBpost:
5592 case AArch64::STRHHpost:
5593 case AArch64::STRHpost:
5594 case AArch64::STRWpost:
5595 case AArch64::STRXpost:
5596 case AArch64::STRBBpre:
5597 case AArch64::STRBpre:
5598 case AArch64::STRHHpre:
5599 case AArch64::STRHpre:
5600 case AArch64::STRWpre:
5601 case AArch64::STRXpre: {
5602 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5603 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5604 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5605 return Error(L: Loc[0], Msg: "unpredictable STR instruction, writeback base "
5606 "is also a source");
5607 break;
5608 }
5609 case AArch64::STXRB:
5610 case AArch64::STXRH:
5611 case AArch64::STXRW:
5612 case AArch64::STXRX:
5613 case AArch64::STLXRB:
5614 case AArch64::STLXRH:
5615 case AArch64::STLXRW:
5616 case AArch64::STLXRX: {
5617 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5618 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5619 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5620 if (RI->isSubRegisterEq(RegA: Rt, RegB: Rs) ||
5621 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5622 return Error(L: Loc[0],
5623 Msg: "unpredictable STXR instruction, status is also a source");
5624 break;
5625 }
5626 case AArch64::STXPW:
5627 case AArch64::STXPX:
5628 case AArch64::STLXPW:
5629 case AArch64::STLXPX: {
5630 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5631 MCRegister Rt1 = Inst.getOperand(i: 1).getReg();
5632 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5633 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5634 if (RI->isSubRegisterEq(RegA: Rt1, RegB: Rs) || RI->isSubRegisterEq(RegA: Rt2, RegB: Rs) ||
5635 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5636 return Error(L: Loc[0],
5637 Msg: "unpredictable STXP instruction, status is also a source");
5638 break;
5639 }
5640 case AArch64::LDRABwriteback:
5641 case AArch64::LDRAAwriteback: {
5642 MCRegister Xt = Inst.getOperand(i: 0).getReg();
5643 MCRegister Xn = Inst.getOperand(i: 1).getReg();
5644 if (Xt == Xn)
5645 return Error(L: Loc[0],
5646 Msg: "unpredictable LDRA instruction, writeback base"
5647 " is also a destination");
5648 break;
5649 }
5650 }
5651
5652 // Check v8.8-A memops instructions.
5653 switch (Inst.getOpcode()) {
5654 case AArch64::CPYFP:
5655 case AArch64::CPYFPWN:
5656 case AArch64::CPYFPRN:
5657 case AArch64::CPYFPN:
5658 case AArch64::CPYFPWT:
5659 case AArch64::CPYFPWTWN:
5660 case AArch64::CPYFPWTRN:
5661 case AArch64::CPYFPWTN:
5662 case AArch64::CPYFPRT:
5663 case AArch64::CPYFPRTWN:
5664 case AArch64::CPYFPRTRN:
5665 case AArch64::CPYFPRTN:
5666 case AArch64::CPYFPT:
5667 case AArch64::CPYFPTWN:
5668 case AArch64::CPYFPTRN:
5669 case AArch64::CPYFPTN:
5670 case AArch64::CPYFM:
5671 case AArch64::CPYFMWN:
5672 case AArch64::CPYFMRN:
5673 case AArch64::CPYFMN:
5674 case AArch64::CPYFMWT:
5675 case AArch64::CPYFMWTWN:
5676 case AArch64::CPYFMWTRN:
5677 case AArch64::CPYFMWTN:
5678 case AArch64::CPYFMRT:
5679 case AArch64::CPYFMRTWN:
5680 case AArch64::CPYFMRTRN:
5681 case AArch64::CPYFMRTN:
5682 case AArch64::CPYFMT:
5683 case AArch64::CPYFMTWN:
5684 case AArch64::CPYFMTRN:
5685 case AArch64::CPYFMTN:
5686 case AArch64::CPYFE:
5687 case AArch64::CPYFEWN:
5688 case AArch64::CPYFERN:
5689 case AArch64::CPYFEN:
5690 case AArch64::CPYFEWT:
5691 case AArch64::CPYFEWTWN:
5692 case AArch64::CPYFEWTRN:
5693 case AArch64::CPYFEWTN:
5694 case AArch64::CPYFERT:
5695 case AArch64::CPYFERTWN:
5696 case AArch64::CPYFERTRN:
5697 case AArch64::CPYFERTN:
5698 case AArch64::CPYFET:
5699 case AArch64::CPYFETWN:
5700 case AArch64::CPYFETRN:
5701 case AArch64::CPYFETN:
5702 case AArch64::CPYP:
5703 case AArch64::CPYPWN:
5704 case AArch64::CPYPRN:
5705 case AArch64::CPYPN:
5706 case AArch64::CPYPWT:
5707 case AArch64::CPYPWTWN:
5708 case AArch64::CPYPWTRN:
5709 case AArch64::CPYPWTN:
5710 case AArch64::CPYPRT:
5711 case AArch64::CPYPRTWN:
5712 case AArch64::CPYPRTRN:
5713 case AArch64::CPYPRTN:
5714 case AArch64::CPYPT:
5715 case AArch64::CPYPTWN:
5716 case AArch64::CPYPTRN:
5717 case AArch64::CPYPTN:
5718 case AArch64::CPYM:
5719 case AArch64::CPYMWN:
5720 case AArch64::CPYMRN:
5721 case AArch64::CPYMN:
5722 case AArch64::CPYMWT:
5723 case AArch64::CPYMWTWN:
5724 case AArch64::CPYMWTRN:
5725 case AArch64::CPYMWTN:
5726 case AArch64::CPYMRT:
5727 case AArch64::CPYMRTWN:
5728 case AArch64::CPYMRTRN:
5729 case AArch64::CPYMRTN:
5730 case AArch64::CPYMT:
5731 case AArch64::CPYMTWN:
5732 case AArch64::CPYMTRN:
5733 case AArch64::CPYMTN:
5734 case AArch64::CPYE:
5735 case AArch64::CPYEWN:
5736 case AArch64::CPYERN:
5737 case AArch64::CPYEN:
5738 case AArch64::CPYEWT:
5739 case AArch64::CPYEWTWN:
5740 case AArch64::CPYEWTRN:
5741 case AArch64::CPYEWTN:
5742 case AArch64::CPYERT:
5743 case AArch64::CPYERTWN:
5744 case AArch64::CPYERTRN:
5745 case AArch64::CPYERTN:
5746 case AArch64::CPYET:
5747 case AArch64::CPYETWN:
5748 case AArch64::CPYETRN:
5749 case AArch64::CPYETN: {
5750 MCRegister Xd_wb = Inst.getOperand(i: 0).getReg();
5751 MCRegister Xs_wb = Inst.getOperand(i: 1).getReg();
5752 MCRegister Xn_wb = Inst.getOperand(i: 2).getReg();
5753 MCRegister Xd = Inst.getOperand(i: 3).getReg();
5754 MCRegister Xs = Inst.getOperand(i: 4).getReg();
5755 MCRegister Xn = Inst.getOperand(i: 5).getReg();
5756 if (Xd_wb != Xd)
5757 return Error(L: Loc[0],
5758 Msg: "invalid CPY instruction, Xd_wb and Xd do not match");
5759 if (Xs_wb != Xs)
5760 return Error(L: Loc[0],
5761 Msg: "invalid CPY instruction, Xs_wb and Xs do not match");
5762 if (Xn_wb != Xn)
5763 return Error(L: Loc[0],
5764 Msg: "invalid CPY instruction, Xn_wb and Xn do not match");
5765 if (Xd == Xs)
5766 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and source"
5767 " registers are the same");
5768 if (Xd == Xn)
5769 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and size"
5770 " registers are the same");
5771 if (Xs == Xn)
5772 return Error(L: Loc[0], Msg: "invalid CPY instruction, source and size"
5773 " registers are the same");
5774 break;
5775 }
5776 case AArch64::SETP:
5777 case AArch64::SETPT:
5778 case AArch64::SETPN:
5779 case AArch64::SETPTN:
5780 case AArch64::SETM:
5781 case AArch64::SETMT:
5782 case AArch64::SETMN:
5783 case AArch64::SETMTN:
5784 case AArch64::SETE:
5785 case AArch64::SETET:
5786 case AArch64::SETEN:
5787 case AArch64::SETETN:
5788 case AArch64::SETGP:
5789 case AArch64::SETGPT:
5790 case AArch64::SETGPN:
5791 case AArch64::SETGPTN:
5792 case AArch64::SETGM:
5793 case AArch64::SETGMT:
5794 case AArch64::SETGMN:
5795 case AArch64::SETGMTN:
5796 case AArch64::MOPSSETGE:
5797 case AArch64::MOPSSETGET:
5798 case AArch64::MOPSSETGEN:
5799 case AArch64::MOPSSETGETN: {
5800 MCRegister Xd_wb = Inst.getOperand(i: 0).getReg();
5801 MCRegister Xn_wb = Inst.getOperand(i: 1).getReg();
5802 MCRegister Xd = Inst.getOperand(i: 2).getReg();
5803 MCRegister Xn = Inst.getOperand(i: 3).getReg();
5804 MCRegister Xm = Inst.getOperand(i: 4).getReg();
5805 if (Xd_wb != Xd)
5806 return Error(L: Loc[0],
5807 Msg: "invalid SET instruction, Xd_wb and Xd do not match");
5808 if (Xn_wb != Xn)
5809 return Error(L: Loc[0],
5810 Msg: "invalid SET instruction, Xn_wb and Xn do not match");
5811 if (Xd == Xn)
5812 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
5813 " registers are the same");
5814 if (Xd == Xm)
5815 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and source"
5816 " registers are the same");
5817 if (Xn == Xm)
5818 return Error(L: Loc[0], Msg: "invalid SET instruction, source and size"
5819 " registers are the same");
5820 break;
5821 }
5822 }
5823
5824 // Now check immediate ranges. Separate from the above as there is overlap
5825 // in the instructions being checked and this keeps the nested conditionals
5826 // to a minimum.
5827 switch (Inst.getOpcode()) {
5828 case AArch64::ADDSWri:
5829 case AArch64::ADDSXri:
5830 case AArch64::ADDWri:
5831 case AArch64::ADDXri:
5832 case AArch64::SUBSWri:
5833 case AArch64::SUBSXri:
5834 case AArch64::SUBWri:
5835 case AArch64::SUBXri: {
5836 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5837 // some slight duplication here.
5838 if (Inst.getOperand(i: 2).isExpr()) {
5839 const MCExpr *Expr = Inst.getOperand(i: 2).getExpr();
5840 AArch64::Specifier ELFSpec;
5841 AArch64::Specifier DarwinSpec;
5842 int64_t Addend;
5843 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
5844
5845 // Only allow these with ADDXri.
5846 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
5847 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
5848 Inst.getOpcode() == AArch64::ADDXri)
5849 return false;
5850
5851 // Only allow these with ADDXri/ADDWri
5852 if (llvm::is_contained(
5853 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
5854 AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
5855 AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
5856 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
5857 AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
5858 AArch64::S_SECREL_LO12, AArch64::S_SECREL_HI12},
5859 Element: ELFSpec) &&
5860 (Inst.getOpcode() == AArch64::ADDXri ||
5861 Inst.getOpcode() == AArch64::ADDWri))
5862 return false;
5863
5864 // Otherwise, don't allow symbol refs in the immediate field.
5865 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5866 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5867 // 'cmp w0, borked').
5868 return Error(L: Loc.back(), Msg: "invalid immediate expression");
5869 }
5870 // We don't validate more complex expressions here
5871 }
5872 return false;
5873 }
5874 default:
5875 return false;
5876 }
5877}
5878
5879static std::string AArch64MnemonicSpellCheck(StringRef S,
5880 const FeatureBitset &FBS,
5881 unsigned VariantID = 0);
5882
5883bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5884 uint64_t ErrorInfo,
5885 OperandVector &Operands) {
5886 switch (ErrCode) {
5887 case Match_InvalidTiedOperand: {
5888 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5889 if (Op.isVectorList())
5890 return Error(L: Loc, Msg: "operand must match destination register list");
5891
5892 assert(Op.isReg() && "Unexpected operand type");
5893 switch (Op.getRegEqualityTy()) {
5894 case RegConstraintEqualityTy::EqualsSubReg:
5895 return Error(L: Loc, Msg: "operand must be 64-bit form of destination register");
5896 case RegConstraintEqualityTy::EqualsSuperReg:
5897 return Error(L: Loc, Msg: "operand must be 32-bit form of destination register");
5898 case RegConstraintEqualityTy::EqualsReg:
5899 return Error(L: Loc, Msg: "operand must match destination register");
5900 }
5901 llvm_unreachable("Unknown RegConstraintEqualityTy");
5902 }
5903 case Match_MissingFeature:
5904 return Error(L: Loc,
5905 Msg: "instruction requires a CPU feature not currently enabled");
5906 case Match_InvalidOperand:
5907 return Error(L: Loc, Msg: "invalid operand for instruction");
5908 case Match_InvalidSuffix:
5909 return Error(L: Loc, Msg: "invalid type suffix for instruction");
5910 case Match_InvalidCondCode:
5911 return Error(L: Loc, Msg: "expected AArch64 condition code");
5912 case Match_AddSubRegExtendSmall:
5913 return Error(L: Loc,
5914 Msg: "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5915 case Match_AddSubRegExtendLarge:
5916 return Error(L: Loc,
5917 Msg: "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5918 case Match_AddSubSecondSource:
5919 return Error(L: Loc,
5920 Msg: "expected compatible register, symbol or integer in range [0, 4095]");
5921 case Match_LogicalSecondSource:
5922 return Error(L: Loc, Msg: "expected compatible register or logical immediate");
5923 case Match_InvalidMovImm32Shift:
5924 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0 or 16");
5925 case Match_InvalidMovImm64Shift:
5926 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0, 16, 32 or 48");
5927 case Match_AddSubRegShift32:
5928 return Error(L: Loc,
5929 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5930 case Match_AddSubRegShift64:
5931 return Error(L: Loc,
5932 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5933 case Match_InvalidFPImm:
5934 return Error(L: Loc,
5935 Msg: "expected compatible register or floating-point constant");
5936 case Match_InvalidMemoryIndexedSImm6:
5937 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
5938 case Match_InvalidMemoryIndexedSImm5:
5939 return Error(L: Loc, Msg: "index must be an integer in range [-16, 15].");
5940 case Match_InvalidMemoryIndexed1SImm4:
5941 return Error(L: Loc, Msg: "index must be an integer in range [-8, 7].");
5942 case Match_InvalidMemoryIndexed2SImm4:
5943 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [-16, 14].");
5944 case Match_InvalidMemoryIndexed3SImm4:
5945 return Error(L: Loc, Msg: "index must be a multiple of 3 in range [-24, 21].");
5946 case Match_InvalidMemoryIndexed4SImm4:
5947 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-32, 28].");
5948 case Match_InvalidMemoryIndexed16SImm4:
5949 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-128, 112].");
5950 case Match_InvalidMemoryIndexed32SImm4:
5951 return Error(L: Loc, Msg: "index must be a multiple of 32 in range [-256, 224].");
5952 case Match_InvalidMemoryIndexed1SImm6:
5953 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
5954 case Match_InvalidMemoryIndexedSImm8:
5955 return Error(L: Loc, Msg: "index must be an integer in range [-128, 127].");
5956 case Match_InvalidMemoryIndexedSImm9:
5957 return Error(L: Loc, Msg: "index must be an integer in range [-256, 255].");
5958 case Match_InvalidMemoryIndexed16SImm9:
5959 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-4096, 4080].");
5960 case Match_InvalidMemoryIndexed8SImm10:
5961 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-4096, 4088].");
5962 case Match_InvalidMemoryIndexed4SImm7:
5963 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-256, 252].");
5964 case Match_InvalidMemoryIndexed8SImm7:
5965 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-512, 504].");
5966 case Match_InvalidMemoryIndexed16SImm7:
5967 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-1024, 1008].");
5968 case Match_InvalidMemoryIndexed8UImm5:
5969 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 248].");
5970 case Match_InvalidMemoryIndexed8UImm3:
5971 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 56].");
5972 case Match_InvalidMemoryIndexed4UImm5:
5973 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 124].");
5974 case Match_InvalidMemoryIndexed2UImm5:
5975 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 62].");
5976 case Match_InvalidMemoryIndexed8UImm6:
5977 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 504].");
5978 case Match_InvalidMemoryIndexed16UImm6:
5979 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 1008].");
5980 case Match_InvalidMemoryIndexed4UImm6:
5981 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 252].");
5982 case Match_InvalidMemoryIndexed2UImm6:
5983 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 126].");
5984 case Match_InvalidMemoryIndexed1UImm6:
5985 return Error(L: Loc, Msg: "index must be in range [0, 63].");
5986 case Match_InvalidMemoryWExtend8:
5987 return Error(L: Loc,
5988 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0");
5989 case Match_InvalidMemoryWExtend16:
5990 return Error(L: Loc,
5991 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5992 case Match_InvalidMemoryWExtend32:
5993 return Error(L: Loc,
5994 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5995 case Match_InvalidMemoryWExtend64:
5996 return Error(L: Loc,
5997 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5998 case Match_InvalidMemoryWExtend128:
5999 return Error(L: Loc,
6000 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6001 case Match_InvalidMemoryXExtend8:
6002 return Error(L: Loc,
6003 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0");
6004 case Match_InvalidMemoryXExtend16:
6005 return Error(L: Loc,
6006 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6007 case Match_InvalidMemoryXExtend32:
6008 return Error(L: Loc,
6009 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6010 case Match_InvalidMemoryXExtend64:
6011 return Error(L: Loc,
6012 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6013 case Match_InvalidMemoryXExtend128:
6014 return Error(L: Loc,
6015 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6016 case Match_InvalidMemoryIndexed1:
6017 return Error(L: Loc, Msg: "index must be an integer in range [0, 4095].");
6018 case Match_InvalidMemoryIndexed2:
6019 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 8190].");
6020 case Match_InvalidMemoryIndexed4:
6021 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 16380].");
6022 case Match_InvalidMemoryIndexed8:
6023 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 32760].");
6024 case Match_InvalidMemoryIndexed16:
6025 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 65520].");
6026 case Match_InvalidImm0_0:
6027 return Error(L: Loc, Msg: "immediate must be 0.");
6028 case Match_InvalidImm0_1:
6029 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 1].");
6030 case Match_InvalidImm0_3:
6031 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 3].");
6032 case Match_InvalidImm0_7:
6033 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 7].");
6034 case Match_InvalidImm0_15:
6035 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 15].");
6036 case Match_InvalidImm0_31:
6037 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 31].");
6038 case Match_InvalidImm0_63:
6039 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 63].");
6040 case Match_InvalidImm0_127:
6041 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 127].");
6042 case Match_InvalidImm0_255:
6043 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255].");
6044 case Match_InvalidImm0_65535:
6045 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 65535].");
6046 case Match_InvalidImm1_8:
6047 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 8].");
6048 case Match_InvalidImm1_16:
6049 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 16].");
6050 case Match_InvalidImm1_32:
6051 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 32].");
6052 case Match_InvalidImm1_64:
6053 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 64].");
6054 case Match_InvalidImmM1_62:
6055 return Error(L: Loc, Msg: "immediate must be an integer in range [-1, 62].");
6056 case Match_InvalidMemoryIndexedRange2UImm0:
6057 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:1.");
6058 case Match_InvalidMemoryIndexedRange2UImm1:
6059 return Error(L: Loc, Msg: "vector select offset must be an immediate range of the "
6060 "form <immf>:<imml>, where the first "
6061 "immediate is a multiple of 2 in the range [0, 2], and "
6062 "the second immediate is immf + 1.");
6063 case Match_InvalidMemoryIndexedRange2UImm2:
6064 case Match_InvalidMemoryIndexedRange2UImm3:
6065 return Error(
6066 L: Loc,
6067 Msg: "vector select offset must be an immediate range of the form "
6068 "<immf>:<imml>, "
6069 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6070 "[0, 14] "
6071 "depending on the instruction, and the second immediate is immf + 1.");
6072 case Match_InvalidMemoryIndexedRange4UImm0:
6073 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:3.");
6074 case Match_InvalidMemoryIndexedRange4UImm1:
6075 case Match_InvalidMemoryIndexedRange4UImm2:
6076 return Error(
6077 L: Loc,
6078 Msg: "vector select offset must be an immediate range of the form "
6079 "<immf>:<imml>, "
6080 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6081 "[0, 12] "
6082 "depending on the instruction, and the second immediate is immf + 3.");
6083 case Match_InvalidSVEAddSubImm8:
6084 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255]"
6085 " with a shift amount of 0");
6086 case Match_InvalidSVEAddSubImm16:
6087 case Match_InvalidSVEAddSubImm32:
6088 case Match_InvalidSVEAddSubImm64:
6089 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255] or a "
6090 "multiple of 256 in range [256, 65280]");
6091 case Match_InvalidSVECpyImm8:
6092 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 255]"
6093 " with a shift amount of 0");
6094 case Match_InvalidSVECpyImm16:
6095 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6096 "multiple of 256 in range [-32768, 65280]");
6097 case Match_InvalidSVECpyImm32:
6098 case Match_InvalidSVECpyImm64:
6099 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6100 "multiple of 256 in range [-32768, 32512]");
6101 case Match_InvalidIndexRange0_0:
6102 return Error(L: Loc, Msg: "expected lane specifier '[0]'");
6103 case Match_InvalidIndexRange1_1:
6104 return Error(L: Loc, Msg: "expected lane specifier '[1]'");
6105 case Match_InvalidIndexRange0_15:
6106 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6107 case Match_InvalidIndexRange0_7:
6108 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6109 case Match_InvalidIndexRange0_3:
6110 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6111 case Match_InvalidIndexRange0_1:
6112 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 1].");
6113 case Match_InvalidSVEIndexRange0_63:
6114 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 63].");
6115 case Match_InvalidSVEIndexRange0_31:
6116 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 31].");
6117 case Match_InvalidSVEIndexRange0_15:
6118 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6119 case Match_InvalidSVEIndexRange0_7:
6120 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6121 case Match_InvalidSVEIndexRange0_3:
6122 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6123 case Match_InvalidLabel:
6124 return Error(L: Loc, Msg: "expected label or encodable integer pc offset");
6125 case Match_MRS:
6126 return Error(L: Loc, Msg: "expected readable system register");
6127 case Match_MSR:
6128 case Match_InvalidSVCR:
6129 return Error(L: Loc, Msg: "expected writable system register or pstate");
6130 case Match_InvalidComplexRotationEven:
6131 return Error(L: Loc, Msg: "complex rotation must be 0, 90, 180 or 270.");
6132 case Match_InvalidComplexRotationOdd:
6133 return Error(L: Loc, Msg: "complex rotation must be 90 or 270.");
6134 case Match_MnemonicFail: {
6135 std::string Suggestion = AArch64MnemonicSpellCheck(
6136 S: ((AArch64Operand &)*Operands[0]).getToken(),
6137 FBS: ComputeAvailableFeatures(FB: STI->getFeatureBits()));
6138 return Error(L: Loc, Msg: "unrecognized instruction mnemonic" + Suggestion);
6139 }
6140 case Match_InvalidGPR64shifted8:
6141 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, without shift");
6142 case Match_InvalidGPR64shifted16:
6143 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6144 case Match_InvalidGPR64shifted32:
6145 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6146 case Match_InvalidGPR64shifted64:
6147 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6148 case Match_InvalidGPR64shifted128:
6149 return Error(
6150 L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6151 case Match_InvalidGPR64NoXZRshifted8:
6152 return Error(L: Loc, Msg: "register must be x0..x30 without shift");
6153 case Match_InvalidGPR64NoXZRshifted16:
6154 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #1'");
6155 case Match_InvalidGPR64NoXZRshifted32:
6156 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #2'");
6157 case Match_InvalidGPR64NoXZRshifted64:
6158 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #3'");
6159 case Match_InvalidGPR64NoXZRshifted128:
6160 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #4'");
6161 case Match_InvalidZPR32UXTW8:
6162 case Match_InvalidZPR32SXTW8:
6163 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6164 case Match_InvalidZPR32UXTW16:
6165 case Match_InvalidZPR32SXTW16:
6166 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6167 case Match_InvalidZPR32UXTW32:
6168 case Match_InvalidZPR32SXTW32:
6169 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6170 case Match_InvalidZPR32UXTW64:
6171 case Match_InvalidZPR32SXTW64:
6172 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6173 case Match_InvalidZPR64UXTW8:
6174 case Match_InvalidZPR64SXTW8:
6175 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6176 case Match_InvalidZPR64UXTW16:
6177 case Match_InvalidZPR64SXTW16:
6178 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6179 case Match_InvalidZPR64UXTW32:
6180 case Match_InvalidZPR64SXTW32:
6181 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6182 case Match_InvalidZPR64UXTW64:
6183 case Match_InvalidZPR64SXTW64:
6184 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6185 case Match_InvalidZPR32LSL8:
6186 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s'");
6187 case Match_InvalidZPR32LSL16:
6188 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6189 case Match_InvalidZPR32LSL32:
6190 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6191 case Match_InvalidZPR32LSL64:
6192 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6193 case Match_InvalidZPR64LSL8:
6194 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d'");
6195 case Match_InvalidZPR64LSL16:
6196 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6197 case Match_InvalidZPR64LSL32:
6198 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6199 case Match_InvalidZPR64LSL64:
6200 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6201 case Match_InvalidZPR0:
6202 return Error(L: Loc, Msg: "expected register without element width suffix");
6203 case Match_InvalidZPR8:
6204 case Match_InvalidZPR16:
6205 case Match_InvalidZPR32:
6206 case Match_InvalidZPR64:
6207 case Match_InvalidZPR128:
6208 return Error(L: Loc, Msg: "invalid element width");
6209 case Match_InvalidZPR_3b8:
6210 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.b..z7.b");
6211 case Match_InvalidZPR_3b16:
6212 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z7.h");
6213 case Match_InvalidZPR_3b32:
6214 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z7.s");
6215 case Match_InvalidZPR_4b8:
6216 return Error(L: Loc,
6217 Msg: "Invalid restricted vector register, expected z0.b..z15.b");
6218 case Match_InvalidZPR_4b16:
6219 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z15.h");
6220 case Match_InvalidZPR_4b32:
6221 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z15.s");
6222 case Match_InvalidZPR_4b64:
6223 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.d..z15.d");
6224 case Match_InvalidZPRMul2_Lo8:
6225 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6226 "register in z0.b..z14.b");
6227 case Match_InvalidZPRMul2_Hi8:
6228 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6229 "register in z16.b..z30.b");
6230 case Match_InvalidZPRMul2_Lo16:
6231 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6232 "register in z0.h..z14.h");
6233 case Match_InvalidZPRMul2_Hi16:
6234 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6235 "register in z16.h..z30.h");
6236 case Match_InvalidZPRMul2_Lo32:
6237 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6238 "register in z0.s..z14.s");
6239 case Match_InvalidZPRMul2_Hi32:
6240 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6241 "register in z16.s..z30.s");
6242 case Match_InvalidZPRMul2_Lo64:
6243 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6244 "register in z0.d..z14.d");
6245 case Match_InvalidZPRMul2_Hi64:
6246 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6247 "register in z16.d..z30.d");
6248 case Match_InvalidZPR_K0:
6249 return Error(L: Loc, Msg: "invalid restricted vector register, expected register "
6250 "in z20..z23 or z28..z31");
6251 case Match_InvalidSVEPattern:
6252 return Error(L: Loc, Msg: "invalid predicate pattern");
6253 case Match_InvalidSVEPPRorPNRAnyReg:
6254 case Match_InvalidSVEPPRorPNRBReg:
6255 case Match_InvalidSVEPredicateAnyReg:
6256 case Match_InvalidSVEPredicateBReg:
6257 case Match_InvalidSVEPredicateHReg:
6258 case Match_InvalidSVEPredicateSReg:
6259 case Match_InvalidSVEPredicateDReg:
6260 return Error(L: Loc, Msg: "invalid predicate register.");
6261 case Match_InvalidSVEPredicate3bAnyReg:
6262 return Error(L: Loc, Msg: "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6263 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6264 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6265 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6266 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6267 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6268 "pn8..pn15 with element suffix.");
6269 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6270 return Error(L: Loc, Msg: "invalid restricted predicate-as-counter register, "
6271 "expected pn8..pn15");
6272 case Match_InvalidSVEPNPredicateBReg:
6273 case Match_InvalidSVEPNPredicateHReg:
6274 case Match_InvalidSVEPNPredicateSReg:
6275 case Match_InvalidSVEPNPredicateDReg:
6276 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6277 "pn0..pn15 with element suffix.");
6278 case Match_InvalidSVEVecLenSpecifier:
6279 return Error(L: Loc, Msg: "Invalid vector length specifier, expected VLx2 or VLx4");
6280 case Match_InvalidSVEPredicateListMul2x8:
6281 case Match_InvalidSVEPredicateListMul2x16:
6282 case Match_InvalidSVEPredicateListMul2x32:
6283 case Match_InvalidSVEPredicateListMul2x64:
6284 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6285 "predicate registers, where the first vector is a multiple of 2 "
6286 "and with correct element type");
6287 case Match_InvalidSVEExactFPImmOperandHalfOne:
6288 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 1.0.");
6289 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6290 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 2.0.");
6291 case Match_InvalidSVEExactFPImmOperandZeroOne:
6292 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.0 or 1.0.");
6293 case Match_InvalidMatrixTileVectorH8:
6294 case Match_InvalidMatrixTileVectorV8:
6295 return Error(L: Loc, Msg: "invalid matrix operand, expected za0h.b or za0v.b");
6296 case Match_InvalidMatrixTileVectorH16:
6297 case Match_InvalidMatrixTileVectorV16:
6298 return Error(L: Loc,
6299 Msg: "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6300 case Match_InvalidMatrixTileVectorH32:
6301 case Match_InvalidMatrixTileVectorV32:
6302 return Error(L: Loc,
6303 Msg: "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6304 case Match_InvalidMatrixTileVectorH64:
6305 case Match_InvalidMatrixTileVectorV64:
6306 return Error(L: Loc,
6307 Msg: "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6308 case Match_InvalidMatrixTileVectorH128:
6309 case Match_InvalidMatrixTileVectorV128:
6310 return Error(L: Loc,
6311 Msg: "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6312 case Match_InvalidMatrixTile16:
6313 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-1].h");
6314 case Match_InvalidMatrixTile32:
6315 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-3].s");
6316 case Match_InvalidMatrixTile64:
6317 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-7].d");
6318 case Match_InvalidMatrix:
6319 return Error(L: Loc, Msg: "invalid matrix operand, expected za");
6320 case Match_InvalidMatrix8:
6321 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .b");
6322 case Match_InvalidMatrix16:
6323 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .h");
6324 case Match_InvalidMatrix32:
6325 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .s");
6326 case Match_InvalidMatrix64:
6327 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .d");
6328 case Match_InvalidMatrixIndexGPR32_12_15:
6329 return Error(L: Loc, Msg: "operand must be a register in range [w12, w15]");
6330 case Match_InvalidMatrixIndexGPR32_8_11:
6331 return Error(L: Loc, Msg: "operand must be a register in range [w8, w11]");
6332 case Match_InvalidSVEVectorList2x8Mul2:
6333 case Match_InvalidSVEVectorList2x16Mul2:
6334 case Match_InvalidSVEVectorList2x32Mul2:
6335 case Match_InvalidSVEVectorList2x64Mul2:
6336 case Match_InvalidSVEVectorList2x128Mul2:
6337 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6338 "SVE vectors, where the first vector is a multiple of 2 "
6339 "and with matching element types");
6340 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6341 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6342 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6343 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6344 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6345 "SVE vectors in the range z0-z14, where the first vector "
6346 "is a multiple of 2 "
6347 "and with matching element types");
6348 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6349 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6350 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6351 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6352 return Error(L: Loc,
6353 Msg: "Invalid vector list, expected list with 2 consecutive "
6354 "SVE vectors in the range z16-z30, where the first vector "
6355 "is a multiple of 2 "
6356 "and with matching element types");
6357 case Match_InvalidSVEVectorList4x8Mul4:
6358 case Match_InvalidSVEVectorList4x16Mul4:
6359 case Match_InvalidSVEVectorList4x32Mul4:
6360 case Match_InvalidSVEVectorList4x64Mul4:
6361 case Match_InvalidSVEVectorList4x128Mul4:
6362 return Error(L: Loc, Msg: "Invalid vector list, expected list with 4 consecutive "
6363 "SVE vectors, where the first vector is a multiple of 4 "
6364 "and with matching element types");
6365 case Match_InvalidLookupTable:
6366 return Error(L: Loc, Msg: "Invalid lookup table, expected zt0");
6367 case Match_InvalidSVEVectorListStrided2x8:
6368 case Match_InvalidSVEVectorListStrided2x16:
6369 case Match_InvalidSVEVectorListStrided2x32:
6370 case Match_InvalidSVEVectorListStrided2x64:
6371 return Error(
6372 L: Loc,
6373 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6374 "8 registers apart, and the first register in the range [z0, z7] or "
6375 "[z16, z23] and with correct element type");
6376 case Match_InvalidSVEVectorListStrided4x8:
6377 case Match_InvalidSVEVectorListStrided4x16:
6378 case Match_InvalidSVEVectorListStrided4x32:
6379 case Match_InvalidSVEVectorListStrided4x64:
6380 return Error(
6381 L: Loc,
6382 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6383 "4 registers apart, and the first register in the range [z0, z3] or "
6384 "[z16, z19] and with correct element type");
6385 case Match_AddSubLSLImm3ShiftLarge:
6386 return Error(L: Loc,
6387 Msg: "expected 'lsl' with optional integer in range [0, 7]");
6388 default:
6389 llvm_unreachable("unexpected error code!");
6390 }
6391}
6392
6393static const char *getSubtargetFeatureName(uint64_t Val);
6394
6395bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6396 OperandVector &Operands,
6397 MCStreamer &Out,
6398 uint64_t &ErrorInfo,
6399 bool MatchingInlineAsm) {
6400 assert(!Operands.empty() && "Unexpected empty operand list!");
6401 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6402 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6403
6404 StringRef Tok = Op.getToken();
6405 unsigned NumOperands = Operands.size();
6406
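 // An "lsl" with an immediate operand is an alias of UBFM; the block below
 // rewrites it directly, e.g. "lsl w0, w1, #3" becomes "ubfm w0, w1, #29, #28"
 // (immr = (32 - shift) & 0x1f, imms = 31 - shift on a 32-bit register).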
6407 if (NumOperands == 4 && Tok == "lsl") {
6408 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6409 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6410 if (Op2.isScalarReg() && Op3.isImm()) {
6411 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6412 if (Op3CE) {
6413 uint64_t Op3Val = Op3CE->getValue();
6414 uint64_t NewOp3Val = 0;
6415 uint64_t NewOp4Val = 0;
6416 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6417 Reg: Op2.getReg())) {
6418 NewOp3Val = (32 - Op3Val) & 0x1f;
6419 NewOp4Val = 31 - Op3Val;
6420 } else {
6421 NewOp3Val = (64 - Op3Val) & 0x3f;
6422 NewOp4Val = 63 - Op3Val;
6423 }
6424
6425 const MCExpr *NewOp3 = MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6426 const MCExpr *NewOp4 = MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6427
6428 Operands[0] =
6429 AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(), Ctx&: getContext());
6430 Operands.push_back(Elt: AArch64Operand::CreateImm(
6431 Val: NewOp4, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext()));
6432 Operands[3] = AArch64Operand::CreateImm(Val: NewOp3, S: Op3.getStartLoc(),
6433 E: Op3.getEndLoc(), Ctx&: getContext());
6434 }
6435 }
6436 } else if (NumOperands == 4 && Tok == "bfc") {
6437 // FIXME: Horrible hack to handle BFC->BFM alias.
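 // e.g. "bfc w0, #3, #4" becomes "bfm w0, wzr, #29, #3"
 // (ImmR = (32 - LSB) & 0x1f, ImmS = Width - 1 on a 32-bit register).
 // Note: LSBOp and WidthOp below are deliberately copies, since the Operands
 // slots they came from are overwritten before the copies are last used.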
6438 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6439 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6440 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6441
6442 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6443 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(Val: LSBOp.getImm());
6444 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(Val: WidthOp.getImm());
6445
6446 if (LSBCE && WidthCE) {
6447 uint64_t LSB = LSBCE->getValue();
6448 uint64_t Width = WidthCE->getValue();
6449
6450 uint64_t RegWidth = 0;
6451 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6452 Reg: Op1.getReg()))
6453 RegWidth = 64;
6454 else
6455 RegWidth = 32;
6456
6457 if (LSB >= RegWidth)
6458 return Error(L: LSBOp.getStartLoc(),
6459 Msg: "expected integer in range [0, 31]");
6460 if (Width < 1 || Width > RegWidth)
6461 return Error(L: WidthOp.getStartLoc(),
6462 Msg: "expected integer in range [1, 32]");
6463
6464 uint64_t ImmR = 0;
6465 if (RegWidth == 32)
6466 ImmR = (32 - LSB) & 0x1f;
6467 else
6468 ImmR = (64 - LSB) & 0x3f;
6469
6470 uint64_t ImmS = Width - 1;
6471
6472 if (ImmR != 0 && ImmS >= ImmR)
6473 return Error(L: WidthOp.getStartLoc(),
6474 Msg: "requested insert overflows register");
6475
6476 const MCExpr *ImmRExpr = MCConstantExpr::create(Value: ImmR, Ctx&: getContext());
6477 const MCExpr *ImmSExpr = MCConstantExpr::create(Value: ImmS, Ctx&: getContext());
6478 Operands[0] =
6479 AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(), Ctx&: getContext());
6480 Operands[2] = AArch64Operand::CreateReg(
6481 RegNum: RegWidth == 32 ? AArch64::WZR : AArch64::XZR, Kind: RegKind::Scalar,
6482 S: SMLoc(), E: SMLoc(), Ctx&: getContext());
6483 Operands[3] = AArch64Operand::CreateImm(
6484 Val: ImmRExpr, S: LSBOp.getStartLoc(), E: LSBOp.getEndLoc(), Ctx&: getContext());
6485 Operands.emplace_back(
6486 Args: AArch64Operand::CreateImm(Val: ImmSExpr, S: WidthOp.getStartLoc(),
6487 E: WidthOp.getEndLoc(), Ctx&: getContext()));
6488 }
6489 }
6490 } else if (NumOperands == 5) {
6491 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6492 // UBFIZ -> UBFM aliases.
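 // e.g. "bfi w0, w1, #4, #8" becomes "bfm w0, w1, #28, #7"
 // (immr = (32 - lsb) & 0x1f, imms = width - 1 on a 32-bit register).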
6493 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6494 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6495 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6496 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6497
6498 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6499 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6500 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6501
6502 if (Op3CE && Op4CE) {
6503 uint64_t Op3Val = Op3CE->getValue();
6504 uint64_t Op4Val = Op4CE->getValue();
6505
6506 uint64_t RegWidth = 0;
6507 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6508 Reg: Op1.getReg()))
6509 RegWidth = 64;
6510 else
6511 RegWidth = 32;
6512
6513 if (Op3Val >= RegWidth)
6514 return Error(L: Op3.getStartLoc(),
6515 Msg: "expected integer in range [0, 31]");
6516 if (Op4Val < 1 || Op4Val > RegWidth)
6517 return Error(L: Op4.getStartLoc(),
6518 Msg: "expected integer in range [1, 32]");
6519
6520 uint64_t NewOp3Val = 0;
6521 if (RegWidth == 32)
6522 NewOp3Val = (32 - Op3Val) & 0x1f;
6523 else
6524 NewOp3Val = (64 - Op3Val) & 0x3f;
6525
6526 uint64_t NewOp4Val = Op4Val - 1;
6527
6528 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6529 return Error(L: Op4.getStartLoc(),
6530 Msg: "requested insert overflows register");
6531
6532 const MCExpr *NewOp3 =
6533 MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6534 const MCExpr *NewOp4 =
6535 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6536 Operands[3] = AArch64Operand::CreateImm(
6537 Val: NewOp3, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext());
6538 Operands[4] = AArch64Operand::CreateImm(
6539 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6540 if (Tok == "bfi")
6541 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6542 Ctx&: getContext());
6543 else if (Tok == "sbfiz")
6544 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6545 Ctx&: getContext());
6546 else if (Tok == "ubfiz")
6547 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6548 Ctx&: getContext());
6549 else
6550 llvm_unreachable("No valid mnemonic for alias?");
6551 }
6552 }
6553
6554 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6555 // UBFX -> UBFM aliases.
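 // e.g. "ubfx w0, w1, #4, #8" becomes "ubfm w0, w1, #4, #11"; only the
 // width operand changes, to lsb + width - 1.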
6556 } else if (NumOperands == 5 &&
6557 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6558 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6559 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6560 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6561
6562 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6563 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6564 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6565
6566 if (Op3CE && Op4CE) {
6567 uint64_t Op3Val = Op3CE->getValue();
6568 uint64_t Op4Val = Op4CE->getValue();
6569
6570 uint64_t RegWidth = 0;
6571 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6572 Reg: Op1.getReg()))
6573 RegWidth = 64;
6574 else
6575 RegWidth = 32;
6576
6577 if (Op3Val >= RegWidth)
6578 return Error(L: Op3.getStartLoc(),
6579 Msg: "expected integer in range [0, 31]");
6580 if (Op4Val < 1 || Op4Val > RegWidth)
6581 return Error(L: Op4.getStartLoc(),
6582 Msg: "expected integer in range [1, 32]");
6583
6584 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6585
6586 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6587 return Error(L: Op4.getStartLoc(),
6588 Msg: "requested extract overflows register");
6589
6590 const MCExpr *NewOp4 =
6591 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6592 Operands[4] = AArch64Operand::CreateImm(
6593 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6594 if (Tok == "bfxil")
6595 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6596 Ctx&: getContext());
6597 else if (Tok == "sbfx")
6598 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6599 Ctx&: getContext());
6600 else if (Tok == "ubfx")
6601 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6602 Ctx&: getContext());
6603 else
6604 llvm_unreachable("No valid mnemonic for alias?");
6605 }
6606 }
6607 }
6608 }
6609
6610 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6611 // instruction for FP registers correctly in some rare circumstances. Convert
6612 // it to a safe instruction and warn (because silently changing someone's
6613 // assembly is rude).
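 // e.g. "movi.2d v0, #0" is emitted as "movi.16b v0, #0", which zeroes the
 // same 128 bits.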
6614 if (getSTI().hasFeature(Feature: AArch64::FeatureZCZeroingFPWorkaround) &&
6615 NumOperands == 4 && Tok == "movi") {
6616 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6617 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6618 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6619 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6620 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6621 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6622 if (Suffix.lower() == ".2d" &&
6623 cast<MCConstantExpr>(Val: Op3.getImm())->getValue() == 0) {
6624 Warning(L: IDLoc, Msg: "instruction movi.2d with immediate #0 may not function"
6625 " correctly on this CPU, converting to equivalent movi.16b");
6626 // Switch the suffix to .16b.
6627 unsigned Idx = Op1.isToken() ? 1 : 2;
6628 Operands[Idx] =
6629 AArch64Operand::CreateToken(Str: ".16b", S: IDLoc, Ctx&: getContext());
6630 }
6631 }
6632 }
6633
6634 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6635 // InstAlias can't quite handle this since the reg classes aren't
6636 // subclasses.
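 // e.g. for "sxtw x0, w1" the source is respelled as x1 below so the GPR64
 // matcher accepts it; only the register number is encoded, so the result
 // is unchanged.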
6637 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6638 // The source register can be Wn here, but the matcher expects a
6639 // GPR64. Twiddle it here if necessary.
6640 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6641 if (Op.isScalarReg()) {
6642 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6643 Operands[2] = AArch64Operand::CreateReg(RegNum: Reg, Kind: RegKind::Scalar,
6644 S: Op.getStartLoc(), E: Op.getEndLoc(),
6645 Ctx&: getContext());
6646 }
6647 }
6648 // FIXME: Likewise for sxt[bh] with an Xd dst operand
6649 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6650 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6651 if (Op.isScalarReg() &&
6652 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6653 Reg: Op.getReg())) {
6654 // The source register can be Wn here, but the matcher expects a
6655 // GPR64. Twiddle it here if necessary.
6656 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6657 if (Op.isScalarReg()) {
6658 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6659 Operands[2] = AArch64Operand::CreateReg(RegNum: Reg, Kind: RegKind::Scalar,
6660 S: Op.getStartLoc(),
6661 E: Op.getEndLoc(), Ctx&: getContext());
6662 }
6663 }
6664 }
6665 // FIXME: Likewise for uxt[bh] with an Xd dst operand
6666 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6667 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6668 if (Op.isScalarReg() &&
6669 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6670 Reg: Op.getReg())) {
6671 // The destination register can be Xn here, but the matcher expects a
6672 // GPR32 (Wn). Twiddle it here if necessary.
6673 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6674 if (Op.isScalarReg()) {
6675 MCRegister Reg = getWRegFromXReg(Reg: Op.getReg());
6676 Operands[1] = AArch64Operand::CreateReg(RegNum: Reg, Kind: RegKind::Scalar,
6677 S: Op.getStartLoc(),
6678 E: Op.getEndLoc(), Ctx&: getContext());
6679 }
6680 }
6681 }
6682
6683 MCInst Inst;
6684 FeatureBitset MissingFeatures;
6685 // First try to match against the secondary set of tables containing the
6686 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6687 unsigned MatchResult =
6688 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6689 matchingInlineAsm: MatchingInlineAsm, VariantID: 1);
6690
6691 // If that fails, try against the alternate table containing long-form NEON:
6692 // "fadd v0.2s, v1.2s, v2.2s"
6693 if (MatchResult != Match_Success) {
6694 // But first, save the short-form match result: we can use it in case the
6695 // long-form match also fails.
6696 auto ShortFormNEONErrorInfo = ErrorInfo;
6697 auto ShortFormNEONMatchResult = MatchResult;
6698 auto ShortFormNEONMissingFeatures = MissingFeatures;
6699
6700 MatchResult =
6701 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6702 matchingInlineAsm: MatchingInlineAsm, VariantID: 0);
6703
6704 // Now, both matches failed, and the long-form match failed on the mnemonic
6705 // suffix token operand. The short-form match failure is probably more
6706 // relevant: use it instead.
6707 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6708 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6709 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6710 MatchResult = ShortFormNEONMatchResult;
6711 ErrorInfo = ShortFormNEONErrorInfo;
6712 MissingFeatures = ShortFormNEONMissingFeatures;
6713 }
6714 }
6715
6716 switch (MatchResult) {
6717 case Match_Success: {
6718 // Perform range checking and other semantic validations
6719 SmallVector<SMLoc, 8> OperandLocs;
6720 NumOperands = Operands.size();
6721 for (unsigned i = 1; i < NumOperands; ++i)
6722 OperandLocs.push_back(Elt: Operands[i]->getStartLoc());
6723 if (validateInstruction(Inst, IDLoc, Loc&: OperandLocs))
6724 return true;
6725
6726 Inst.setLoc(IDLoc);
6727 Out.emitInstruction(Inst, STI: getSTI());
6728 return false;
6729 }
6730 case Match_MissingFeature: {
6731 assert(MissingFeatures.any() && "Unknown missing feature!");
6732 // Special case the error message for the very common case where only
6733 // a single subtarget feature is missing (e.g. NEON).
6734 std::string Msg = "instruction requires:";
6735 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6736 if (MissingFeatures[i]) {
6737 Msg += " ";
6738 Msg += getSubtargetFeatureName(Val: i);
6739 }
6740 }
6741 return Error(L: IDLoc, Msg);
6742 }
6743 case Match_MnemonicFail:
6744 return showMatchError(Loc: IDLoc, ErrCode: MatchResult, ErrorInfo, Operands);
6745 case Match_InvalidOperand: {
6746 SMLoc ErrorLoc = IDLoc;
6747
6748 if (ErrorInfo != ~0ULL) {
6749 if (ErrorInfo >= Operands.size())
6750 return Error(L: IDLoc, Msg: "too few operands for instruction",
6751 Range: SMRange(IDLoc, getTok().getLoc()));
6752
6753 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6754 if (ErrorLoc == SMLoc())
6755 ErrorLoc = IDLoc;
6756 }
6757 // If the match failed on a suffix token operand, tweak the diagnostic
6758 // accordingly.
6759 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6760 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6761 MatchResult = Match_InvalidSuffix;
6762
6763 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
6764 }
6765 case Match_InvalidTiedOperand:
6766 case Match_InvalidMemoryIndexed1:
6767 case Match_InvalidMemoryIndexed2:
6768 case Match_InvalidMemoryIndexed4:
6769 case Match_InvalidMemoryIndexed8:
6770 case Match_InvalidMemoryIndexed16:
6771 case Match_InvalidCondCode:
6772 case Match_AddSubLSLImm3ShiftLarge:
6773 case Match_AddSubRegExtendSmall:
6774 case Match_AddSubRegExtendLarge:
6775 case Match_AddSubSecondSource:
6776 case Match_LogicalSecondSource:
6777 case Match_AddSubRegShift32:
6778 case Match_AddSubRegShift64:
6779 case Match_InvalidMovImm32Shift:
6780 case Match_InvalidMovImm64Shift:
6781 case Match_InvalidFPImm:
6782 case Match_InvalidMemoryWExtend8:
6783 case Match_InvalidMemoryWExtend16:
6784 case Match_InvalidMemoryWExtend32:
6785 case Match_InvalidMemoryWExtend64:
6786 case Match_InvalidMemoryWExtend128:
6787 case Match_InvalidMemoryXExtend8:
6788 case Match_InvalidMemoryXExtend16:
6789 case Match_InvalidMemoryXExtend32:
6790 case Match_InvalidMemoryXExtend64:
6791 case Match_InvalidMemoryXExtend128:
6792 case Match_InvalidMemoryIndexed1SImm4:
6793 case Match_InvalidMemoryIndexed2SImm4:
6794 case Match_InvalidMemoryIndexed3SImm4:
6795 case Match_InvalidMemoryIndexed4SImm4:
6796 case Match_InvalidMemoryIndexed1SImm6:
6797 case Match_InvalidMemoryIndexed16SImm4:
6798 case Match_InvalidMemoryIndexed32SImm4:
6799 case Match_InvalidMemoryIndexed4SImm7:
6800 case Match_InvalidMemoryIndexed8SImm7:
6801 case Match_InvalidMemoryIndexed16SImm7:
6802 case Match_InvalidMemoryIndexed8UImm5:
6803 case Match_InvalidMemoryIndexed8UImm3:
6804 case Match_InvalidMemoryIndexed4UImm5:
6805 case Match_InvalidMemoryIndexed2UImm5:
6806 case Match_InvalidMemoryIndexed1UImm6:
6807 case Match_InvalidMemoryIndexed2UImm6:
6808 case Match_InvalidMemoryIndexed4UImm6:
6809 case Match_InvalidMemoryIndexed8UImm6:
6810 case Match_InvalidMemoryIndexed16UImm6:
6811 case Match_InvalidMemoryIndexedSImm6:
6812 case Match_InvalidMemoryIndexedSImm5:
6813 case Match_InvalidMemoryIndexedSImm8:
6814 case Match_InvalidMemoryIndexedSImm9:
6815 case Match_InvalidMemoryIndexed16SImm9:
6816 case Match_InvalidMemoryIndexed8SImm10:
6817 case Match_InvalidImm0_0:
6818 case Match_InvalidImm0_1:
6819 case Match_InvalidImm0_3:
6820 case Match_InvalidImm0_7:
6821 case Match_InvalidImm0_15:
6822 case Match_InvalidImm0_31:
6823 case Match_InvalidImm0_63:
6824 case Match_InvalidImm0_127:
6825 case Match_InvalidImm0_255:
6826 case Match_InvalidImm0_65535:
6827 case Match_InvalidImm1_8:
6828 case Match_InvalidImm1_16:
6829 case Match_InvalidImm1_32:
6830 case Match_InvalidImm1_64:
6831 case Match_InvalidImmM1_62:
6832 case Match_InvalidMemoryIndexedRange2UImm0:
6833 case Match_InvalidMemoryIndexedRange2UImm1:
6834 case Match_InvalidMemoryIndexedRange2UImm2:
6835 case Match_InvalidMemoryIndexedRange2UImm3:
6836 case Match_InvalidMemoryIndexedRange4UImm0:
6837 case Match_InvalidMemoryIndexedRange4UImm1:
6838 case Match_InvalidMemoryIndexedRange4UImm2:
6839 case Match_InvalidSVEAddSubImm8:
6840 case Match_InvalidSVEAddSubImm16:
6841 case Match_InvalidSVEAddSubImm32:
6842 case Match_InvalidSVEAddSubImm64:
6843 case Match_InvalidSVECpyImm8:
6844 case Match_InvalidSVECpyImm16:
6845 case Match_InvalidSVECpyImm32:
6846 case Match_InvalidSVECpyImm64:
6847 case Match_InvalidIndexRange0_0:
6848 case Match_InvalidIndexRange1_1:
6849 case Match_InvalidIndexRange0_15:
6850 case Match_InvalidIndexRange0_7:
6851 case Match_InvalidIndexRange0_3:
6852 case Match_InvalidIndexRange0_1:
6853 case Match_InvalidSVEIndexRange0_63:
6854 case Match_InvalidSVEIndexRange0_31:
6855 case Match_InvalidSVEIndexRange0_15:
6856 case Match_InvalidSVEIndexRange0_7:
6857 case Match_InvalidSVEIndexRange0_3:
6858 case Match_InvalidLabel:
6859 case Match_InvalidComplexRotationEven:
6860 case Match_InvalidComplexRotationOdd:
6861 case Match_InvalidGPR64shifted8:
6862 case Match_InvalidGPR64shifted16:
6863 case Match_InvalidGPR64shifted32:
6864 case Match_InvalidGPR64shifted64:
6865 case Match_InvalidGPR64shifted128:
6866 case Match_InvalidGPR64NoXZRshifted8:
6867 case Match_InvalidGPR64NoXZRshifted16:
6868 case Match_InvalidGPR64NoXZRshifted32:
6869 case Match_InvalidGPR64NoXZRshifted64:
6870 case Match_InvalidGPR64NoXZRshifted128:
6871 case Match_InvalidZPR32UXTW8:
6872 case Match_InvalidZPR32UXTW16:
6873 case Match_InvalidZPR32UXTW32:
6874 case Match_InvalidZPR32UXTW64:
6875 case Match_InvalidZPR32SXTW8:
6876 case Match_InvalidZPR32SXTW16:
6877 case Match_InvalidZPR32SXTW32:
6878 case Match_InvalidZPR32SXTW64:
6879 case Match_InvalidZPR64UXTW8:
6880 case Match_InvalidZPR64SXTW8:
6881 case Match_InvalidZPR64UXTW16:
6882 case Match_InvalidZPR64SXTW16:
6883 case Match_InvalidZPR64UXTW32:
6884 case Match_InvalidZPR64SXTW32:
6885 case Match_InvalidZPR64UXTW64:
6886 case Match_InvalidZPR64SXTW64:
6887 case Match_InvalidZPR32LSL8:
6888 case Match_InvalidZPR32LSL16:
6889 case Match_InvalidZPR32LSL32:
6890 case Match_InvalidZPR32LSL64:
6891 case Match_InvalidZPR64LSL8:
6892 case Match_InvalidZPR64LSL16:
6893 case Match_InvalidZPR64LSL32:
6894 case Match_InvalidZPR64LSL64:
6895 case Match_InvalidZPR0:
6896 case Match_InvalidZPR8:
6897 case Match_InvalidZPR16:
6898 case Match_InvalidZPR32:
6899 case Match_InvalidZPR64:
6900 case Match_InvalidZPR128:
6901 case Match_InvalidZPR_3b8:
6902 case Match_InvalidZPR_3b16:
6903 case Match_InvalidZPR_3b32:
6904 case Match_InvalidZPR_4b8:
6905 case Match_InvalidZPR_4b16:
6906 case Match_InvalidZPR_4b32:
6907 case Match_InvalidZPR_4b64:
6908 case Match_InvalidSVEPPRorPNRAnyReg:
6909 case Match_InvalidSVEPPRorPNRBReg:
6910 case Match_InvalidSVEPredicateAnyReg:
6911 case Match_InvalidSVEPattern:
6912 case Match_InvalidSVEVecLenSpecifier:
6913 case Match_InvalidSVEPredicateBReg:
6914 case Match_InvalidSVEPredicateHReg:
6915 case Match_InvalidSVEPredicateSReg:
6916 case Match_InvalidSVEPredicateDReg:
6917 case Match_InvalidSVEPredicate3bAnyReg:
6918 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6919 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6920 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6921 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6922 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6923 case Match_InvalidSVEPNPredicateBReg:
6924 case Match_InvalidSVEPNPredicateHReg:
6925 case Match_InvalidSVEPNPredicateSReg:
6926 case Match_InvalidSVEPNPredicateDReg:
6927 case Match_InvalidSVEPredicateListMul2x8:
6928 case Match_InvalidSVEPredicateListMul2x16:
6929 case Match_InvalidSVEPredicateListMul2x32:
6930 case Match_InvalidSVEPredicateListMul2x64:
6931 case Match_InvalidSVEExactFPImmOperandHalfOne:
6932 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6933 case Match_InvalidSVEExactFPImmOperandZeroOne:
6934 case Match_InvalidMatrixTile16:
6935 case Match_InvalidMatrixTile32:
6936 case Match_InvalidMatrixTile64:
6937 case Match_InvalidMatrix:
6938 case Match_InvalidMatrix8:
6939 case Match_InvalidMatrix16:
6940 case Match_InvalidMatrix32:
6941 case Match_InvalidMatrix64:
6942 case Match_InvalidMatrixTileVectorH8:
6943 case Match_InvalidMatrixTileVectorH16:
6944 case Match_InvalidMatrixTileVectorH32:
6945 case Match_InvalidMatrixTileVectorH64:
6946 case Match_InvalidMatrixTileVectorH128:
6947 case Match_InvalidMatrixTileVectorV8:
6948 case Match_InvalidMatrixTileVectorV16:
6949 case Match_InvalidMatrixTileVectorV32:
6950 case Match_InvalidMatrixTileVectorV64:
6951 case Match_InvalidMatrixTileVectorV128:
6952 case Match_InvalidSVCR:
6953 case Match_InvalidMatrixIndexGPR32_12_15:
6954 case Match_InvalidMatrixIndexGPR32_8_11:
6955 case Match_InvalidLookupTable:
6956 case Match_InvalidZPRMul2_Lo8:
6957 case Match_InvalidZPRMul2_Hi8:
6958 case Match_InvalidZPRMul2_Lo16:
6959 case Match_InvalidZPRMul2_Hi16:
6960 case Match_InvalidZPRMul2_Lo32:
6961 case Match_InvalidZPRMul2_Hi32:
6962 case Match_InvalidZPRMul2_Lo64:
6963 case Match_InvalidZPRMul2_Hi64:
6964 case Match_InvalidZPR_K0:
6965 case Match_InvalidSVEVectorList2x8Mul2:
6966 case Match_InvalidSVEVectorList2x16Mul2:
6967 case Match_InvalidSVEVectorList2x32Mul2:
6968 case Match_InvalidSVEVectorList2x64Mul2:
6969 case Match_InvalidSVEVectorList2x128Mul2:
6970 case Match_InvalidSVEVectorList4x8Mul4:
6971 case Match_InvalidSVEVectorList4x16Mul4:
6972 case Match_InvalidSVEVectorList4x32Mul4:
6973 case Match_InvalidSVEVectorList4x64Mul4:
6974 case Match_InvalidSVEVectorList4x128Mul4:
6975 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6976 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6977 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6978 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6979 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6980 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6981 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6982 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6983 case Match_InvalidSVEVectorListStrided2x8:
6984 case Match_InvalidSVEVectorListStrided2x16:
6985 case Match_InvalidSVEVectorListStrided2x32:
6986 case Match_InvalidSVEVectorListStrided2x64:
6987 case Match_InvalidSVEVectorListStrided4x8:
6988 case Match_InvalidSVEVectorListStrided4x16:
6989 case Match_InvalidSVEVectorListStrided4x32:
6990 case Match_InvalidSVEVectorListStrided4x64:
6991 case Match_MSR:
6992 case Match_MRS: {
6993 if (ErrorInfo >= Operands.size())
6994 return Error(L: IDLoc, Msg: "too few operands for instruction", Range: SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6995 // Any time we get here, there's nothing fancy to do. Just get the
6996 // operand SMLoc and display the diagnostic.
6997 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6998 if (ErrorLoc == SMLoc())
6999 ErrorLoc = IDLoc;
7000 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7001 }
7002 }
7003
7004 llvm_unreachable("Implement any new match types added!");
7005}
7006
7007/// ParseDirective parses the AArch64-specific directives
7008bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7009 const MCContext::Environment Format = getContext().getObjectFileType();
7010 bool IsMachO = Format == MCContext::IsMachO;
7011 bool IsCOFF = Format == MCContext::IsCOFF;
7012 bool IsELF = Format == MCContext::IsELF;
7013
7014 auto IDVal = DirectiveID.getIdentifier().lower();
7015 SMLoc Loc = DirectiveID.getLoc();
7016 if (IDVal == ".arch")
7017 parseDirectiveArch(L: Loc);
7018 else if (IDVal == ".cpu")
7019 parseDirectiveCPU(L: Loc);
7020 else if (IDVal == ".tlsdesccall")
7021 parseDirectiveTLSDescCall(L: Loc);
7022 else if (IDVal == ".ltorg" || IDVal == ".pool")
7023 parseDirectiveLtorg(L: Loc);
7024 else if (IDVal == ".unreq")
7025 parseDirectiveUnreq(L: Loc);
7026 else if (IDVal == ".inst")
7027 parseDirectiveInst(L: Loc);
7028 else if (IDVal == ".cfi_negate_ra_state")
7029 parseDirectiveCFINegateRAState();
7030 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7031 parseDirectiveCFINegateRAStateWithPC();
7032 else if (IDVal == ".cfi_b_key_frame")
7033 parseDirectiveCFIBKeyFrame();
7034 else if (IDVal == ".cfi_mte_tagged_frame")
7035 parseDirectiveCFIMTETaggedFrame();
7036 else if (IDVal == ".arch_extension")
7037 parseDirectiveArchExtension(L: Loc);
7038 else if (IDVal == ".variant_pcs")
7039 parseDirectiveVariantPCS(L: Loc);
7040 else if (IsMachO) {
7041 if (IDVal == MCLOHDirectiveName())
7042 parseDirectiveLOH(LOH: IDVal, L: Loc);
7043 else
7044 return true;
7045 } else if (IsCOFF) {
7046 if (IDVal == ".seh_stackalloc")
7047 parseDirectiveSEHAllocStack(L: Loc);
7048 else if (IDVal == ".seh_endprologue")
7049 parseDirectiveSEHPrologEnd(L: Loc);
7050 else if (IDVal == ".seh_save_r19r20_x")
7051 parseDirectiveSEHSaveR19R20X(L: Loc);
7052 else if (IDVal == ".seh_save_fplr")
7053 parseDirectiveSEHSaveFPLR(L: Loc);
7054 else if (IDVal == ".seh_save_fplr_x")
7055 parseDirectiveSEHSaveFPLRX(L: Loc);
7056 else if (IDVal == ".seh_save_reg")
7057 parseDirectiveSEHSaveReg(L: Loc);
7058 else if (IDVal == ".seh_save_reg_x")
7059 parseDirectiveSEHSaveRegX(L: Loc);
7060 else if (IDVal == ".seh_save_regp")
7061 parseDirectiveSEHSaveRegP(L: Loc);
7062 else if (IDVal == ".seh_save_regp_x")
7063 parseDirectiveSEHSaveRegPX(L: Loc);
7064 else if (IDVal == ".seh_save_lrpair")
7065 parseDirectiveSEHSaveLRPair(L: Loc);
7066 else if (IDVal == ".seh_save_freg")
7067 parseDirectiveSEHSaveFReg(L: Loc);
7068 else if (IDVal == ".seh_save_freg_x")
7069 parseDirectiveSEHSaveFRegX(L: Loc);
7070 else if (IDVal == ".seh_save_fregp")
7071 parseDirectiveSEHSaveFRegP(L: Loc);
7072 else if (IDVal == ".seh_save_fregp_x")
7073 parseDirectiveSEHSaveFRegPX(L: Loc);
7074 else if (IDVal == ".seh_set_fp")
7075 parseDirectiveSEHSetFP(L: Loc);
7076 else if (IDVal == ".seh_add_fp")
7077 parseDirectiveSEHAddFP(L: Loc);
7078 else if (IDVal == ".seh_nop")
7079 parseDirectiveSEHNop(L: Loc);
7080 else if (IDVal == ".seh_save_next")
7081 parseDirectiveSEHSaveNext(L: Loc);
7082 else if (IDVal == ".seh_startepilogue")
7083 parseDirectiveSEHEpilogStart(L: Loc);
7084 else if (IDVal == ".seh_endepilogue")
7085 parseDirectiveSEHEpilogEnd(L: Loc);
7086 else if (IDVal == ".seh_trap_frame")
7087 parseDirectiveSEHTrapFrame(L: Loc);
7088 else if (IDVal == ".seh_pushframe")
7089 parseDirectiveSEHMachineFrame(L: Loc);
7090 else if (IDVal == ".seh_context")
7091 parseDirectiveSEHContext(L: Loc);
7092 else if (IDVal == ".seh_ec_context")
7093 parseDirectiveSEHECContext(L: Loc);
7094 else if (IDVal == ".seh_clear_unwound_to_call")
7095 parseDirectiveSEHClearUnwoundToCall(L: Loc);
7096 else if (IDVal == ".seh_pac_sign_lr")
7097 parseDirectiveSEHPACSignLR(L: Loc);
7098 else if (IDVal == ".seh_save_any_reg")
7099 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: false);
7100 else if (IDVal == ".seh_save_any_reg_p")
7101 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: false);
7102 else if (IDVal == ".seh_save_any_reg_x")
7103 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: true);
7104 else if (IDVal == ".seh_save_any_reg_px")
7105 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: true);
7106 else if (IDVal == ".seh_allocz")
7107 parseDirectiveSEHAllocZ(L: Loc);
7108 else if (IDVal == ".seh_save_zreg")
7109 parseDirectiveSEHSaveZReg(L: Loc);
7110 else if (IDVal == ".seh_save_preg")
7111 parseDirectiveSEHSavePReg(L: Loc);
7112 else
7113 return true;
7114 } else if (IsELF) {
7115 if (IDVal == ".aeabi_subsection")
7116 parseDirectiveAeabiSubSectionHeader(L: Loc);
7117 else if (IDVal == ".aeabi_attribute")
7118 parseDirectiveAeabiAArch64Attr(L: Loc);
7119 else
7120 return true;
7121 } else
7122 return true;
7123 return false;
7124}
7125
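// Expand the legacy "crypto"/"nocrypto" extension names into the concrete
// features they imply for the given architecture, e.g. "+crypto" requests
// sha2 and aes on armv8.1-a..armv8.3-a, and sm4, sha3, sha2 and aes on
// armv8.4-a and later.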
7126static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7127 SmallVector<StringRef, 4> &RequestedExtensions) {
7128 const bool NoCrypto = llvm::is_contained(Range&: RequestedExtensions, Element: "nocrypto");
7129 const bool Crypto = llvm::is_contained(Range&: RequestedExtensions, Element: "crypto");
7130
7131 if (!NoCrypto && Crypto) {
7132 // Map 'generic' (and others) to sha2 and aes, because
7133 // that was the traditional meaning of crypto.
7134 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7135 ArchInfo == AArch64::ARMV8_3A) {
7136 RequestedExtensions.push_back(Elt: "sha2");
7137 RequestedExtensions.push_back(Elt: "aes");
7138 }
7139 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7140 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7141 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7142 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7143 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7144 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7145 RequestedExtensions.push_back(Elt: "sm4");
7146 RequestedExtensions.push_back(Elt: "sha3");
7147 RequestedExtensions.push_back(Elt: "sha2");
7148 RequestedExtensions.push_back(Elt: "aes");
7149 }
7150 } else if (NoCrypto) {
7151 // Map 'nocrypto' to nosha2 and noaes, because sha2 and aes are
7152 // what 'crypto' traditionally implied.
7153 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7154 ArchInfo == AArch64::ARMV8_3A) {
7155 RequestedExtensions.push_back(Elt: "nosha2");
7156 RequestedExtensions.push_back(Elt: "noaes");
7157 }
7158 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7159 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7160 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7161 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7162 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7163 ArchInfo == AArch64::ARMV9_4A) {
7164 RequestedExtensions.push_back(Elt: "nosm4");
7165 RequestedExtensions.push_back(Elt: "nosha3");
7166 RequestedExtensions.push_back(Elt: "nosha2");
7167 RequestedExtensions.push_back(Elt: "noaes");
7168 }
7169 }
7170}
7171
7172static SMLoc incrementLoc(SMLoc L, int Offset) {
7173 return SMLoc::getFromPointer(Ptr: L.getPointer() + Offset);
7174}
7175
7176/// parseDirectiveArch
7177/// ::= .arch token
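/// e.g. .arch armv8.2-a+sve+nofp16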
7178bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7179 SMLoc CurLoc = getLoc();
7180
7181 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7182 StringRef Arch, ExtensionString;
7183 std::tie(args&: Arch, args&: ExtensionString) = Name.split(Separator: '+');
7184
7185 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7186 if (!ArchInfo)
7187 return Error(L: CurLoc, Msg: "unknown arch name");
7188
7189 if (parseToken(T: AsmToken::EndOfStatement))
7190 return true;
7191
7192 // Get the architecture and extension features.
7193 std::vector<StringRef> AArch64Features;
7194 AArch64Features.push_back(x: ArchInfo->ArchFeature);
7195 AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: AArch64Features);
7196
7197 MCSubtargetInfo &STI = copySTI();
7198 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7199 STI.setDefaultFeatures(CPU: "generic", /*TuneCPU*/ "generic",
7200 FS: join(Begin: ArchFeatures.begin(), End: ArchFeatures.end(), Separator: ","));
7201
7202 SmallVector<StringRef, 4> RequestedExtensions;
7203 if (!ExtensionString.empty())
7204 ExtensionString.split(A&: RequestedExtensions, Separator: '+');
7205
7206 ExpandCryptoAEK(ArchInfo: *ArchInfo, RequestedExtensions);
7207 CurLoc = incrementLoc(L: CurLoc, Offset: Arch.size());
7208
7209 for (auto Name : RequestedExtensions) {
7210 // Advance source location past '+'.
7211 CurLoc = incrementLoc(L: CurLoc, Offset: 1);
7212
7213 bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
7214
7215 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7216 return Extension.Name == Name;
7217 });
7218
7219 if (It == std::end(arr: ExtensionMap))
7220 return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);
7221
7222 if (EnableFeature)
7223 STI.SetFeatureBitsTransitively(It->Features);
7224 else
7225 STI.ClearFeatureBitsTransitively(FB: It->Features);
7226 CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
7227 }
7228 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7229 setAvailableFeatures(Features);
7230
7231 getTargetStreamer().emitDirectiveArch(Name);
7232 return false;
7233}
7234
7235/// parseDirectiveArchExtension
7236/// ::= .arch_extension [no]feature
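/// e.g. .arch_extension memtag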
7237bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7238 SMLoc ExtLoc = getLoc();
7239
7240 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7241
7242 if (parseEOL())
7243 return true;
7244
7245 bool EnableFeature = true;
7246 StringRef Name = FullName;
7247 if (Name.starts_with_insensitive(Prefix: "no")) {
7248 EnableFeature = false;
7249 Name = Name.substr(Start: 2);
7250 }
7251
7252 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7253 return Extension.Name == Name;
7254 });
7255
7256 if (It == std::end(arr: ExtensionMap))
7257 return Error(L: ExtLoc, Msg: "unsupported architectural extension: " + Name);
7258
7259 MCSubtargetInfo &STI = copySTI();
7260 if (EnableFeature)
7261 STI.SetFeatureBitsTransitively(It->Features);
7262 else
7263 STI.ClearFeatureBitsTransitively(FB: It->Features);
7264 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7265 setAvailableFeatures(Features);
7266
7267 getTargetStreamer().emitDirectiveArchExtension(Name: FullName);
7268 return false;
7269}
7270
7271/// parseDirectiveCPU
7272/// ::= .cpu id
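/// e.g. .cpu cortex-a75+nocrypto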
7273bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7274 SMLoc CurLoc = getLoc();
7275
7276 StringRef CPU, ExtensionString;
7277 std::tie(args&: CPU, args&: ExtensionString) =
7278 getParser().parseStringToEndOfStatement().trim().split(Separator: '+');
7279
7280 if (parseToken(T: AsmToken::EndOfStatement))
7281 return true;
7282
7283 SmallVector<StringRef, 4> RequestedExtensions;
7284 if (!ExtensionString.empty())
7285 ExtensionString.split(A&: RequestedExtensions, Separator: '+');
7286
7287 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7288 if (!CpuArch) {
7289 Error(L: CurLoc, Msg: "unknown CPU name");
7290 return false;
7291 }
7292 ExpandCryptoAEK(ArchInfo: *CpuArch, RequestedExtensions);
7293
7294 MCSubtargetInfo &STI = copySTI();
7295 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, FS: "");
7296 CurLoc = incrementLoc(L: CurLoc, Offset: CPU.size());
7297
7298 for (auto Name : RequestedExtensions) {
7299 // Advance source location past '+'.
7300 CurLoc = incrementLoc(L: CurLoc, Offset: 1);
7301
7302 bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
7303
7304 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7305 return Extension.Name == Name;
7306 });
7307
7308 if (It == std::end(arr: ExtensionMap))
7309 return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);
7310
7311 if (EnableFeature)
7312 STI.SetFeatureBitsTransitively(It->Features);
7313 else
7314 STI.ClearFeatureBitsTransitively(FB: It->Features);
7315 CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
7316 }
7317 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7318 setAvailableFeatures(Features);
7319 return false;
7320}
7321
7322/// parseDirectiveInst
7323/// ::= .inst opcode [, ...]
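/// e.g. .inst 0xd503201f (the encoding of NOP)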
7324bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7325 if (getLexer().is(K: AsmToken::EndOfStatement))
7326 return Error(L: Loc, Msg: "expected expression following '.inst' directive");
7327
7328 auto parseOp = [&]() -> bool {
7329 SMLoc L = getLoc();
7330 const MCExpr *Expr = nullptr;
7331 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
7332 return true;
7333 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
7334 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
7335 return true;
7336 getTargetStreamer().emitInst(Inst: Value->getValue());
7337 return false;
7338 };
7339
7340 return parseMany(parseOne: parseOp);
7341}
7342
7343// parseDirectiveTLSDescCall:
7344// ::= .tlsdesccall symbol
7345bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7346 StringRef Name;
7347 if (check(P: getParser().parseIdentifier(Res&: Name), Loc: L, Msg: "expected symbol") ||
7348 parseToken(T: AsmToken::EndOfStatement))
7349 return true;
7350
7351 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7352 const MCExpr *Expr = MCSymbolRefExpr::create(Symbol: Sym, Ctx&: getContext());
7353 Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_TLSDESC, Ctx&: getContext());
7354
7355 MCInst Inst;
7356 Inst.setOpcode(AArch64::TLSDESCCALL);
7357 Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
7358
7359 getParser().getStreamer().emitInstruction(Inst, STI: getSTI());
7360 return false;
7361}
7362
7363/// ::= .loh <lohName | lohId> label1, ..., labelN
7364/// The number of arguments depends on the loh identifier.
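/// e.g. .loh AdrpAdd Lloh0, Lloh1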
7365bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7366 MCLOHType Kind;
7367 if (getTok().isNot(K: AsmToken::Identifier)) {
7368 if (getTok().isNot(K: AsmToken::Integer))
7369 return TokError(Msg: "expected an identifier or a number in directive");
7370 // We successfully parsed a numeric value for the identifier.
7371 // Check that it is valid.
7372 int64_t Id = getTok().getIntVal();
7373 if (Id <= -1U && !isValidMCLOHType(Kind: Id))
7374 return TokError(Msg: "invalid numeric identifier in directive");
7375 Kind = (MCLOHType)Id;
7376 } else {
7377 StringRef Name = getTok().getIdentifier();
7378 // We successfully parsed an identifier.
7379 // Check that it is a recognized one.
7380 int Id = MCLOHNameToId(Name);
7381
7382 if (Id == -1)
7383 return TokError(Msg: "invalid identifier in directive");
7384 Kind = (MCLOHType)Id;
7385 }
7386 // Consume the identifier.
7387 Lex();
7388 // Get the number of arguments of this LOH.
7389 int NbArgs = MCLOHIdToNbArgs(Kind);
7390
7391 assert(NbArgs != -1 && "Invalid number of arguments");
7392
7393 SmallVector<MCSymbol *, 3> Args;
7394 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7395 StringRef Name;
7396 if (getParser().parseIdentifier(Res&: Name))
7397 return TokError(Msg: "expected identifier in directive");
7398 Args.push_back(Elt: getContext().getOrCreateSymbol(Name));
7399
7400 if (Idx + 1 == NbArgs)
7401 break;
7402 if (parseComma())
7403 return true;
7404 }
7405 if (parseEOL())
7406 return true;
7407
7408 getStreamer().emitLOHDirective(Kind: (MCLOHType)Kind, Args);
7409 return false;
7410}
7411
7412/// parseDirectiveLtorg
7413/// ::= .ltorg | .pool
7414bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7415 if (parseEOL())
7416 return true;
7417 getTargetStreamer().emitCurrentConstantPool();
7418 return false;
7419}
7420
7421/// parseDirectiveReq
7422/// ::= name .req registername
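///
/// A hedged example (the alias and register are arbitrary):
///   fpreg .req x29
///   mov   fpreg, sp
///   .unreq fpreg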
7423bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7424 Lex(); // Eat the '.req' token.
7425 SMLoc SRegLoc = getLoc();
7426 RegKind RegisterKind = RegKind::Scalar;
7427 MCRegister RegNum;
7428 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7429
7430 if (!ParseRes.isSuccess()) {
7431 StringRef Kind;
7432 RegisterKind = RegKind::NeonVector;
7433 ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::NeonVector);
7434
7435 if (ParseRes.isFailure())
7436 return true;
7437
7438 if (ParseRes.isSuccess() && !Kind.empty())
7439 return Error(L: SRegLoc, Msg: "vector register without type specifier expected");
7440 }
7441
7442 if (!ParseRes.isSuccess()) {
7443 StringRef Kind;
7444 RegisterKind = RegKind::SVEDataVector;
7445 ParseRes =
7446 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
7447
7448 if (ParseRes.isFailure())
7449 return true;
7450
7451 if (ParseRes.isSuccess() && !Kind.empty())
7452 return Error(L: SRegLoc,
7453 Msg: "sve vector register without type specifier expected");
7454 }
7455
7456 if (!ParseRes.isSuccess()) {
7457 StringRef Kind;
7458 RegisterKind = RegKind::SVEPredicateVector;
7459 ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);
7460
7461 if (ParseRes.isFailure())
7462 return true;
7463
7464 if (ParseRes.isSuccess() && !Kind.empty())
7465 return Error(L: SRegLoc,
7466 Msg: "sve predicate register without type specifier expected");
7467 }
7468
7469 if (!ParseRes.isSuccess())
7470 return Error(L: SRegLoc, Msg: "register name or alias expected");
7471
7472 // Shouldn't be anything else.
7473 if (parseEOL())
7474 return true;
7475
7476 auto pair = std::make_pair(x&: RegisterKind, y: (unsigned) RegNum);
7477 if (RegisterReqs.insert(KV: std::make_pair(x&: Name, y&: pair)).first->second != pair)
7478 Warning(L, Msg: "ignoring redefinition of register alias '" + Name + "'");
7479
7480 return false;
7481}
7482
7483/// parseDirectiveUnreq
7484/// ::= .unreq registername
7485bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7486 if (getTok().isNot(K: AsmToken::Identifier))
7487 return TokError(Msg: "unexpected input in .unreq directive.");
7488 RegisterReqs.erase(Key: getTok().getIdentifier().lower());
7489 Lex(); // Eat the identifier.
7490 return parseToken(T: AsmToken::EndOfStatement);
7491}
7492
7493bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7494 if (parseEOL())
7495 return true;
7496 getStreamer().emitCFINegateRAState();
7497 return false;
7498}
7499
7500bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7501 if (parseEOL())
7502 return true;
7503 getStreamer().emitCFINegateRAStateWithPC();
7504 return false;
7505}
7506
7507/// parseDirectiveCFIBKeyFrame
7508/// ::= .cfi_b_key_frame
7509bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7510 if (parseEOL())
7511 return true;
7512 getStreamer().emitCFIBKeyFrame();
7513 return false;
7514}
7515
7516/// parseDirectiveCFIMTETaggedFrame
7517/// ::= .cfi_mte_tagged_frame
7518bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7519 if (parseEOL())
7520 return true;
7521 getStreamer().emitCFIMTETaggedFrame();
7522 return false;
7523}
7524
7525/// parseDirectiveVariantPCS
7526/// ::= .variant_pcs symbolname
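///
/// An illustrative example (the symbol name is a placeholder):
///   .variant_pcs sve_callee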
7527bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7528 StringRef Name;
7529 if (getParser().parseIdentifier(Res&: Name))
7530 return TokError(Msg: "expected symbol name");
7531 if (parseEOL())
7532 return true;
7533 getTargetStreamer().emitDirectiveVariantPCS(
7534 Symbol: getContext().getOrCreateSymbol(Name));
7535 return false;
7536}
7537
7538/// parseDirectiveSEHAllocStack
7539/// ::= .seh_stackalloc
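///
/// A hedged example pairing the directive with the prologue instruction it
/// describes (the size is illustrative):
///   sub sp, sp, #32
///   .seh_stackalloc 32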
7540bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7541 int64_t Size;
7542 if (parseImmExpr(Out&: Size))
7543 return true;
7544 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7545 return false;
7546}
7547
7548/// parseDirectiveSEHPrologEnd
7549/// ::= .seh_endprologue
7550bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7551 getTargetStreamer().emitARM64WinCFIPrologEnd();
7552 return false;
7553}
7554
7555/// parseDirectiveSEHSaveR19R20X
7556/// ::= .seh_save_r19r20_x
7557bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7558 int64_t Offset;
7559 if (parseImmExpr(Out&: Offset))
7560 return true;
7561 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7562 return false;
7563}
7564
7565/// parseDirectiveSEHSaveFPLR
7566/// ::= .seh_save_fplr
7567bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7568 int64_t Offset;
7569 if (parseImmExpr(Out&: Offset))
7570 return true;
7571 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7572 return false;
7573}
7574
7575/// parseDirectiveSEHSaveFPLRX
7576/// ::= .seh_save_fplr_x
7577bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7578 int64_t Offset;
7579 if (parseImmExpr(Out&: Offset))
7580 return true;
7581 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7582 return false;
7583}
7584
7585/// parseDirectiveSEHSaveReg
7586/// ::= .seh_save_reg
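///
/// A hedged example (register and offset are illustrative):
///   str x19, [sp, #8]
///   .seh_save_reg x19, 8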
7587bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7588 unsigned Reg;
7589 int64_t Offset;
7590 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7591 parseComma() || parseImmExpr(Out&: Offset))
7592 return true;
7593 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7594 return false;
7595}
7596
7597/// parseDirectiveSEHSaveRegX
7598/// ::= .seh_save_reg_x
7599bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7600 unsigned Reg;
7601 int64_t Offset;
7602 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7603 parseComma() || parseImmExpr(Out&: Offset))
7604 return true;
7605 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7606 return false;
7607}
7608
7609/// parseDirectiveSEHSaveRegP
7610/// ::= .seh_save_regp
7611bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7612 unsigned Reg;
7613 int64_t Offset;
7614 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7615 parseComma() || parseImmExpr(Out&: Offset))
7616 return true;
7617 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7618 return false;
7619}
7620
7621/// parseDirectiveSEHSaveRegPX
7622/// ::= .seh_save_regp_x
7623bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7624 unsigned Reg;
7625 int64_t Offset;
7626 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7627 parseComma() || parseImmExpr(Out&: Offset))
7628 return true;
7629 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7630 return false;
7631}
7632
7633/// parseDirectiveSEHSaveLRPair
7634/// ::= .seh_save_lrpair
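///
/// A hedged example; the first register must be an even distance from x19
/// (x19, x21, x23, ...):
///   stp x21, lr, [sp, #16]
///   .seh_save_lrpair x21, 16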
7635bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7636 unsigned Reg;
7637 int64_t Offset;
7638 L = getLoc();
7639 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7640 parseComma() || parseImmExpr(Out&: Offset))
7641 return true;
7642 if (check(P: ((Reg - 19) % 2 != 0), Loc: L,
7643 Msg: "expected register with even offset from x19"))
7644 return true;
7645 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7646 return false;
7647}
7648
7649/// parseDirectiveSEHSaveFReg
7650/// ::= .seh_save_freg
7651bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7652 unsigned Reg;
7653 int64_t Offset;
7654 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7655 parseComma() || parseImmExpr(Out&: Offset))
7656 return true;
7657 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7658 return false;
7659}
7660
7661/// parseDirectiveSEHSaveFRegX
7662/// ::= .seh_save_freg_x
7663bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7664 unsigned Reg;
7665 int64_t Offset;
7666 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7667 parseComma() || parseImmExpr(Out&: Offset))
7668 return true;
7669 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7670 return false;
7671}
7672
7673/// parseDirectiveSEHSaveFRegP
7674/// ::= .seh_save_fregp
7675bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7676 unsigned Reg;
7677 int64_t Offset;
7678 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7679 parseComma() || parseImmExpr(Out&: Offset))
7680 return true;
7681 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7682 return false;
7683}
7684
7685/// parseDirectiveSEHSaveFRegPX
7686/// ::= .seh_save_fregp_x
7687bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7688 unsigned Reg;
7689 int64_t Offset;
7690 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7691 parseComma() || parseImmExpr(Out&: Offset))
7692 return true;
7693 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7694 return false;
7695}
7696
7697/// parseDirectiveSEHSetFP
7698/// ::= .seh_set_fp
7699bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7700 getTargetStreamer().emitARM64WinCFISetFP();
7701 return false;
7702}
7703
7704/// parseDirectiveSEHAddFP
7705/// ::= .seh_add_fp
7706bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7707 int64_t Size;
7708 if (parseImmExpr(Out&: Size))
7709 return true;
7710 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7711 return false;
7712}
7713
7714/// parseDirectiveSEHNop
7715/// ::= .seh_nop
7716bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7717 getTargetStreamer().emitARM64WinCFINop();
7718 return false;
7719}
7720
7721/// parseDirectiveSEHSaveNext
7722/// ::= .seh_save_next
7723bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7724 getTargetStreamer().emitARM64WinCFISaveNext();
7725 return false;
7726}
7727
7728/// parseDirectiveSEHEpilogStart
7729/// ::= .seh_startepilogue
7730bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7731 getTargetStreamer().emitARM64WinCFIEpilogStart();
7732 return false;
7733}
7734
7735/// parseDirectiveSEHEpilogEnd
7736/// ::= .seh_endepilogue
7737bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7738 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7739 return false;
7740}
7741
7742/// parseDirectiveSEHTrapFrame
7743/// ::= .seh_trap_frame
7744bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7745 getTargetStreamer().emitARM64WinCFITrapFrame();
7746 return false;
7747}
7748
7749/// parseDirectiveSEHMachineFrame
7750/// ::= .seh_pushframe
7751bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7752 getTargetStreamer().emitARM64WinCFIMachineFrame();
7753 return false;
7754}
7755
7756/// parseDirectiveSEHContext
7757/// ::= .seh_context
7758bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7759 getTargetStreamer().emitARM64WinCFIContext();
7760 return false;
7761}
7762
7763/// parseDirectiveSEHECContext
7764/// ::= .seh_ec_context
7765bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7766 getTargetStreamer().emitARM64WinCFIECContext();
7767 return false;
7768}
7769
7770/// parseDirectiveSEHClearUnwoundToCall
7771/// ::= .seh_clear_unwound_to_call
7772bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7773 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7774 return false;
7775}
7776
7777/// parseDirectiveSEHPACSignLR
7778/// ::= .seh_pac_sign_lr
7779bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7780 getTargetStreamer().emitARM64WinCFIPACSignLR();
7781 return false;
7782}
7783
7784/// parseDirectiveSEHSaveAnyReg
7785/// ::= .seh_save_any_reg
7786/// ::= .seh_save_any_reg_p
7787/// ::= .seh_save_any_reg_x
7788/// ::= .seh_save_any_reg_px
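///
/// Hedged examples (registers and offsets are illustrative):
///   str q6, [sp, #16]
///   .seh_save_any_reg q6, 16
///   stp q6, q7, [sp, #-32]!
///   .seh_save_any_reg_px q6, 32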
7789bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7790 bool Writeback) {
7791 MCRegister Reg;
7792 SMLoc Start, End;
7793 int64_t Offset;
7794 if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register") ||
7795 parseComma() || parseImmExpr(Out&: Offset))
7796 return true;
7797
7798 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7799 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7800 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7801 return Error(L, Msg: "invalid save_any_reg offset");
7802 unsigned EncodedReg;
7803 if (Reg == AArch64::FP)
7804 EncodedReg = 29;
7805 else if (Reg == AArch64::LR)
7806 EncodedReg = 30;
7807 else
7808 EncodedReg = Reg - AArch64::X0;
7809 if (Paired) {
7810 if (Reg == AArch64::LR)
7811 return Error(L: Start, Msg: "lr cannot be paired with another register");
7812 if (Writeback)
7813 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(Reg: EncodedReg, Offset);
7814 else
7815 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(Reg: EncodedReg, Offset);
7816 } else {
7817 if (Writeback)
7818 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(Reg: EncodedReg, Offset);
7819 else
7820 getTargetStreamer().emitARM64WinCFISaveAnyRegI(Reg: EncodedReg, Offset);
7821 }
7822 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7823 unsigned EncodedReg = Reg - AArch64::D0;
7824 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7825 return Error(L, Msg: "invalid save_any_reg offset");
7826 if (Paired) {
7827 if (Reg == AArch64::D31)
7828 return Error(L: Start, Msg: "d31 cannot be paired with another register");
7829 if (Writeback)
7830 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(Reg: EncodedReg, Offset);
7831 else
7832 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(Reg: EncodedReg, Offset);
7833 } else {
7834 if (Writeback)
7835 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(Reg: EncodedReg, Offset);
7836 else
7837 getTargetStreamer().emitARM64WinCFISaveAnyRegD(Reg: EncodedReg, Offset);
7838 }
7839 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7840 unsigned EncodedReg = Reg - AArch64::Q0;
7841 if (Offset < 0 || Offset % 16)
7842 return Error(L, Msg: "invalid save_any_reg offset");
7843 if (Paired) {
7844 if (Reg == AArch64::Q31)
7845 return Error(L: Start, Msg: "q31 cannot be paired with another register");
7846 if (Writeback)
7847 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(Reg: EncodedReg, Offset);
7848 else
7849 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(Reg: EncodedReg, Offset);
7850 } else {
7851 if (Writeback)
7852 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(Reg: EncodedReg, Offset);
7853 else
7854 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(Reg: EncodedReg, Offset);
7855 }
7856 } else {
7857 return Error(L: Start, Msg: "save_any_reg register must be x, q or d register");
7858 }
7859 return false;
7860}
7861
7862/// parseDirectiveSEHAllocZ
7863/// ::= .seh_allocz
7864bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
7865 int64_t Offset;
7866 if (parseImmExpr(Out&: Offset))
7867 return true;
7868 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
7869 return false;
7870}
7871
7872/// parseDirectiveSEHSaveZReg
7873/// ::= .seh_save_zreg
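///
/// A hedged example (the register must be in z8-z23; the offset is
/// illustrative):
///   .seh_save_zreg z8, 0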
7874bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
7875 MCRegister RegNum;
7876 StringRef Kind;
7877 int64_t Offset;
7878 ParseStatus Res =
7879 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
7880 if (!Res.isSuccess())
7881 return true;
7882 if (check(P: RegNum < AArch64::Z8 || RegNum > AArch64::Z23, Loc: L,
7883 Msg: "expected register in range z8 to z23"))
7884 return true;
7885 if (parseComma() || parseImmExpr(Out&: Offset))
7886 return true;
7887 getTargetStreamer().emitARM64WinCFISaveZReg(Reg: RegNum - AArch64::Z0, Offset);
7888 return false;
7889}
7890
7891/// parseDirectiveSEHSavePReg
7892/// ::= .seh_save_preg
7893bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
7894 MCRegister RegNum;
7895 StringRef Kind;
7896 int64_t Offset;
7897 ParseStatus Res =
7898 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);
7899 if (!Res.isSuccess())
7900 return true;
7901 if (check(P: RegNum < AArch64::P4 || RegNum > AArch64::P15, Loc: L,
7902 Msg: "expected register in range p4 to p15"))
7903 return true;
7904 if (parseComma() || parseImmExpr(Out&: Offset))
7905 return true;
7906 getTargetStreamer().emitARM64WinCFISavePReg(Reg: RegNum - AArch64::P0, Offset);
7907 return false;
7908}
7909
7910bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
7911 // Expect three identifiers after '.aeabi_subsection': a subsection name and
7912 // two parameters, separated by commas, e.g.:
7913 //   .aeabi_subsection aeabi_feature_and_bits, optional, uleb128
7914 MCAsmParser &Parser = getParser();
7915
7916 // Consume the name (subsection name)
7917 StringRef SubsectionName;
7918 AArch64BuildAttributes::VendorID SubsectionNameID;
7919 if (Parser.getTok().is(K: AsmToken::Identifier)) {
7920 SubsectionName = Parser.getTok().getIdentifier();
7921 SubsectionNameID = AArch64BuildAttributes::getVendorID(Vendor: SubsectionName);
7922 } else {
7923 Error(L: Parser.getTok().getLoc(), Msg: "subsection name not found");
7924 return true;
7925 }
7926 Parser.Lex();
7927 // consume a comma
7928 // parseComma() returns *false* on success and calls Lex(), so there is no
7929 // need to call Lex() again.
7930 if (Parser.parseComma()) {
7931 return true;
7932 }
7933
7934 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
7935 getTargetStreamer().getAttributesSubsectionByName(Name: SubsectionName);
7936
7937 // Consume the first parameter (optionality parameter)
7938 AArch64BuildAttributes::SubsectionOptional IsOptional;
7939 // options: optional/required
7940 if (Parser.getTok().is(K: AsmToken::Identifier)) {
7941 StringRef Optionality = Parser.getTok().getIdentifier();
7942 IsOptional = AArch64BuildAttributes::getOptionalID(Optional: Optionality);
7943 if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
7944 Error(L: Parser.getTok().getLoc(),
7945 Msg: AArch64BuildAttributes::getSubsectionOptionalUnknownError());
7946 return true;
7947 }
7948 if (SubsectionExists) {
7949 if (IsOptional != SubsectionExists->IsOptional) {
7950 Error(L: Parser.getTok().getLoc(),
7951 Msg: "optionality mismatch! subsection '" + SubsectionName +
7952 "' already exists with optionality defined as '" +
7953 AArch64BuildAttributes::getOptionalStr(
7954 Optional: SubsectionExists->IsOptional) +
7955 "' and not '" +
7956 AArch64BuildAttributes::getOptionalStr(Optional: IsOptional) + "'");
7957 return true;
7958 }
7959 }
7960 } else {
7961 Error(L: Parser.getTok().getLoc(),
7962 Msg: "optionality parameter not found, expected required|optional");
7963 return true;
7964 }
7965 // Check for possible IsOptional unaccepted values for known subsections
7966 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
7967 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
7968 Error(L: Parser.getTok().getLoc(),
7969 Msg: "aeabi_feature_and_bits must be marked as optional");
7970 return true;
7971 }
7972 }
7973 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
7974 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
7975 Error(L: Parser.getTok().getLoc(),
7976 Msg: "aeabi_pauthabi must be marked as required");
7977 return true;
7978 }
7979 }
7980 Parser.Lex();
7981 // consume a comma
7982 if (Parser.parseComma()) {
7983 return true;
7984 }
7985
7986 // Consume the second parameter (type parameter)
7987 AArch64BuildAttributes::SubsectionType Type;
7988 if (Parser.getTok().is(K: AsmToken::Identifier)) {
7989 StringRef Name = Parser.getTok().getIdentifier();
7990 Type = AArch64BuildAttributes::getTypeID(Type: Name);
7991 if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
7992 Error(L: Parser.getTok().getLoc(),
7993 Msg: AArch64BuildAttributes::getSubsectionTypeUnknownError());
7994 return true;
7995 }
7996 if (SubsectionExists) {
7997 if (Type != SubsectionExists->ParameterType) {
7998 Error(L: Parser.getTok().getLoc(),
7999 Msg: "type mismatch! subsection '" + SubsectionName +
8000 "' already exists with type defined as '" +
8001 AArch64BuildAttributes::getTypeStr(
8002 Type: SubsectionExists->ParameterType) +
8003 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8004 "'");
8005 return true;
8006 }
8007 }
8008 } else {
8009 Error(L: Parser.getTok().getLoc(),
8010 Msg: "type parameter not found, expected uleb128|ntbs");
8011 return true;
8012 }
8013 // Check for possible unaccepted 'type' values for known subsections
8014 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8015 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8016 if (AArch64BuildAttributes::NTBS == Type) {
8017 Error(L: Parser.getTok().getLoc(),
8018 Msg: SubsectionName + " must be marked as ULEB128");
8019 return true;
8020 }
8021 }
8022 Parser.Lex();
8023
8024 // Parsing finished, check for trailing tokens.
8025 if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
8026 Error(L: Parser.getTok().getLoc(), Msg: "unexpected token for AArch64 build "
8027 "attributes subsection header directive");
8028 return true;
8029 }
8030
8031 getTargetStreamer().emitAttributesSubsection(VendorName: SubsectionName, IsOptional, ParameterType: Type);
8032
8033 return false;
8034}
8035
8036bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8037 // Expect two tokens after '.aeabi_attribute': a tag and a value (uleb128 or
8038 // ntbs, depending on the active subsection), separated by a comma, e.g.:
8039 //   .aeabi_attribute Tag_Feature_BTI, 1
8040 MCAsmParser &Parser = getParser();
8041
8042 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8043 getTargetStreamer().getActiveAttributesSubsection();
8044 if (nullptr == ActiveSubsection) {
8045 Error(L: Parser.getTok().getLoc(),
8046 Msg: "no active subsection, build attribute can not be added");
8047 return true;
8048 }
8049 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8050 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8051
8052 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8053 if (AArch64BuildAttributes::getVendorName(
8054 Vendor: AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8055 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8056 if (AArch64BuildAttributes::getVendorName(
8057 Vendor: AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8058 ActiveSubsectionName)
8059 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8060
8061 StringRef TagStr = "";
8062 unsigned Tag;
8063 if (Parser.getTok().is(K: AsmToken::Integer)) {
8064 Tag = getTok().getIntVal();
8065 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8066 TagStr = Parser.getTok().getIdentifier();
8067 switch (ActiveSubsectionID) {
8068 case AArch64BuildAttributes::VENDOR_UNKNOWN:
8069 // Tag was provided as an unrecognized string instead of an unsigned
8070 // integer
8071 Error(L: Parser.getTok().getLoc(), Msg: "unrecognized Tag: '" + TagStr +
8072 "' \nExcept for public subsections, "
8073 "tags have to be an unsigned int.");
8074 return true;
8076 case AArch64BuildAttributes::AEABI_PAUTHABI:
8077 Tag = AArch64BuildAttributes::getPauthABITagsID(PauthABITag: TagStr);
8078 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8079 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8080 TagStr + "' for subsection '" +
8081 ActiveSubsectionName + "'");
8082 return true;
8083 }
8084 break;
8085 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8086 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(FeatureAndBitsTag: TagStr);
8087 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8088 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8089 TagStr + "' for subsection '" +
8090 ActiveSubsectionName + "'");
8091 return true;
8092 }
8093 break;
8094 }
8095 } else {
8096 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes tag not found");
8097 return true;
8098 }
8099 Parser.Lex();
8100 // consume a comma
8101 // parseComma() returns *false* on success and calls Lex(), so there is no
8102 // need to call Lex() again.
8103 if (Parser.parseComma()) {
8104 return true;
8105 }
8106
8107 // Consume the second parameter (attribute value)
8108 unsigned ValueInt = unsigned(-1);
8109 std::string ValueStr = "";
8110 if (Parser.getTok().is(K: AsmToken::Integer)) {
8111 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8112 Error(
8113 L: Parser.getTok().getLoc(),
8114 Msg: "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8115 return true;
8116 }
8117 ValueInt = getTok().getIntVal();
8118 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8119 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8120 Error(
8121 L: Parser.getTok().getLoc(),
8122 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8123 return true;
8124 }
8125 ValueStr = Parser.getTok().getIdentifier();
8126 } else if (Parser.getTok().is(K: AsmToken::String)) {
8127 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8128 Error(
8129 L: Parser.getTok().getLoc(),
8130 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8131 return true;
8132 }
8133 ValueStr = Parser.getTok().getString();
8134 } else {
8135 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes value not found");
8136 return true;
8137 }
8138 // Check for possible unaccepted values for known tags
8139 // (AEABI_FEATURE_AND_BITS)
8140 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8141 if (0 != ValueInt && 1 != ValueInt) {
8142 Error(L: Parser.getTok().getLoc(),
8143 Msg: "unknown AArch64 build attributes Value for Tag '" + TagStr +
8144 "' options are 0|1");
8145 return true;
8146 }
8147 }
8148 Parser.Lex();
8149
8150 // Parsing finished. Check for trailing tokens.
8151 if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
8152 Error(L: Parser.getTok().getLoc(),
8153 Msg: "unexpected token for AArch64 build attributes tag and value "
8154 "attribute directive");
8155 return true;
8156 }
8157
8158 if (unsigned(-1) != ValueInt) {
8159 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: ValueInt, String: "");
8160 }
8161 if ("" != ValueStr) {
8162 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: unsigned(-1),
8163 String: ValueStr);
8164 }
8165 return false;
8166}
8167
8168bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8169 SMLoc EndLoc;
8170
8171 if (getParser().parseExpression(Res))
8172 return true;
8173 MCAsmParser &Parser = getParser();
8174 if (!parseOptionalToken(T: AsmToken::At))
8175 return false;
8176 if (getLexer().getKind() != AsmToken::Identifier)
8177 return Error(L: getLoc(), Msg: "expected relocation specifier");
8178
8179 std::string Identifier = Parser.getTok().getIdentifier().lower();
8180 SMLoc Loc = getLoc();
8181 Lex();
8182 if (Identifier == "auth")
8183 return parseAuthExpr(Res, EndLoc);
8184
8185 auto Spec = AArch64::S_None;
8186 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8187 if (Identifier == "got")
8188 Spec = AArch64::S_MACHO_GOT;
8189 } else {
8190 // Unofficial, experimental syntax that will be changed.
8191 if (Identifier == "gotpcrel")
8192 Spec = AArch64::S_GOTPCREL;
8193 else if (Identifier == "plt")
8194 Spec = AArch64::S_PLT;
8195 }
8196 if (Spec == AArch64::S_None)
8197 return Error(L: Loc, Msg: "invalid relocation specifier");
8198 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val: Res))
8199 Res = MCSymbolRefExpr::create(Symbol: &SRE->getSymbol(), specifier: Spec, Ctx&: getContext(),
8200 Loc: SRE->getLoc());
8201 else
8202 return Error(L: Loc, Msg: "@ specifier only allowed after a symbol");
8203
8204 for (;;) {
8205 std::optional<MCBinaryExpr::Opcode> Opcode;
8206 if (parseOptionalToken(T: AsmToken::Plus))
8207 Opcode = MCBinaryExpr::Add;
8208 else if (parseOptionalToken(T: AsmToken::Minus))
8209 Opcode = MCBinaryExpr::Sub;
8210 else
8211 break;
8212 const MCExpr *Term;
8213 if (getParser().parsePrimaryExpr(Res&: Term, EndLoc, TypeInfo: nullptr))
8214 return true;
8215 Res = MCBinaryExpr::create(Op: *Opcode, LHS: Res, RHS: Term, Ctx&: getContext(), Loc: Res->getLoc());
8216 }
8217 return false;
8218}
8219
8220/// parseAuthExpr
8221/// ::= _sym@AUTH(ib,123[,addr])
8222/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8223/// ::= (_sym - 5)@AUTH(ib,123[,addr])
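///
/// A hedged example of the expression in a data directive (key, discriminator
/// and symbol are illustrative):
///   .quad _sym@AUTH(da, 42, addr)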
8224bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8225 MCAsmParser &Parser = getParser();
8226 MCContext &Ctx = getContext();
8227 AsmToken Tok = Parser.getTok();
8228
8229 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8230 if (parseToken(T: AsmToken::LParen, Msg: "expected '('"))
8231 return true;
8232
8233 if (Parser.getTok().isNot(K: AsmToken::Identifier))
8234 return TokError(Msg: "expected key name");
8235
8236 StringRef KeyStr = Parser.getTok().getIdentifier();
8237 auto KeyIDOrNone = AArch64StringToPACKeyID(Name: KeyStr);
8238 if (!KeyIDOrNone)
8239 return TokError(Msg: "invalid key '" + KeyStr + "'");
8240 Parser.Lex();
8241
8242 if (parseToken(T: AsmToken::Comma, Msg: "expected ','"))
8243 return true;
8244
8245 if (Parser.getTok().isNot(K: AsmToken::Integer))
8246 return TokError(Msg: "expected integer discriminator");
8247 int64_t Discriminator = Parser.getTok().getIntVal();
8248
8249 if (!isUInt<16>(x: Discriminator))
8250 return TokError(Msg: "integer discriminator " + Twine(Discriminator) +
8251 " out of range [0, 0xFFFF]");
8252 Parser.Lex();
8253
8254 bool UseAddressDiversity = false;
8255 if (Parser.getTok().is(K: AsmToken::Comma)) {
8256 Parser.Lex();
8257 if (Parser.getTok().isNot(K: AsmToken::Identifier) ||
8258 Parser.getTok().getIdentifier() != "addr")
8259 return TokError(Msg: "expected 'addr'");
8260 UseAddressDiversity = true;
8261 Parser.Lex();
8262 }
8263
8264 EndLoc = Parser.getTok().getEndLoc();
8265 if (parseToken(T: AsmToken::RParen, Msg: "expected ')'"))
8266 return true;
8267
8268 Res = AArch64AuthMCExpr::create(Expr: Res, Discriminator, Key: *KeyIDOrNone,
8269 HasAddressDiversity: UseAddressDiversity, Ctx);
8270 return false;
8271}
8272
8273bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8274 AArch64::Specifier &ELFSpec,
8275 AArch64::Specifier &DarwinSpec,
8276 int64_t &Addend) {
8277 ELFSpec = AArch64::S_INVALID;
8278 DarwinSpec = AArch64::S_None;
8279 Addend = 0;
8280
8281 if (auto *AE = dyn_cast<MCSpecifierExpr>(Val: Expr)) {
8282 ELFSpec = AE->getSpecifier();
8283 Expr = AE->getSubExpr();
8284 }
8285
8286 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Val: Expr);
8287 if (SE) {
8288 // It's a simple symbol reference with no addend.
8289 DarwinSpec = AArch64::Specifier(SE->getKind());
8290 return true;
8291 }
8292
8293 // Check that it looks like a symbol + an addend
8294 MCValue Res;
8295 bool Relocatable = Expr->evaluateAsRelocatable(Res, Asm: nullptr);
8296 if (!Relocatable || Res.getSubSym())
8297 return false;
8298
8299 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8300 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8301 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8302 return false;
8303
8304 if (Res.getAddSym())
8305 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8306 Addend = Res.getConstant();
8307
8308 // It's some symbol reference + a constant addend, but really
8309 // shouldn't use both Darwin and ELF syntax.
8310 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8311}
8312
8313/// Force static initialization.
8314extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8315LLVMInitializeAArch64AsmParser() {
8316 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8317 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8318 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8319 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8320 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8321}
8322
8323#define GET_REGISTER_MATCHER
8324#define GET_SUBTARGET_FEATURE_NAME
8325#define GET_MATCHER_IMPLEMENTATION
8326#define GET_MNEMONIC_SPELL_CHECKER
8327#include "AArch64GenAsmMatcher.inc"
8328
8329// Define this matcher function after the auto-generated include so we
8330// have the match class enum definitions.
8331unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8332 unsigned Kind) {
8333 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8334
8335 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8336 if (!Op.isImm())
8337 return Match_InvalidOperand;
8338 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
8339 if (!CE)
8340 return Match_InvalidOperand;
8341 if (CE->getValue() == ExpectedVal)
8342 return Match_Success;
8343 return Match_InvalidOperand;
8344 };
8345
8346 switch (Kind) {
8347 default:
8348 return Match_InvalidOperand;
8349 case MCK_MPR:
8350 // If the Kind is a token for the MPR register class which has the "za"
8351 // register (SME accumulator array), check if the asm is a literal "za"
8352 // token. This is for the "smstart za" alias that defines the register
8353 // as a literal token.
8354 if (Op.isTokenEqual(Str: "za"))
8355 return Match_Success;
8356 return Match_InvalidOperand;
8357
8358 // If the kind is a token for a literal immediate, check if our asm operand
8359 // matches. This is for InstAliases which have a fixed-value immediate in
8360 // the asm string, such as hints which are parsed into a specific
8361 // instruction definition.
8362#define MATCH_HASH(N) \
8363 case MCK__HASH_##N: \
8364 return MatchesOpImmediate(N);
8365 MATCH_HASH(0)
8366 MATCH_HASH(1)
8367 MATCH_HASH(2)
8368 MATCH_HASH(3)
8369 MATCH_HASH(4)
8370 MATCH_HASH(6)
8371 MATCH_HASH(7)
8372 MATCH_HASH(8)
8373 MATCH_HASH(10)
8374 MATCH_HASH(12)
8375 MATCH_HASH(14)
8376 MATCH_HASH(16)
8377 MATCH_HASH(24)
8378 MATCH_HASH(25)
8379 MATCH_HASH(26)
8380 MATCH_HASH(27)
8381 MATCH_HASH(28)
8382 MATCH_HASH(29)
8383 MATCH_HASH(30)
8384 MATCH_HASH(31)
8385 MATCH_HASH(32)
8386 MATCH_HASH(40)
8387 MATCH_HASH(48)
8388 MATCH_HASH(64)
8389#undef MATCH_HASH
8390#define MATCH_HASH_MINUS(N) \
8391 case MCK__HASH__MINUS_##N: \
8392 return MatchesOpImmediate(-N);
8393 MATCH_HASH_MINUS(4)
8394 MATCH_HASH_MINUS(8)
8395 MATCH_HASH_MINUS(16)
8396#undef MATCH_HASH_MINUS
8397 }
8398}
8399
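/// tryParseGPRSeqPair
/// Parse a consecutive even/odd pair of scalar registers (both W or both X),
/// as used for example by CASP-style operands. A hedged example (register
/// choice is illustrative):
///   casp x0, x1, x2, x3, [x4]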
8400ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8401
8402 SMLoc S = getLoc();
8403
8404 if (getTok().isNot(K: AsmToken::Identifier))
8405 return Error(L: S, Msg: "expected register");
8406
8407 MCRegister FirstReg;
8408 ParseStatus Res = tryParseScalarRegister(RegNum&: FirstReg);
8409 if (!Res.isSuccess())
8410 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8411 "even/odd register pair");
8412
8413 const MCRegisterClass &WRegClass =
8414 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8415 const MCRegisterClass &XRegClass =
8416 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8417
8418 bool isXReg = XRegClass.contains(Reg: FirstReg),
8419 isWReg = WRegClass.contains(Reg: FirstReg);
8420 if (!isXReg && !isWReg)
8421 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8422 "even/odd register pair");
8423
8424 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8425 unsigned FirstEncoding = RI->getEncodingValue(Reg: FirstReg);
8426
8427 if (FirstEncoding & 0x1)
8428 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8429 "even/odd register pair");
8430
8431 if (getTok().isNot(K: AsmToken::Comma))
8432 return Error(L: getLoc(), Msg: "expected comma");
8433 // Eat the comma
8434 Lex();
8435
8436 SMLoc E = getLoc();
8437 MCRegister SecondReg;
8438 Res = tryParseScalarRegister(RegNum&: SecondReg);
8439 if (!Res.isSuccess())
8440 return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
8441 "even/odd register pair");
8442
8443 if (RI->getEncodingValue(Reg: SecondReg) != FirstEncoding + 1 ||
8444 (isXReg && !XRegClass.contains(Reg: SecondReg)) ||
8445 (isWReg && !WRegClass.contains(Reg: SecondReg)))
8446 return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
8447 "even/odd register pair");
8448
8449 MCRegister Pair;
8450 if (isXReg) {
8451 Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube64,
8452 RC: &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8453 } else {
8454 Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube32,
8455 RC: &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8456 }
8457
8458 Operands.push_back(Elt: AArch64Operand::CreateReg(RegNum: Pair, Kind: RegKind::Scalar, S,
8459 E: getLoc(), Ctx&: getContext()));
8460
8461 return ParseStatus::Success;
8462}
8463
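/// tryParseSVEDataVector
/// Parse an SVE data vector register, optionally with an element-size suffix
/// and a trailing shift/extend. A hedged example of the operand as it appears
/// in a gather address (registers are illustrative):
///   [x0, z1.d, lsl #3]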
8464template <bool ParseShiftExtend, bool ParseSuffix>
8465ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8466 const SMLoc S = getLoc();
8467 // Check for a SVE vector register specifier first.
8468 MCRegister RegNum;
8469 StringRef Kind;
8470
8471 ParseStatus Res =
8472 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
8473
8474 if (!Res.isSuccess())
8475 return Res;
8476
8477 if (ParseSuffix && Kind.empty())
8478 return ParseStatus::NoMatch;
8479
8480 const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::SVEDataVector);
8481 if (!KindRes)
8482 return ParseStatus::NoMatch;
8483
8484 unsigned ElementWidth = KindRes->second;
8485
8486 // No shift/extend is the default.
8487 if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
8488 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
8489 RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: S, Ctx&: getContext()));
8490
8491 ParseStatus Res = tryParseVectorIndex(Operands);
8492 if (Res.isFailure())
8493 return ParseStatus::Failure;
8494 return ParseStatus::Success;
8495 }
8496
8497 // Eat the comma
8498 Lex();
8499
8500 // Match the shift
8501 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8502 Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
8503 if (!Res.isSuccess())
8504 return Res;
8505
8506 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8507 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
8508 RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: Ext->getEndLoc(),
8509 Ctx&: getContext(), ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
8510 HasExplicitAmount: Ext->hasShiftExtendAmount()));
8511
8512 return ParseStatus::Success;
8513}
8514
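/// tryParseSVEPattern
/// Parse an SVE predicate pattern, either by name or as an immediate. Hedged
/// examples (pattern names such as vl64 and all are the usual SVE ones):
///   ptrue p0.s, vl64
///   cnth  x0, all, mul #4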
8515ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8516 MCAsmParser &Parser = getParser();
8517
8518 SMLoc SS = getLoc();
8519 const AsmToken &TokE = getTok();
8520 bool IsHash = TokE.is(K: AsmToken::Hash);
8521
8522 if (!IsHash && TokE.isNot(K: AsmToken::Identifier))
8523 return ParseStatus::NoMatch;
8524
8525 int64_t Pattern;
8526 if (IsHash) {
8527 Lex(); // Eat hash
8528
8529 // Parse the immediate operand.
8530 const MCExpr *ImmVal;
8531 SS = getLoc();
8532 if (Parser.parseExpression(Res&: ImmVal))
8533 return ParseStatus::Failure;
8534
8535 auto *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
8536 if (!MCE)
8537 return TokError(Msg: "invalid operand for instruction");
8538
8539 Pattern = MCE->getValue();
8540 } else {
8541 // Parse the pattern
8542 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(Name: TokE.getString());
8543 if (!Pat)
8544 return ParseStatus::NoMatch;
8545
8546 Lex();
8547 Pattern = Pat->Encoding;
8548 assert(Pattern >= 0 && Pattern < 32);
8549 }
8550
8551 Operands.push_back(
8552 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8553 S: SS, E: getLoc(), Ctx&: getContext()));
8554
8555 return ParseStatus::Success;
8556}
8557
8558ParseStatus
8559AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8560 int64_t Pattern;
8561 SMLoc SS = getLoc();
8562 const AsmToken &TokE = getTok();
8563 // Parse the pattern
8564 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8565 Name: TokE.getString());
8566 if (!Pat)
8567 return ParseStatus::NoMatch;
8568
8569 Lex();
8570 Pattern = Pat->Encoding;
8571 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8572
8573 Operands.push_back(
8574 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8575 S: SS, E: getLoc(), Ctx&: getContext()));
8576
8577 return ParseStatus::Success;
8578}
8579
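/// tryParseGPR64x8
/// Parse the first register of a GPR64x8 tuple: an even-numbered x-register no
/// higher than x22, naming eight consecutive registers. A hedged example,
/// assuming the 64-byte load/store instructions that use this operand:
///   ld64b x2, [x1]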
8580ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8581 SMLoc SS = getLoc();
8582
8583 MCRegister XReg;
8584 if (!tryParseScalarRegister(RegNum&: XReg).isSuccess())
8585 return ParseStatus::NoMatch;
8586
8587 MCContext &ctx = getContext();
8588 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8589 MCRegister X8Reg = RI->getMatchingSuperReg(
8590 Reg: XReg, SubIdx: AArch64::x8sub_0,
8591 RC: &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8592 if (!X8Reg)
8593 return Error(L: SS,
8594 Msg: "expected an even-numbered x-register in the range [x0,x22]");
8595
8596 Operands.push_back(
8597 Elt: AArch64Operand::CreateReg(RegNum: X8Reg, Kind: RegKind::Scalar, S: SS, E: getLoc(), Ctx&: ctx));
8598 return ParseStatus::Success;
8599}
8600
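/// tryParseImmRange
/// Parse an inclusive immediate range written as two integers separated by a
/// colon, e.g. the purely illustrative operand form:
///   0:1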
8601ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8602 SMLoc S = getLoc();
8603
8604 if (getTok().isNot(K: AsmToken::Integer))
8605 return ParseStatus::NoMatch;
8606
8607 if (getLexer().peekTok().isNot(K: AsmToken::Colon))
8608 return ParseStatus::NoMatch;
8609
8610 const MCExpr *ImmF;
8611 if (getParser().parseExpression(Res&: ImmF))
8612 return ParseStatus::NoMatch;
8613
8614 if (getTok().isNot(K: AsmToken::Colon))
8615 return ParseStatus::NoMatch;
8616
8617 Lex(); // Eat ':'
8618 if (getTok().isNot(K: AsmToken::Integer))
8619 return ParseStatus::NoMatch;
8620
8621 SMLoc E = getTok().getLoc();
8622 const MCExpr *ImmL;
8623 if (getParser().parseExpression(Res&: ImmL))
8624 return ParseStatus::NoMatch;
8625
8626 unsigned ImmFVal = cast<MCConstantExpr>(Val: ImmF)->getValue();
8627 unsigned ImmLVal = cast<MCConstantExpr>(Val: ImmL)->getValue();
8628
8629 Operands.push_back(
8630 Elt: AArch64Operand::CreateImmRange(First: ImmFVal, Last: ImmLVal, S, E, Ctx&: getContext()));
8631 return ParseStatus::Success;
8632}
8633
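/// tryParseAdjImm0_63
/// Parse an immediate that is adjusted by Adj (+1 or -1) before being
/// range-checked against [0, 63]; for illustration, with Adj == -1 a written
/// '#1' is encoded as 0.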
8634template <int Adj>
8635ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8636 SMLoc S = getLoc();
8637
8638 parseOptionalToken(T: AsmToken::Hash);
8639 bool IsNegative = parseOptionalToken(T: AsmToken::Minus);
8640
8641 if (getTok().isNot(K: AsmToken::Integer))
8642 return ParseStatus::NoMatch;
8643
8644 const MCExpr *Ex;
8645 if (getParser().parseExpression(Res&: Ex))
8646 return ParseStatus::NoMatch;
8647
8648 int64_t Imm = dyn_cast<MCConstantExpr>(Val: Ex)->getValue();
8649 if (IsNegative)
8650 Imm = -Imm;
8651
8652 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8653 // return a value that is certain to trigger an error message about an invalid
8654 // immediate range instead of a non-descriptive invalid operand error.
8655 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8656 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8657 Imm = -2;
8658 else
8659 Imm += Adj;
8660
8661 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
8662 Operands.push_back(Elt: AArch64Operand::CreateImm(
8663 Val: MCConstantExpr::create(Value: Imm, Ctx&: getContext()), S, E, Ctx&: getContext()));
8664
8665 return ParseStatus::Success;
8666}
8667