1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCAsmInfo.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
32#include "llvm/MC/MCLinkerOptimizationHint.h"
33#include "llvm/MC/MCObjectFileInfo.h"
34#include "llvm/MC/MCParser/AsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
36#include "llvm/MC/MCParser/MCAsmParserExtension.h"
37#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38#include "llvm/MC/MCParser/MCTargetAsmParser.h"
39#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
42#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCTargetOptions.h"
44#include "llvm/MC/MCValue.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/AArch64BuildAttributes.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/raw_ostream.h"
52#include "llvm/TargetParser/AArch64TargetParser.h"
53#include "llvm/TargetParser/SubtargetFeature.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
68enum class RegKind {
69 Scalar,
70 NeonVector,
71 SVEDataVector,
72 SVEPredicateAsCounter,
73 SVEPredicateVector,
74 Matrix,
75 LookupTable
76};
77
78enum class MatrixKind { Array, Tile, Row, Col };
79
80enum RegConstraintEqualityTy {
81 EqualsReg,
82 EqualsSuperReg,
83 EqualsSubReg
84};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(i: 0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(i: 0).getReg();
112 Prefix.Pg = Inst.getOperand(i: 2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(i: 0).getReg();
124 Prefix.Pg = Inst.getOperand(i: 1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E);
186 bool parseDataExpr(const MCExpr *&Res) override;
187 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
188
189 bool parseDirectiveArch(SMLoc L);
190 bool parseDirectiveArchExtension(SMLoc L);
191 bool parseDirectiveCPU(SMLoc L);
192 bool parseDirectiveInst(SMLoc L);
193
194 bool parseDirectiveTLSDescCall(SMLoc L);
195
196 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
197 bool parseDirectiveLtorg(SMLoc L);
198
199 bool parseDirectiveReq(StringRef Name, SMLoc L);
200 bool parseDirectiveUnreq(SMLoc L);
201 bool parseDirectiveCFINegateRAState();
202 bool parseDirectiveCFINegateRAStateWithPC();
203 bool parseDirectiveCFIBKeyFrame();
204 bool parseDirectiveCFIMTETaggedFrame();
205
206 bool parseDirectiveVariantPCS(SMLoc L);
207
208 bool parseDirectiveSEHAllocStack(SMLoc L);
209 bool parseDirectiveSEHPrologEnd(SMLoc L);
210 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
211 bool parseDirectiveSEHSaveFPLR(SMLoc L);
212 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
213 bool parseDirectiveSEHSaveReg(SMLoc L);
214 bool parseDirectiveSEHSaveRegX(SMLoc L);
215 bool parseDirectiveSEHSaveRegP(SMLoc L);
216 bool parseDirectiveSEHSaveRegPX(SMLoc L);
217 bool parseDirectiveSEHSaveLRPair(SMLoc L);
218 bool parseDirectiveSEHSaveFReg(SMLoc L);
219 bool parseDirectiveSEHSaveFRegX(SMLoc L);
220 bool parseDirectiveSEHSaveFRegP(SMLoc L);
221 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
222 bool parseDirectiveSEHSetFP(SMLoc L);
223 bool parseDirectiveSEHAddFP(SMLoc L);
224 bool parseDirectiveSEHNop(SMLoc L);
225 bool parseDirectiveSEHSaveNext(SMLoc L);
226 bool parseDirectiveSEHEpilogStart(SMLoc L);
227 bool parseDirectiveSEHEpilogEnd(SMLoc L);
228 bool parseDirectiveSEHTrapFrame(SMLoc L);
229 bool parseDirectiveSEHMachineFrame(SMLoc L);
230 bool parseDirectiveSEHContext(SMLoc L);
231 bool parseDirectiveSEHECContext(SMLoc L);
232 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
233 bool parseDirectiveSEHPACSignLR(SMLoc L);
234 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
235 bool parseDirectiveSEHAllocZ(SMLoc L);
236 bool parseDirectiveSEHSaveZReg(SMLoc L);
237 bool parseDirectiveSEHSavePReg(SMLoc L);
238 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
239 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
240
241 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
242 SmallVectorImpl<SMLoc> &Loc);
243 unsigned getNumRegsForRegKind(RegKind K);
244 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
245 OperandVector &Operands, MCStreamer &Out,
246 uint64_t &ErrorInfo,
247 bool MatchingInlineAsm) override;
248 /// @name Auto-generated Match Functions
249 /// {
250
251#define GET_ASSEMBLER_HEADER
252#include "AArch64GenAsmMatcher.inc"
253
254 /// }
255
256 ParseStatus tryParseScalarRegister(MCRegister &Reg);
257 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
258 RegKind MatchKind);
259 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
260 ParseStatus tryParseSVCR(OperandVector &Operands);
261 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
262 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
263 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
264 ParseStatus tryParseSysReg(OperandVector &Operands);
265 ParseStatus tryParseSysCROperand(OperandVector &Operands);
266 template <bool IsSVEPrefetch = false>
267 ParseStatus tryParsePrefetch(OperandVector &Operands);
268 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
269 ParseStatus tryParsePSBHint(OperandVector &Operands);
270 ParseStatus tryParseBTIHint(OperandVector &Operands);
271 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
272 ParseStatus tryParseTIndexHint(OperandVector &Operands);
273 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
274 ParseStatus tryParseAdrLabel(OperandVector &Operands);
275 template <bool AddFPZeroAsLiteral>
276 ParseStatus tryParseFPImm(OperandVector &Operands);
277 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
278 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
279 bool tryParseNeonVectorRegister(OperandVector &Operands);
280 ParseStatus tryParseVectorIndex(OperandVector &Operands);
281 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
282 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
283 template <bool ParseShiftExtend,
284 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
285 ParseStatus tryParseGPROperand(OperandVector &Operands);
286 ParseStatus tryParseZTOperand(OperandVector &Operands);
287 template <bool ParseShiftExtend, bool ParseSuffix>
288 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
289 template <RegKind RK>
290 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
291 ParseStatus
292 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
293 template <RegKind VectorKind>
294 ParseStatus tryParseVectorList(OperandVector &Operands,
295 bool ExpectMatch = false);
296 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
297 ParseStatus tryParseSVEPattern(OperandVector &Operands);
298 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
299 ParseStatus tryParseGPR64x8(OperandVector &Operands);
300 ParseStatus tryParseImmRange(OperandVector &Operands);
301 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
302 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
303
304public:
305 enum AArch64MatchResultTy {
306 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
307#define GET_OPERAND_DIAGNOSTIC_TYPES
308#include "AArch64GenAsmMatcher.inc"
309 };
310 bool IsILP32;
311 bool IsWindowsArm64EC;
312
313 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
314 const MCInstrInfo &MII, const MCTargetOptions &Options)
315 : MCTargetAsmParser(Options, STI, MII) {
316 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
317 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
318 MCAsmParserExtension::Initialize(Parser);
319 MCStreamer &S = getParser().getStreamer();
320 if (S.getTargetStreamer() == nullptr)
321 new AArch64TargetStreamer(S);
322
323 // Alias .hword/.word/.[dx]word to the target-independent
324 // .2byte/.4byte/.8byte directives as they have the same form and
325 // semantics:
326 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
327 Parser.addAliasForDirective(Directive: ".hword", Alias: ".2byte");
328 Parser.addAliasForDirective(Directive: ".word", Alias: ".4byte");
329 Parser.addAliasForDirective(Directive: ".dword", Alias: ".8byte");
330 Parser.addAliasForDirective(Directive: ".xword", Alias: ".8byte");
331
332 // Initialize the set of available features.
333 setAvailableFeatures(ComputeAvailableFeatures(FB: getSTI().getFeatureBits()));
334 }
335
336 bool areEqualRegs(const MCParsedAsmOperand &Op1,
337 const MCParsedAsmOperand &Op2) const override;
338 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
339 SMLoc NameLoc, OperandVector &Operands) override;
340 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
341 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
342 SMLoc &EndLoc) override;
343 bool ParseDirective(AsmToken DirectiveID) override;
344 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
345 unsigned Kind) override;
346
347 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
348 AArch64::Specifier &DarwinSpec,
349 int64_t &Addend);
350};
351
352/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
353/// instruction.
354class AArch64Operand : public MCParsedAsmOperand {
355private:
356 enum KindTy {
357 k_Immediate,
358 k_ShiftedImm,
359 k_ImmRange,
360 k_CondCode,
361 k_Register,
362 k_MatrixRegister,
363 k_MatrixTileList,
364 k_SVCR,
365 k_VectorList,
366 k_VectorIndex,
367 k_Token,
368 k_SysReg,
369 k_SysCR,
370 k_Prefetch,
371 k_ShiftExtend,
372 k_FPImm,
373 k_Barrier,
374 k_PSBHint,
375 k_PHint,
376 k_BTIHint,
377 k_CMHPriorityHint,
378 k_TIndexHint,
379 } Kind;
380
381 SMLoc StartLoc, EndLoc;
382
383 struct TokOp {
384 const char *Data;
385 unsigned Length;
386 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
387 };
388
389 // Separate shift/extend operand.
390 struct ShiftExtendOp {
391 AArch64_AM::ShiftExtendType Type;
392 unsigned Amount;
393 bool HasExplicitAmount;
394 };
395
396 struct RegOp {
397 MCRegister Reg;
398 RegKind Kind;
399 int ElementWidth;
400
401 // The register may be allowed as a different register class,
402 // e.g. for GPR64as32 or GPR32as64.
403 RegConstraintEqualityTy EqualityTy;
404
405 // In some cases the shift/extend needs to be explicitly parsed together
406 // with the register, rather than as a separate operand. This is needed
407 // for addressing modes where the instruction as a whole dictates the
408 // scaling/extend, rather than specific bits in the instruction.
409 // By parsing them as a single operand, we avoid the need to pass an
410 // extra operand in all CodeGen patterns (because all operands need to
411 // have an associated value), and we avoid the need to update TableGen to
412 // accept operands that have no associated bits in the instruction.
413 //
414 // An added benefit of parsing them together is that the assembler
415 // can give a sensible diagnostic if the scaling is not correct.
416 //
417 // The default is 'lsl #0' (HasExplicitAmount = false) if no
418 // ShiftExtend is specified.
419 ShiftExtendOp ShiftExtend;
420 };
421
422 struct MatrixRegOp {
423 MCRegister Reg;
424 unsigned ElementWidth;
425 MatrixKind Kind;
426 };
427
428 struct MatrixTileListOp {
429 unsigned RegMask = 0;
430 };
431
432 struct VectorListOp {
433 MCRegister Reg;
434 unsigned Count;
435 unsigned Stride;
436 unsigned NumElements;
437 unsigned ElementWidth;
438 RegKind RegisterKind;
439 };
440
441 struct VectorIndexOp {
442 int Val;
443 };
444
445 struct ImmOp {
446 const MCExpr *Val;
447 };
448
449 struct ShiftedImmOp {
450 const MCExpr *Val;
451 unsigned ShiftAmount;
452 };
453
454 struct ImmRangeOp {
455 unsigned First;
456 unsigned Last;
457 };
458
459 struct CondCodeOp {
460 AArch64CC::CondCode Code;
461 };
462
463 struct FPImmOp {
464 uint64_t Val; // APFloat value bitcasted to uint64_t.
465 bool IsExact; // describes whether parsed value was exact.
466 };
467
468 struct BarrierOp {
469 const char *Data;
470 unsigned Length;
471 unsigned Val; // Not the enum since not all values have names.
472 bool HasnXSModifier;
473 };
474
475 struct SysRegOp {
476 const char *Data;
477 unsigned Length;
478 uint32_t MRSReg;
479 uint32_t MSRReg;
480 uint32_t PStateField;
481 };
482
483 struct SysCRImmOp {
484 unsigned Val;
485 };
486
487 struct PrefetchOp {
488 const char *Data;
489 unsigned Length;
490 unsigned Val;
491 };
492
493 struct PSBHintOp {
494 const char *Data;
495 unsigned Length;
496 unsigned Val;
497 };
498 struct PHintOp {
499 const char *Data;
500 unsigned Length;
501 unsigned Val;
502 };
503 struct BTIHintOp {
504 const char *Data;
505 unsigned Length;
506 unsigned Val;
507 };
508 struct CMHPriorityHintOp {
509 const char *Data;
510 unsigned Length;
511 unsigned Val;
512 };
513 struct TIndexHintOp {
514 const char *Data;
515 unsigned Length;
516 unsigned Val;
517 };
518
519 struct SVCROp {
520 const char *Data;
521 unsigned Length;
522 unsigned PStateField;
523 };
524
525 union {
526 struct TokOp Tok;
527 struct RegOp Reg;
528 struct MatrixRegOp MatrixReg;
529 struct MatrixTileListOp MatrixTileList;
530 struct VectorListOp VectorList;
531 struct VectorIndexOp VectorIndex;
532 struct ImmOp Imm;
533 struct ShiftedImmOp ShiftedImm;
534 struct ImmRangeOp ImmRange;
535 struct CondCodeOp CondCode;
536 struct FPImmOp FPImm;
537 struct BarrierOp Barrier;
538 struct SysRegOp SysReg;
539 struct SysCRImmOp SysCRImm;
540 struct PrefetchOp Prefetch;
541 struct PSBHintOp PSBHint;
542 struct PHintOp PHint;
543 struct BTIHintOp BTIHint;
544 struct CMHPriorityHintOp CMHPriorityHint;
545 struct TIndexHintOp TIndexHint;
546 struct ShiftExtendOp ShiftExtend;
547 struct SVCROp SVCR;
548 };
549
550 // Keep the MCContext around as the MCExprs may need manipulated during
551 // the add<>Operands() calls.
552 MCContext &Ctx;
553
554public:
555 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
556
557 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
558 Kind = o.Kind;
559 StartLoc = o.StartLoc;
560 EndLoc = o.EndLoc;
561 switch (Kind) {
562 case k_Token:
563 Tok = o.Tok;
564 break;
565 case k_Immediate:
566 Imm = o.Imm;
567 break;
568 case k_ShiftedImm:
569 ShiftedImm = o.ShiftedImm;
570 break;
571 case k_ImmRange:
572 ImmRange = o.ImmRange;
573 break;
574 case k_CondCode:
575 CondCode = o.CondCode;
576 break;
577 case k_FPImm:
578 FPImm = o.FPImm;
579 break;
580 case k_Barrier:
581 Barrier = o.Barrier;
582 break;
583 case k_Register:
584 Reg = o.Reg;
585 break;
586 case k_MatrixRegister:
587 MatrixReg = o.MatrixReg;
588 break;
589 case k_MatrixTileList:
590 MatrixTileList = o.MatrixTileList;
591 break;
592 case k_VectorList:
593 VectorList = o.VectorList;
594 break;
595 case k_VectorIndex:
596 VectorIndex = o.VectorIndex;
597 break;
598 case k_SysReg:
599 SysReg = o.SysReg;
600 break;
601 case k_SysCR:
602 SysCRImm = o.SysCRImm;
603 break;
604 case k_Prefetch:
605 Prefetch = o.Prefetch;
606 break;
607 case k_PSBHint:
608 PSBHint = o.PSBHint;
609 break;
610 case k_PHint:
611 PHint = o.PHint;
612 break;
613 case k_BTIHint:
614 BTIHint = o.BTIHint;
615 break;
616 case k_CMHPriorityHint:
617 CMHPriorityHint = o.CMHPriorityHint;
618 break;
619 case k_TIndexHint:
620 TIndexHint = o.TIndexHint;
621 break;
622 case k_ShiftExtend:
623 ShiftExtend = o.ShiftExtend;
624 break;
625 case k_SVCR:
626 SVCR = o.SVCR;
627 break;
628 }
629 }
630
631 /// getStartLoc - Get the location of the first token of this operand.
632 SMLoc getStartLoc() const override { return StartLoc; }
633 /// getEndLoc - Get the location of the last token of this operand.
634 SMLoc getEndLoc() const override { return EndLoc; }
635
636 StringRef getToken() const {
637 assert(Kind == k_Token && "Invalid access!");
638 return StringRef(Tok.Data, Tok.Length);
639 }
640
641 bool isTokenSuffix() const {
642 assert(Kind == k_Token && "Invalid access!");
643 return Tok.IsSuffix;
644 }
645
646 const MCExpr *getImm() const {
647 assert(Kind == k_Immediate && "Invalid access!");
648 return Imm.Val;
649 }
650
651 const MCExpr *getShiftedImmVal() const {
652 assert(Kind == k_ShiftedImm && "Invalid access!");
653 return ShiftedImm.Val;
654 }
655
656 unsigned getShiftedImmShift() const {
657 assert(Kind == k_ShiftedImm && "Invalid access!");
658 return ShiftedImm.ShiftAmount;
659 }
660
661 unsigned getFirstImmVal() const {
662 assert(Kind == k_ImmRange && "Invalid access!");
663 return ImmRange.First;
664 }
665
666 unsigned getLastImmVal() const {
667 assert(Kind == k_ImmRange && "Invalid access!");
668 return ImmRange.Last;
669 }
670
671 AArch64CC::CondCode getCondCode() const {
672 assert(Kind == k_CondCode && "Invalid access!");
673 return CondCode.Code;
674 }
675
676 APFloat getFPImm() const {
677 assert (Kind == k_FPImm && "Invalid access!");
678 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
679 }
680
681 bool getFPImmIsExact() const {
682 assert (Kind == k_FPImm && "Invalid access!");
683 return FPImm.IsExact;
684 }
685
686 unsigned getBarrier() const {
687 assert(Kind == k_Barrier && "Invalid access!");
688 return Barrier.Val;
689 }
690
691 StringRef getBarrierName() const {
692 assert(Kind == k_Barrier && "Invalid access!");
693 return StringRef(Barrier.Data, Barrier.Length);
694 }
695
696 bool getBarriernXSModifier() const {
697 assert(Kind == k_Barrier && "Invalid access!");
698 return Barrier.HasnXSModifier;
699 }
700
701 MCRegister getReg() const override {
702 assert(Kind == k_Register && "Invalid access!");
703 return Reg.Reg;
704 }
705
706 MCRegister getMatrixReg() const {
707 assert(Kind == k_MatrixRegister && "Invalid access!");
708 return MatrixReg.Reg;
709 }
710
711 unsigned getMatrixElementWidth() const {
712 assert(Kind == k_MatrixRegister && "Invalid access!");
713 return MatrixReg.ElementWidth;
714 }
715
716 MatrixKind getMatrixKind() const {
717 assert(Kind == k_MatrixRegister && "Invalid access!");
718 return MatrixReg.Kind;
719 }
720
721 unsigned getMatrixTileListRegMask() const {
722 assert(isMatrixTileList() && "Invalid access!");
723 return MatrixTileList.RegMask;
724 }
725
726 RegConstraintEqualityTy getRegEqualityTy() const {
727 assert(Kind == k_Register && "Invalid access!");
728 return Reg.EqualityTy;
729 }
730
731 MCRegister getVectorListStart() const {
732 assert(Kind == k_VectorList && "Invalid access!");
733 return VectorList.Reg;
734 }
735
736 unsigned getVectorListCount() const {
737 assert(Kind == k_VectorList && "Invalid access!");
738 return VectorList.Count;
739 }
740
741 unsigned getVectorListStride() const {
742 assert(Kind == k_VectorList && "Invalid access!");
743 return VectorList.Stride;
744 }
745
746 int getVectorIndex() const {
747 assert(Kind == k_VectorIndex && "Invalid access!");
748 return VectorIndex.Val;
749 }
750
751 StringRef getSysReg() const {
752 assert(Kind == k_SysReg && "Invalid access!");
753 return StringRef(SysReg.Data, SysReg.Length);
754 }
755
756 unsigned getSysCR() const {
757 assert(Kind == k_SysCR && "Invalid access!");
758 return SysCRImm.Val;
759 }
760
761 unsigned getPrefetch() const {
762 assert(Kind == k_Prefetch && "Invalid access!");
763 return Prefetch.Val;
764 }
765
766 unsigned getPSBHint() const {
767 assert(Kind == k_PSBHint && "Invalid access!");
768 return PSBHint.Val;
769 }
770
771 unsigned getPHint() const {
772 assert(Kind == k_PHint && "Invalid access!");
773 return PHint.Val;
774 }
775
776 StringRef getPSBHintName() const {
777 assert(Kind == k_PSBHint && "Invalid access!");
778 return StringRef(PSBHint.Data, PSBHint.Length);
779 }
780
781 StringRef getPHintName() const {
782 assert(Kind == k_PHint && "Invalid access!");
783 return StringRef(PHint.Data, PHint.Length);
784 }
785
786 unsigned getBTIHint() const {
787 assert(Kind == k_BTIHint && "Invalid access!");
788 return BTIHint.Val;
789 }
790
791 StringRef getBTIHintName() const {
792 assert(Kind == k_BTIHint && "Invalid access!");
793 return StringRef(BTIHint.Data, BTIHint.Length);
794 }
795
796 unsigned getCMHPriorityHint() const {
797 assert(Kind == k_CMHPriorityHint && "Invalid access!");
798 return CMHPriorityHint.Val;
799 }
800
801 StringRef getCMHPriorityHintName() const {
802 assert(Kind == k_CMHPriorityHint && "Invalid access!");
803 return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
804 }
805
806 unsigned getTIndexHint() const {
807 assert(Kind == k_TIndexHint && "Invalid access!");
808 return TIndexHint.Val;
809 }
810
811 StringRef getTIndexHintName() const {
812 assert(Kind == k_TIndexHint && "Invalid access!");
813 return StringRef(TIndexHint.Data, TIndexHint.Length);
814 }
815
816 StringRef getSVCR() const {
817 assert(Kind == k_SVCR && "Invalid access!");
818 return StringRef(SVCR.Data, SVCR.Length);
819 }
820
821 StringRef getPrefetchName() const {
822 assert(Kind == k_Prefetch && "Invalid access!");
823 return StringRef(Prefetch.Data, Prefetch.Length);
824 }
825
826 AArch64_AM::ShiftExtendType getShiftExtendType() const {
827 if (Kind == k_ShiftExtend)
828 return ShiftExtend.Type;
829 if (Kind == k_Register)
830 return Reg.ShiftExtend.Type;
831 llvm_unreachable("Invalid access!");
832 }
833
834 unsigned getShiftExtendAmount() const {
835 if (Kind == k_ShiftExtend)
836 return ShiftExtend.Amount;
837 if (Kind == k_Register)
838 return Reg.ShiftExtend.Amount;
839 llvm_unreachable("Invalid access!");
840 }
841
842 bool hasShiftExtendAmount() const {
843 if (Kind == k_ShiftExtend)
844 return ShiftExtend.HasExplicitAmount;
845 if (Kind == k_Register)
846 return Reg.ShiftExtend.HasExplicitAmount;
847 llvm_unreachable("Invalid access!");
848 }
849
850 bool isImm() const override { return Kind == k_Immediate; }
851 bool isMem() const override { return false; }
852
853 bool isUImm6() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
857 if (!MCE)
858 return false;
859 int64_t Val = MCE->getValue();
860 return (Val >= 0 && Val < 64);
861 }
862
863 template <int Width> bool isSImm() const {
864 return bool(isSImmScaled<Width, 1>());
865 }
866
867 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
868 return isImmScaled<Bits, Scale>(true);
869 }
870
871 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
872 DiagnosticPredicate isUImmScaled() const {
873 if (IsRange && isImmRange() &&
874 (getLastImmVal() != getFirstImmVal() + Offset))
875 return DiagnosticPredicate::NoMatch;
876
877 return isImmScaled<Bits, Scale, IsRange>(false);
878 }
879
880 template <int Bits, int Scale, bool IsRange = false>
881 DiagnosticPredicate isImmScaled(bool Signed) const {
882 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
883 (isImmRange() && !IsRange))
884 return DiagnosticPredicate::NoMatch;
885
886 int64_t Val;
887 if (isImmRange())
888 Val = getFirstImmVal();
889 else {
890 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
891 if (!MCE)
892 return DiagnosticPredicate::NoMatch;
893 Val = MCE->getValue();
894 }
895
896 int64_t MinVal, MaxVal;
897 if (Signed) {
898 int64_t Shift = Bits - 1;
899 MinVal = (int64_t(1) << Shift) * -Scale;
900 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
901 } else {
902 MinVal = 0;
903 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
904 }
905
906 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
907 return DiagnosticPredicate::Match;
908
909 return DiagnosticPredicate::NearMatch;
910 }
911
912 DiagnosticPredicate isSVEPattern() const {
913 if (!isImm())
914 return DiagnosticPredicate::NoMatch;
915 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
916 if (!MCE)
917 return DiagnosticPredicate::NoMatch;
918 int64_t Val = MCE->getValue();
919 if (Val >= 0 && Val < 32)
920 return DiagnosticPredicate::Match;
921 return DiagnosticPredicate::NearMatch;
922 }
923
924 DiagnosticPredicate isSVEVecLenSpecifier() const {
925 if (!isImm())
926 return DiagnosticPredicate::NoMatch;
927 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
928 if (!MCE)
929 return DiagnosticPredicate::NoMatch;
930 int64_t Val = MCE->getValue();
931 if (Val >= 0 && Val <= 1)
932 return DiagnosticPredicate::Match;
933 return DiagnosticPredicate::NearMatch;
934 }
935
936 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
937 AArch64::Specifier ELFSpec;
938 AArch64::Specifier DarwinSpec;
939 int64_t Addend;
940 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
941 Addend)) {
942 // If we don't understand the expression, assume the best and
943 // let the fixup and relocation code deal with it.
944 return true;
945 }
946
947 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
948 llvm::is_contained(
949 Set: {AArch64::S_LO12, AArch64::S_GOT_LO12, AArch64::S_GOT_AUTH_LO12,
950 AArch64::S_DTPREL_LO12, AArch64::S_DTPREL_LO12_NC,
951 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
952 AArch64::S_GOTTPREL_LO12_NC, AArch64::S_TLSDESC_LO12,
953 AArch64::S_TLSDESC_AUTH_LO12, AArch64::S_SECREL_LO12,
954 AArch64::S_SECREL_HI12, AArch64::S_GOT_PAGE_LO15},
955 Element: ELFSpec)) {
956 // Note that we don't range-check the addend. It's adjusted modulo page
957 // size when converted, so there is no "out of range" condition when using
958 // @pageoff.
959 return true;
960 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
961 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
962 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
963 return Addend == 0;
964 }
965
966 return false;
967 }
968
969 template <int Scale> bool isUImm12Offset() const {
970 if (!isImm())
971 return false;
972
973 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
974 if (!MCE)
975 return isSymbolicUImm12Offset(Expr: getImm());
976
977 int64_t Val = MCE->getValue();
978 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
979 }
980
981 template <int N, int M>
982 bool isImmInRange() const {
983 if (!isImm())
984 return false;
985 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
986 if (!MCE)
987 return false;
988 int64_t Val = MCE->getValue();
989 return (Val >= N && Val <= M);
990 }
991
992 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
993 // a logical immediate can always be represented when inverted.
994 template <typename T>
995 bool isLogicalImm() const {
996 if (!isImm())
997 return false;
998 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
999 if (!MCE)
1000 return false;
1001
1002 int64_t Val = MCE->getValue();
1003 // Avoid left shift by 64 directly.
1004 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
1005 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
1006 if ((Val & Upper) && (Val & Upper) != Upper)
1007 return false;
1008
1009 return AArch64_AM::isLogicalImmediate(imm: Val & ~Upper, regSize: sizeof(T) * 8);
1010 }
1011
1012 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
1013
1014 bool isImmRange() const { return Kind == k_ImmRange; }
1015
1016 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
1017 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
1018 /// immediate that can be shifted by 'Shift'.
1019 template <unsigned Width>
1020 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
1021 if (isShiftedImm() && Width == getShiftedImmShift())
1022 if (auto *CE = dyn_cast<MCConstantExpr>(Val: getShiftedImmVal()))
1023 return std::make_pair(x: CE->getValue(), y: Width);
1024
1025 if (isImm())
1026 if (auto *CE = dyn_cast<MCConstantExpr>(Val: getImm())) {
1027 int64_t Val = CE->getValue();
1028 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
1029 return std::make_pair(x: Val >> Width, y: Width);
1030 else
1031 return std::make_pair(x&: Val, y: 0u);
1032 }
1033
1034 return {};
1035 }
1036
1037 bool isAddSubImm() const {
1038 if (!isShiftedImm() && !isImm())
1039 return false;
1040
1041 const MCExpr *Expr;
1042
1043 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
1044 if (isShiftedImm()) {
1045 unsigned Shift = ShiftedImm.ShiftAmount;
1046 Expr = ShiftedImm.Val;
1047 if (Shift != 0 && Shift != 12)
1048 return false;
1049 } else {
1050 Expr = getImm();
1051 }
1052
1053 AArch64::Specifier ELFSpec;
1054 AArch64::Specifier DarwinSpec;
1055 int64_t Addend;
1056 if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
1057 Addend)) {
1058 return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
1059 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
1060 (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
1061 llvm::is_contained(
1062 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
1063 AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
1064 AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
1065 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
1066 AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
1067 AArch64::S_SECREL_HI12, AArch64::S_SECREL_LO12},
1068 Element: ELFSpec);
1069 }
1070
1071 // If it's a constant, it should be a real immediate in range.
1072 if (auto ShiftedVal = getShiftedVal<12>())
1073 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1074
1075 // If it's an expression, we hope for the best and let the fixup/relocation
1076 // code deal with it.
1077 return true;
1078 }
1079
1080 bool isAddSubImmNeg() const {
1081 if (!isShiftedImm() && !isImm())
1082 return false;
1083
1084 // Otherwise it should be a real negative immediate in range.
1085 if (auto ShiftedVal = getShiftedVal<12>())
1086 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1087
1088 return false;
1089 }
1090
1091 // Signed value in the range -128 to +127. For element widths of
1092 // 16 bits or higher it may also be a signed multiple of 256 in the
1093 // range -32768 to +32512.
1094 // For element-width of 8 bits a range of -128 to 255 is accepted,
1095 // since a copy of a byte can be either signed/unsigned.
1096 template <typename T>
1097 DiagnosticPredicate isSVECpyImm() const {
1098 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
1099 return DiagnosticPredicate::NoMatch;
1100
1101 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1102 std::is_same<int8_t, T>::value;
1103 if (auto ShiftedImm = getShiftedVal<8>())
1104 if (!(IsByte && ShiftedImm->second) &&
1105 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1106 << ShiftedImm->second))
1107 return DiagnosticPredicate::Match;
1108
1109 return DiagnosticPredicate::NearMatch;
1110 }
1111
1112 // Unsigned value in the range 0 to 255. For element widths of
1113 // 16 bits or higher it may also be a signed multiple of 256 in the
1114 // range 0 to 65280.
1115 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1116 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
1117 return DiagnosticPredicate::NoMatch;
1118
1119 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1120 std::is_same<int8_t, T>::value;
1121 if (auto ShiftedImm = getShiftedVal<8>())
1122 if (!(IsByte && ShiftedImm->second) &&
1123 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1124 << ShiftedImm->second))
1125 return DiagnosticPredicate::Match;
1126
1127 return DiagnosticPredicate::NearMatch;
1128 }
1129
1130 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1131 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1132 return DiagnosticPredicate::Match;
1133 return DiagnosticPredicate::NoMatch;
1134 }
1135
1136 bool isCondCode() const { return Kind == k_CondCode; }
1137
1138 bool isSIMDImmType10() const {
1139 if (!isImm())
1140 return false;
1141 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
1142 if (!MCE)
1143 return false;
1144 return AArch64_AM::isAdvSIMDModImmType10(Imm: MCE->getValue());
1145 }
1146
1147 template<int N>
1148 bool isBranchTarget() const {
1149 if (!isImm())
1150 return false;
1151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
1152 if (!MCE)
1153 return true;
1154 int64_t Val = MCE->getValue();
1155 if (Val & 0x3)
1156 return false;
1157 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1158 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1159 }
1160
1161 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1162 if (!isImm())
1163 return false;
1164
1165 AArch64::Specifier ELFSpec;
1166 AArch64::Specifier DarwinSpec;
1167 int64_t Addend;
1168 if (!AArch64AsmParser::classifySymbolRef(Expr: getImm(), ELFSpec, DarwinSpec,
1169 Addend)) {
1170 return false;
1171 }
1172 if (DarwinSpec != AArch64::S_None)
1173 return false;
1174
1175 return llvm::is_contained(Range&: AllowedModifiers, Element: ELFSpec);
1176 }
1177
1178 bool isMovWSymbolG3() const {
1179 return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1180 }
1181
1182 bool isMovWSymbolG2() const {
1183 return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
1184 AArch64::S_ABS_G2_NC, AArch64::S_PREL_G2,
1185 AArch64::S_PREL_G2_NC, AArch64::S_TPREL_G2,
1186 AArch64::S_DTPREL_G2});
1187 }
1188
1189 bool isMovWSymbolG1() const {
1190 return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
1191 AArch64::S_ABS_G1_NC, AArch64::S_PREL_G1,
1192 AArch64::S_PREL_G1_NC, AArch64::S_GOTTPREL_G1,
1193 AArch64::S_TPREL_G1, AArch64::S_TPREL_G1_NC,
1194 AArch64::S_DTPREL_G1, AArch64::S_DTPREL_G1_NC});
1195 }
1196
1197 bool isMovWSymbolG0() const {
1198 return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
1199 AArch64::S_ABS_G0_NC, AArch64::S_PREL_G0,
1200 AArch64::S_PREL_G0_NC, AArch64::S_GOTTPREL_G0_NC,
1201 AArch64::S_TPREL_G0, AArch64::S_TPREL_G0_NC,
1202 AArch64::S_DTPREL_G0, AArch64::S_DTPREL_G0_NC});
1203 }
1204
1205 template<int RegWidth, int Shift>
1206 bool isMOVZMovAlias() const {
1207 if (!isImm()) return false;
1208
1209 const MCExpr *E = getImm();
1210 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: E)) {
1211 uint64_t Value = CE->getValue();
1212
1213 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1214 }
1215 // Only supports the case of Shift being 0 if an expression is used as an
1216 // operand
1217 return !Shift && E;
1218 }
1219
1220 template<int RegWidth, int Shift>
1221 bool isMOVNMovAlias() const {
1222 if (!isImm()) return false;
1223
1224 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
1225 if (!CE) return false;
1226 uint64_t Value = CE->getValue();
1227
1228 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1229 }
1230
1231 bool isFPImm() const {
1232 return Kind == k_FPImm &&
1233 AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt()) != -1;
1234 }
1235
1236 bool isBarrier() const {
1237 return Kind == k_Barrier && !getBarriernXSModifier();
1238 }
1239 bool isBarriernXS() const {
1240 return Kind == k_Barrier && getBarriernXSModifier();
1241 }
1242 bool isSysReg() const { return Kind == k_SysReg; }
1243
1244 bool isMRSSystemRegister() const {
1245 if (!isSysReg()) return false;
1246
1247 return SysReg.MRSReg != -1U;
1248 }
1249
1250 bool isMSRSystemRegister() const {
1251 if (!isSysReg()) return false;
1252 return SysReg.MSRReg != -1U;
1253 }
1254
1255 bool isSystemPStateFieldWithImm0_1() const {
1256 if (!isSysReg()) return false;
1257 return AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: SysReg.PStateField);
1258 }
1259
1260 bool isSystemPStateFieldWithImm0_15() const {
1261 if (!isSysReg())
1262 return false;
1263 return AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: SysReg.PStateField);
1264 }
1265
1266 bool isSVCR() const {
1267 if (Kind != k_SVCR)
1268 return false;
1269 return SVCR.PStateField != -1U;
1270 }
1271
1272 bool isReg() const override {
1273 return Kind == k_Register;
1274 }
1275
1276 bool isVectorList() const { return Kind == k_VectorList; }
1277
1278 bool isScalarReg() const {
1279 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1280 }
1281
1282 bool isNeonVectorReg() const {
1283 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1284 }
1285
1286 bool isNeonVectorRegLo() const {
1287 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1288 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1289 Reg: Reg.Reg) ||
1290 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1291 Reg: Reg.Reg));
1292 }
1293
1294 bool isNeonVectorReg0to7() const {
1295 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1296 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1297 Reg: Reg.Reg));
1298 }
1299
1300 bool isMatrix() const { return Kind == k_MatrixRegister; }
1301 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1302
1303 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1304 RegKind RK;
1305 switch (Class) {
1306 case AArch64::PPRRegClassID:
1307 case AArch64::PPR_3bRegClassID:
1308 case AArch64::PPR_p8to15RegClassID:
1309 case AArch64::PNRRegClassID:
1310 case AArch64::PNR_p8to15RegClassID:
1311 case AArch64::PPRorPNRRegClassID:
1312 RK = RegKind::SVEPredicateAsCounter;
1313 break;
1314 default:
1315 llvm_unreachable("Unsupported register class");
1316 }
1317
1318 return (Kind == k_Register && Reg.Kind == RK) &&
1319 AArch64MCRegisterClasses[Class].contains(Reg: getReg());
1320 }
1321
1322 template <unsigned Class> bool isSVEVectorReg() const {
1323 RegKind RK;
1324 switch (Class) {
1325 case AArch64::ZPRRegClassID:
1326 case AArch64::ZPR_3bRegClassID:
1327 case AArch64::ZPR_4bRegClassID:
1328 case AArch64::ZPRMul2_LoRegClassID:
1329 case AArch64::ZPRMul2_HiRegClassID:
1330 case AArch64::ZPR_KRegClassID:
1331 RK = RegKind::SVEDataVector;
1332 break;
1333 case AArch64::PPRRegClassID:
1334 case AArch64::PPR_3bRegClassID:
1335 case AArch64::PPR_p8to15RegClassID:
1336 case AArch64::PNRRegClassID:
1337 case AArch64::PNR_p8to15RegClassID:
1338 case AArch64::PPRorPNRRegClassID:
1339 RK = RegKind::SVEPredicateVector;
1340 break;
1341 default:
1342 llvm_unreachable("Unsupported register class");
1343 }
1344
1345 return (Kind == k_Register && Reg.Kind == RK) &&
1346 AArch64MCRegisterClasses[Class].contains(Reg: getReg());
1347 }
1348
1349 template <unsigned Class> bool isFPRasZPR() const {
1350 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1351 AArch64MCRegisterClasses[Class].contains(Reg: getReg());
1352 }
1353
1354 template <int ElementWidth, unsigned Class>
1355 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1356 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1357 return DiagnosticPredicate::NoMatch;
1358
1359 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1360 return DiagnosticPredicate::Match;
1361
1362 return DiagnosticPredicate::NearMatch;
1363 }
1364
1365 template <int ElementWidth, unsigned Class>
1366 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1367 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1368 Reg.Kind != RegKind::SVEPredicateVector))
1369 return DiagnosticPredicate::NoMatch;
1370
1371 if ((isSVEPredicateAsCounterReg<Class>() ||
1372 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1373 Reg.ElementWidth == ElementWidth)
1374 return DiagnosticPredicate::Match;
1375
1376 return DiagnosticPredicate::NearMatch;
1377 }
1378
1379 template <int ElementWidth, unsigned Class>
1380 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1381 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1382 return DiagnosticPredicate::NoMatch;
1383
1384 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1385 return DiagnosticPredicate::Match;
1386
1387 return DiagnosticPredicate::NearMatch;
1388 }
1389
1390 template <int ElementWidth, unsigned Class>
1391 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1392 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1393 return DiagnosticPredicate::NoMatch;
1394
1395 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1396 return DiagnosticPredicate::Match;
1397
1398 return DiagnosticPredicate::NearMatch;
1399 }
1400
1401 template <int ElementWidth, unsigned Class,
1402 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1403 bool ShiftWidthAlwaysSame>
1404 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1405 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1406 if (!VectorMatch.isMatch())
1407 return DiagnosticPredicate::NoMatch;
1408
1409 // Give a more specific diagnostic when the user has explicitly typed in
1410 // a shift-amount that does not match what is expected, but for which
1411 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1412 bool MatchShift = getShiftExtendAmount() == Log2_32(Value: ShiftWidth / 8);
1413 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1414 ShiftExtendTy == AArch64_AM::SXTW) &&
1415 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1416 return DiagnosticPredicate::NoMatch;
1417
1418 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1419 return DiagnosticPredicate::Match;
1420
1421 return DiagnosticPredicate::NearMatch;
1422 }
1423
1424 bool isGPR32as64() const {
1425 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1426 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg: Reg.Reg);
1427 }
1428
1429 bool isGPR64as32() const {
1430 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1431 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg: Reg.Reg);
1432 }
1433
1434 bool isGPR64x8() const {
1435 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1436 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1437 Reg: Reg.Reg);
1438 }
1439
1440 bool isWSeqPair() const {
1441 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1442 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1443 Reg: Reg.Reg);
1444 }
1445
1446 bool isXSeqPair() const {
1447 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1448 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1449 Reg: Reg.Reg);
1450 }
1451
1452 bool isSyspXzrPair() const {
1453 return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
1454 }
1455
1456 template<int64_t Angle, int64_t Remainder>
1457 DiagnosticPredicate isComplexRotation() const {
1458 if (!isImm())
1459 return DiagnosticPredicate::NoMatch;
1460
1461 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
1462 if (!CE)
1463 return DiagnosticPredicate::NoMatch;
1464 uint64_t Value = CE->getValue();
1465
1466 if (Value % Angle == Remainder && Value <= 270)
1467 return DiagnosticPredicate::Match;
1468 return DiagnosticPredicate::NearMatch;
1469 }
1470
1471 template <unsigned RegClassID> bool isGPR64() const {
1472 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1473 AArch64MCRegisterClasses[RegClassID].contains(Reg: getReg());
1474 }
1475
1476 template <unsigned RegClassID, int ExtWidth>
1477 DiagnosticPredicate isGPR64WithShiftExtend() const {
1478 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1479 return DiagnosticPredicate::NoMatch;
1480
1481 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1482 getShiftExtendAmount() == Log2_32(Value: ExtWidth / 8))
1483 return DiagnosticPredicate::Match;
1484 return DiagnosticPredicate::NearMatch;
1485 }
1486
1487 /// Is this a vector list with the type implicit (presumably attached to the
1488 /// instruction itself)?
1489 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1490 bool isImplicitlyTypedVectorList() const {
1491 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1492 VectorList.NumElements == 0 &&
1493 VectorList.RegisterKind == VectorKind &&
1494 (!IsConsecutive || (VectorList.Stride == 1));
1495 }
1496
1497 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1498 unsigned ElementWidth, unsigned Stride = 1>
1499 bool isTypedVectorList() const {
1500 if (Kind != k_VectorList)
1501 return false;
1502 if (VectorList.Count != NumRegs)
1503 return false;
1504 if (VectorList.RegisterKind != VectorKind)
1505 return false;
1506 if (VectorList.ElementWidth != ElementWidth)
1507 return false;
1508 if (VectorList.Stride != Stride)
1509 return false;
1510 return VectorList.NumElements == NumElements;
1511 }
1512
1513 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1514 unsigned ElementWidth, unsigned RegClass>
1515 DiagnosticPredicate isTypedVectorListMultiple() const {
1516 bool Res =
1517 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1518 if (!Res)
1519 return DiagnosticPredicate::NoMatch;
1520 if (!AArch64MCRegisterClasses[RegClass].contains(Reg: VectorList.Reg))
1521 return DiagnosticPredicate::NearMatch;
1522 return DiagnosticPredicate::Match;
1523 }
1524
1525 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1526 unsigned ElementWidth>
1527 DiagnosticPredicate isTypedVectorListStrided() const {
1528 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1529 ElementWidth, Stride>();
1530 if (!Res)
1531 return DiagnosticPredicate::NoMatch;
1532 if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
1533 ((VectorList.Reg >= AArch64::Z16) &&
1534 (VectorList.Reg < (AArch64::Z16 + Stride))))
1535 return DiagnosticPredicate::Match;
1536 return DiagnosticPredicate::NoMatch;
1537 }
1538
1539 template <int Min, int Max>
1540 DiagnosticPredicate isVectorIndex() const {
1541 if (Kind != k_VectorIndex)
1542 return DiagnosticPredicate::NoMatch;
1543 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1544 return DiagnosticPredicate::Match;
1545 return DiagnosticPredicate::NearMatch;
1546 }
1547
1548 bool isToken() const override { return Kind == k_Token; }
1549
1550 bool isTokenEqual(StringRef Str) const {
1551 return Kind == k_Token && getToken() == Str;
1552 }
1553 bool isSysCR() const { return Kind == k_SysCR; }
1554 bool isPrefetch() const { return Kind == k_Prefetch; }
1555 bool isPSBHint() const { return Kind == k_PSBHint; }
1556 bool isPHint() const { return Kind == k_PHint; }
1557 bool isBTIHint() const { return Kind == k_BTIHint; }
1558 bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
1559 bool isTIndexHint() const { return Kind == k_TIndexHint; }
1560 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1561 bool isShifter() const {
1562 if (!isShiftExtend())
1563 return false;
1564
1565 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1566 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1567 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1568 ST == AArch64_AM::MSL);
1569 }
1570
1571 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1572 if (Kind != k_FPImm)
1573 return DiagnosticPredicate::NoMatch;
1574
1575 if (getFPImmIsExact()) {
1576 // Lookup the immediate from table of supported immediates.
1577 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmEnum);
1578 assert(Desc && "Unknown enum value");
1579
1580 // Calculate its FP value.
1581 APFloat RealVal(APFloat::IEEEdouble());
1582 auto StatusOrErr =
1583 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1584 if (errorToBool(Err: StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1585 llvm_unreachable("FP immediate is not exact");
1586
1587 if (getFPImm().bitwiseIsEqual(RHS: RealVal))
1588 return DiagnosticPredicate::Match;
1589 }
1590
1591 return DiagnosticPredicate::NearMatch;
1592 }
1593
1594 template <unsigned ImmA, unsigned ImmB>
1595 DiagnosticPredicate isExactFPImm() const {
1596 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1597 if ((Res = isExactFPImm<ImmA>()))
1598 return DiagnosticPredicate::Match;
1599 if ((Res = isExactFPImm<ImmB>()))
1600 return DiagnosticPredicate::Match;
1601 return Res;
1602 }
1603
1604 bool isExtend() const {
1605 if (!isShiftExtend())
1606 return false;
1607
1608 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1609 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1610 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1611 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1612 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1613 ET == AArch64_AM::LSL) &&
1614 getShiftExtendAmount() <= 4;
1615 }
1616
1617 bool isExtend64() const {
1618 if (!isExtend())
1619 return false;
1620 // Make sure the extend expects a 32-bit source register.
1621 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1622 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1623 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1624 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1625 }
1626
1627 bool isExtendLSL64() const {
1628 if (!isExtend())
1629 return false;
1630 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1631 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1632 ET == AArch64_AM::LSL) &&
1633 getShiftExtendAmount() <= 4;
1634 }
1635
1636 bool isLSLImm3Shift() const {
1637 if (!isShiftExtend())
1638 return false;
1639 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1640 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1641 }
1642
1643 template<int Width> bool isMemXExtend() const {
1644 if (!isExtend())
1645 return false;
1646 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1647 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1648 (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
1649 getShiftExtendAmount() == 0);
1650 }
1651
1652 template<int Width> bool isMemWExtend() const {
1653 if (!isExtend())
1654 return false;
1655 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1656 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1657 (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
1658 getShiftExtendAmount() == 0);
1659 }
1660
1661 template <unsigned width>
1662 bool isArithmeticShifter() const {
1663 if (!isShifter())
1664 return false;
1665
1666 // An arithmetic shifter is LSL, LSR, or ASR.
1667 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1668 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1669 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1670 }
1671
1672 template <unsigned width>
1673 bool isLogicalShifter() const {
1674 if (!isShifter())
1675 return false;
1676
1677 // A logical shifter is LSL, LSR, ASR or ROR.
1678 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1679 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1680 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1681 getShiftExtendAmount() < width;
1682 }
1683
1684 bool isMovImm32Shifter() const {
1685 if (!isShifter())
1686 return false;
1687
1688 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1689 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1690 if (ST != AArch64_AM::LSL)
1691 return false;
1692 uint64_t Val = getShiftExtendAmount();
1693 return (Val == 0 || Val == 16);
1694 }
1695
1696 bool isMovImm64Shifter() const {
1697 if (!isShifter())
1698 return false;
1699
1700 // A MOVi shifter is LSL of 0 or 16.
1701 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1702 if (ST != AArch64_AM::LSL)
1703 return false;
1704 uint64_t Val = getShiftExtendAmount();
1705 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1706 }
1707
1708 bool isLogicalVecShifter() const {
1709 if (!isShifter())
1710 return false;
1711
1712 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1713 unsigned Shift = getShiftExtendAmount();
1714 return getShiftExtendType() == AArch64_AM::LSL &&
1715 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1716 }
1717
1718 bool isLogicalVecHalfWordShifter() const {
1719 if (!isLogicalVecShifter())
1720 return false;
1721
1722 // A logical vector shifter is a left shift by 0 or 8.
1723 unsigned Shift = getShiftExtendAmount();
1724 return getShiftExtendType() == AArch64_AM::LSL &&
1725 (Shift == 0 || Shift == 8);
1726 }
1727
1728 bool isMoveVecShifter() const {
1729 if (!isShiftExtend())
1730 return false;
1731
1732 // A logical vector shifter is a left shift by 8 or 16.
1733 unsigned Shift = getShiftExtendAmount();
1734 return getShiftExtendType() == AArch64_AM::MSL &&
1735 (Shift == 8 || Shift == 16);
1736 }
1737
1738 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1739 // to LDUR/STUR when the offset is not legal for the former but is for
1740 // the latter. As such, in addition to checking for being a legal unscaled
1741 // address, also check that it is not a legal scaled address. This avoids
1742 // ambiguity in the matcher.
1743 template<int Width>
1744 bool isSImm9OffsetFB() const {
1745 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1746 }
1747
1748 bool isAdrpLabel() const {
1749 // Validation was handled during parsing, so we just verify that
1750 // something didn't go haywire.
1751 if (!isImm())
1752 return false;
1753
1754 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
1755 int64_t Val = CE->getValue();
1756 int64_t Min = - (4096 * (1LL << (21 - 1)));
1757 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1758 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1759 }
1760
1761 return true;
1762 }
1763
1764 bool isAdrLabel() const {
1765 // Validation was handled during parsing, so we just verify that
1766 // something didn't go haywire.
1767 if (!isImm())
1768 return false;
1769
1770 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
1771 int64_t Val = CE->getValue();
1772 int64_t Min = - (1LL << (21 - 1));
1773 int64_t Max = ((1LL << (21 - 1)) - 1);
1774 return Val >= Min && Val <= Max;
1775 }
1776
1777 return true;
1778 }
1779
1780 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1781 DiagnosticPredicate isMatrixRegOperand() const {
1782 if (!isMatrix())
1783 return DiagnosticPredicate::NoMatch;
1784 if (getMatrixKind() != Kind ||
1785 !AArch64MCRegisterClasses[RegClass].contains(Reg: getMatrixReg()) ||
1786 EltSize != getMatrixElementWidth())
1787 return DiagnosticPredicate::NearMatch;
1788 return DiagnosticPredicate::Match;
1789 }
1790
1791 bool isPAuthPCRelLabel16Operand() const {
1792 // PAuth PCRel16 operands are similar to regular branch targets, but only
1793 // negative values are allowed for concrete immediates as signing instr
1794 // should be in a lower address.
1795 if (!isImm())
1796 return false;
1797 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
1798 if (!MCE)
1799 return true;
1800 int64_t Val = MCE->getValue();
1801 if (Val & 0b11)
1802 return false;
1803 return (Val <= 0) && (Val > -(1 << 18));
1804 }
1805
1806 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1807 // Add as immediates when possible. Null MCExpr = 0.
1808 if (!Expr)
1809 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
1810 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr))
1811 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
1812 else
1813 Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
1814 }
1815
1816 void addRegOperands(MCInst &Inst, unsigned N) const {
1817 assert(N == 1 && "Invalid number of operands!");
1818 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1819 }
1820
1821 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1822 assert(N == 1 && "Invalid number of operands!");
1823 Inst.addOperand(Op: MCOperand::createReg(Reg: getMatrixReg()));
1824 }
1825
1826 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 assert(
1829 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1830
1831 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1832 MCRegister Reg = RI->getRegClass(i: AArch64::GPR32RegClassID)
1833 .getRegister(i: RI->getEncodingValue(Reg: getReg()));
1834
1835 Inst.addOperand(Op: MCOperand::createReg(Reg));
1836 }
1837
1838 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1839 assert(N == 1 && "Invalid number of operands!");
1840 assert(
1841 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1842
1843 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1844 MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
1845 .getRegister(i: RI->getEncodingValue(Reg: getReg()));
1846
1847 Inst.addOperand(Op: MCOperand::createReg(Reg));
1848 }
1849
1850 template <int Width>
1851 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1852 unsigned Base;
1853 switch (Width) {
1854 case 8: Base = AArch64::B0; break;
1855 case 16: Base = AArch64::H0; break;
1856 case 32: Base = AArch64::S0; break;
1857 case 64: Base = AArch64::D0; break;
1858 case 128: Base = AArch64::Q0; break;
1859 default:
1860 llvm_unreachable("Unsupported width");
1861 }
1862 Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::Z0 + getReg() - Base));
1863 }
1864
1865 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!");
1867 MCRegister Reg = getReg();
1868 // Normalise to PPR
1869 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1870 Reg = Reg - AArch64::PN0 + AArch64::P0;
1871 Inst.addOperand(Op: MCOperand::createReg(Reg));
1872 }
1873
1874 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 Inst.addOperand(
1877 Op: MCOperand::createReg(Reg: (getReg() - AArch64::PN0) + AArch64::P0));
1878 }
1879
1880 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1881 assert(N == 1 && "Invalid number of operands!");
1882 assert(
1883 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1884 Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::D0 + getReg() - AArch64::Q0));
1885 }
1886
1887 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1888 assert(N == 1 && "Invalid number of operands!");
1889 assert(
1890 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1891 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1892 }
1893
1894 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1895 assert(N == 1 && "Invalid number of operands!");
1896 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1897 }
1898
1899 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1900 assert(N == 1 && "Invalid number of operands!");
1901 Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
1902 }
1903
1904 enum VecListIndexType {
1905 VecListIdx_DReg = 0,
1906 VecListIdx_QReg = 1,
1907 VecListIdx_ZReg = 2,
1908 VecListIdx_PReg = 3,
1909 };
1910
1911 template <VecListIndexType RegTy, unsigned NumRegs,
1912 bool IsConsecutive = false>
1913 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1914 assert(N == 1 && "Invalid number of operands!");
1915 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1916 "Expected consecutive registers");
1917 static const unsigned FirstRegs[][5] = {
1918 /* DReg */ { AArch64::Q0,
1919 AArch64::D0, AArch64::D0_D1,
1920 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1921 /* QReg */ { AArch64::Q0,
1922 AArch64::Q0, AArch64::Q0_Q1,
1923 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1924 /* ZReg */ { AArch64::Z0,
1925 AArch64::Z0, AArch64::Z0_Z1,
1926 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1927 /* PReg */ { AArch64::P0,
1928 AArch64::P0, AArch64::P0_P1 }
1929 };
1930
1931 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1932 " NumRegs must be <= 4 for ZRegs");
1933
1934 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1935 " NumRegs must be <= 2 for PRegs");
1936
1937 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1938 Inst.addOperand(Op: MCOperand::createReg(Reg: FirstReg + getVectorListStart() -
1939 FirstRegs[(unsigned)RegTy][0]));
1940 }
1941
1942 template <unsigned NumRegs>
1943 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1944 assert(N == 1 && "Invalid number of operands!");
1945 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1946
1947 switch (NumRegs) {
1948 case 2:
1949 if (getVectorListStart() < AArch64::Z16) {
1950 assert((getVectorListStart() < AArch64::Z8) &&
1951 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1952 Inst.addOperand(Op: MCOperand::createReg(
1953 Reg: AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1954 } else {
1955 assert((getVectorListStart() < AArch64::Z24) &&
1956 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1957 Inst.addOperand(Op: MCOperand::createReg(
1958 Reg: AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1959 }
1960 break;
1961 case 4:
1962 if (getVectorListStart() < AArch64::Z16) {
1963 assert((getVectorListStart() < AArch64::Z4) &&
1964 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1965 Inst.addOperand(Op: MCOperand::createReg(
1966 Reg: AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1967 } else {
1968 assert((getVectorListStart() < AArch64::Z20) &&
1969 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1970 Inst.addOperand(Op: MCOperand::createReg(
1971 Reg: AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1972 }
1973 break;
1974 default:
1975 llvm_unreachable("Unsupported number of registers for strided vec list");
1976 }
1977 }
1978
1979 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1980 assert(N == 1 && "Invalid number of operands!");
1981 unsigned RegMask = getMatrixTileListRegMask();
1982 assert(RegMask <= 0xFF && "Invalid mask!");
1983 Inst.addOperand(Op: MCOperand::createImm(Val: RegMask));
1984 }
1985
1986 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
1989 }
1990
1991 template <unsigned ImmIs0, unsigned ImmIs1>
1992 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1993 assert(N == 1 && "Invalid number of operands!");
1994 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1995 Inst.addOperand(Op: MCOperand::createImm(Val: bool(isExactFPImm<ImmIs1>())));
1996 }
1997
1998 void addImmOperands(MCInst &Inst, unsigned N) const {
1999 assert(N == 1 && "Invalid number of operands!");
2000 // If this is a pageoff symrefexpr with an addend, adjust the addend
2001 // to be only the page-offset portion. Otherwise, just add the expr
2002 // as-is.
2003 addExpr(Inst, Expr: getImm());
2004 }
2005
2006 template <int Shift>
2007 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2008 assert(N == 2 && "Invalid number of operands!");
2009 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2010 Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->first));
2011 Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
2012 } else if (isShiftedImm()) {
2013 addExpr(Inst, Expr: getShiftedImmVal());
2014 Inst.addOperand(Op: MCOperand::createImm(Val: getShiftedImmShift()));
2015 } else {
2016 addExpr(Inst, Expr: getImm());
2017 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
2018 }
2019 }
2020
2021 template <int Shift>
2022 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2023 assert(N == 2 && "Invalid number of operands!");
2024 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2025 Inst.addOperand(Op: MCOperand::createImm(Val: -ShiftedVal->first));
2026 Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
2027 } else
2028 llvm_unreachable("Not a shifted negative immediate");
2029 }
2030
2031 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2032 assert(N == 1 && "Invalid number of operands!");
2033 Inst.addOperand(Op: MCOperand::createImm(Val: getCondCode()));
2034 }
2035
2036 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2039 if (!MCE)
2040 addExpr(Inst, Expr: getImm());
2041 else
2042 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 12));
2043 }
2044
2045 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2046 addImmOperands(Inst, N);
2047 }
2048
2049 template<int Scale>
2050 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2051 assert(N == 1 && "Invalid number of operands!");
2052 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2053
2054 if (!MCE) {
2055 Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
2056 return;
2057 }
2058 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
2059 }
2060
2061 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2062 assert(N == 1 && "Invalid number of operands!");
2063 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2064 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue()));
2065 }
2066
2067 template <int Scale>
2068 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2069 assert(N == 1 && "Invalid number of operands!");
2070 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2071 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
2072 }
2073
2074 template <int Scale>
2075 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2076 assert(N == 1 && "Invalid number of operands!");
2077 Inst.addOperand(Op: MCOperand::createImm(Val: getFirstImmVal() / Scale));
2078 }
2079
2080 template <typename T>
2081 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2082 assert(N == 1 && "Invalid number of operands!");
2083 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2084 std::make_unsigned_t<T> Val = MCE->getValue();
2085 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2086 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2087 }
2088
2089 template <typename T>
2090 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2091 assert(N == 1 && "Invalid number of operands!");
2092 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2093 std::make_unsigned_t<T> Val = ~MCE->getValue();
2094 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2095 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2096 }
2097
2098 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2099 assert(N == 1 && "Invalid number of operands!");
2100 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2101 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(Imm: MCE->getValue());
2102 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2103 }
2104
2105 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2111 if (!MCE) {
2112 addExpr(Inst, Expr: getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2117 }
2118
2119 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2120 // PC-relative operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2125 if (!MCE) {
2126 addExpr(Inst, Expr: getImm());
2127 return;
2128 }
2129 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2130 }
2131
2132 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2133 // Branch operands don't encode the low bits, so shift them off
2134 // here. If it's a label, however, just put it on directly as there's
2135 // not enough information now to do anything.
2136 assert(N == 1 && "Invalid number of operands!");
2137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2138 if (!MCE) {
2139 addExpr(Inst, Expr: getImm());
2140 return;
2141 }
2142 assert(MCE && "Invalid constant immediate operand!");
2143 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2144 }
2145
2146 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2147 // Branch operands don't encode the low bits, so shift them off
2148 // here. If it's a label, however, just put it on directly as there's
2149 // not enough information now to do anything.
2150 assert(N == 1 && "Invalid number of operands!");
2151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2152 if (!MCE) {
2153 addExpr(Inst, Expr: getImm());
2154 return;
2155 }
2156 assert(MCE && "Invalid constant immediate operand!");
2157 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2158 }
2159
2160 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2161 // Branch operands don't encode the low bits, so shift them off
2162 // here. If it's a label, however, just put it on directly as there's
2163 // not enough information now to do anything.
2164 assert(N == 1 && "Invalid number of operands!");
2165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2166 if (!MCE) {
2167 addExpr(Inst, Expr: getImm());
2168 return;
2169 }
2170 assert(MCE && "Invalid constant immediate operand!");
2171 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2172 }
2173
2174 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2175 assert(N == 1 && "Invalid number of operands!");
2176 Inst.addOperand(Op: MCOperand::createImm(
2177 Val: AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt())));
2178 }
2179
2180 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2181 assert(N == 1 && "Invalid number of operands!");
2182 Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
2183 }
2184
2185 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187 Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
2188 }
2189
2190 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2191 assert(N == 1 && "Invalid number of operands!");
2192
2193 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MRSReg));
2194 }
2195
2196 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2197 assert(N == 1 && "Invalid number of operands!");
2198
2199 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MSRReg));
2200 }
2201
2202 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2203 assert(N == 1 && "Invalid number of operands!");
2204
2205 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
2206 }
2207
2208 void addSVCROperands(MCInst &Inst, unsigned N) const {
2209 assert(N == 1 && "Invalid number of operands!");
2210
2211 Inst.addOperand(Op: MCOperand::createImm(Val: SVCR.PStateField));
2212 }
2213
2214 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2215 assert(N == 1 && "Invalid number of operands!");
2216
2217 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
2218 }
2219
2220 void addSysCROperands(MCInst &Inst, unsigned N) const {
2221 assert(N == 1 && "Invalid number of operands!");
2222 Inst.addOperand(Op: MCOperand::createImm(Val: getSysCR()));
2223 }
2224
2225 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2226 assert(N == 1 && "Invalid number of operands!");
2227 Inst.addOperand(Op: MCOperand::createImm(Val: getPrefetch()));
2228 }
2229
2230 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2231 assert(N == 1 && "Invalid number of operands!");
2232 Inst.addOperand(Op: MCOperand::createImm(Val: getPSBHint()));
2233 }
2234
2235 void addPHintOperands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 Inst.addOperand(Op: MCOperand::createImm(Val: getPHint()));
2238 }
2239
2240 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242 Inst.addOperand(Op: MCOperand::createImm(Val: getBTIHint()));
2243 }
2244
2245 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2246 assert(N == 1 && "Invalid number of operands!");
2247 Inst.addOperand(Op: MCOperand::createImm(Val: getCMHPriorityHint()));
2248 }
2249
2250 void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
2251 assert(N == 1 && "Invalid number of operands!");
2252 Inst.addOperand(Op: MCOperand::createImm(Val: getTIndexHint()));
2253 }
2254
2255 void addShifterOperands(MCInst &Inst, unsigned N) const {
2256 assert(N == 1 && "Invalid number of operands!");
2257 unsigned Imm =
2258 AArch64_AM::getShifterImm(ST: getShiftExtendType(), Imm: getShiftExtendAmount());
2259 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2260 }
2261
2262 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2263 assert(N == 1 && "Invalid number of operands!");
2264 unsigned Imm = getShiftExtendAmount();
2265 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2266 }
2267
2268 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2269 assert(N == 1 && "Invalid number of operands!");
2270
2271 if (!isScalarReg())
2272 return;
2273
2274 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2275 MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
2276 .getRegister(i: RI->getEncodingValue(Reg: getReg()));
2277 if (Reg != AArch64::XZR)
2278 llvm_unreachable("wrong register");
2279
2280 Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::XZR));
2281 }
2282
2283 void addExtendOperands(MCInst &Inst, unsigned N) const {
2284 assert(N == 1 && "Invalid number of operands!");
2285 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2286 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2287 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2288 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2289 }
2290
2291 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2294 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2295 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2296 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2297 }
2298
2299 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2300 assert(N == 2 && "Invalid number of operands!");
2301 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2302 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2303 Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
2304 Inst.addOperand(Op: MCOperand::createImm(Val: getShiftExtendAmount() != 0));
2305 }
2306
2307 // For 8-bit load/store instructions with a register offset, both the
2308 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2309 // they're disambiguated by whether the shift was explicit or implicit rather
2310 // than its size.
2311 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2312 assert(N == 2 && "Invalid number of operands!");
2313 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2314 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2315 Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
2316 Inst.addOperand(Op: MCOperand::createImm(Val: hasShiftExtendAmount()));
2317 }
2318
2319 template<int Shift>
2320 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2321 assert(N == 1 && "Invalid number of operands!");
2322
2323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2324 if (CE) {
2325 uint64_t Value = CE->getValue();
2326 Inst.addOperand(Op: MCOperand::createImm(Val: (Value >> Shift) & 0xffff));
2327 } else {
2328 addExpr(Inst, Expr: getImm());
2329 }
2330 }
2331
2332 template<int Shift>
2333 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2334 assert(N == 1 && "Invalid number of operands!");
2335
2336 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2337 uint64_t Value = CE->getValue();
2338 Inst.addOperand(Op: MCOperand::createImm(Val: (~Value >> Shift) & 0xffff));
2339 }
2340
2341 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2342 assert(N == 1 && "Invalid number of operands!");
2343 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2344 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / 90));
2345 }
2346
2347 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2348 assert(N == 1 && "Invalid number of operands!");
2349 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2350 Inst.addOperand(Op: MCOperand::createImm(Val: (MCE->getValue() - 90) / 180));
2351 }
2352
2353 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2354
2355 static std::unique_ptr<AArch64Operand>
2356 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2357 auto Op = std::make_unique<AArch64Operand>(args: k_Token, args&: Ctx);
2358 Op->Tok.Data = Str.data();
2359 Op->Tok.Length = Str.size();
2360 Op->Tok.IsSuffix = IsSuffix;
2361 Op->StartLoc = S;
2362 Op->EndLoc = S;
2363 return Op;
2364 }
2365
2366 static std::unique_ptr<AArch64Operand>
2367 CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2368 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2369 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2370 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2371 auto Op = std::make_unique<AArch64Operand>(args: k_Register, args&: Ctx);
2372 Op->Reg.Reg = Reg;
2373 Op->Reg.Kind = Kind;
2374 Op->Reg.ElementWidth = 0;
2375 Op->Reg.EqualityTy = EqTy;
2376 Op->Reg.ShiftExtend.Type = ExtTy;
2377 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2378 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2379 Op->StartLoc = S;
2380 Op->EndLoc = E;
2381 return Op;
2382 }
2383
2384 static std::unique_ptr<AArch64Operand> CreateVectorReg(
2385 MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
2386 MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2387 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2388 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2389 Kind == RegKind::SVEPredicateVector ||
2390 Kind == RegKind::SVEPredicateAsCounter) &&
2391 "Invalid vector kind");
2392 auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqTy: EqualsReg, ExtTy, ShiftAmount,
2393 HasExplicitAmount);
2394 Op->Reg.ElementWidth = ElementWidth;
2395 return Op;
2396 }
2397
2398 static std::unique_ptr<AArch64Operand>
2399 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2400 unsigned NumElements, unsigned ElementWidth,
2401 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(args: k_VectorList, args&: Ctx);
2403 Op->VectorList.Reg = Reg;
2404 Op->VectorList.Count = Count;
2405 Op->VectorList.Stride = Stride;
2406 Op->VectorList.NumElements = NumElements;
2407 Op->VectorList.ElementWidth = ElementWidth;
2408 Op->VectorList.RegisterKind = RegisterKind;
2409 Op->StartLoc = S;
2410 Op->EndLoc = E;
2411 return Op;
2412 }
2413
2414 static std::unique_ptr<AArch64Operand>
2415 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(args: k_VectorIndex, args&: Ctx);
2417 Op->VectorIndex.Val = Idx;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand>
2424 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2425 auto Op = std::make_unique<AArch64Operand>(args: k_MatrixTileList, args&: Ctx);
2426 Op->MatrixTileList.RegMask = RegMask;
2427 Op->StartLoc = S;
2428 Op->EndLoc = E;
2429 return Op;
2430 }
2431
2432 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2433 const unsigned ElementWidth) {
2434 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2435 RegMap = {
2436 {{0, AArch64::ZAB0},
2437 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2438 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2439 {{8, AArch64::ZAB0},
2440 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2441 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2442 {{16, AArch64::ZAH0},
2443 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2444 {{16, AArch64::ZAH1},
2445 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2446 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2447 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2448 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2449 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2450 };
2451
2452 if (ElementWidth == 64)
2453 OutRegs.insert(V: Reg);
2454 else {
2455 std::vector<unsigned> Regs = RegMap[std::make_pair(x: ElementWidth, y&: Reg)];
2456 assert(!Regs.empty() && "Invalid tile or element width!");
2457 OutRegs.insert_range(R&: Regs);
2458 }
2459 }
2460
2461 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2462 SMLoc E, MCContext &Ctx) {
2463 auto Op = std::make_unique<AArch64Operand>(args: k_Immediate, args&: Ctx);
2464 Op->Imm.Val = Val;
2465 Op->StartLoc = S;
2466 Op->EndLoc = E;
2467 return Op;
2468 }
2469
2470 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2471 unsigned ShiftAmount,
2472 SMLoc S, SMLoc E,
2473 MCContext &Ctx) {
2474 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftedImm, args&: Ctx);
2475 Op->ShiftedImm .Val = Val;
2476 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2477 Op->StartLoc = S;
2478 Op->EndLoc = E;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2483 unsigned Last, SMLoc S,
2484 SMLoc E,
2485 MCContext &Ctx) {
2486 auto Op = std::make_unique<AArch64Operand>(args: k_ImmRange, args&: Ctx);
2487 Op->ImmRange.First = First;
2488 Op->ImmRange.Last = Last;
2489 Op->EndLoc = E;
2490 return Op;
2491 }
2492
2493 static std::unique_ptr<AArch64Operand>
2494 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2495 auto Op = std::make_unique<AArch64Operand>(args: k_CondCode, args&: Ctx);
2496 Op->CondCode.Code = Code;
2497 Op->StartLoc = S;
2498 Op->EndLoc = E;
2499 return Op;
2500 }
2501
2502 static std::unique_ptr<AArch64Operand>
2503 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2504 auto Op = std::make_unique<AArch64Operand>(args: k_FPImm, args&: Ctx);
2505 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2506 Op->FPImm.IsExact = IsExact;
2507 Op->StartLoc = S;
2508 Op->EndLoc = S;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2513 StringRef Str,
2514 SMLoc S,
2515 MCContext &Ctx,
2516 bool HasnXSModifier) {
2517 auto Op = std::make_unique<AArch64Operand>(args: k_Barrier, args&: Ctx);
2518 Op->Barrier.Val = Val;
2519 Op->Barrier.Data = Str.data();
2520 Op->Barrier.Length = Str.size();
2521 Op->Barrier.HasnXSModifier = HasnXSModifier;
2522 Op->StartLoc = S;
2523 Op->EndLoc = S;
2524 return Op;
2525 }
2526
2527 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2528 uint32_t MRSReg,
2529 uint32_t MSRReg,
2530 uint32_t PStateField,
2531 MCContext &Ctx) {
2532 auto Op = std::make_unique<AArch64Operand>(args: k_SysReg, args&: Ctx);
2533 Op->SysReg.Data = Str.data();
2534 Op->SysReg.Length = Str.size();
2535 Op->SysReg.MRSReg = MRSReg;
2536 Op->SysReg.MSRReg = MSRReg;
2537 Op->SysReg.PStateField = PStateField;
2538 Op->StartLoc = S;
2539 Op->EndLoc = S;
2540 return Op;
2541 }
2542
2543 static std::unique_ptr<AArch64Operand>
2544 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2545 auto Op = std::make_unique<AArch64Operand>(args: k_PHint, args&: Ctx);
2546 Op->PHint.Val = Val;
2547 Op->PHint.Data = Str.data();
2548 Op->PHint.Length = Str.size();
2549 Op->StartLoc = S;
2550 Op->EndLoc = S;
2551 return Op;
2552 }
2553
2554 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2555 SMLoc E, MCContext &Ctx) {
2556 auto Op = std::make_unique<AArch64Operand>(args: k_SysCR, args&: Ctx);
2557 Op->SysCRImm.Val = Val;
2558 Op->StartLoc = S;
2559 Op->EndLoc = E;
2560 return Op;
2561 }
2562
2563 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2564 StringRef Str,
2565 SMLoc S,
2566 MCContext &Ctx) {
2567 auto Op = std::make_unique<AArch64Operand>(args: k_Prefetch, args&: Ctx);
2568 Op->Prefetch.Val = Val;
2569 Op->Barrier.Data = Str.data();
2570 Op->Barrier.Length = Str.size();
2571 Op->StartLoc = S;
2572 Op->EndLoc = S;
2573 return Op;
2574 }
2575
2576 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2577 StringRef Str,
2578 SMLoc S,
2579 MCContext &Ctx) {
2580 auto Op = std::make_unique<AArch64Operand>(args: k_PSBHint, args&: Ctx);
2581 Op->PSBHint.Val = Val;
2582 Op->PSBHint.Data = Str.data();
2583 Op->PSBHint.Length = Str.size();
2584 Op->StartLoc = S;
2585 Op->EndLoc = S;
2586 return Op;
2587 }
2588
2589 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2590 StringRef Str,
2591 SMLoc S,
2592 MCContext &Ctx) {
2593 auto Op = std::make_unique<AArch64Operand>(args: k_BTIHint, args&: Ctx);
2594 Op->BTIHint.Val = Val | 32;
2595 Op->BTIHint.Data = Str.data();
2596 Op->BTIHint.Length = Str.size();
2597 Op->StartLoc = S;
2598 Op->EndLoc = S;
2599 return Op;
2600 }
2601
2602 static std::unique_ptr<AArch64Operand>
2603 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2604 auto Op = std::make_unique<AArch64Operand>(args: k_CMHPriorityHint, args&: Ctx);
2605 Op->CMHPriorityHint.Val = Val;
2606 Op->CMHPriorityHint.Data = Str.data();
2607 Op->CMHPriorityHint.Length = Str.size();
2608 Op->StartLoc = S;
2609 Op->EndLoc = S;
2610 return Op;
2611 }
2612
2613 static std::unique_ptr<AArch64Operand>
2614 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2615 auto Op = std::make_unique<AArch64Operand>(args: k_TIndexHint, args&: Ctx);
2616 Op->TIndexHint.Val = Val;
2617 Op->TIndexHint.Data = Str.data();
2618 Op->TIndexHint.Length = Str.size();
2619 Op->StartLoc = S;
2620 Op->EndLoc = S;
2621 return Op;
2622 }
2623
2624 static std::unique_ptr<AArch64Operand>
2625 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2626 SMLoc S, SMLoc E, MCContext &Ctx) {
2627 auto Op = std::make_unique<AArch64Operand>(args: k_MatrixRegister, args&: Ctx);
2628 Op->MatrixReg.Reg = Reg;
2629 Op->MatrixReg.ElementWidth = ElementWidth;
2630 Op->MatrixReg.Kind = Kind;
2631 Op->StartLoc = S;
2632 Op->EndLoc = E;
2633 return Op;
2634 }
2635
2636 static std::unique_ptr<AArch64Operand>
2637 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2638 auto Op = std::make_unique<AArch64Operand>(args: k_SVCR, args&: Ctx);
2639 Op->SVCR.PStateField = PStateField;
2640 Op->SVCR.Data = Str.data();
2641 Op->SVCR.Length = Str.size();
2642 Op->StartLoc = S;
2643 Op->EndLoc = S;
2644 return Op;
2645 }
2646
2647 static std::unique_ptr<AArch64Operand>
2648 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2649 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2650 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftExtend, args&: Ctx);
2651 Op->ShiftExtend.Type = ShOp;
2652 Op->ShiftExtend.Amount = Val;
2653 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2654 Op->StartLoc = S;
2655 Op->EndLoc = E;
2656 return Op;
2657 }
2658};
2659
2660} // end anonymous namespace.
2661
2662void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
2663 switch (Kind) {
2664 case k_FPImm:
2665 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2666 if (!getFPImmIsExact())
2667 OS << " (inexact)";
2668 OS << ">";
2669 break;
2670 case k_Barrier: {
2671 StringRef Name = getBarrierName();
2672 if (!Name.empty())
2673 OS << "<barrier " << Name << ">";
2674 else
2675 OS << "<barrier invalid #" << getBarrier() << ">";
2676 break;
2677 }
2678 case k_Immediate:
2679 MAI.printExpr(OS, *getImm());
2680 break;
2681 case k_ShiftedImm: {
2682 unsigned Shift = getShiftedImmShift();
2683 OS << "<shiftedimm ";
2684 MAI.printExpr(OS, *getShiftedImmVal());
2685 OS << ", lsl #" << AArch64_AM::getShiftValue(Imm: Shift) << ">";
2686 break;
2687 }
2688 case k_ImmRange: {
2689 OS << "<immrange ";
2690 OS << getFirstImmVal();
2691 OS << ":" << getLastImmVal() << ">";
2692 break;
2693 }
2694 case k_CondCode:
2695 OS << "<condcode " << getCondCode() << ">";
2696 break;
2697 case k_VectorList: {
2698 OS << "<vectorlist ";
2699 MCRegister Reg = getVectorListStart();
2700 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2701 OS << Reg.id() + i * getVectorListStride() << " ";
2702 OS << ">";
2703 break;
2704 }
2705 case k_VectorIndex:
2706 OS << "<vectorindex " << getVectorIndex() << ">";
2707 break;
2708 case k_SysReg:
2709 OS << "<sysreg: " << getSysReg() << '>';
2710 break;
2711 case k_Token:
2712 OS << "'" << getToken() << "'";
2713 break;
2714 case k_SysCR:
2715 OS << "c" << getSysCR();
2716 break;
2717 case k_Prefetch: {
2718 StringRef Name = getPrefetchName();
2719 if (!Name.empty())
2720 OS << "<prfop " << Name << ">";
2721 else
2722 OS << "<prfop invalid #" << getPrefetch() << ">";
2723 break;
2724 }
2725 case k_PSBHint:
2726 OS << getPSBHintName();
2727 break;
2728 case k_PHint:
2729 OS << getPHintName();
2730 break;
2731 case k_BTIHint:
2732 OS << getBTIHintName();
2733 break;
2734 case k_CMHPriorityHint:
2735 OS << getCMHPriorityHintName();
2736 break;
2737 case k_TIndexHint:
2738 OS << getTIndexHintName();
2739 break;
2740 case k_MatrixRegister:
2741 OS << "<matrix " << getMatrixReg().id() << ">";
2742 break;
2743 case k_MatrixTileList: {
2744 OS << "<matrixlist ";
2745 unsigned RegMask = getMatrixTileListRegMask();
2746 unsigned MaxBits = 8;
2747 for (unsigned I = MaxBits; I > 0; --I)
2748 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2749 OS << '>';
2750 break;
2751 }
2752 case k_SVCR: {
2753 OS << getSVCR();
2754 break;
2755 }
2756 case k_Register:
2757 OS << "<register " << getReg().id() << ">";
2758 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2759 break;
2760 [[fallthrough]];
2761 case k_ShiftExtend:
2762 OS << "<" << AArch64_AM::getShiftExtendName(ST: getShiftExtendType()) << " #"
2763 << getShiftExtendAmount();
2764 if (!hasShiftExtendAmount())
2765 OS << "<imp>";
2766 OS << '>';
2767 break;
2768 }
2769}
2770
2771/// @name Auto-generated Match Functions
2772/// {
2773
2774static MCRegister MatchRegisterName(StringRef Name);
2775
2776/// }
2777
2778static unsigned MatchNeonVectorRegName(StringRef Name) {
2779 return StringSwitch<unsigned>(Name.lower())
2780 .Case(S: "v0", Value: AArch64::Q0)
2781 .Case(S: "v1", Value: AArch64::Q1)
2782 .Case(S: "v2", Value: AArch64::Q2)
2783 .Case(S: "v3", Value: AArch64::Q3)
2784 .Case(S: "v4", Value: AArch64::Q4)
2785 .Case(S: "v5", Value: AArch64::Q5)
2786 .Case(S: "v6", Value: AArch64::Q6)
2787 .Case(S: "v7", Value: AArch64::Q7)
2788 .Case(S: "v8", Value: AArch64::Q8)
2789 .Case(S: "v9", Value: AArch64::Q9)
2790 .Case(S: "v10", Value: AArch64::Q10)
2791 .Case(S: "v11", Value: AArch64::Q11)
2792 .Case(S: "v12", Value: AArch64::Q12)
2793 .Case(S: "v13", Value: AArch64::Q13)
2794 .Case(S: "v14", Value: AArch64::Q14)
2795 .Case(S: "v15", Value: AArch64::Q15)
2796 .Case(S: "v16", Value: AArch64::Q16)
2797 .Case(S: "v17", Value: AArch64::Q17)
2798 .Case(S: "v18", Value: AArch64::Q18)
2799 .Case(S: "v19", Value: AArch64::Q19)
2800 .Case(S: "v20", Value: AArch64::Q20)
2801 .Case(S: "v21", Value: AArch64::Q21)
2802 .Case(S: "v22", Value: AArch64::Q22)
2803 .Case(S: "v23", Value: AArch64::Q23)
2804 .Case(S: "v24", Value: AArch64::Q24)
2805 .Case(S: "v25", Value: AArch64::Q25)
2806 .Case(S: "v26", Value: AArch64::Q26)
2807 .Case(S: "v27", Value: AArch64::Q27)
2808 .Case(S: "v28", Value: AArch64::Q28)
2809 .Case(S: "v29", Value: AArch64::Q29)
2810 .Case(S: "v30", Value: AArch64::Q30)
2811 .Case(S: "v31", Value: AArch64::Q31)
2812 .Default(Value: 0);
2813}
2814
2815/// Returns an optional pair of (#elements, element-width) if Suffix
2816/// is a valid vector kind. Where the number of elements in a vector
2817/// or the vector width is implicit or explicitly unknown (but still a
2818/// valid suffix kind), 0 is used.
2819static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2820 RegKind VectorKind) {
2821 std::pair<int, int> Res = {-1, -1};
2822
2823 switch (VectorKind) {
2824 case RegKind::NeonVector:
2825 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2826 .Case(S: "", Value: {0, 0})
2827 .Case(S: ".1d", Value: {1, 64})
2828 .Case(S: ".1q", Value: {1, 128})
2829 // '.2h' needed for fp16 scalar pairwise reductions
2830 .Case(S: ".2h", Value: {2, 16})
2831 .Case(S: ".2b", Value: {2, 8})
2832 .Case(S: ".2s", Value: {2, 32})
2833 .Case(S: ".2d", Value: {2, 64})
2834 // '.4b' is another special case for the ARMv8.2a dot product
2835 // operand
2836 .Case(S: ".4b", Value: {4, 8})
2837 .Case(S: ".4h", Value: {4, 16})
2838 .Case(S: ".4s", Value: {4, 32})
2839 .Case(S: ".8b", Value: {8, 8})
2840 .Case(S: ".8h", Value: {8, 16})
2841 .Case(S: ".16b", Value: {16, 8})
2842 // Accept the width neutral ones, too, for verbose syntax. If
2843 // those aren't used in the right places, the token operand won't
2844 // match so all will work out.
2845 .Case(S: ".b", Value: {0, 8})
2846 .Case(S: ".h", Value: {0, 16})
2847 .Case(S: ".s", Value: {0, 32})
2848 .Case(S: ".d", Value: {0, 64})
2849 .Default(Value: {-1, -1});
2850 break;
2851 case RegKind::SVEPredicateAsCounter:
2852 case RegKind::SVEPredicateVector:
2853 case RegKind::SVEDataVector:
2854 case RegKind::Matrix:
2855 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2856 .Case(S: "", Value: {0, 0})
2857 .Case(S: ".b", Value: {0, 8})
2858 .Case(S: ".h", Value: {0, 16})
2859 .Case(S: ".s", Value: {0, 32})
2860 .Case(S: ".d", Value: {0, 64})
2861 .Case(S: ".q", Value: {0, 128})
2862 .Default(Value: {-1, -1});
2863 break;
2864 default:
2865 llvm_unreachable("Unsupported RegKind");
2866 }
2867
2868 if (Res == std::make_pair(x: -1, y: -1))
2869 return std::nullopt;
2870
2871 return std::optional<std::pair<int, int>>(Res);
2872}
2873
2874static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2875 return parseVectorKind(Suffix, VectorKind).has_value();
2876}
2877
2878static unsigned matchSVEDataVectorRegName(StringRef Name) {
2879 return StringSwitch<unsigned>(Name.lower())
2880 .Case(S: "z0", Value: AArch64::Z0)
2881 .Case(S: "z1", Value: AArch64::Z1)
2882 .Case(S: "z2", Value: AArch64::Z2)
2883 .Case(S: "z3", Value: AArch64::Z3)
2884 .Case(S: "z4", Value: AArch64::Z4)
2885 .Case(S: "z5", Value: AArch64::Z5)
2886 .Case(S: "z6", Value: AArch64::Z6)
2887 .Case(S: "z7", Value: AArch64::Z7)
2888 .Case(S: "z8", Value: AArch64::Z8)
2889 .Case(S: "z9", Value: AArch64::Z9)
2890 .Case(S: "z10", Value: AArch64::Z10)
2891 .Case(S: "z11", Value: AArch64::Z11)
2892 .Case(S: "z12", Value: AArch64::Z12)
2893 .Case(S: "z13", Value: AArch64::Z13)
2894 .Case(S: "z14", Value: AArch64::Z14)
2895 .Case(S: "z15", Value: AArch64::Z15)
2896 .Case(S: "z16", Value: AArch64::Z16)
2897 .Case(S: "z17", Value: AArch64::Z17)
2898 .Case(S: "z18", Value: AArch64::Z18)
2899 .Case(S: "z19", Value: AArch64::Z19)
2900 .Case(S: "z20", Value: AArch64::Z20)
2901 .Case(S: "z21", Value: AArch64::Z21)
2902 .Case(S: "z22", Value: AArch64::Z22)
2903 .Case(S: "z23", Value: AArch64::Z23)
2904 .Case(S: "z24", Value: AArch64::Z24)
2905 .Case(S: "z25", Value: AArch64::Z25)
2906 .Case(S: "z26", Value: AArch64::Z26)
2907 .Case(S: "z27", Value: AArch64::Z27)
2908 .Case(S: "z28", Value: AArch64::Z28)
2909 .Case(S: "z29", Value: AArch64::Z29)
2910 .Case(S: "z30", Value: AArch64::Z30)
2911 .Case(S: "z31", Value: AArch64::Z31)
2912 .Default(Value: 0);
2913}
2914
2915static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2916 return StringSwitch<unsigned>(Name.lower())
2917 .Case(S: "p0", Value: AArch64::P0)
2918 .Case(S: "p1", Value: AArch64::P1)
2919 .Case(S: "p2", Value: AArch64::P2)
2920 .Case(S: "p3", Value: AArch64::P3)
2921 .Case(S: "p4", Value: AArch64::P4)
2922 .Case(S: "p5", Value: AArch64::P5)
2923 .Case(S: "p6", Value: AArch64::P6)
2924 .Case(S: "p7", Value: AArch64::P7)
2925 .Case(S: "p8", Value: AArch64::P8)
2926 .Case(S: "p9", Value: AArch64::P9)
2927 .Case(S: "p10", Value: AArch64::P10)
2928 .Case(S: "p11", Value: AArch64::P11)
2929 .Case(S: "p12", Value: AArch64::P12)
2930 .Case(S: "p13", Value: AArch64::P13)
2931 .Case(S: "p14", Value: AArch64::P14)
2932 .Case(S: "p15", Value: AArch64::P15)
2933 .Default(Value: 0);
2934}
2935
2936static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2937 return StringSwitch<unsigned>(Name.lower())
2938 .Case(S: "pn0", Value: AArch64::PN0)
2939 .Case(S: "pn1", Value: AArch64::PN1)
2940 .Case(S: "pn2", Value: AArch64::PN2)
2941 .Case(S: "pn3", Value: AArch64::PN3)
2942 .Case(S: "pn4", Value: AArch64::PN4)
2943 .Case(S: "pn5", Value: AArch64::PN5)
2944 .Case(S: "pn6", Value: AArch64::PN6)
2945 .Case(S: "pn7", Value: AArch64::PN7)
2946 .Case(S: "pn8", Value: AArch64::PN8)
2947 .Case(S: "pn9", Value: AArch64::PN9)
2948 .Case(S: "pn10", Value: AArch64::PN10)
2949 .Case(S: "pn11", Value: AArch64::PN11)
2950 .Case(S: "pn12", Value: AArch64::PN12)
2951 .Case(S: "pn13", Value: AArch64::PN13)
2952 .Case(S: "pn14", Value: AArch64::PN14)
2953 .Case(S: "pn15", Value: AArch64::PN15)
2954 .Default(Value: 0);
2955}
2956
2957static unsigned matchMatrixTileListRegName(StringRef Name) {
2958 return StringSwitch<unsigned>(Name.lower())
2959 .Case(S: "za0.d", Value: AArch64::ZAD0)
2960 .Case(S: "za1.d", Value: AArch64::ZAD1)
2961 .Case(S: "za2.d", Value: AArch64::ZAD2)
2962 .Case(S: "za3.d", Value: AArch64::ZAD3)
2963 .Case(S: "za4.d", Value: AArch64::ZAD4)
2964 .Case(S: "za5.d", Value: AArch64::ZAD5)
2965 .Case(S: "za6.d", Value: AArch64::ZAD6)
2966 .Case(S: "za7.d", Value: AArch64::ZAD7)
2967 .Case(S: "za0.s", Value: AArch64::ZAS0)
2968 .Case(S: "za1.s", Value: AArch64::ZAS1)
2969 .Case(S: "za2.s", Value: AArch64::ZAS2)
2970 .Case(S: "za3.s", Value: AArch64::ZAS3)
2971 .Case(S: "za0.h", Value: AArch64::ZAH0)
2972 .Case(S: "za1.h", Value: AArch64::ZAH1)
2973 .Case(S: "za0.b", Value: AArch64::ZAB0)
2974 .Default(Value: 0);
2975}
2976
2977static unsigned matchMatrixRegName(StringRef Name) {
2978 return StringSwitch<unsigned>(Name.lower())
2979 .Case(S: "za", Value: AArch64::ZA)
2980 .Case(S: "za0.q", Value: AArch64::ZAQ0)
2981 .Case(S: "za1.q", Value: AArch64::ZAQ1)
2982 .Case(S: "za2.q", Value: AArch64::ZAQ2)
2983 .Case(S: "za3.q", Value: AArch64::ZAQ3)
2984 .Case(S: "za4.q", Value: AArch64::ZAQ4)
2985 .Case(S: "za5.q", Value: AArch64::ZAQ5)
2986 .Case(S: "za6.q", Value: AArch64::ZAQ6)
2987 .Case(S: "za7.q", Value: AArch64::ZAQ7)
2988 .Case(S: "za8.q", Value: AArch64::ZAQ8)
2989 .Case(S: "za9.q", Value: AArch64::ZAQ9)
2990 .Case(S: "za10.q", Value: AArch64::ZAQ10)
2991 .Case(S: "za11.q", Value: AArch64::ZAQ11)
2992 .Case(S: "za12.q", Value: AArch64::ZAQ12)
2993 .Case(S: "za13.q", Value: AArch64::ZAQ13)
2994 .Case(S: "za14.q", Value: AArch64::ZAQ14)
2995 .Case(S: "za15.q", Value: AArch64::ZAQ15)
2996 .Case(S: "za0.d", Value: AArch64::ZAD0)
2997 .Case(S: "za1.d", Value: AArch64::ZAD1)
2998 .Case(S: "za2.d", Value: AArch64::ZAD2)
2999 .Case(S: "za3.d", Value: AArch64::ZAD3)
3000 .Case(S: "za4.d", Value: AArch64::ZAD4)
3001 .Case(S: "za5.d", Value: AArch64::ZAD5)
3002 .Case(S: "za6.d", Value: AArch64::ZAD6)
3003 .Case(S: "za7.d", Value: AArch64::ZAD7)
3004 .Case(S: "za0.s", Value: AArch64::ZAS0)
3005 .Case(S: "za1.s", Value: AArch64::ZAS1)
3006 .Case(S: "za2.s", Value: AArch64::ZAS2)
3007 .Case(S: "za3.s", Value: AArch64::ZAS3)
3008 .Case(S: "za0.h", Value: AArch64::ZAH0)
3009 .Case(S: "za1.h", Value: AArch64::ZAH1)
3010 .Case(S: "za0.b", Value: AArch64::ZAB0)
3011 .Case(S: "za0h.q", Value: AArch64::ZAQ0)
3012 .Case(S: "za1h.q", Value: AArch64::ZAQ1)
3013 .Case(S: "za2h.q", Value: AArch64::ZAQ2)
3014 .Case(S: "za3h.q", Value: AArch64::ZAQ3)
3015 .Case(S: "za4h.q", Value: AArch64::ZAQ4)
3016 .Case(S: "za5h.q", Value: AArch64::ZAQ5)
3017 .Case(S: "za6h.q", Value: AArch64::ZAQ6)
3018 .Case(S: "za7h.q", Value: AArch64::ZAQ7)
3019 .Case(S: "za8h.q", Value: AArch64::ZAQ8)
3020 .Case(S: "za9h.q", Value: AArch64::ZAQ9)
3021 .Case(S: "za10h.q", Value: AArch64::ZAQ10)
3022 .Case(S: "za11h.q", Value: AArch64::ZAQ11)
3023 .Case(S: "za12h.q", Value: AArch64::ZAQ12)
3024 .Case(S: "za13h.q", Value: AArch64::ZAQ13)
3025 .Case(S: "za14h.q", Value: AArch64::ZAQ14)
3026 .Case(S: "za15h.q", Value: AArch64::ZAQ15)
3027 .Case(S: "za0h.d", Value: AArch64::ZAD0)
3028 .Case(S: "za1h.d", Value: AArch64::ZAD1)
3029 .Case(S: "za2h.d", Value: AArch64::ZAD2)
3030 .Case(S: "za3h.d", Value: AArch64::ZAD3)
3031 .Case(S: "za4h.d", Value: AArch64::ZAD4)
3032 .Case(S: "za5h.d", Value: AArch64::ZAD5)
3033 .Case(S: "za6h.d", Value: AArch64::ZAD6)
3034 .Case(S: "za7h.d", Value: AArch64::ZAD7)
3035 .Case(S: "za0h.s", Value: AArch64::ZAS0)
3036 .Case(S: "za1h.s", Value: AArch64::ZAS1)
3037 .Case(S: "za2h.s", Value: AArch64::ZAS2)
3038 .Case(S: "za3h.s", Value: AArch64::ZAS3)
3039 .Case(S: "za0h.h", Value: AArch64::ZAH0)
3040 .Case(S: "za1h.h", Value: AArch64::ZAH1)
3041 .Case(S: "za0h.b", Value: AArch64::ZAB0)
3042 .Case(S: "za0v.q", Value: AArch64::ZAQ0)
3043 .Case(S: "za1v.q", Value: AArch64::ZAQ1)
3044 .Case(S: "za2v.q", Value: AArch64::ZAQ2)
3045 .Case(S: "za3v.q", Value: AArch64::ZAQ3)
3046 .Case(S: "za4v.q", Value: AArch64::ZAQ4)
3047 .Case(S: "za5v.q", Value: AArch64::ZAQ5)
3048 .Case(S: "za6v.q", Value: AArch64::ZAQ6)
3049 .Case(S: "za7v.q", Value: AArch64::ZAQ7)
3050 .Case(S: "za8v.q", Value: AArch64::ZAQ8)
3051 .Case(S: "za9v.q", Value: AArch64::ZAQ9)
3052 .Case(S: "za10v.q", Value: AArch64::ZAQ10)
3053 .Case(S: "za11v.q", Value: AArch64::ZAQ11)
3054 .Case(S: "za12v.q", Value: AArch64::ZAQ12)
3055 .Case(S: "za13v.q", Value: AArch64::ZAQ13)
3056 .Case(S: "za14v.q", Value: AArch64::ZAQ14)
3057 .Case(S: "za15v.q", Value: AArch64::ZAQ15)
3058 .Case(S: "za0v.d", Value: AArch64::ZAD0)
3059 .Case(S: "za1v.d", Value: AArch64::ZAD1)
3060 .Case(S: "za2v.d", Value: AArch64::ZAD2)
3061 .Case(S: "za3v.d", Value: AArch64::ZAD3)
3062 .Case(S: "za4v.d", Value: AArch64::ZAD4)
3063 .Case(S: "za5v.d", Value: AArch64::ZAD5)
3064 .Case(S: "za6v.d", Value: AArch64::ZAD6)
3065 .Case(S: "za7v.d", Value: AArch64::ZAD7)
3066 .Case(S: "za0v.s", Value: AArch64::ZAS0)
3067 .Case(S: "za1v.s", Value: AArch64::ZAS1)
3068 .Case(S: "za2v.s", Value: AArch64::ZAS2)
3069 .Case(S: "za3v.s", Value: AArch64::ZAS3)
3070 .Case(S: "za0v.h", Value: AArch64::ZAH0)
3071 .Case(S: "za1v.h", Value: AArch64::ZAH1)
3072 .Case(S: "za0v.b", Value: AArch64::ZAB0)
3073 .Default(Value: 0);
3074}
3075
3076bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3077 SMLoc &EndLoc) {
3078 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3079}
3080
3081ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3082 SMLoc &EndLoc) {
3083 StartLoc = getLoc();
3084 ParseStatus Res = tryParseScalarRegister(Reg);
3085 EndLoc = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3086 return Res;
3087}
3088
3089// Matches a register name or register alias previously defined by '.req'
3090MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3091 RegKind Kind) {
3092 MCRegister Reg = MCRegister();
3093 if ((Reg = matchSVEDataVectorRegName(Name)))
3094 return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
3095
3096 if ((Reg = matchSVEPredicateVectorRegName(Name)))
3097 return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
3098
3099 if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
3100 return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
3101
3102 if ((Reg = MatchNeonVectorRegName(Name)))
3103 return Kind == RegKind::NeonVector ? Reg : MCRegister();
3104
3105 if ((Reg = matchMatrixRegName(Name)))
3106 return Kind == RegKind::Matrix ? Reg : MCRegister();
3107
3108 if (Name.equals_insensitive(RHS: "zt0"))
3109 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3110
3111 // The parsed register must be of RegKind Scalar
3112 if ((Reg = MatchRegisterName(Name)))
3113 return (Kind == RegKind::Scalar) ? Reg : MCRegister();
3114
3115 if (!Reg) {
3116 // Handle a few common aliases of registers.
3117 if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
3118 .Case(S: "fp", Value: AArch64::FP)
3119 .Case(S: "lr", Value: AArch64::LR)
3120 .Case(S: "x31", Value: AArch64::XZR)
3121 .Case(S: "w31", Value: AArch64::WZR)
3122 .Default(Value: 0))
3123 return Kind == RegKind::Scalar ? Reg : MCRegister();
3124
3125 // Check for aliases registered via .req. Canonicalize to lower case.
3126 // That's more consistent since register names are case insensitive, and
3127 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3128 auto Entry = RegisterReqs.find(Key: Name.lower());
3129 if (Entry == RegisterReqs.end())
3130 return MCRegister();
3131
3132 // set Reg if the match is the right kind of register
3133 if (Kind == Entry->getValue().first)
3134 Reg = Entry->getValue().second;
3135 }
3136 return Reg;
3137}
3138
3139unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3140 switch (K) {
3141 case RegKind::Scalar:
3142 case RegKind::NeonVector:
3143 case RegKind::SVEDataVector:
3144 return 32;
3145 case RegKind::Matrix:
3146 case RegKind::SVEPredicateVector:
3147 case RegKind::SVEPredicateAsCounter:
3148 return 16;
3149 case RegKind::LookupTable:
3150 return 1;
3151 }
3152 llvm_unreachable("Unsupported RegKind");
3153}
3154
3155/// tryParseScalarRegister - Try to parse a register name. The token must be an
3156/// Identifier when called, and if it is a register name the token is eaten and
3157/// the register is added to the operand list.
3158ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3159 const AsmToken &Tok = getTok();
3160 if (Tok.isNot(K: AsmToken::Identifier))
3161 return ParseStatus::NoMatch;
3162
3163 std::string lowerCase = Tok.getString().lower();
3164 MCRegister Reg = matchRegisterNameAlias(Name: lowerCase, Kind: RegKind::Scalar);
3165 if (!Reg)
3166 return ParseStatus::NoMatch;
3167
3168 RegNum = Reg;
3169 Lex(); // Eat identifier token.
3170 return ParseStatus::Success;
3171}
3172
3173/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3174ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3175 SMLoc S = getLoc();
3176
3177 if (getTok().isNot(K: AsmToken::Identifier))
3178 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3179
3180 StringRef Tok = getTok().getIdentifier();
3181 if (Tok[0] != 'c' && Tok[0] != 'C')
3182 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3183
3184 uint32_t CRNum;
3185 bool BadNum = Tok.drop_front().getAsInteger(Radix: 10, Result&: CRNum);
3186 if (BadNum || CRNum > 15)
3187 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3188
3189 Lex(); // Eat identifier token.
3190 Operands.push_back(
3191 Elt: AArch64Operand::CreateSysCR(Val: CRNum, S, E: getLoc(), Ctx&: getContext()));
3192 return ParseStatus::Success;
3193}
3194
3195// Either an identifier for named values or a 6-bit immediate.
3196ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3197 SMLoc S = getLoc();
3198 const AsmToken &Tok = getTok();
3199
3200 unsigned MaxVal = 63;
3201
3202 // Immediate case, with optional leading hash:
3203 if (parseOptionalToken(T: AsmToken::Hash) ||
3204 Tok.is(K: AsmToken::Integer)) {
3205 const MCExpr *ImmVal;
3206 if (getParser().parseExpression(Res&: ImmVal))
3207 return ParseStatus::Failure;
3208
3209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
3210 if (!MCE)
3211 return TokError(Msg: "immediate value expected for prefetch operand");
3212 unsigned prfop = MCE->getValue();
3213 if (prfop > MaxVal)
3214 return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
3215 "] expected");
3216
3217 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: MCE->getValue());
3218 Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
3219 Val: prfop, Str: RPRFM ? RPRFM->Name : "", S, Ctx&: getContext()));
3220 return ParseStatus::Success;
3221 }
3222
3223 if (Tok.isNot(K: AsmToken::Identifier))
3224 return TokError(Msg: "prefetch hint expected");
3225
3226 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Name: Tok.getString());
3227 if (!RPRFM)
3228 return TokError(Msg: "prefetch hint expected");
3229
3230 Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
3231 Val: RPRFM->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3232 Lex(); // Eat identifier token.
3233 return ParseStatus::Success;
3234}
3235
3236/// tryParsePrefetch - Try to parse a prefetch operand.
3237template <bool IsSVEPrefetch>
3238ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3239 SMLoc S = getLoc();
3240 const AsmToken &Tok = getTok();
3241
3242 auto LookupByName = [](StringRef N) {
3243 if (IsSVEPrefetch) {
3244 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(Name: N))
3245 return std::optional<unsigned>(Res->Encoding);
3246 } else if (auto Res = AArch64PRFM::lookupPRFMByName(Name: N))
3247 return std::optional<unsigned>(Res->Encoding);
3248 return std::optional<unsigned>();
3249 };
3250
3251 auto LookupByEncoding = [](unsigned E) {
3252 if (IsSVEPrefetch) {
3253 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: E))
3254 return std::optional<StringRef>(Res->Name);
3255 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(Encoding: E))
3256 return std::optional<StringRef>(Res->Name);
3257 return std::optional<StringRef>();
3258 };
3259 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3260
3261 // Either an identifier for named values or a 5-bit immediate.
3262 // Eat optional hash.
3263 if (parseOptionalToken(T: AsmToken::Hash) ||
3264 Tok.is(K: AsmToken::Integer)) {
3265 const MCExpr *ImmVal;
3266 if (getParser().parseExpression(Res&: ImmVal))
3267 return ParseStatus::Failure;
3268
3269 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
3270 if (!MCE)
3271 return TokError(Msg: "immediate value expected for prefetch operand");
3272 unsigned prfop = MCE->getValue();
3273 if (prfop > MaxVal)
3274 return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
3275 "] expected");
3276
3277 auto PRFM = LookupByEncoding(MCE->getValue());
3278 Operands.push_back(AArch64Operand::CreatePrefetch(Val: prfop, Str: PRFM.value_or(""),
3279 S, Ctx&: getContext()));
3280 return ParseStatus::Success;
3281 }
3282
3283 if (Tok.isNot(K: AsmToken::Identifier))
3284 return TokError(Msg: "prefetch hint expected");
3285
3286 auto PRFM = LookupByName(Tok.getString());
3287 if (!PRFM)
3288 return TokError(Msg: "prefetch hint expected");
3289
3290 Operands.push_back(AArch64Operand::CreatePrefetch(
3291 Val: *PRFM, Str: Tok.getString(), S, Ctx&: getContext()));
3292 Lex(); // Eat identifier token.
3293 return ParseStatus::Success;
3294}
3295
3296/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3297ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3298 SMLoc S = getLoc();
3299 const AsmToken &Tok = getTok();
3300 if (Tok.isNot(K: AsmToken::Identifier))
3301 return TokError(Msg: "invalid operand for instruction");
3302
3303 auto PSB = AArch64PSBHint::lookupPSBByName(Name: Tok.getString());
3304 if (!PSB)
3305 return TokError(Msg: "invalid operand for instruction");
3306
3307 Operands.push_back(Elt: AArch64Operand::CreatePSBHint(
3308 Val: PSB->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3309 Lex(); // Eat identifier token.
3310 return ParseStatus::Success;
3311}
3312
3313ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3314 SMLoc StartLoc = getLoc();
3315
3316 MCRegister RegNum;
3317
3318 // The case where xzr, xzr is not present is handled by an InstAlias.
3319
3320 auto RegTok = getTok(); // in case we need to backtrack
3321 if (!tryParseScalarRegister(RegNum).isSuccess())
3322 return ParseStatus::NoMatch;
3323
3324 if (RegNum != AArch64::XZR) {
3325 getLexer().UnLex(Token: RegTok);
3326 return ParseStatus::NoMatch;
3327 }
3328
3329 if (parseComma())
3330 return ParseStatus::Failure;
3331
3332 if (!tryParseScalarRegister(RegNum).isSuccess())
3333 return TokError(Msg: "expected register operand");
3334
3335 if (RegNum != AArch64::XZR)
3336 return TokError(Msg: "xzr must be followed by xzr");
3337
3338 // We need to push something, since we claim this is an operand in .td.
3339 // See also AArch64AsmParser::parseKeywordOperand.
3340 Operands.push_back(Elt: AArch64Operand::CreateReg(
3341 Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
3342
3343 return ParseStatus::Success;
3344}
3345
3346/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3347ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3348 SMLoc S = getLoc();
3349 const AsmToken &Tok = getTok();
3350 if (Tok.isNot(K: AsmToken::Identifier))
3351 return TokError(Msg: "invalid operand for instruction");
3352
3353 auto BTI = AArch64BTIHint::lookupBTIByName(Name: Tok.getString());
3354 if (!BTI)
3355 return TokError(Msg: "invalid operand for instruction");
3356
3357 Operands.push_back(Elt: AArch64Operand::CreateBTIHint(
3358 Val: BTI->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3359 Lex(); // Eat identifier token.
3360 return ParseStatus::Success;
3361}
3362
3363/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3364ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3365 SMLoc S = getLoc();
3366 const AsmToken &Tok = getTok();
3367 if (Tok.isNot(K: AsmToken::Identifier))
3368 return TokError(Msg: "invalid operand for instruction");
3369
3370 auto CMHPriority =
3371 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Name: Tok.getString());
3372 if (!CMHPriority)
3373 return TokError(Msg: "invalid operand for instruction");
3374
3375 Operands.push_back(Elt: AArch64Operand::CreateCMHPriorityHint(
3376 Val: CMHPriority->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3377 Lex(); // Eat identifier token.
3378 return ParseStatus::Success;
3379}
3380
3381/// tryParseTIndexHint - Try to parse a TIndex operand
3382ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384 const AsmToken &Tok = getTok();
3385 if (Tok.isNot(K: AsmToken::Identifier))
3386 return TokError(Msg: "invalid operand for instruction");
3387
3388 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Name: Tok.getString());
3389 if (!TIndex)
3390 return TokError(Msg: "invalid operand for instruction");
3391
3392 Operands.push_back(Elt: AArch64Operand::CreateTIndexHint(
3393 Val: TIndex->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3394 Lex(); // Eat identifier token.
3395 return ParseStatus::Success;
3396}
3397
3398/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3399/// instruction.
3400ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3401 SMLoc S = getLoc();
3402 const MCExpr *Expr = nullptr;
3403
3404 if (getTok().is(K: AsmToken::Hash)) {
3405 Lex(); // Eat hash token.
3406 }
3407
3408 if (parseSymbolicImmVal(ImmVal&: Expr))
3409 return ParseStatus::Failure;
3410
3411 AArch64::Specifier ELFSpec;
3412 AArch64::Specifier DarwinSpec;
3413 int64_t Addend;
3414 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3415 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3416 // No modifier was specified at all; this is the syntax for an ELF basic
3417 // ADRP relocation (unfortunately).
3418 Expr =
3419 MCSpecifierExpr::create(Expr, S: AArch64::S_ABS_PAGE, Ctx&: getContext(), Loc: S);
3420 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3421 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3422 Addend != 0) {
3423 return Error(L: S, Msg: "gotpage label reference not allowed an addend");
3424 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3425 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3426 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3427 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3428 ELFSpec != AArch64::S_GOT_PAGE &&
3429 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3430 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3431 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3432 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3433 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3434 // The operand must be an @page or @gotpage qualified symbolref.
3435 return Error(L: S, Msg: "page or gotpage label reference expected");
3436 }
3437 }
3438
3439 // We have either a label reference possibly with addend or an immediate. The
3440 // addend is a raw value here. The linker will adjust it to only reference the
3441 // page.
3442 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3443 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
3444
3445 return ParseStatus::Success;
3446}
3447
3448/// tryParseAdrLabel - Parse and validate a source label for the ADR
3449/// instruction.
3450ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3451 SMLoc S = getLoc();
3452 const MCExpr *Expr = nullptr;
3453
3454 // Leave anything with a bracket to the default for SVE
3455 if (getTok().is(K: AsmToken::LBrac))
3456 return ParseStatus::NoMatch;
3457
3458 if (getTok().is(K: AsmToken::Hash))
3459 Lex(); // Eat hash token.
3460
3461 if (parseSymbolicImmVal(ImmVal&: Expr))
3462 return ParseStatus::Failure;
3463
3464 AArch64::Specifier ELFSpec;
3465 AArch64::Specifier DarwinSpec;
3466 int64_t Addend;
3467 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3468 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3469 // No modifier was specified at all; this is the syntax for an ELF basic
3470 // ADR relocation (unfortunately).
3471 Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_ABS, Ctx&: getContext(), Loc: S);
3472 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3473 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3474 // adr. It's not actually GOT entry page address but the GOT address
3475 // itself - we just share the same variant kind with :got_auth: operator
3476 // applied for adrp.
3477 // TODO: can we somehow get current TargetMachine object to call
3478 // getCodeModel() on it to ensure we are using tiny code model?
3479 return Error(L: S, Msg: "unexpected adr label");
3480 }
3481 }
3482
3483 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3484 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
3485 return ParseStatus::Success;
3486}
3487
3488/// tryParseFPImm - A floating point immediate expression operand.
3489template <bool AddFPZeroAsLiteral>
3490ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3491 SMLoc S = getLoc();
3492
3493 bool Hash = parseOptionalToken(T: AsmToken::Hash);
3494
3495 // Handle negation, as that still comes through as a separate token.
3496 bool isNegative = parseOptionalToken(T: AsmToken::Minus);
3497
3498 const AsmToken &Tok = getTok();
3499 if (!Tok.is(K: AsmToken::Real) && !Tok.is(K: AsmToken::Integer)) {
3500 if (!Hash)
3501 return ParseStatus::NoMatch;
3502 return TokError(Msg: "invalid floating point immediate");
3503 }
3504
3505 // Parse hexadecimal representation.
3506 if (Tok.is(K: AsmToken::Integer) && Tok.getString().starts_with(Prefix: "0x")) {
3507 if (Tok.getIntVal() > 255 || isNegative)
3508 return TokError(Msg: "encoded floating point value out of range");
3509
3510 APFloat F((double)AArch64_AM::getFPImmFloat(Imm: Tok.getIntVal()));
3511 Operands.push_back(
3512 Elt: AArch64Operand::CreateFPImm(Val: F, IsExact: true, S, Ctx&: getContext()));
3513 } else {
3514 // Parse FP representation.
3515 APFloat RealVal(APFloat::IEEEdouble());
3516 auto StatusOrErr =
3517 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3518 if (errorToBool(Err: StatusOrErr.takeError()))
3519 return TokError(Msg: "invalid floating point representation");
3520
3521 if (isNegative)
3522 RealVal.changeSign();
3523
3524 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3525 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
3526 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
3527 } else
3528 Operands.push_back(Elt: AArch64Operand::CreateFPImm(
3529 Val: RealVal, IsExact: *StatusOrErr == APFloat::opOK, S, Ctx&: getContext()));
3530 }
3531
3532 Lex(); // Eat the token.
3533
3534 return ParseStatus::Success;
3535}
3536
3537/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3538/// a shift suffix, for example '#1, lsl #12'.
3539ParseStatus
3540AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3541 SMLoc S = getLoc();
3542
3543 if (getTok().is(K: AsmToken::Hash))
3544 Lex(); // Eat '#'
3545 else if (getTok().isNot(K: AsmToken::Integer))
3546 // Operand should start from # or should be integer, emit error otherwise.
3547 return ParseStatus::NoMatch;
3548
3549 if (getTok().is(K: AsmToken::Integer) &&
3550 getLexer().peekTok().is(K: AsmToken::Colon))
3551 return tryParseImmRange(Operands);
3552
3553 const MCExpr *Imm = nullptr;
3554 if (parseSymbolicImmVal(ImmVal&: Imm))
3555 return ParseStatus::Failure;
3556 else if (getTok().isNot(K: AsmToken::Comma)) {
3557 Operands.push_back(
3558 Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
3559 return ParseStatus::Success;
3560 }
3561
3562 // Eat ','
3563 Lex();
3564 StringRef VecGroup;
3565 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3566 Operands.push_back(
3567 Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
3568 Operands.push_back(
3569 Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
3570 return ParseStatus::Success;
3571 }
3572
3573 // The optional operand must be "lsl #N" where N is non-negative.
3574 if (!getTok().is(K: AsmToken::Identifier) ||
3575 !getTok().getIdentifier().equals_insensitive(RHS: "lsl"))
3576 return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");
3577
3578 // Eat 'lsl'
3579 Lex();
3580
3581 parseOptionalToken(T: AsmToken::Hash);
3582
3583 if (getTok().isNot(K: AsmToken::Integer))
3584 return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");
3585
3586 int64_t ShiftAmount = getTok().getIntVal();
3587
3588 if (ShiftAmount < 0)
3589 return Error(L: getLoc(), Msg: "positive shift amount required");
3590 Lex(); // Eat the number
3591
3592 // Just in case the optional lsl #0 is used for immediates other than zero.
3593 if (ShiftAmount == 0 && Imm != nullptr) {
3594 Operands.push_back(
3595 Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
3596 return ParseStatus::Success;
3597 }
3598
3599 Operands.push_back(Elt: AArch64Operand::CreateShiftedImm(Val: Imm, ShiftAmount, S,
3600 E: getLoc(), Ctx&: getContext()));
3601 return ParseStatus::Success;
3602}
3603
3604/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3605/// suggestion to help common typos.
3606AArch64CC::CondCode
3607AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3608 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3609 .Case(S: "eq", Value: AArch64CC::EQ)
3610 .Case(S: "ne", Value: AArch64CC::NE)
3611 .Case(S: "cs", Value: AArch64CC::HS)
3612 .Case(S: "hs", Value: AArch64CC::HS)
3613 .Case(S: "cc", Value: AArch64CC::LO)
3614 .Case(S: "lo", Value: AArch64CC::LO)
3615 .Case(S: "mi", Value: AArch64CC::MI)
3616 .Case(S: "pl", Value: AArch64CC::PL)
3617 .Case(S: "vs", Value: AArch64CC::VS)
3618 .Case(S: "vc", Value: AArch64CC::VC)
3619 .Case(S: "hi", Value: AArch64CC::HI)
3620 .Case(S: "ls", Value: AArch64CC::LS)
3621 .Case(S: "ge", Value: AArch64CC::GE)
3622 .Case(S: "lt", Value: AArch64CC::LT)
3623 .Case(S: "gt", Value: AArch64CC::GT)
3624 .Case(S: "le", Value: AArch64CC::LE)
3625 .Case(S: "al", Value: AArch64CC::AL)
3626 .Case(S: "nv", Value: AArch64CC::NV)
3627 // SVE condition code aliases:
3628 .Case(S: "none", Value: AArch64CC::EQ)
3629 .Case(S: "any", Value: AArch64CC::NE)
3630 .Case(S: "nlast", Value: AArch64CC::HS)
3631 .Case(S: "last", Value: AArch64CC::LO)
3632 .Case(S: "first", Value: AArch64CC::MI)
3633 .Case(S: "nfrst", Value: AArch64CC::PL)
3634 .Case(S: "pmore", Value: AArch64CC::HI)
3635 .Case(S: "plast", Value: AArch64CC::LS)
3636 .Case(S: "tcont", Value: AArch64CC::GE)
3637 .Case(S: "tstop", Value: AArch64CC::LT)
3638 .Default(Value: AArch64CC::Invalid);
3639
3640 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3641 Suggestion = "nfrst";
3642
3643 return CC;
3644}
3645
3646/// parseCondCode - Parse a Condition Code operand.
3647bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3648 bool invertCondCode) {
3649 SMLoc S = getLoc();
3650 const AsmToken &Tok = getTok();
3651 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3652
3653 StringRef Cond = Tok.getString();
3654 std::string Suggestion;
3655 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3656 if (CC == AArch64CC::Invalid) {
3657 std::string Msg = "invalid condition code";
3658 if (!Suggestion.empty())
3659 Msg += ", did you mean " + Suggestion + "?";
3660 return TokError(Msg);
3661 }
3662 Lex(); // Eat identifier token.
3663
3664 if (invertCondCode) {
3665 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3666 return TokError(Msg: "condition codes AL and NV are invalid for this instruction");
3667 CC = AArch64CC::getInvertedCondCode(Code: AArch64CC::CondCode(CC));
3668 }
3669
3670 Operands.push_back(
3671 Elt: AArch64Operand::CreateCondCode(Code: CC, S, E: getLoc(), Ctx&: getContext()));
3672 return false;
3673}
3674
3675ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3676 const AsmToken &Tok = getTok();
3677 SMLoc S = getLoc();
3678
3679 if (Tok.isNot(K: AsmToken::Identifier))
3680 return TokError(Msg: "invalid operand for instruction");
3681
3682 unsigned PStateImm = -1;
3683 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Name: Tok.getString());
3684 if (!SVCR)
3685 return ParseStatus::NoMatch;
3686 if (SVCR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
3687 PStateImm = SVCR->Encoding;
3688
3689 Operands.push_back(
3690 Elt: AArch64Operand::CreateSVCR(PStateField: PStateImm, Str: Tok.getString(), S, Ctx&: getContext()));
3691 Lex(); // Eat identifier token.
3692 return ParseStatus::Success;
3693}
3694
3695ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3696 const AsmToken &Tok = getTok();
3697 SMLoc S = getLoc();
3698
3699 StringRef Name = Tok.getString();
3700
3701 if (Name.equals_insensitive(RHS: "za") || Name.starts_with_insensitive(Prefix: "za.")) {
3702 Lex(); // eat "za[.(b|h|s|d)]"
3703 unsigned ElementWidth = 0;
3704 auto DotPosition = Name.find(C: '.');
3705 if (DotPosition != StringRef::npos) {
3706 const auto &KindRes =
3707 parseVectorKind(Suffix: Name.drop_front(N: DotPosition), VectorKind: RegKind::Matrix);
3708 if (!KindRes)
3709 return TokError(
3710 Msg: "Expected the register to be followed by element width suffix");
3711 ElementWidth = KindRes->second;
3712 }
3713 Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
3714 Reg: AArch64::ZA, ElementWidth, Kind: MatrixKind::Array, S, E: getLoc(),
3715 Ctx&: getContext()));
3716 if (getLexer().is(K: AsmToken::LBrac)) {
3717 // There's no comma after matrix operand, so we can parse the next operand
3718 // immediately.
3719 if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
3720 return ParseStatus::NoMatch;
3721 }
3722 return ParseStatus::Success;
3723 }
3724
3725 // Try to parse matrix register.
3726 MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::Matrix);
3727 if (!Reg)
3728 return ParseStatus::NoMatch;
3729
3730 size_t DotPosition = Name.find(C: '.');
3731 assert(DotPosition != StringRef::npos && "Unexpected register");
3732
3733 StringRef Head = Name.take_front(N: DotPosition);
3734 StringRef Tail = Name.drop_front(N: DotPosition);
3735 StringRef RowOrColumn = Head.take_back();
3736
3737 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3738 .Case(S: "h", Value: MatrixKind::Row)
3739 .Case(S: "v", Value: MatrixKind::Col)
3740 .Default(Value: MatrixKind::Tile);
3741
3742 // Next up, parsing the suffix
3743 const auto &KindRes = parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
3744 if (!KindRes)
3745 return TokError(
3746 Msg: "Expected the register to be followed by element width suffix");
3747 unsigned ElementWidth = KindRes->second;
3748
3749 Lex();
3750
3751 Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
3752 Reg, ElementWidth, Kind, S, E: getLoc(), Ctx&: getContext()));
3753
3754 if (getLexer().is(K: AsmToken::LBrac)) {
3755 // There's no comma after matrix operand, so we can parse the next operand
3756 // immediately.
3757 if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
3758 return ParseStatus::NoMatch;
3759 }
3760 return ParseStatus::Success;
3761}
3762
3763/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3764/// them if present.
3765ParseStatus
3766AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3767 const AsmToken &Tok = getTok();
3768 std::string LowerID = Tok.getString().lower();
3769 AArch64_AM::ShiftExtendType ShOp =
3770 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3771 .Case(S: "lsl", Value: AArch64_AM::LSL)
3772 .Case(S: "lsr", Value: AArch64_AM::LSR)
3773 .Case(S: "asr", Value: AArch64_AM::ASR)
3774 .Case(S: "ror", Value: AArch64_AM::ROR)
3775 .Case(S: "msl", Value: AArch64_AM::MSL)
3776 .Case(S: "uxtb", Value: AArch64_AM::UXTB)
3777 .Case(S: "uxth", Value: AArch64_AM::UXTH)
3778 .Case(S: "uxtw", Value: AArch64_AM::UXTW)
3779 .Case(S: "uxtx", Value: AArch64_AM::UXTX)
3780 .Case(S: "sxtb", Value: AArch64_AM::SXTB)
3781 .Case(S: "sxth", Value: AArch64_AM::SXTH)
3782 .Case(S: "sxtw", Value: AArch64_AM::SXTW)
3783 .Case(S: "sxtx", Value: AArch64_AM::SXTX)
3784 .Default(Value: AArch64_AM::InvalidShiftExtend);
3785
3786 if (ShOp == AArch64_AM::InvalidShiftExtend)
3787 return ParseStatus::NoMatch;
3788
3789 SMLoc S = Tok.getLoc();
3790 Lex();
3791
3792 bool Hash = parseOptionalToken(T: AsmToken::Hash);
3793
3794 if (!Hash && getLexer().isNot(K: AsmToken::Integer)) {
3795 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3796 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3797 ShOp == AArch64_AM::MSL) {
3798 // We expect a number here.
3799 return TokError(Msg: "expected #imm after shift specifier");
3800 }
3801
3802 // "extend" type operations don't need an immediate, #0 is implicit.
3803 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3804 Operands.push_back(
3805 Elt: AArch64Operand::CreateShiftExtend(ShOp, Val: 0, HasExplicitAmount: false, S, E, Ctx&: getContext()));
3806 return ParseStatus::Success;
3807 }
3808
3809 // Make sure we do actually have a number, identifier or a parenthesized
3810 // expression.
3811 SMLoc E = getLoc();
3812 if (!getTok().is(K: AsmToken::Integer) && !getTok().is(K: AsmToken::LParen) &&
3813 !getTok().is(K: AsmToken::Identifier))
3814 return Error(L: E, Msg: "expected integer shift amount");
3815
3816 const MCExpr *ImmVal;
3817 if (getParser().parseExpression(Res&: ImmVal))
3818 return ParseStatus::Failure;
3819
3820 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
3821 if (!MCE)
3822 return Error(L: E, Msg: "expected constant '#imm' after shift specifier");
3823
3824 E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3825 Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(
3826 ShOp, Val: MCE->getValue(), HasExplicitAmount: true, S, E, Ctx&: getContext()));
3827 return ParseStatus::Success;
3828}
3829
3830static const struct Extension {
3831 const char *Name;
3832 const FeatureBitset Features;
3833} ExtensionMap[] = {
3834 {.Name: "crc", .Features: {AArch64::FeatureCRC}},
3835 {.Name: "sm4", .Features: {AArch64::FeatureSM4}},
3836 {.Name: "sha3", .Features: {AArch64::FeatureSHA3}},
3837 {.Name: "sha2", .Features: {AArch64::FeatureSHA2}},
3838 {.Name: "aes", .Features: {AArch64::FeatureAES}},
3839 {.Name: "crypto", .Features: {AArch64::FeatureCrypto}},
3840 {.Name: "fp", .Features: {AArch64::FeatureFPARMv8}},
3841 {.Name: "simd", .Features: {AArch64::FeatureNEON}},
3842 {.Name: "ras", .Features: {AArch64::FeatureRAS}},
3843 {.Name: "rasv2", .Features: {AArch64::FeatureRASv2}},
3844 {.Name: "lse", .Features: {AArch64::FeatureLSE}},
3845 {.Name: "predres", .Features: {AArch64::FeaturePredRes}},
3846 {.Name: "predres2", .Features: {AArch64::FeatureSPECRES2}},
3847 {.Name: "ccdp", .Features: {AArch64::FeatureCacheDeepPersist}},
3848 {.Name: "mte", .Features: {AArch64::FeatureMTE}},
3849 {.Name: "memtag", .Features: {AArch64::FeatureMTE}},
3850 {.Name: "tlb-rmi", .Features: {AArch64::FeatureTLB_RMI}},
3851 {.Name: "pan", .Features: {AArch64::FeaturePAN}},
3852 {.Name: "pan-rwv", .Features: {AArch64::FeaturePAN_RWV}},
3853 {.Name: "ccpp", .Features: {AArch64::FeatureCCPP}},
3854 {.Name: "rcpc", .Features: {AArch64::FeatureRCPC}},
3855 {.Name: "rng", .Features: {AArch64::FeatureRandGen}},
3856 {.Name: "sve", .Features: {AArch64::FeatureSVE}},
3857 {.Name: "sve-b16b16", .Features: {AArch64::FeatureSVEB16B16}},
3858 {.Name: "sve2", .Features: {AArch64::FeatureSVE2}},
3859 {.Name: "sve-aes", .Features: {AArch64::FeatureSVEAES}},
3860 {.Name: "sve2-aes", .Features: {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3861 {.Name: "sve-sm4", .Features: {AArch64::FeatureSVESM4}},
3862 {.Name: "sve2-sm4", .Features: {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3863 {.Name: "sve-sha3", .Features: {AArch64::FeatureSVESHA3}},
3864 {.Name: "sve2-sha3", .Features: {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3865 {.Name: "sve-bitperm", .Features: {AArch64::FeatureSVEBitPerm}},
3866 {.Name: "sve2-bitperm",
3867 .Features: {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3868 AArch64::FeatureSVE2}},
3869 {.Name: "sve2p1", .Features: {AArch64::FeatureSVE2p1}},
3870 {.Name: "ls64", .Features: {AArch64::FeatureLS64}},
3871 {.Name: "xs", .Features: {AArch64::FeatureXS}},
3872 {.Name: "pauth", .Features: {AArch64::FeaturePAuth}},
3873 {.Name: "flagm", .Features: {AArch64::FeatureFlagM}},
3874 {.Name: "rme", .Features: {AArch64::FeatureRME}},
3875 {.Name: "sme", .Features: {AArch64::FeatureSME}},
3876 {.Name: "sme-f64f64", .Features: {AArch64::FeatureSMEF64F64}},
3877 {.Name: "sme-f16f16", .Features: {AArch64::FeatureSMEF16F16}},
3878 {.Name: "sme-i16i64", .Features: {AArch64::FeatureSMEI16I64}},
3879 {.Name: "sme2", .Features: {AArch64::FeatureSME2}},
3880 {.Name: "sme2p1", .Features: {AArch64::FeatureSME2p1}},
3881 {.Name: "sme-b16b16", .Features: {AArch64::FeatureSMEB16B16}},
3882 {.Name: "hbc", .Features: {AArch64::FeatureHBC}},
3883 {.Name: "mops", .Features: {AArch64::FeatureMOPS}},
3884 {.Name: "mec", .Features: {AArch64::FeatureMEC}},
3885 {.Name: "the", .Features: {AArch64::FeatureTHE}},
3886 {.Name: "d128", .Features: {AArch64::FeatureD128}},
3887 {.Name: "lse128", .Features: {AArch64::FeatureLSE128}},
3888 {.Name: "ite", .Features: {AArch64::FeatureITE}},
3889 {.Name: "cssc", .Features: {AArch64::FeatureCSSC}},
3890 {.Name: "rcpc3", .Features: {AArch64::FeatureRCPC3}},
3891 {.Name: "gcs", .Features: {AArch64::FeatureGCS}},
3892 {.Name: "bf16", .Features: {AArch64::FeatureBF16}},
3893 {.Name: "compnum", .Features: {AArch64::FeatureComplxNum}},
3894 {.Name: "dotprod", .Features: {AArch64::FeatureDotProd}},
3895 {.Name: "f32mm", .Features: {AArch64::FeatureMatMulFP32}},
3896 {.Name: "f64mm", .Features: {AArch64::FeatureMatMulFP64}},
3897 {.Name: "fp16", .Features: {AArch64::FeatureFullFP16}},
3898 {.Name: "fp16fml", .Features: {AArch64::FeatureFP16FML}},
3899 {.Name: "i8mm", .Features: {AArch64::FeatureMatMulInt8}},
3900 {.Name: "lor", .Features: {AArch64::FeatureLOR}},
3901 {.Name: "profile", .Features: {AArch64::FeatureSPE}},
3902 // "rdma" is the name documented by binutils for the feature, but
3903 // binutils also accepts incomplete prefixes of features, so "rdm"
3904 // works too. Support both spellings here.
3905 {.Name: "rdm", .Features: {AArch64::FeatureRDM}},
3906 {.Name: "rdma", .Features: {AArch64::FeatureRDM}},
3907 {.Name: "sb", .Features: {AArch64::FeatureSB}},
3908 {.Name: "ssbs", .Features: {AArch64::FeatureSSBS}},
3909 {.Name: "fp8", .Features: {AArch64::FeatureFP8}},
3910 {.Name: "faminmax", .Features: {AArch64::FeatureFAMINMAX}},
3911 {.Name: "fp8fma", .Features: {AArch64::FeatureFP8FMA}},
3912 {.Name: "ssve-fp8fma", .Features: {AArch64::FeatureSSVE_FP8FMA}},
3913 {.Name: "fp8dot2", .Features: {AArch64::FeatureFP8DOT2}},
3914 {.Name: "ssve-fp8dot2", .Features: {AArch64::FeatureSSVE_FP8DOT2}},
3915 {.Name: "fp8dot4", .Features: {AArch64::FeatureFP8DOT4}},
3916 {.Name: "ssve-fp8dot4", .Features: {AArch64::FeatureSSVE_FP8DOT4}},
3917 {.Name: "lut", .Features: {AArch64::FeatureLUT}},
3918 {.Name: "sme-lutv2", .Features: {AArch64::FeatureSME_LUTv2}},
3919 {.Name: "sme-f8f16", .Features: {AArch64::FeatureSMEF8F16}},
3920 {.Name: "sme-f8f32", .Features: {AArch64::FeatureSMEF8F32}},
3921 {.Name: "sme-fa64", .Features: {AArch64::FeatureSMEFA64}},
3922 {.Name: "cpa", .Features: {AArch64::FeatureCPA}},
3923 {.Name: "tlbiw", .Features: {AArch64::FeatureTLBIW}},
3924 {.Name: "pops", .Features: {AArch64::FeaturePoPS}},
3925 {.Name: "cmpbr", .Features: {AArch64::FeatureCMPBR}},
3926 {.Name: "f8f32mm", .Features: {AArch64::FeatureF8F32MM}},
3927 {.Name: "f8f16mm", .Features: {AArch64::FeatureF8F16MM}},
3928 {.Name: "fprcvt", .Features: {AArch64::FeatureFPRCVT}},
3929 {.Name: "lsfe", .Features: {AArch64::FeatureLSFE}},
3930 {.Name: "sme2p2", .Features: {AArch64::FeatureSME2p2}},
3931 {.Name: "ssve-aes", .Features: {AArch64::FeatureSSVE_AES}},
3932 {.Name: "sve2p2", .Features: {AArch64::FeatureSVE2p2}},
3933 {.Name: "sve-aes2", .Features: {AArch64::FeatureSVEAES2}},
3934 {.Name: "sve-bfscale", .Features: {AArch64::FeatureSVEBFSCALE}},
3935 {.Name: "sve-f16f32mm", .Features: {AArch64::FeatureSVE_F16F32MM}},
3936 {.Name: "lsui", .Features: {AArch64::FeatureLSUI}},
3937 {.Name: "occmo", .Features: {AArch64::FeatureOCCMO}},
3938 {.Name: "ssve-bitperm", .Features: {AArch64::FeatureSSVE_BitPerm}},
3939 {.Name: "sme-mop4", .Features: {AArch64::FeatureSME_MOP4}},
3940 {.Name: "sme-tmop", .Features: {AArch64::FeatureSME_TMOP}},
3941 {.Name: "lscp", .Features: {AArch64::FeatureLSCP}},
3942 {.Name: "tlbid", .Features: {AArch64::FeatureTLBID}},
3943 {.Name: "mpamv2", .Features: {AArch64::FeatureMPAMv2}},
3944 {.Name: "mtetc", .Features: {AArch64::FeatureMTETC}},
3945 {.Name: "gcie", .Features: {AArch64::FeatureGCIE}},
3946 {.Name: "sme2p3", .Features: {AArch64::FeatureSME2p3}},
3947 {.Name: "sve2p3", .Features: {AArch64::FeatureSVE2p3}},
3948 {.Name: "sve-b16mm", .Features: {AArch64::FeatureSVE_B16MM}},
3949 {.Name: "f16mm", .Features: {AArch64::FeatureF16MM}},
3950 {.Name: "f16f32dot", .Features: {AArch64::FeatureF16F32DOT}},
3951 {.Name: "f16f32mm", .Features: {AArch64::FeatureF16F32MM}},
3952 {.Name: "mops-go", .Features: {AArch64::FeatureMOPS_GO}},
3953 {.Name: "poe2", .Features: {AArch64::FeatureS1POE2}},
3954 {.Name: "tev", .Features: {AArch64::FeatureTEV}},
3955 {.Name: "btie", .Features: {AArch64::FeatureBTIE}},
3956 {.Name: "dit", .Features: {AArch64::FeatureDIT}},
3957 {.Name: "brbe", .Features: {AArch64::FeatureBRBE}},
3958 {.Name: "bti", .Features: {AArch64::FeatureBranchTargetId}},
3959 {.Name: "fcma", .Features: {AArch64::FeatureComplxNum}},
3960 {.Name: "jscvt", .Features: {AArch64::FeatureJS}},
3961 {.Name: "pauth-lr", .Features: {AArch64::FeaturePAuthLR}},
3962 {.Name: "ssve-fexpa", .Features: {AArch64::FeatureSSVE_FEXPA}},
3963 {.Name: "wfxt", .Features: {AArch64::FeatureWFxT}},
3964};
3965
3966static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3967 if (FBS[AArch64::HasV8_0aOps])
3968 Str += "ARMv8a";
3969 if (FBS[AArch64::HasV8_1aOps])
3970 Str += "ARMv8.1a";
3971 else if (FBS[AArch64::HasV8_2aOps])
3972 Str += "ARMv8.2a";
3973 else if (FBS[AArch64::HasV8_3aOps])
3974 Str += "ARMv8.3a";
3975 else if (FBS[AArch64::HasV8_4aOps])
3976 Str += "ARMv8.4a";
3977 else if (FBS[AArch64::HasV8_5aOps])
3978 Str += "ARMv8.5a";
3979 else if (FBS[AArch64::HasV8_6aOps])
3980 Str += "ARMv8.6a";
3981 else if (FBS[AArch64::HasV8_7aOps])
3982 Str += "ARMv8.7a";
3983 else if (FBS[AArch64::HasV8_8aOps])
3984 Str += "ARMv8.8a";
3985 else if (FBS[AArch64::HasV8_9aOps])
3986 Str += "ARMv8.9a";
3987 else if (FBS[AArch64::HasV9_0aOps])
3988 Str += "ARMv9-a";
3989 else if (FBS[AArch64::HasV9_1aOps])
3990 Str += "ARMv9.1a";
3991 else if (FBS[AArch64::HasV9_2aOps])
3992 Str += "ARMv9.2a";
3993 else if (FBS[AArch64::HasV9_3aOps])
3994 Str += "ARMv9.3a";
3995 else if (FBS[AArch64::HasV9_4aOps])
3996 Str += "ARMv9.4a";
3997 else if (FBS[AArch64::HasV9_5aOps])
3998 Str += "ARMv9.5a";
3999 else if (FBS[AArch64::HasV9_6aOps])
4000 Str += "ARMv9.6a";
4001 else if (FBS[AArch64::HasV9_7aOps])
4002 Str += "ARMv9.7a";
4003 else if (FBS[AArch64::HasV8_0rOps])
4004 Str += "ARMv8r";
4005 else {
4006 SmallVector<std::string, 2> ExtMatches;
4007 for (const auto& Ext : ExtensionMap) {
4008 // Use & in case multiple features are enabled
4009 if ((FBS & Ext.Features) != FeatureBitset())
4010 ExtMatches.push_back(Elt: Ext.Name);
4011 }
4012 Str += !ExtMatches.empty() ? llvm::join(R&: ExtMatches, Separator: ", ") : "(unknown)";
4013 }
4014}
4015
4016void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4017 SMLoc S) {
4018 const uint16_t Op2 = Encoding & 7;
4019 const uint16_t Cm = (Encoding & 0x78) >> 3;
4020 const uint16_t Cn = (Encoding & 0x780) >> 7;
4021 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4022
4023 const MCExpr *Expr = MCConstantExpr::create(Value: Op1, Ctx&: getContext());
4024
4025 Operands.push_back(
4026 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4027 Operands.push_back(
4028 Elt: AArch64Operand::CreateSysCR(Val: Cn, S, E: getLoc(), Ctx&: getContext()));
4029 Operands.push_back(
4030 Elt: AArch64Operand::CreateSysCR(Val: Cm, S, E: getLoc(), Ctx&: getContext()));
4031 Expr = MCConstantExpr::create(Value: Op2, Ctx&: getContext());
4032 Operands.push_back(
4033 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4034}
4035
4036/// parseSysAlias - The IC, DC, AT, TLBI, MLBI and GIC{R} and GSB instructions
4037/// are simple aliases for the SYS instruction. Parse them specially so that
4038/// we create a SYS MCInst.
4039bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4040 OperandVector &Operands) {
4041 if (Name.contains(C: '.'))
4042 return TokError(Msg: "invalid operand");
4043
4044 Mnemonic = Name;
4045 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "sys", S: NameLoc, Ctx&: getContext()));
4046
4047 const AsmToken &Tok = getTok();
4048 StringRef Op = Tok.getString();
4049 SMLoc S = Tok.getLoc();
4050 bool ExpectRegister = true;
4051 bool OptionalRegister = false;
4052 bool hasAll = getSTI().hasFeature(Feature: AArch64::FeatureAll);
4053 bool hasTLBID = getSTI().hasFeature(Feature: AArch64::FeatureTLBID);
4054
4055 if (Mnemonic == "ic") {
4056 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Name: Op);
4057 if (!IC)
4058 return TokError(Msg: "invalid operand for IC instruction");
4059 else if (!IC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4060 std::string Str("IC " + std::string(IC->Name) + " requires: ");
4061 setRequiredFeatureString(FBS: IC->getRequiredFeatures(), Str);
4062 return TokError(Msg: Str);
4063 }
4064 ExpectRegister = IC->NeedsReg;
4065 createSysAlias(Encoding: IC->Encoding, Operands, S);
4066 } else if (Mnemonic == "dc") {
4067 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Name: Op);
4068 if (!DC)
4069 return TokError(Msg: "invalid operand for DC instruction");
4070 else if (!DC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4071 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4072 setRequiredFeatureString(FBS: DC->getRequiredFeatures(), Str);
4073 return TokError(Msg: Str);
4074 }
4075 createSysAlias(Encoding: DC->Encoding, Operands, S);
4076 } else if (Mnemonic == "at") {
4077 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Name: Op);
4078 if (!AT)
4079 return TokError(Msg: "invalid operand for AT instruction");
4080 else if (!AT->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4081 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4082 setRequiredFeatureString(FBS: AT->getRequiredFeatures(), Str);
4083 return TokError(Msg: Str);
4084 }
4085 createSysAlias(Encoding: AT->Encoding, Operands, S);
4086 } else if (Mnemonic == "tlbi") {
4087 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Name: Op);
4088 if (!TLBI)
4089 return TokError(Msg: "invalid operand for TLBI instruction");
4090 else if (!TLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4091 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4092 setRequiredFeatureString(FBS: TLBI->getRequiredFeatures(), Str);
4093 return TokError(Msg: Str);
4094 }
4095 ExpectRegister = TLBI->RegUse == REG_REQUIRED;
4096 if (hasAll || hasTLBID)
4097 OptionalRegister = TLBI->RegUse == REG_OPTIONAL;
4098 createSysAlias(Encoding: TLBI->Encoding, Operands, S);
4099 } else if (Mnemonic == "mlbi") {
4100 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Name: Op);
4101 if (!MLBI)
4102 return TokError(Msg: "invalid operand for MLBI instruction");
4103 else if (!MLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4104 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4105 setRequiredFeatureString(FBS: MLBI->getRequiredFeatures(), Str);
4106 return TokError(Msg: Str);
4107 }
4108 ExpectRegister = MLBI->NeedsReg;
4109 createSysAlias(Encoding: MLBI->Encoding, Operands, S);
4110 } else if (Mnemonic == "gic") {
4111 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Name: Op);
4112 if (!GIC)
4113 return TokError(Msg: "invalid operand for GIC instruction");
4114 else if (!GIC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4115 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4116 setRequiredFeatureString(FBS: GIC->getRequiredFeatures(), Str);
4117 return TokError(Msg: Str);
4118 }
4119 ExpectRegister = GIC->NeedsReg;
4120 createSysAlias(Encoding: GIC->Encoding, Operands, S);
4121 } else if (Mnemonic == "gsb") {
4122 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Name: Op);
4123 if (!GSB)
4124 return TokError(Msg: "invalid operand for GSB instruction");
4125 else if (!GSB->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4126 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4127 setRequiredFeatureString(FBS: GSB->getRequiredFeatures(), Str);
4128 return TokError(Msg: Str);
4129 }
4130 ExpectRegister = false;
4131 createSysAlias(Encoding: GSB->Encoding, Operands, S);
4132 } else if (Mnemonic == "plbi") {
4133 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Name: Op);
4134 if (!PLBI)
4135 return TokError(Msg: "invalid operand for PLBI instruction");
4136 else if (!PLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4137 std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
4138 setRequiredFeatureString(FBS: PLBI->getRequiredFeatures(), Str);
4139 return TokError(Msg: Str);
4140 }
4141 ExpectRegister = PLBI->RegUse == REG_REQUIRED;
4142 if (hasAll || hasTLBID)
4143 OptionalRegister = PLBI->RegUse == REG_OPTIONAL;
4144 createSysAlias(Encoding: PLBI->Encoding, Operands, S);
4145 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4146 Mnemonic == "cosp") {
4147
4148 if (Op.lower() != "rctx")
4149 return TokError(Msg: "invalid operand for prediction restriction instruction");
4150
4151 bool hasPredres = hasAll || getSTI().hasFeature(Feature: AArch64::FeaturePredRes);
4152 bool hasSpecres2 = hasAll || getSTI().hasFeature(Feature: AArch64::FeatureSPECRES2);
4153
4154 if (Mnemonic == "cosp" && !hasSpecres2)
4155 return TokError(Msg: "COSP requires: predres2");
4156 if (!hasPredres)
4157 return TokError(Msg: Mnemonic.upper() + "RCTX requires: predres");
4158
4159 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4160 : Mnemonic == "dvp" ? 0b101
4161 : Mnemonic == "cosp" ? 0b110
4162 : Mnemonic == "cpp" ? 0b111
4163 : 0;
4164 assert(PRCTX_Op2 &&
4165 "Invalid mnemonic for prediction restriction instruction");
4166 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4167 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4168
4169 createSysAlias(Encoding, Operands, S);
4170 }
4171
4172 Lex(); // Eat operand.
4173
4174 bool HasRegister = false;
4175
4176 // Check for the optional register operand.
4177 if (parseOptionalToken(T: AsmToken::Comma)) {
4178 if (Tok.isNot(K: AsmToken::Identifier) || parseRegister(Operands))
4179 return TokError(Msg: "expected register operand");
4180 HasRegister = true;
4181 }
4182
4183 if (!OptionalRegister) {
4184 if (ExpectRegister && !HasRegister)
4185 return TokError(Msg: "specified " + Mnemonic + " op requires a register");
4186 else if (!ExpectRegister && HasRegister)
4187 return TokError(Msg: "specified " + Mnemonic + " op does not use a register");
4188 }
4189
4190 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4191 return true;
4192
4193 return false;
4194}
4195
4196/// parseSyslAlias - The GICR instructions are simple aliases for
4197/// the SYSL instruction. Parse them specially so that we create a
4198/// SYS MCInst.
4199bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
4200 OperandVector &Operands) {
4201
4202 Mnemonic = Name;
4203 Operands.push_back(
4204 Elt: AArch64Operand::CreateToken(Str: "sysl", S: NameLoc, Ctx&: getContext()));
4205
4206 // Now expect two operands (identifier + register)
4207 SMLoc startLoc = getLoc();
4208 const AsmToken &regTok = getTok();
4209 StringRef reg = regTok.getString();
4210 MCRegister Reg = matchRegisterNameAlias(Name: reg.lower(), Kind: RegKind::Scalar);
4211 if (!Reg)
4212 return TokError(Msg: "expected register operand");
4213
4214 Operands.push_back(Elt: AArch64Operand::CreateReg(
4215 Reg, Kind: RegKind::Scalar, S: startLoc, E: getLoc(), Ctx&: getContext(), EqTy: EqualsReg));
4216
4217 Lex(); // Eat token
4218 if (parseToken(T: AsmToken::Comma))
4219 return true;
4220
4221 // Check for identifier
4222 const AsmToken &operandTok = getTok();
4223 StringRef Op = operandTok.getString();
4224 SMLoc S2 = operandTok.getLoc();
4225 Lex(); // Eat token
4226
4227 if (Mnemonic == "gicr") {
4228 const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Name: Op);
4229 if (!GICR)
4230 return Error(L: S2, Msg: "invalid operand for GICR instruction");
4231 else if (!GICR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4232 std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
4233 setRequiredFeatureString(FBS: GICR->getRequiredFeatures(), Str);
4234 return Error(L: S2, Msg: Str);
4235 }
4236 createSysAlias(Encoding: GICR->Encoding, Operands, S: S2);
4237 }
4238
4239 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4240 return true;
4241
4242 return false;
4243}
4244
4245/// parseSyspAlias - The TLBIP instructions are simple aliases for
4246/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4247bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4248 OperandVector &Operands) {
4249 if (Name.contains(C: '.'))
4250 return TokError(Msg: "invalid operand");
4251
4252 Mnemonic = Name;
4253 Operands.push_back(
4254 Elt: AArch64Operand::CreateToken(Str: "sysp", S: NameLoc, Ctx&: getContext()));
4255
4256 const AsmToken &Tok = getTok();
4257 StringRef Op = Tok.getString();
4258 SMLoc S = Tok.getLoc();
4259
4260 if (Mnemonic == "tlbip") {
4261 const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(Name: Op);
4262 if (!TLBIP)
4263 return TokError(Msg: "invalid operand for TLBIP instruction");
4264
4265 if (!TLBIP->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4266 std::string Str("instruction requires: ");
4267 Str += TLBIP->AllowWithTLBID ? "tlbid or d128" : "d128";
4268 return TokError(Msg: Str);
4269 }
4270 createSysAlias(Encoding: TLBIP->Encoding, Operands, S);
4271 }
4272
4273 Lex(); // Eat operand.
4274
4275 if (parseComma())
4276 return true;
4277
4278 if (Tok.isNot(K: AsmToken::Identifier))
4279 return TokError(Msg: "expected register identifier");
4280 auto Result = tryParseSyspXzrPair(Operands);
4281 if (Result.isNoMatch())
4282 Result = tryParseGPRSeqPair(Operands);
4283 if (!Result.isSuccess())
4284 return TokError(Msg: "specified " + Mnemonic +
4285 " op requires a pair of registers");
4286
4287 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4288 return true;
4289
4290 return false;
4291}
4292
4293ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4294 MCAsmParser &Parser = getParser();
4295 const AsmToken &Tok = getTok();
4296
4297 if (Mnemonic == "tsb" && Tok.isNot(K: AsmToken::Identifier))
4298 return TokError(Msg: "'csync' operand expected");
4299 if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
4300 // Immediate operand.
4301 const MCExpr *ImmVal;
4302 SMLoc ExprLoc = getLoc();
4303 AsmToken IntTok = Tok;
4304 if (getParser().parseExpression(Res&: ImmVal))
4305 return ParseStatus::Failure;
4306 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4307 if (!MCE)
4308 return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
4309 int64_t Value = MCE->getValue();
4310 if (Mnemonic == "dsb" && Value > 15) {
4311 // This case is a no match here, but it might be matched by the nXS
4312 // variant. Deliberately not unlex the optional '#' as it is not necessary
4313 // to characterize an integer immediate.
4314 Parser.getLexer().UnLex(Token: IntTok);
4315 return ParseStatus::NoMatch;
4316 }
4317 if (Value < 0 || Value > 15)
4318 return Error(L: ExprLoc, Msg: "barrier operand out of range");
4319 auto DB = AArch64DB::lookupDBByEncoding(Encoding: Value);
4320 Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: Value, Str: DB ? DB->Name : "",
4321 S: ExprLoc, Ctx&: getContext(),
4322 HasnXSModifier: false /*hasnXSModifier*/));
4323 return ParseStatus::Success;
4324 }
4325
4326 if (Tok.isNot(K: AsmToken::Identifier))
4327 return TokError(Msg: "invalid operand for instruction");
4328
4329 StringRef Operand = Tok.getString();
4330 auto TSB = AArch64TSB::lookupTSBByName(Name: Operand);
4331 auto DB = AArch64DB::lookupDBByName(Name: Operand);
4332 // The only valid named option for ISB is 'sy'
4333 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4334 return TokError(Msg: "'sy' or #imm operand expected");
4335 // The only valid named option for TSB is 'csync'
4336 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4337 return TokError(Msg: "'csync' operand expected");
4338 if (!DB && !TSB) {
4339 if (Mnemonic == "dsb") {
4340 // This case is a no match here, but it might be matched by the nXS
4341 // variant.
4342 return ParseStatus::NoMatch;
4343 }
4344 return TokError(Msg: "invalid barrier option name");
4345 }
4346
4347 Operands.push_back(Elt: AArch64Operand::CreateBarrier(
4348 Val: DB ? DB->Encoding : TSB->Encoding, Str: Tok.getString(), S: getLoc(),
4349 Ctx&: getContext(), HasnXSModifier: false /*hasnXSModifier*/));
4350 Lex(); // Consume the option
4351
4352 return ParseStatus::Success;
4353}
4354
4355ParseStatus
4356AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4357 const AsmToken &Tok = getTok();
4358
4359 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4360 if (Mnemonic != "dsb")
4361 return ParseStatus::Failure;
4362
4363 if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
4364 // Immediate operand.
4365 const MCExpr *ImmVal;
4366 SMLoc ExprLoc = getLoc();
4367 if (getParser().parseExpression(Res&: ImmVal))
4368 return ParseStatus::Failure;
4369 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4370 if (!MCE)
4371 return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
4372 int64_t Value = MCE->getValue();
4373 // v8.7-A DSB in the nXS variant accepts only the following immediate
4374 // values: 16, 20, 24, 28.
4375 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4376 return Error(L: ExprLoc, Msg: "barrier operand out of range");
4377 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(ImmValue: Value);
4378 Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: DB->Name,
4379 S: ExprLoc, Ctx&: getContext(),
4380 HasnXSModifier: true /*hasnXSModifier*/));
4381 return ParseStatus::Success;
4382 }
4383
4384 if (Tok.isNot(K: AsmToken::Identifier))
4385 return TokError(Msg: "invalid operand for instruction");
4386
4387 StringRef Operand = Tok.getString();
4388 auto DB = AArch64DBnXS::lookupDBnXSByName(Name: Operand);
4389
4390 if (!DB)
4391 return TokError(Msg: "invalid barrier option name");
4392
4393 Operands.push_back(
4394 Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: Tok.getString(), S: getLoc(),
4395 Ctx&: getContext(), HasnXSModifier: true /*hasnXSModifier*/));
4396 Lex(); // Consume the option
4397
4398 return ParseStatus::Success;
4399}
4400
4401ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4402 const AsmToken &Tok = getTok();
4403
4404 if (Tok.isNot(K: AsmToken::Identifier))
4405 return ParseStatus::NoMatch;
4406
4407 if (AArch64SVCR::lookupSVCRByName(Name: Tok.getString()))
4408 return ParseStatus::NoMatch;
4409
4410 int MRSReg, MSRReg;
4411 auto SysReg = AArch64SysReg::lookupSysRegByName(Name: Tok.getString());
4412 if (SysReg && SysReg->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4413 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4414 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4415 } else
4416 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Name: Tok.getString());
4417
4418 unsigned PStateImm = -1;
4419 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Name: Tok.getString());
4420 if (PState15 && PState15->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
4421 PStateImm = PState15->Encoding;
4422 if (!PState15) {
4423 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Name: Tok.getString());
4424 if (PState1 && PState1->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
4425 PStateImm = PState1->Encoding;
4426 }
4427
4428 Operands.push_back(
4429 Elt: AArch64Operand::CreateSysReg(Str: Tok.getString(), S: getLoc(), MRSReg, MSRReg,
4430 PStateField: PStateImm, Ctx&: getContext()));
4431 Lex(); // Eat identifier
4432
4433 return ParseStatus::Success;
4434}
4435
4436ParseStatus
4437AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4438 SMLoc S = getLoc();
4439 const AsmToken &Tok = getTok();
4440 if (Tok.isNot(K: AsmToken::Identifier))
4441 return TokError(Msg: "invalid operand for instruction");
4442
4443 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4444 if (!PH)
4445 return TokError(Msg: "invalid operand for instruction");
4446
4447 Operands.push_back(Elt: AArch64Operand::CreatePHintInst(
4448 Val: PH->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
4449 Lex(); // Eat identifier token.
4450 return ParseStatus::Success;
4451}
4452
4453/// tryParseNeonVectorRegister - Parse a vector register operand.
4454bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4455 if (getTok().isNot(K: AsmToken::Identifier))
4456 return true;
4457
4458 SMLoc S = getLoc();
4459 // Check for a vector register specifier first.
4460 StringRef Kind;
4461 MCRegister Reg;
4462 ParseStatus Res = tryParseVectorRegister(Reg, Kind, MatchKind: RegKind::NeonVector);
4463 if (!Res.isSuccess())
4464 return true;
4465
4466 const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::NeonVector);
4467 if (!KindRes)
4468 return true;
4469
4470 unsigned ElementWidth = KindRes->second;
4471 Operands.push_back(
4472 Elt: AArch64Operand::CreateVectorReg(Reg, Kind: RegKind::NeonVector, ElementWidth,
4473 S, E: getLoc(), Ctx&: getContext()));
4474
4475 // If there was an explicit qualifier, that goes on as a literal text
4476 // operand.
4477 if (!Kind.empty())
4478 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Kind, S, Ctx&: getContext()));
4479
4480 return tryParseVectorIndex(Operands).isFailure();
4481}
4482
4483ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4484 SMLoc SIdx = getLoc();
4485 if (parseOptionalToken(T: AsmToken::LBrac)) {
4486 const MCExpr *ImmVal;
4487 if (getParser().parseExpression(Res&: ImmVal))
4488 return ParseStatus::NoMatch;
4489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4490 if (!MCE)
4491 return TokError(Msg: "immediate value expected for vector index");
4492
4493 SMLoc E = getLoc();
4494
4495 if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
4496 return ParseStatus::Failure;
4497
4498 Operands.push_back(Elt: AArch64Operand::CreateVectorIndex(Idx: MCE->getValue(), S: SIdx,
4499 E, Ctx&: getContext()));
4500 return ParseStatus::Success;
4501 }
4502
4503 return ParseStatus::NoMatch;
4504}
4505
4506// tryParseVectorRegister - Try to parse a vector register name with
4507// optional kind specifier. If it is a register specifier, eat the token
4508// and return it.
4509ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4510 StringRef &Kind,
4511 RegKind MatchKind) {
4512 const AsmToken &Tok = getTok();
4513
4514 if (Tok.isNot(K: AsmToken::Identifier))
4515 return ParseStatus::NoMatch;
4516
4517 StringRef Name = Tok.getString();
4518 // If there is a kind specifier, it's separated from the register name by
4519 // a '.'.
4520 size_t Start = 0, Next = Name.find(C: '.');
4521 StringRef Head = Name.slice(Start, End: Next);
4522 MCRegister RegNum = matchRegisterNameAlias(Name: Head, Kind: MatchKind);
4523
4524 if (RegNum) {
4525 if (Next != StringRef::npos) {
4526 Kind = Name.substr(Start: Next);
4527 if (!isValidVectorKind(Suffix: Kind, VectorKind: MatchKind))
4528 return TokError(Msg: "invalid vector kind qualifier");
4529 }
4530 Lex(); // Eat the register token.
4531
4532 Reg = RegNum;
4533 return ParseStatus::Success;
4534 }
4535
4536 return ParseStatus::NoMatch;
4537}
4538
4539ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4540 OperandVector &Operands) {
4541 ParseStatus Status =
4542 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4543 if (!Status.isSuccess())
4544 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4545 return Status;
4546}
4547
4548/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4549template <RegKind RK>
4550ParseStatus
4551AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4552 // Check for a SVE predicate register specifier first.
4553 const SMLoc S = getLoc();
4554 StringRef Kind;
4555 MCRegister RegNum;
4556 auto Res = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RK);
4557 if (!Res.isSuccess())
4558 return Res;
4559
4560 const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RK);
4561 if (!KindRes)
4562 return ParseStatus::NoMatch;
4563
4564 unsigned ElementWidth = KindRes->second;
4565 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
4566 Reg: RegNum, Kind: RK, ElementWidth, S,
4567 E: getLoc(), Ctx&: getContext()));
4568
4569 if (getLexer().is(K: AsmToken::LBrac)) {
4570 if (RK == RegKind::SVEPredicateAsCounter) {
4571 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4572 if (ResIndex.isSuccess())
4573 return ParseStatus::Success;
4574 } else {
4575 // Indexed predicate, there's no comma so try parse the next operand
4576 // immediately.
4577 if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
4578 return ParseStatus::NoMatch;
4579 }
4580 }
4581
4582 // Not all predicates are followed by a '/m' or '/z'.
4583 if (getTok().isNot(K: AsmToken::Slash))
4584 return ParseStatus::Success;
4585
4586 // But when they do they shouldn't have an element type suffix.
4587 if (!Kind.empty())
4588 return Error(L: S, Msg: "not expecting size suffix");
4589
4590 // Add a literal slash as operand
4591 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "/", S: getLoc(), Ctx&: getContext()));
4592
4593 Lex(); // Eat the slash.
4594
4595 // Zeroing or merging?
4596 auto Pred = getTok().getString().lower();
4597 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4598 return Error(L: getLoc(), Msg: "expecting 'z' predication");
4599
4600 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4601 return Error(L: getLoc(), Msg: "expecting 'm' or 'z' predication");
4602
4603 // Add zero/merge token.
4604 const char *ZM = Pred == "z" ? "z" : "m";
4605 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ZM, S: getLoc(), Ctx&: getContext()));
4606
4607 Lex(); // Eat zero/merge token.
4608 return ParseStatus::Success;
4609}
4610
4611/// parseRegister - Parse a register operand.
4612bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4613 // Try for a Neon vector register.
4614 if (!tryParseNeonVectorRegister(Operands))
4615 return false;
4616
4617 if (tryParseZTOperand(Operands).isSuccess())
4618 return false;
4619
4620 // Otherwise try for a scalar register.
4621 if (tryParseGPROperand<false>(Operands).isSuccess())
4622 return false;
4623
4624 return true;
4625}
4626
4627bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4628 bool HasELFModifier = false;
4629 AArch64::Specifier RefKind;
4630 SMLoc Loc = getLexer().getLoc();
4631 if (parseOptionalToken(T: AsmToken::Colon)) {
4632 HasELFModifier = true;
4633
4634 if (getTok().isNot(K: AsmToken::Identifier))
4635 return TokError(Msg: "expect relocation specifier in operand after ':'");
4636
4637 std::string LowerCase = getTok().getIdentifier().lower();
4638 RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
4639 .Case(S: "lo12", Value: AArch64::S_LO12)
4640 .Case(S: "abs_g3", Value: AArch64::S_ABS_G3)
4641 .Case(S: "abs_g2", Value: AArch64::S_ABS_G2)
4642 .Case(S: "abs_g2_s", Value: AArch64::S_ABS_G2_S)
4643 .Case(S: "abs_g2_nc", Value: AArch64::S_ABS_G2_NC)
4644 .Case(S: "abs_g1", Value: AArch64::S_ABS_G1)
4645 .Case(S: "abs_g1_s", Value: AArch64::S_ABS_G1_S)
4646 .Case(S: "abs_g1_nc", Value: AArch64::S_ABS_G1_NC)
4647 .Case(S: "abs_g0", Value: AArch64::S_ABS_G0)
4648 .Case(S: "abs_g0_s", Value: AArch64::S_ABS_G0_S)
4649 .Case(S: "abs_g0_nc", Value: AArch64::S_ABS_G0_NC)
4650 .Case(S: "prel_g3", Value: AArch64::S_PREL_G3)
4651 .Case(S: "prel_g2", Value: AArch64::S_PREL_G2)
4652 .Case(S: "prel_g2_nc", Value: AArch64::S_PREL_G2_NC)
4653 .Case(S: "prel_g1", Value: AArch64::S_PREL_G1)
4654 .Case(S: "prel_g1_nc", Value: AArch64::S_PREL_G1_NC)
4655 .Case(S: "prel_g0", Value: AArch64::S_PREL_G0)
4656 .Case(S: "prel_g0_nc", Value: AArch64::S_PREL_G0_NC)
4657 .Case(S: "dtprel", Value: AArch64::S_DTPREL)
4658 .Case(S: "dtprel_g2", Value: AArch64::S_DTPREL_G2)
4659 .Case(S: "dtprel_g1", Value: AArch64::S_DTPREL_G1)
4660 .Case(S: "dtprel_g1_nc", Value: AArch64::S_DTPREL_G1_NC)
4661 .Case(S: "dtprel_g0", Value: AArch64::S_DTPREL_G0)
4662 .Case(S: "dtprel_g0_nc", Value: AArch64::S_DTPREL_G0_NC)
4663 .Case(S: "dtprel_hi12", Value: AArch64::S_DTPREL_HI12)
4664 .Case(S: "dtprel_lo12", Value: AArch64::S_DTPREL_LO12)
4665 .Case(S: "dtprel_lo12_nc", Value: AArch64::S_DTPREL_LO12_NC)
4666 .Case(S: "pg_hi21_nc", Value: AArch64::S_ABS_PAGE_NC)
4667 .Case(S: "tprel_g2", Value: AArch64::S_TPREL_G2)
4668 .Case(S: "tprel_g1", Value: AArch64::S_TPREL_G1)
4669 .Case(S: "tprel_g1_nc", Value: AArch64::S_TPREL_G1_NC)
4670 .Case(S: "tprel_g0", Value: AArch64::S_TPREL_G0)
4671 .Case(S: "tprel_g0_nc", Value: AArch64::S_TPREL_G0_NC)
4672 .Case(S: "tprel_hi12", Value: AArch64::S_TPREL_HI12)
4673 .Case(S: "tprel_lo12", Value: AArch64::S_TPREL_LO12)
4674 .Case(S: "tprel_lo12_nc", Value: AArch64::S_TPREL_LO12_NC)
4675 .Case(S: "tlsdesc_lo12", Value: AArch64::S_TLSDESC_LO12)
4676 .Case(S: "tlsdesc_auth_lo12", Value: AArch64::S_TLSDESC_AUTH_LO12)
4677 .Case(S: "got", Value: AArch64::S_GOT_PAGE)
4678 .Case(S: "gotpage_lo15", Value: AArch64::S_GOT_PAGE_LO15)
4679 .Case(S: "got_lo12", Value: AArch64::S_GOT_LO12)
4680 .Case(S: "got_auth", Value: AArch64::S_GOT_AUTH_PAGE)
4681 .Case(S: "got_auth_lo12", Value: AArch64::S_GOT_AUTH_LO12)
4682 .Case(S: "gottprel", Value: AArch64::S_GOTTPREL_PAGE)
4683 .Case(S: "gottprel_lo12", Value: AArch64::S_GOTTPREL_LO12_NC)
4684 .Case(S: "gottprel_g1", Value: AArch64::S_GOTTPREL_G1)
4685 .Case(S: "gottprel_g0_nc", Value: AArch64::S_GOTTPREL_G0_NC)
4686 .Case(S: "tlsdesc", Value: AArch64::S_TLSDESC_PAGE)
4687 .Case(S: "tlsdesc_auth", Value: AArch64::S_TLSDESC_AUTH_PAGE)
4688 .Case(S: "secrel_lo12", Value: AArch64::S_SECREL_LO12)
4689 .Case(S: "secrel_hi12", Value: AArch64::S_SECREL_HI12)
4690 .Default(Value: AArch64::S_INVALID);
4691
4692 if (RefKind == AArch64::S_INVALID)
4693 return TokError(Msg: "expect relocation specifier in operand after ':'");
4694
4695 Lex(); // Eat identifier
4696
4697 if (parseToken(T: AsmToken::Colon, Msg: "expect ':' after relocation specifier"))
4698 return true;
4699 }
4700
4701 if (getParser().parseExpression(Res&: ImmVal))
4702 return true;
4703
4704 if (HasELFModifier)
4705 ImmVal = MCSpecifierExpr::create(Expr: ImmVal, S: RefKind, Ctx&: getContext(), Loc);
4706
4707 SMLoc EndLoc;
4708 if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
4709 if (getParser().parseAtSpecifier(Res&: ImmVal, EndLoc))
4710 return true;
4711 const MCExpr *Term;
4712 MCBinaryExpr::Opcode Opcode;
4713 if (parseOptionalToken(T: AsmToken::Plus))
4714 Opcode = MCBinaryExpr::Add;
4715 else if (parseOptionalToken(T: AsmToken::Minus))
4716 Opcode = MCBinaryExpr::Sub;
4717 else
4718 return false;
4719 if (getParser().parsePrimaryExpr(Res&: Term, EndLoc))
4720 return true;
4721 ImmVal = MCBinaryExpr::create(Op: Opcode, LHS: ImmVal, RHS: Term, Ctx&: getContext());
4722 }
4723
4724 return false;
4725}
4726
4727ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4728 if (getTok().isNot(K: AsmToken::LCurly))
4729 return ParseStatus::NoMatch;
4730
4731 auto ParseMatrixTile = [this](unsigned &Reg,
4732 unsigned &ElementWidth) -> ParseStatus {
4733 StringRef Name = getTok().getString();
4734 size_t DotPosition = Name.find(C: '.');
4735 if (DotPosition == StringRef::npos)
4736 return ParseStatus::NoMatch;
4737
4738 unsigned RegNum = matchMatrixTileListRegName(Name);
4739 if (!RegNum)
4740 return ParseStatus::NoMatch;
4741
4742 StringRef Tail = Name.drop_front(N: DotPosition);
4743 const std::optional<std::pair<int, int>> &KindRes =
4744 parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
4745 if (!KindRes)
4746 return TokError(
4747 Msg: "Expected the register to be followed by element width suffix");
4748 ElementWidth = KindRes->second;
4749 Reg = RegNum;
4750 Lex(); // Eat the register.
4751 return ParseStatus::Success;
4752 };
4753
4754 SMLoc S = getLoc();
4755 auto LCurly = getTok();
4756 Lex(); // Eat left bracket token.
4757
4758 // Empty matrix list
4759 if (parseOptionalToken(T: AsmToken::RCurly)) {
4760 Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
4761 /*RegMask=*/0, S, E: getLoc(), Ctx&: getContext()));
4762 return ParseStatus::Success;
4763 }
4764
4765 // Try parse {za} alias early
4766 if (getTok().getString().equals_insensitive(RHS: "za")) {
4767 Lex(); // Eat 'za'
4768
4769 if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
4770 return ParseStatus::Failure;
4771
4772 Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
4773 /*RegMask=*/0xFF, S, E: getLoc(), Ctx&: getContext()));
4774 return ParseStatus::Success;
4775 }
4776
4777 SMLoc TileLoc = getLoc();
4778
4779 unsigned FirstReg, ElementWidth;
4780 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4781 if (!ParseRes.isSuccess()) {
4782 getLexer().UnLex(Token: LCurly);
4783 return ParseRes;
4784 }
4785
4786 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4787
4788 unsigned PrevReg = FirstReg;
4789
4790 SmallSet<unsigned, 8> DRegs;
4791 AArch64Operand::ComputeRegsForAlias(Reg: FirstReg, OutRegs&: DRegs, ElementWidth);
4792
4793 SmallSet<unsigned, 8> SeenRegs;
4794 SeenRegs.insert(V: FirstReg);
4795
4796 while (parseOptionalToken(T: AsmToken::Comma)) {
4797 TileLoc = getLoc();
4798 unsigned Reg, NextElementWidth;
4799 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4800 if (!ParseRes.isSuccess())
4801 return ParseRes;
4802
4803 // Element size must match on all regs in the list.
4804 if (ElementWidth != NextElementWidth)
4805 return Error(L: TileLoc, Msg: "mismatched register size suffix");
4806
4807 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(Reg: PrevReg)))
4808 Warning(L: TileLoc, Msg: "tile list not in ascending order");
4809
4810 if (SeenRegs.contains(V: Reg))
4811 Warning(L: TileLoc, Msg: "duplicate tile in list");
4812 else {
4813 SeenRegs.insert(V: Reg);
4814 AArch64Operand::ComputeRegsForAlias(Reg, OutRegs&: DRegs, ElementWidth);
4815 }
4816
4817 PrevReg = Reg;
4818 }
4819
4820 if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
4821 return ParseStatus::Failure;
4822
4823 unsigned RegMask = 0;
4824 for (auto Reg : DRegs)
4825 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4826 RI->getEncodingValue(Reg: AArch64::ZAD0));
4827 Operands.push_back(
4828 Elt: AArch64Operand::CreateMatrixTileList(RegMask, S, E: getLoc(), Ctx&: getContext()));
4829
4830 return ParseStatus::Success;
4831}
4832
4833template <RegKind VectorKind>
4834ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4835 bool ExpectMatch) {
4836 MCAsmParser &Parser = getParser();
4837 if (!getTok().is(K: AsmToken::LCurly))
4838 return ParseStatus::NoMatch;
4839
4840 // Wrapper around parse function
4841 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4842 bool NoMatchIsError) -> ParseStatus {
4843 auto RegTok = getTok();
4844 auto ParseRes = tryParseVectorRegister(Reg, Kind, MatchKind: VectorKind);
4845 if (ParseRes.isSuccess()) {
4846 if (parseVectorKind(Suffix: Kind, VectorKind))
4847 return ParseRes;
4848 llvm_unreachable("Expected a valid vector kind");
4849 }
4850
4851 if (RegTok.is(K: AsmToken::Identifier) && ParseRes.isNoMatch() &&
4852 RegTok.getString().equals_insensitive(RHS: "zt0"))
4853 return ParseStatus::NoMatch;
4854
4855 if (RegTok.isNot(K: AsmToken::Identifier) || ParseRes.isFailure() ||
4856 (ParseRes.isNoMatch() && NoMatchIsError &&
4857 !RegTok.getString().starts_with_insensitive(Prefix: "za")))
4858 return Error(L: Loc, Msg: "vector register expected");
4859
4860 return ParseStatus::NoMatch;
4861 };
4862
4863 unsigned NumRegs = getNumRegsForRegKind(K: VectorKind);
4864 SMLoc S = getLoc();
4865 auto LCurly = getTok();
4866 Lex(); // Eat left bracket token.
4867
4868 StringRef Kind;
4869 MCRegister FirstReg;
4870 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4871
4872 // Put back the original left bracket if there was no match, so that
4873 // different types of list-operands can be matched (e.g. SVE, Neon).
4874 if (ParseRes.isNoMatch())
4875 Parser.getLexer().UnLex(Token: LCurly);
4876
4877 if (!ParseRes.isSuccess())
4878 return ParseRes;
4879
4880 MCRegister PrevReg = FirstReg;
4881 unsigned Count = 1;
4882
4883 unsigned Stride = 1;
4884 if (parseOptionalToken(T: AsmToken::Minus)) {
4885 SMLoc Loc = getLoc();
4886 StringRef NextKind;
4887
4888 MCRegister Reg;
4889 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4890 if (!ParseRes.isSuccess())
4891 return ParseRes;
4892
4893 // Any Kind suffices must match on all regs in the list.
4894 if (Kind != NextKind)
4895 return Error(L: Loc, Msg: "mismatched register size suffix");
4896
4897 unsigned Space =
4898 (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));
4899
4900 if (Space == 0 || Space > 3)
4901 return Error(L: Loc, Msg: "invalid number of vectors");
4902
4903 Count += Space;
4904 }
4905 else {
4906 bool HasCalculatedStride = false;
4907 while (parseOptionalToken(T: AsmToken::Comma)) {
4908 SMLoc Loc = getLoc();
4909 StringRef NextKind;
4910 MCRegister Reg;
4911 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4912 if (!ParseRes.isSuccess())
4913 return ParseRes;
4914
4915 // Any Kind suffices must match on all regs in the list.
4916 if (Kind != NextKind)
4917 return Error(L: Loc, Msg: "mismatched register size suffix");
4918
4919 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4920 unsigned PrevRegVal =
4921 getContext().getRegisterInfo()->getEncodingValue(Reg: PrevReg);
4922 if (!HasCalculatedStride) {
4923 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4924 : (NumRegs - (PrevRegVal - RegVal));
4925 HasCalculatedStride = true;
4926 }
4927
4928 // Register must be incremental (with a wraparound at last register).
4929 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4930 return Error(L: Loc, Msg: "registers must have the same sequential stride");
4931
4932 PrevReg = Reg;
4933 ++Count;
4934 }
4935 }
4936
4937 if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
4938 return ParseStatus::Failure;
4939
4940 if (Count > 4)
4941 return Error(L: S, Msg: "invalid number of vectors");
4942
4943 unsigned NumElements = 0;
4944 unsigned ElementWidth = 0;
4945 if (!Kind.empty()) {
4946 if (const auto &VK = parseVectorKind(Suffix: Kind, VectorKind))
4947 std::tie(args&: NumElements, args&: ElementWidth) = *VK;
4948 }
4949
4950 Operands.push_back(Elt: AArch64Operand::CreateVectorList(
4951 Reg: FirstReg, Count, Stride, NumElements, ElementWidth, RegisterKind: VectorKind, S,
4952 E: getLoc(), Ctx&: getContext()));
4953
4954 if (getTok().is(K: AsmToken::LBrac)) {
4955 ParseStatus Res = tryParseVectorIndex(Operands);
4956 if (Res.isFailure())
4957 return ParseStatus::Failure;
4958 return ParseStatus::Success;
4959 }
4960
4961 return ParseStatus::Success;
4962}
4963
4964/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4965bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4966 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, ExpectMatch: true);
4967 if (!ParseRes.isSuccess())
4968 return true;
4969
4970 return tryParseVectorIndex(Operands).isFailure();
4971}
4972
4973ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4974 SMLoc StartLoc = getLoc();
4975
4976 MCRegister RegNum;
4977 ParseStatus Res = tryParseScalarRegister(RegNum);
4978 if (!Res.isSuccess())
4979 return Res;
4980
4981 if (!parseOptionalToken(T: AsmToken::Comma)) {
4982 Operands.push_back(Elt: AArch64Operand::CreateReg(
4983 Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
4984 return ParseStatus::Success;
4985 }
4986
4987 parseOptionalToken(T: AsmToken::Hash);
4988
4989 if (getTok().isNot(K: AsmToken::Integer))
4990 return Error(L: getLoc(), Msg: "index must be absent or #0");
4991
4992 const MCExpr *ImmVal;
4993 if (getParser().parseExpression(Res&: ImmVal) || !isa<MCConstantExpr>(Val: ImmVal) ||
4994 cast<MCConstantExpr>(Val: ImmVal)->getValue() != 0)
4995 return Error(L: getLoc(), Msg: "index must be absent or #0");
4996
4997 Operands.push_back(Elt: AArch64Operand::CreateReg(
4998 Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
4999 return ParseStatus::Success;
5000}
5001
5002ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
5003 SMLoc StartLoc = getLoc();
5004 const AsmToken &Tok = getTok();
5005 std::string Name = Tok.getString().lower();
5006
5007 MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::LookupTable);
5008
5009 if (!Reg)
5010 return ParseStatus::NoMatch;
5011
5012 Operands.push_back(Elt: AArch64Operand::CreateReg(
5013 Reg, Kind: RegKind::LookupTable, S: StartLoc, E: getLoc(), Ctx&: getContext()));
5014 Lex(); // Eat register.
5015
5016 // Check if register is followed by an index
5017 if (parseOptionalToken(T: AsmToken::LBrac)) {
5018 Operands.push_back(
5019 Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
5020 const MCExpr *ImmVal;
5021 if (getParser().parseExpression(Res&: ImmVal))
5022 return ParseStatus::NoMatch;
5023 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
5024 if (!MCE)
5025 return TokError(Msg: "immediate value expected for vector index");
5026 Operands.push_back(Elt: AArch64Operand::CreateImm(
5027 Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S: StartLoc,
5028 E: getLoc(), Ctx&: getContext()));
5029 if (parseOptionalToken(T: AsmToken::Comma))
5030 if (parseOptionalMulOperand(Operands))
5031 return ParseStatus::Failure;
5032 if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
5033 return ParseStatus::Failure;
5034 Operands.push_back(
5035 Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
5036 }
5037 return ParseStatus::Success;
5038}
5039
5040template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
5041ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
5042 SMLoc StartLoc = getLoc();
5043
5044 MCRegister RegNum;
5045 ParseStatus Res = tryParseScalarRegister(RegNum);
5046 if (!Res.isSuccess())
5047 return Res;
5048
5049 // No shift/extend is the default.
5050 if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
5051 Operands.push_back(Elt: AArch64Operand::CreateReg(
5052 Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext(), EqTy));
5053 return ParseStatus::Success;
5054 }
5055
5056 // Eat the comma
5057 Lex();
5058
5059 // Match the shift
5060 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5061 Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
5062 if (!Res.isSuccess())
5063 return Res;
5064
5065 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
5066 Operands.push_back(Elt: AArch64Operand::CreateReg(
5067 Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: Ext->getEndLoc(), Ctx&: getContext(), EqTy,
5068 ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
5069 HasExplicitAmount: Ext->hasShiftExtendAmount()));
5070
5071 return ParseStatus::Success;
5072}
5073
5074bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
5075 MCAsmParser &Parser = getParser();
5076
5077 // Some SVE instructions have a decoration after the immediate, i.e.
5078 // "mul vl". We parse them here and add tokens, which must be present in the
5079 // asm string in the tablegen instruction.
5080 bool NextIsVL =
5081 Parser.getLexer().peekTok().getString().equals_insensitive(RHS: "vl");
5082 bool NextIsHash = Parser.getLexer().peekTok().is(K: AsmToken::Hash);
5083 if (!getTok().getString().equals_insensitive(RHS: "mul") ||
5084 !(NextIsVL || NextIsHash))
5085 return true;
5086
5087 Operands.push_back(
5088 Elt: AArch64Operand::CreateToken(Str: "mul", S: getLoc(), Ctx&: getContext()));
5089 Lex(); // Eat the "mul"
5090
5091 if (NextIsVL) {
5092 Operands.push_back(
5093 Elt: AArch64Operand::CreateToken(Str: "vl", S: getLoc(), Ctx&: getContext()));
5094 Lex(); // Eat the "vl"
5095 return false;
5096 }
5097
5098 if (NextIsHash) {
5099 Lex(); // Eat the #
5100 SMLoc S = getLoc();
5101
5102 // Parse immediate operand.
5103 const MCExpr *ImmVal;
5104 if (!Parser.parseExpression(Res&: ImmVal))
5105 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal)) {
5106 Operands.push_back(Elt: AArch64Operand::CreateImm(
5107 Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S, E: getLoc(),
5108 Ctx&: getContext()));
5109 return false;
5110 }
5111 }
5112
5113 return Error(L: getLoc(), Msg: "expected 'vl' or '#<imm>'");
5114}
5115
5116bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5117 StringRef &VecGroup) {
5118 MCAsmParser &Parser = getParser();
5119 auto Tok = Parser.getTok();
5120 if (Tok.isNot(K: AsmToken::Identifier))
5121 return true;
5122
5123 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5124 .Case(S: "vgx2", Value: "vgx2")
5125 .Case(S: "vgx4", Value: "vgx4")
5126 .Default(Value: "");
5127
5128 if (VG.empty())
5129 return true;
5130
5131 VecGroup = VG;
5132 Parser.Lex(); // Eat vgx[2|4]
5133 return false;
5134}
5135
5136bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5137 auto Tok = getTok();
5138 if (Tok.isNot(K: AsmToken::Identifier))
5139 return true;
5140
5141 auto Keyword = Tok.getString();
5142 Keyword = StringSwitch<StringRef>(Keyword.lower())
5143 .Case(S: "sm", Value: "sm")
5144 .Case(S: "za", Value: "za")
5145 .Default(Value: Keyword);
5146 Operands.push_back(
5147 Elt: AArch64Operand::CreateToken(Str: Keyword, S: Tok.getLoc(), Ctx&: getContext()));
5148
5149 Lex();
5150 return false;
5151}
5152
5153/// parseOperand - Parse a arm instruction operand. For now this parses the
5154/// operand regardless of the mnemonic.
5155bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
5156 bool invertCondCode) {
5157 MCAsmParser &Parser = getParser();
5158
5159 ParseStatus ResTy =
5160 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
5161
5162 // Check if the current operand has a custom associated parser, if so, try to
5163 // custom parse the operand, or fallback to the general approach.
5164 if (ResTy.isSuccess())
5165 return false;
5166 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5167 // there was a match, but an error occurred, in which case, just return that
5168 // the operand parsing failed.
5169 if (ResTy.isFailure())
5170 return true;
5171
5172 // Nothing custom, so do general case parsing.
5173 SMLoc S, E;
5174 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
5175 if (parseOptionalToken(T: AsmToken::Comma)) {
5176 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
5177 if (!Res.isNoMatch())
5178 return Res.isFailure();
5179 getLexer().UnLex(Token: SavedTok);
5180 }
5181 return false;
5182 };
5183 switch (getLexer().getKind()) {
5184 default: {
5185 SMLoc S = getLoc();
5186 const MCExpr *Expr;
5187 if (parseSymbolicImmVal(ImmVal&: Expr))
5188 return Error(L: S, Msg: "invalid operand");
5189
5190 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
5191 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
5192 return parseOptionalShiftExtend(getTok());
5193 }
5194 case AsmToken::LBrac: {
5195 Operands.push_back(
5196 Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
5197 Lex(); // Eat '['
5198
5199 // There's no comma after a '[', so we can parse the next operand
5200 // immediately.
5201 return parseOperand(Operands, isCondCode: false, invertCondCode: false);
5202 }
5203 case AsmToken::LCurly: {
5204 if (!parseNeonVectorList(Operands))
5205 return false;
5206
5207 Operands.push_back(
5208 Elt: AArch64Operand::CreateToken(Str: "{", S: getLoc(), Ctx&: getContext()));
5209 Lex(); // Eat '{'
5210
5211 // There's no comma after a '{', so we can parse the next operand
5212 // immediately.
5213 return parseOperand(Operands, isCondCode: false, invertCondCode: false);
5214 }
5215 case AsmToken::Identifier: {
5216 // See if this is a "VG" decoration used by SME instructions.
5217 StringRef VecGroup;
5218 if (!parseOptionalVGOperand(Operands, VecGroup)) {
5219 Operands.push_back(
5220 Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
5221 return false;
5222 }
5223 // If we're expecting a Condition Code operand, then just parse that.
5224 if (isCondCode)
5225 return parseCondCode(Operands, invertCondCode);
5226
5227 // If it's a register name, parse it.
5228 if (!parseRegister(Operands)) {
5229 // Parse an optional shift/extend modifier.
5230 AsmToken SavedTok = getTok();
5231 if (parseOptionalToken(T: AsmToken::Comma)) {
5232 // The operand after the register may be a label (e.g. ADR/ADRP). Check
5233 // such cases and don't report an error when <label> happens to match a
5234 // shift/extend modifier.
5235 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
5236 /*ParseForAllFeatures=*/true);
5237 if (!Res.isNoMatch())
5238 return Res.isFailure();
5239 Res = tryParseOptionalShiftExtend(Operands);
5240 if (!Res.isNoMatch())
5241 return Res.isFailure();
5242 getLexer().UnLex(Token: SavedTok);
5243 }
5244 return false;
5245 }
5246
5247 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5248 // by SVE instructions.
5249 if (!parseOptionalMulOperand(Operands))
5250 return false;
5251
5252 // If this is a two-word mnemonic, parse its special keyword
5253 // operand as an identifier.
5254 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5255 Mnemonic == "gcsb")
5256 return parseKeywordOperand(Operands);
5257
5258 // This was not a register so parse other operands that start with an
5259 // identifier (like labels) as expressions and create them as immediates.
5260 const MCExpr *IdVal, *Term;
5261 S = getLoc();
5262 if (getParser().parseExpression(Res&: IdVal))
5263 return true;
5264 if (getParser().parseAtSpecifier(Res&: IdVal, EndLoc&: E))
5265 return true;
5266 std::optional<MCBinaryExpr::Opcode> Opcode;
5267 if (parseOptionalToken(T: AsmToken::Plus))
5268 Opcode = MCBinaryExpr::Add;
5269 else if (parseOptionalToken(T: AsmToken::Minus))
5270 Opcode = MCBinaryExpr::Sub;
5271 if (Opcode) {
5272 if (getParser().parsePrimaryExpr(Res&: Term, EndLoc&: E))
5273 return true;
5274 IdVal = MCBinaryExpr::create(Op: *Opcode, LHS: IdVal, RHS: Term, Ctx&: getContext());
5275 }
5276 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: IdVal, S, E, Ctx&: getContext()));
5277
5278 // Parse an optional shift/extend modifier.
5279 return parseOptionalShiftExtend(getTok());
5280 }
5281 case AsmToken::Integer:
5282 case AsmToken::Real:
5283 case AsmToken::Hash: {
5284 // #42 -> immediate.
5285 S = getLoc();
5286
5287 parseOptionalToken(T: AsmToken::Hash);
5288
5289 // Parse a negative sign
5290 bool isNegative = false;
5291 if (getTok().is(K: AsmToken::Minus)) {
5292 isNegative = true;
5293 // We need to consume this token only when we have a Real, otherwise
5294 // we let parseSymbolicImmVal take care of it
5295 if (Parser.getLexer().peekTok().is(K: AsmToken::Real))
5296 Lex();
5297 }
5298
5299 // The only Real that should come through here is a literal #0.0 for
5300 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5301 // so convert the value.
5302 const AsmToken &Tok = getTok();
5303 if (Tok.is(K: AsmToken::Real)) {
5304 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5305 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5306 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5307 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5308 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5309 return TokError(Msg: "unexpected floating point literal");
5310 else if (IntVal != 0 || isNegative)
5311 return TokError(Msg: "expected floating-point constant #0.0");
5312 Lex(); // Eat the token.
5313
5314 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
5315 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
5316 return false;
5317 }
5318
5319 const MCExpr *ImmVal;
5320 if (parseSymbolicImmVal(ImmVal))
5321 return true;
5322
5323 E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
5324 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: ImmVal, S, E, Ctx&: getContext()));
5325
5326 // Parse an optional shift/extend modifier.
5327 return parseOptionalShiftExtend(Tok);
5328 }
5329 case AsmToken::Equal: {
5330 SMLoc Loc = getLoc();
5331 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5332 return TokError(Msg: "unexpected token in operand");
5333 Lex(); // Eat '='
5334 const MCExpr *SubExprVal;
5335 if (getParser().parseExpression(Res&: SubExprVal))
5336 return true;
5337
5338 if (Operands.size() < 2 ||
5339 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5340 return Error(L: Loc, Msg: "Only valid when first operand is register");
5341
5342 bool IsXReg =
5343 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5344 Reg: Operands[1]->getReg());
5345
5346 MCContext& Ctx = getContext();
5347 E = SMLoc::getFromPointer(Ptr: Loc.getPointer() - 1);
5348 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
5349 if (isa<MCConstantExpr>(Val: SubExprVal)) {
5350 uint64_t Imm = (cast<MCConstantExpr>(Val: SubExprVal))->getValue();
5351 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5352 while (Imm > 0xFFFF && llvm::countr_zero(Val: Imm) >= 16) {
5353 ShiftAmt += 16;
5354 Imm >>= 16;
5355 }
5356 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5357 Operands[0] = AArch64Operand::CreateToken(Str: "movz", S: Loc, Ctx);
5358 Operands.push_back(Elt: AArch64Operand::CreateImm(
5359 Val: MCConstantExpr::create(Value: Imm, Ctx), S, E, Ctx));
5360 if (ShiftAmt)
5361 Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(ShOp: AArch64_AM::LSL,
5362 Val: ShiftAmt, HasExplicitAmount: true, S, E, Ctx));
5363 return false;
5364 }
5365 APInt Simm = APInt(64, Imm << ShiftAmt);
5366 // check if the immediate is an unsigned or signed 32-bit int for W regs
5367 if (!IsXReg && !(Simm.isIntN(N: 32) || Simm.isSignedIntN(N: 32)))
5368 return Error(L: Loc, Msg: "Immediate too large for register");
5369 }
5370 // If it is a label or an imm that cannot fit in a movz, put it into CP.
5371 const MCExpr *CPLoc =
5372 getTargetStreamer().addConstantPoolEntry(SubExprVal, Size: IsXReg ? 8 : 4, Loc);
5373 Operands.push_back(Elt: AArch64Operand::CreateImm(Val: CPLoc, S, E, Ctx));
5374 return false;
5375 }
5376 }
5377}
5378
5379bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5380 const MCExpr *Expr = nullptr;
5381 SMLoc L = getLoc();
5382 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
5383 return true;
5384 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
5385 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
5386 return true;
5387 Out = Value->getValue();
5388 return false;
5389}
5390
5391bool AArch64AsmParser::parseComma() {
5392 if (check(P: getTok().isNot(K: AsmToken::Comma), Loc: getLoc(), Msg: "expected comma"))
5393 return true;
5394 // Eat the comma
5395 Lex();
5396 return false;
5397}
5398
5399bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5400 unsigned First, unsigned Last) {
5401 MCRegister Reg;
5402 SMLoc Start, End;
5403 if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register"))
5404 return true;
5405
5406 // Special handling for FP and LR; they aren't linearly after x28 in
5407 // the registers enum.
5408 unsigned RangeEnd = Last;
5409 if (Base == AArch64::X0) {
5410 if (Last == AArch64::FP) {
5411 RangeEnd = AArch64::X28;
5412 if (Reg == AArch64::FP) {
5413 Out = 29;
5414 return false;
5415 }
5416 }
5417 if (Last == AArch64::LR) {
5418 RangeEnd = AArch64::X28;
5419 if (Reg == AArch64::FP) {
5420 Out = 29;
5421 return false;
5422 } else if (Reg == AArch64::LR) {
5423 Out = 30;
5424 return false;
5425 }
5426 }
5427 }
5428
5429 if (check(P: Reg < First || Reg > RangeEnd, Loc: Start,
5430 Msg: Twine("expected register in range ") +
5431 AArch64InstPrinter::getRegisterName(Reg: First) + " to " +
5432 AArch64InstPrinter::getRegisterName(Reg: Last)))
5433 return true;
5434 Out = Reg - Base;
5435 return false;
5436}
5437
5438bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5439 const MCParsedAsmOperand &Op2) const {
5440 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5441 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5442
5443 if (AOp1.isVectorList() && AOp2.isVectorList())
5444 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5445 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5446 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5447
5448 if (!AOp1.isReg() || !AOp2.isReg())
5449 return false;
5450
5451 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5452 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5453 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5454
5455 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5456 "Testing equality of non-scalar registers not supported");
5457
5458 // Check if a registers match their sub/super register classes.
5459 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5460 return getXRegFromWReg(Reg: Op1.getReg()) == Op2.getReg();
5461 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5462 return getWRegFromXReg(Reg: Op1.getReg()) == Op2.getReg();
5463 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5464 return getXRegFromWReg(Reg: Op2.getReg()) == Op1.getReg();
5465 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5466 return getWRegFromXReg(Reg: Op2.getReg()) == Op1.getReg();
5467
5468 return false;
5469}
5470
5471/// Parse an AArch64 instruction mnemonic followed by its operands.
5472bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5473 StringRef Name, SMLoc NameLoc,
5474 OperandVector &Operands) {
5475 Name = StringSwitch<StringRef>(Name.lower())
5476 .Case(S: "beq", Value: "b.eq")
5477 .Case(S: "bne", Value: "b.ne")
5478 .Case(S: "bhs", Value: "b.hs")
5479 .Case(S: "bcs", Value: "b.cs")
5480 .Case(S: "blo", Value: "b.lo")
5481 .Case(S: "bcc", Value: "b.cc")
5482 .Case(S: "bmi", Value: "b.mi")
5483 .Case(S: "bpl", Value: "b.pl")
5484 .Case(S: "bvs", Value: "b.vs")
5485 .Case(S: "bvc", Value: "b.vc")
5486 .Case(S: "bhi", Value: "b.hi")
5487 .Case(S: "bls", Value: "b.ls")
5488 .Case(S: "bge", Value: "b.ge")
5489 .Case(S: "blt", Value: "b.lt")
5490 .Case(S: "bgt", Value: "b.gt")
5491 .Case(S: "ble", Value: "b.le")
5492 .Case(S: "bal", Value: "b.al")
5493 .Case(S: "bnv", Value: "b.nv")
5494 .Default(Value: Name);
5495
5496 // First check for the AArch64-specific .req directive.
5497 if (getTok().is(K: AsmToken::Identifier) &&
5498 getTok().getIdentifier().lower() == ".req") {
5499 parseDirectiveReq(Name, L: NameLoc);
5500 // We always return 'error' for this, as we're done with this
5501 // statement and don't need to match the 'instruction."
5502 return true;
5503 }
5504
5505 // Create the leading tokens for the mnemonic, split by '.' characters.
5506 size_t Start = 0, Next = Name.find(C: '.');
5507 StringRef Head = Name.slice(Start, End: Next);
5508
5509 // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
5510 // instructions are aliases for the SYS instruction.
5511 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5512 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
5513 Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
5514 return parseSysAlias(Name: Head, NameLoc, Operands);
5515
5516 // GICR instructions are aliases for the SYSL instruction.
5517 if (Head == "gicr")
5518 return parseSyslAlias(Name: Head, NameLoc, Operands);
5519
5520 // TLBIP instructions are aliases for the SYSP instruction.
5521 if (Head == "tlbip")
5522 return parseSyspAlias(Name: Head, NameLoc, Operands);
5523
5524 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Head, S: NameLoc, Ctx&: getContext()));
5525 Mnemonic = Head;
5526
5527 // Handle condition codes for a branch mnemonic
5528 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5529 Start = Next;
5530 Next = Name.find(C: '.', From: Start + 1);
5531 Head = Name.slice(Start: Start + 1, End: Next);
5532
5533 SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
5534 (Head.data() - Name.data()));
5535 std::string Suggestion;
5536 AArch64CC::CondCode CC = parseCondCodeString(Cond: Head, Suggestion);
5537 if (CC == AArch64CC::Invalid) {
5538 std::string Msg = "invalid condition code";
5539 if (!Suggestion.empty())
5540 Msg += ", did you mean " + Suggestion + "?";
5541 return Error(L: SuffixLoc, Msg);
5542 }
5543 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".", S: SuffixLoc, Ctx&: getContext(),
5544 /*IsSuffix=*/true));
5545 Operands.push_back(
5546 Elt: AArch64Operand::CreateCondCode(Code: CC, S: NameLoc, E: NameLoc, Ctx&: getContext()));
5547 }
5548
5549 // Add the remaining tokens in the mnemonic.
5550 while (Next != StringRef::npos) {
5551 Start = Next;
5552 Next = Name.find(C: '.', From: Start + 1);
5553 Head = Name.slice(Start, End: Next);
5554 SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
5555 (Head.data() - Name.data()) + 1);
5556 Operands.push_back(Elt: AArch64Operand::CreateToken(
5557 Str: Head, S: SuffixLoc, Ctx&: getContext(), /*IsSuffix=*/true));
5558 }
5559
5560 // Conditional compare instructions have a Condition Code operand, which needs
5561 // to be parsed and an immediate operand created.
5562 bool condCodeFourthOperand =
5563 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5564 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5565 Head == "csinc" || Head == "csinv" || Head == "csneg");
5566
5567 // These instructions are aliases to some of the conditional select
5568 // instructions. However, the condition code is inverted in the aliased
5569 // instruction.
5570 //
5571 // FIXME: Is this the correct way to handle these? Or should the parser
5572 // generate the aliased instructions directly?
5573 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5574 bool condCodeThirdOperand =
5575 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5576
5577 // Read the remaining operands.
5578 if (getLexer().isNot(K: AsmToken::EndOfStatement)) {
5579
5580 unsigned N = 1;
5581 do {
5582 // Parse and remember the operand.
5583 if (parseOperand(Operands, isCondCode: (N == 4 && condCodeFourthOperand) ||
5584 (N == 3 && condCodeThirdOperand) ||
5585 (N == 2 && condCodeSecondOperand),
5586 invertCondCode: condCodeSecondOperand || condCodeThirdOperand)) {
5587 return true;
5588 }
5589
5590 // After successfully parsing some operands there are three special cases
5591 // to consider (i.e. notional operands not separated by commas). Two are
5592 // due to memory specifiers:
5593 // + An RBrac will end an address for load/store/prefetch
5594 // + An '!' will indicate a pre-indexed operation.
5595 //
5596 // And a further case is '}', which ends a group of tokens specifying the
5597 // SME accumulator array 'ZA' or tile vector, i.e.
5598 //
5599 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5600 //
5601 // It's someone else's responsibility to make sure these tokens are sane
5602 // in the given context!
5603
5604 if (parseOptionalToken(T: AsmToken::RBrac))
5605 Operands.push_back(
5606 Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
5607 if (parseOptionalToken(T: AsmToken::Exclaim))
5608 Operands.push_back(
5609 Elt: AArch64Operand::CreateToken(Str: "!", S: getLoc(), Ctx&: getContext()));
5610 if (parseOptionalToken(T: AsmToken::RCurly))
5611 Operands.push_back(
5612 Elt: AArch64Operand::CreateToken(Str: "}", S: getLoc(), Ctx&: getContext()));
5613
5614 ++N;
5615 } while (parseOptionalToken(T: AsmToken::Comma));
5616 }
5617
5618 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
5619 return true;
5620
5621 return false;
5622}
5623
5624static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5625 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5626 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5627 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5628 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5629 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5630 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5631 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5632}
5633
5634// FIXME: This entire function is a giant hack to provide us with decent
5635// operand range validation/diagnostics until TableGen/MC can be extended
5636// to support autogeneration of this kind of validation.
5637bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5638 SmallVectorImpl<SMLoc> &Loc) {
5639 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5640 const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
5641
5642 // A prefix only applies to the instruction following it. Here we extract
5643 // prefix information for the next instruction before validating the current
5644 // one so that in the case of failure we don't erroneously continue using the
5645 // current prefix.
5646 PrefixInfo Prefix = NextPrefix;
5647 NextPrefix = PrefixInfo::CreateFromInst(Inst, TSFlags: MCID.TSFlags);
5648
5649 // Before validating the instruction in isolation we run through the rules
5650 // applicable when it follows a prefix instruction.
5651 // NOTE: brk & hlt can be prefixed but require no additional validation.
5652 if (Prefix.isActive() &&
5653 (Inst.getOpcode() != AArch64::BRK) &&
5654 (Inst.getOpcode() != AArch64::HLT)) {
5655
5656 // Prefixed instructions must have a destructive operand.
5657 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5658 AArch64::NotDestructive)
5659 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5660 " movprfx, suggest replacing movprfx with mov");
5661
5662 // Destination operands must match.
5663 if (Inst.getOperand(i: 0).getReg() != Prefix.getDstReg())
5664 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5665 " movprfx writing to a different destination");
5666
5667 // Destination operand must not be used in any other location.
5668 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5669 if (Inst.getOperand(i).isReg() &&
5670 (MCID.getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO) == -1) &&
5671 isMatchingOrAlias(ZReg: Prefix.getDstReg(), Reg: Inst.getOperand(i).getReg()))
5672 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5673 " movprfx and destination also used as non-destructive"
5674 " source");
5675 }
5676
5677 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5678 if (Prefix.isPredicated()) {
5679 int PgIdx = -1;
5680
5681 // Find the instructions general predicate.
5682 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5683 if (Inst.getOperand(i).isReg() &&
5684 PPRRegClass.contains(Reg: Inst.getOperand(i).getReg())) {
5685 PgIdx = i;
5686 break;
5687 }
5688
5689 // Instruction must be predicated if the movprfx is predicated.
5690 if (PgIdx == -1 ||
5691 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5692 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5693 " predicated movprfx, suggest using unpredicated movprfx");
5694
5695 // Instruction must use same general predicate as the movprfx.
5696 if (Inst.getOperand(i: PgIdx).getReg() != Prefix.getPgReg())
5697 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5698 " predicated movprfx using a different general predicate");
5699
5700 // Instruction element type must match the movprfx.
5701 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5702 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5703 " predicated movprfx with a different element size");
5704 }
5705 }
5706
5707 // On ARM64EC, only valid registers may be used. Warn against using
5708 // explicitly disallowed registers.
5709 if (IsWindowsArm64EC) {
5710 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5711 if (Inst.getOperand(i).isReg()) {
5712 MCRegister Reg = Inst.getOperand(i).getReg();
5713 // At this point, vector registers are matched to their
5714 // appropriately sized alias.
5715 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5716 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5717 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5718 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5719 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5720 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5721 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5722 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5723 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5724 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5725 Warning(L: IDLoc, Msg: "register " + Twine(RI->getName(RegNo: Reg)) +
5726 " is disallowed on ARM64EC.");
5727 }
5728 }
5729 }
5730 }
5731
5732 // Check for indexed addressing modes w/ the base register being the
5733 // same as a destination/source register or pair load where
5734 // the Rt == Rt2. All of those are undefined behaviour.
5735 switch (Inst.getOpcode()) {
5736 case AArch64::LDPSWpre:
5737 case AArch64::LDPWpost:
5738 case AArch64::LDPWpre:
5739 case AArch64::LDPXpost:
5740 case AArch64::LDPXpre: {
5741 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5742 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5743 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5744 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5745 return Error(L: Loc[0], Msg: "unpredictable LDP instruction, writeback base "
5746 "is also a destination");
5747 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5748 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, writeback base "
5749 "is also a destination");
5750 [[fallthrough]];
5751 }
5752 case AArch64::LDR_ZA:
5753 case AArch64::STR_ZA: {
5754 if (Inst.getOperand(i: 2).isImm() && Inst.getOperand(i: 4).isImm() &&
5755 Inst.getOperand(i: 2).getImm() != Inst.getOperand(i: 4).getImm())
5756 return Error(L: Loc[1],
5757 Msg: "unpredictable instruction, immediate and offset mismatch.");
5758 break;
5759 }
5760 case AArch64::LDPDi:
5761 case AArch64::LDPQi:
5762 case AArch64::LDPSi:
5763 case AArch64::LDPSWi:
5764 case AArch64::LDPWi:
5765 case AArch64::LDPXi: {
5766 MCRegister Rt = Inst.getOperand(i: 0).getReg();
5767 MCRegister Rt2 = Inst.getOperand(i: 1).getReg();
5768 if (Rt == Rt2)
5769 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5770 break;
5771 }
5772 case AArch64::LDPDpost:
5773 case AArch64::LDPDpre:
5774 case AArch64::LDPQpost:
5775 case AArch64::LDPQpre:
5776 case AArch64::LDPSpost:
5777 case AArch64::LDPSpre:
5778 case AArch64::LDPSWpost: {
5779 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5780 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5781 if (Rt == Rt2)
5782 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5783 break;
5784 }
5785 case AArch64::STPDpost:
5786 case AArch64::STPDpre:
5787 case AArch64::STPQpost:
5788 case AArch64::STPQpre:
5789 case AArch64::STPSpost:
5790 case AArch64::STPSpre:
5791 case AArch64::STPWpost:
5792 case AArch64::STPWpre:
5793 case AArch64::STPXpost:
5794 case AArch64::STPXpre: {
5795 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5796 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5797 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5798 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5799 return Error(L: Loc[0], Msg: "unpredictable STP instruction, writeback base "
5800 "is also a source");
5801 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5802 return Error(L: Loc[1], Msg: "unpredictable STP instruction, writeback base "
5803 "is also a source");
5804 break;
5805 }
5806 case AArch64::LDRBBpre:
5807 case AArch64::LDRBpre:
5808 case AArch64::LDRHHpre:
5809 case AArch64::LDRHpre:
5810 case AArch64::LDRSBWpre:
5811 case AArch64::LDRSBXpre:
5812 case AArch64::LDRSHWpre:
5813 case AArch64::LDRSHXpre:
5814 case AArch64::LDRSWpre:
5815 case AArch64::LDRWpre:
5816 case AArch64::LDRXpre:
5817 case AArch64::LDRBBpost:
5818 case AArch64::LDRBpost:
5819 case AArch64::LDRHHpost:
5820 case AArch64::LDRHpost:
5821 case AArch64::LDRSBWpost:
5822 case AArch64::LDRSBXpost:
5823 case AArch64::LDRSHWpost:
5824 case AArch64::LDRSHXpost:
5825 case AArch64::LDRSWpost:
5826 case AArch64::LDRWpost:
5827 case AArch64::LDRXpost: {
5828 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5829 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5830 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5831 return Error(L: Loc[0], Msg: "unpredictable LDR instruction, writeback base "
5832 "is also a source");
5833 break;
5834 }
5835 case AArch64::STRBBpost:
5836 case AArch64::STRBpost:
5837 case AArch64::STRHHpost:
5838 case AArch64::STRHpost:
5839 case AArch64::STRWpost:
5840 case AArch64::STRXpost:
5841 case AArch64::STRBBpre:
5842 case AArch64::STRBpre:
5843 case AArch64::STRHHpre:
5844 case AArch64::STRHpre:
5845 case AArch64::STRWpre:
5846 case AArch64::STRXpre: {
5847 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5848 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5849 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5850 return Error(L: Loc[0], Msg: "unpredictable STR instruction, writeback base "
5851 "is also a source");
5852 break;
5853 }
5854 case AArch64::STXRB:
5855 case AArch64::STXRH:
5856 case AArch64::STXRW:
5857 case AArch64::STXRX:
5858 case AArch64::STLXRB:
5859 case AArch64::STLXRH:
5860 case AArch64::STLXRW:
5861 case AArch64::STLXRX: {
5862 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5863 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5864 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5865 if (RI->isSubRegisterEq(RegA: Rt, RegB: Rs) ||
5866 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5867 return Error(L: Loc[0],
5868 Msg: "unpredictable STXR instruction, status is also a source");
5869 break;
5870 }
5871 case AArch64::STXPW:
5872 case AArch64::STXPX:
5873 case AArch64::STLXPW:
5874 case AArch64::STLXPX: {
5875 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5876 MCRegister Rt1 = Inst.getOperand(i: 1).getReg();
5877 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5878 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5879 if (RI->isSubRegisterEq(RegA: Rt1, RegB: Rs) || RI->isSubRegisterEq(RegA: Rt2, RegB: Rs) ||
5880 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5881 return Error(L: Loc[0],
5882 Msg: "unpredictable STXP instruction, status is also a source");
5883 break;
5884 }
5885 case AArch64::LDRABwriteback:
5886 case AArch64::LDRAAwriteback: {
5887 MCRegister Xt = Inst.getOperand(i: 0).getReg();
5888 MCRegister Xn = Inst.getOperand(i: 1).getReg();
5889 if (Xt == Xn)
5890 return Error(L: Loc[0],
5891 Msg: "unpredictable LDRA instruction, writeback base"
5892 " is also a destination");
5893 break;
5894 }
5895 }
5896
5897 // Check v8.8-A memops instructions.
5898 switch (Inst.getOpcode()) {
5899 case AArch64::CPYFP:
5900 case AArch64::CPYFPWN:
5901 case AArch64::CPYFPRN:
5902 case AArch64::CPYFPN:
5903 case AArch64::CPYFPWT:
5904 case AArch64::CPYFPWTWN:
5905 case AArch64::CPYFPWTRN:
5906 case AArch64::CPYFPWTN:
5907 case AArch64::CPYFPRT:
5908 case AArch64::CPYFPRTWN:
5909 case AArch64::CPYFPRTRN:
5910 case AArch64::CPYFPRTN:
5911 case AArch64::CPYFPT:
5912 case AArch64::CPYFPTWN:
5913 case AArch64::CPYFPTRN:
5914 case AArch64::CPYFPTN:
5915 case AArch64::CPYFM:
5916 case AArch64::CPYFMWN:
5917 case AArch64::CPYFMRN:
5918 case AArch64::CPYFMN:
5919 case AArch64::CPYFMWT:
5920 case AArch64::CPYFMWTWN:
5921 case AArch64::CPYFMWTRN:
5922 case AArch64::CPYFMWTN:
5923 case AArch64::CPYFMRT:
5924 case AArch64::CPYFMRTWN:
5925 case AArch64::CPYFMRTRN:
5926 case AArch64::CPYFMRTN:
5927 case AArch64::CPYFMT:
5928 case AArch64::CPYFMTWN:
5929 case AArch64::CPYFMTRN:
5930 case AArch64::CPYFMTN:
5931 case AArch64::CPYFE:
5932 case AArch64::CPYFEWN:
5933 case AArch64::CPYFERN:
5934 case AArch64::CPYFEN:
5935 case AArch64::CPYFEWT:
5936 case AArch64::CPYFEWTWN:
5937 case AArch64::CPYFEWTRN:
5938 case AArch64::CPYFEWTN:
5939 case AArch64::CPYFERT:
5940 case AArch64::CPYFERTWN:
5941 case AArch64::CPYFERTRN:
5942 case AArch64::CPYFERTN:
5943 case AArch64::CPYFET:
5944 case AArch64::CPYFETWN:
5945 case AArch64::CPYFETRN:
5946 case AArch64::CPYFETN:
5947 case AArch64::CPYP:
5948 case AArch64::CPYPWN:
5949 case AArch64::CPYPRN:
5950 case AArch64::CPYPN:
5951 case AArch64::CPYPWT:
5952 case AArch64::CPYPWTWN:
5953 case AArch64::CPYPWTRN:
5954 case AArch64::CPYPWTN:
5955 case AArch64::CPYPRT:
5956 case AArch64::CPYPRTWN:
5957 case AArch64::CPYPRTRN:
5958 case AArch64::CPYPRTN:
5959 case AArch64::CPYPT:
5960 case AArch64::CPYPTWN:
5961 case AArch64::CPYPTRN:
5962 case AArch64::CPYPTN:
5963 case AArch64::CPYM:
5964 case AArch64::CPYMWN:
5965 case AArch64::CPYMRN:
5966 case AArch64::CPYMN:
5967 case AArch64::CPYMWT:
5968 case AArch64::CPYMWTWN:
5969 case AArch64::CPYMWTRN:
5970 case AArch64::CPYMWTN:
5971 case AArch64::CPYMRT:
5972 case AArch64::CPYMRTWN:
5973 case AArch64::CPYMRTRN:
5974 case AArch64::CPYMRTN:
5975 case AArch64::CPYMT:
5976 case AArch64::CPYMTWN:
5977 case AArch64::CPYMTRN:
5978 case AArch64::CPYMTN:
5979 case AArch64::CPYE:
5980 case AArch64::CPYEWN:
5981 case AArch64::CPYERN:
5982 case AArch64::CPYEN:
5983 case AArch64::CPYEWT:
5984 case AArch64::CPYEWTWN:
5985 case AArch64::CPYEWTRN:
5986 case AArch64::CPYEWTN:
5987 case AArch64::CPYERT:
5988 case AArch64::CPYERTWN:
5989 case AArch64::CPYERTRN:
5990 case AArch64::CPYERTN:
5991 case AArch64::CPYET:
5992 case AArch64::CPYETWN:
5993 case AArch64::CPYETRN:
5994 case AArch64::CPYETN: {
5995 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
5996 MCRegister Xd = Inst.getOperand(i: 3).getReg();
5997 MCRegister Xs = Inst.getOperand(i: 4).getReg();
5998 MCRegister Xn = Inst.getOperand(i: 5).getReg();
5999
6000 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6001 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6002 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6003
6004 if (Xd == Xs)
6005 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and source"
6006 " registers are the same");
6007 if (Xd == Xn)
6008 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and size"
6009 " registers are the same");
6010 if (Xs == Xn)
6011 return Error(L: Loc[0], Msg: "invalid CPY instruction, source and size"
6012 " registers are the same");
6013 break;
6014 }
6015 case AArch64::SETP:
6016 case AArch64::SETPT:
6017 case AArch64::SETPN:
6018 case AArch64::SETPTN:
6019 case AArch64::SETM:
6020 case AArch64::SETMT:
6021 case AArch64::SETMN:
6022 case AArch64::SETMTN:
6023 case AArch64::SETE:
6024 case AArch64::SETET:
6025 case AArch64::SETEN:
6026 case AArch64::SETETN:
6027 case AArch64::SETGP:
6028 case AArch64::SETGPT:
6029 case AArch64::SETGPN:
6030 case AArch64::SETGPTN:
6031 case AArch64::SETGM:
6032 case AArch64::SETGMT:
6033 case AArch64::SETGMN:
6034 case AArch64::SETGMTN:
6035 case AArch64::MOPSSETGE:
6036 case AArch64::MOPSSETGET:
6037 case AArch64::MOPSSETGEN:
6038 case AArch64::MOPSSETGETN: {
6039 // Xd_wb == op0, Xn_wb == op1
6040 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6041 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6042 MCRegister Xm = Inst.getOperand(i: 4).getReg();
6043
6044 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6045 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6046
6047 if (Xd == Xn)
6048 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6049 " registers are the same");
6050 if (Xd == Xm)
6051 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and source"
6052 " registers are the same");
6053 if (Xn == Xm)
6054 return Error(L: Loc[0], Msg: "invalid SET instruction, source and size"
6055 " registers are the same");
6056 break;
6057 }
6058 case AArch64::SETGOP:
6059 case AArch64::SETGOPT:
6060 case AArch64::SETGOPN:
6061 case AArch64::SETGOPTN:
6062 case AArch64::SETGOM:
6063 case AArch64::SETGOMT:
6064 case AArch64::SETGOMN:
6065 case AArch64::SETGOMTN:
6066 case AArch64::SETGOE:
6067 case AArch64::SETGOET:
6068 case AArch64::SETGOEN:
6069 case AArch64::SETGOETN: {
6070 // Xd_wb == op0, Xn_wb == op1
6071 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6072 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6073
6074 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6075 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6076
6077 if (Xd == Xn)
6078 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6079 " registers are the same");
6080 break;
6081 }
6082 }
6083
6084 // Now check immediate ranges. Separate from the above as there is overlap
6085 // in the instructions being checked and this keeps the nested conditionals
6086 // to a minimum.
6087 switch (Inst.getOpcode()) {
6088 case AArch64::ADDSWri:
6089 case AArch64::ADDSXri:
6090 case AArch64::ADDWri:
6091 case AArch64::ADDXri:
6092 case AArch64::SUBSWri:
6093 case AArch64::SUBSXri:
6094 case AArch64::SUBWri:
6095 case AArch64::SUBXri: {
6096 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6097 // some slight duplication here.
6098 if (Inst.getOperand(i: 2).isExpr()) {
6099 const MCExpr *Expr = Inst.getOperand(i: 2).getExpr();
6100 AArch64::Specifier ELFSpec;
6101 AArch64::Specifier DarwinSpec;
6102 int64_t Addend;
6103 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6104
6105 // Only allow these with ADDXri.
6106 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6107 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6108 Inst.getOpcode() == AArch64::ADDXri)
6109 return false;
6110
6111 // Only allow these with ADDXri/ADDWri
6112 if (llvm::is_contained(
6113 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
6114 AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
6115 AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
6116 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
6117 AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
6118 AArch64::S_SECREL_LO12, AArch64::S_SECREL_HI12},
6119 Element: ELFSpec) &&
6120 (Inst.getOpcode() == AArch64::ADDXri ||
6121 Inst.getOpcode() == AArch64::ADDWri))
6122 return false;
6123
6124 // Don't allow symbol refs in the immediate field otherwise
6125 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6126 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6127 // 'cmp w0, 'borked')
6128 return Error(L: Loc.back(), Msg: "invalid immediate expression");
6129 }
6130 // We don't validate more complex expressions here
6131 }
6132 return false;
6133 }
6134 default:
6135 return false;
6136 }
6137}
6138
6139static std::string AArch64MnemonicSpellCheck(StringRef S,
6140 const FeatureBitset &FBS,
6141 unsigned VariantID = 0);
6142
6143bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6144 uint64_t ErrorInfo,
6145 OperandVector &Operands) {
6146 switch (ErrCode) {
6147 case Match_InvalidTiedOperand: {
6148 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6149 if (Op.isVectorList())
6150 return Error(L: Loc, Msg: "operand must match destination register list");
6151
6152 assert(Op.isReg() && "Unexpected operand type");
6153 switch (Op.getRegEqualityTy()) {
6154 case RegConstraintEqualityTy::EqualsSubReg:
6155 return Error(L: Loc, Msg: "operand must be 64-bit form of destination register");
6156 case RegConstraintEqualityTy::EqualsSuperReg:
6157 return Error(L: Loc, Msg: "operand must be 32-bit form of destination register");
6158 case RegConstraintEqualityTy::EqualsReg:
6159 return Error(L: Loc, Msg: "operand must match destination register");
6160 }
6161 llvm_unreachable("Unknown RegConstraintEqualityTy");
6162 }
6163 case Match_MissingFeature:
6164 return Error(L: Loc,
6165 Msg: "instruction requires a CPU feature not currently enabled");
6166 case Match_InvalidOperand:
6167 return Error(L: Loc, Msg: "invalid operand for instruction");
6168 case Match_InvalidSuffix:
6169 return Error(L: Loc, Msg: "invalid type suffix for instruction");
6170 case Match_InvalidCondCode:
6171 return Error(L: Loc, Msg: "expected AArch64 condition code");
6172 case Match_AddSubRegExtendSmall:
6173 return Error(L: Loc,
6174 Msg: "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6175 case Match_AddSubRegExtendLarge:
6176 return Error(L: Loc,
6177 Msg: "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6178 case Match_AddSubSecondSource:
6179 return Error(L: Loc,
6180 Msg: "expected compatible register, symbol or integer in range [0, 4095]");
6181 case Match_LogicalSecondSource:
6182 return Error(L: Loc, Msg: "expected compatible register or logical immediate");
6183 case Match_InvalidMovImm32Shift:
6184 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0 or 16");
6185 case Match_InvalidMovImm64Shift:
6186 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0, 16, 32 or 48");
6187 case Match_AddSubRegShift32:
6188 return Error(L: Loc,
6189 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6190 case Match_AddSubRegShift64:
6191 return Error(L: Loc,
6192 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6193 case Match_InvalidFPImm:
6194 return Error(L: Loc,
6195 Msg: "expected compatible register or floating-point constant");
6196 case Match_InvalidMemoryIndexedSImm6:
6197 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
6198 case Match_InvalidMemoryIndexedSImm5:
6199 return Error(L: Loc, Msg: "index must be an integer in range [-16, 15].");
6200 case Match_InvalidMemoryIndexed1SImm4:
6201 return Error(L: Loc, Msg: "index must be an integer in range [-8, 7].");
6202 case Match_InvalidMemoryIndexed2SImm4:
6203 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [-16, 14].");
6204 case Match_InvalidMemoryIndexed3SImm4:
6205 return Error(L: Loc, Msg: "index must be a multiple of 3 in range [-24, 21].");
6206 case Match_InvalidMemoryIndexed4SImm4:
6207 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-32, 28].");
6208 case Match_InvalidMemoryIndexed16SImm4:
6209 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-128, 112].");
6210 case Match_InvalidMemoryIndexed32SImm4:
6211 return Error(L: Loc, Msg: "index must be a multiple of 32 in range [-256, 224].");
6212 case Match_InvalidMemoryIndexed1SImm6:
6213 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
6214 case Match_InvalidMemoryIndexedSImm8:
6215 return Error(L: Loc, Msg: "index must be an integer in range [-128, 127].");
6216 case Match_InvalidMemoryIndexedSImm9:
6217 return Error(L: Loc, Msg: "index must be an integer in range [-256, 255].");
6218 case Match_InvalidMemoryIndexed16SImm9:
6219 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-4096, 4080].");
6220 case Match_InvalidMemoryIndexed8SImm10:
6221 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-4096, 4088].");
6222 case Match_InvalidMemoryIndexed4SImm7:
6223 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-256, 252].");
6224 case Match_InvalidMemoryIndexed8SImm7:
6225 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-512, 504].");
6226 case Match_InvalidMemoryIndexed16SImm7:
6227 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-1024, 1008].");
6228 case Match_InvalidMemoryIndexed8UImm5:
6229 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 248].");
6230 case Match_InvalidMemoryIndexed8UImm3:
6231 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 56].");
6232 case Match_InvalidMemoryIndexed4UImm5:
6233 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 124].");
6234 case Match_InvalidMemoryIndexed2UImm5:
6235 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 62].");
6236 case Match_InvalidMemoryIndexed8UImm6:
6237 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 504].");
6238 case Match_InvalidMemoryIndexed16UImm6:
6239 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 1008].");
6240 case Match_InvalidMemoryIndexed4UImm6:
6241 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 252].");
6242 case Match_InvalidMemoryIndexed2UImm6:
6243 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 126].");
6244 case Match_InvalidMemoryIndexed1UImm6:
6245 return Error(L: Loc, Msg: "index must be in range [0, 63].");
6246 case Match_InvalidMemoryWExtend8:
6247 return Error(L: Loc,
6248 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0");
6249 case Match_InvalidMemoryWExtend16:
6250 return Error(L: Loc,
6251 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6252 case Match_InvalidMemoryWExtend32:
6253 return Error(L: Loc,
6254 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6255 case Match_InvalidMemoryWExtend64:
6256 return Error(L: Loc,
6257 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6258 case Match_InvalidMemoryWExtend128:
6259 return Error(L: Loc,
6260 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6261 case Match_InvalidMemoryXExtend8:
6262 return Error(L: Loc,
6263 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0");
6264 case Match_InvalidMemoryXExtend16:
6265 return Error(L: Loc,
6266 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6267 case Match_InvalidMemoryXExtend32:
6268 return Error(L: Loc,
6269 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6270 case Match_InvalidMemoryXExtend64:
6271 return Error(L: Loc,
6272 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6273 case Match_InvalidMemoryXExtend128:
6274 return Error(L: Loc,
6275 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6276 case Match_InvalidMemoryIndexed1:
6277 return Error(L: Loc, Msg: "index must be an integer in range [0, 4095].");
6278 case Match_InvalidMemoryIndexed2:
6279 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 8190].");
6280 case Match_InvalidMemoryIndexed4:
6281 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 16380].");
6282 case Match_InvalidMemoryIndexed8:
6283 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 32760].");
6284 case Match_InvalidMemoryIndexed16:
6285 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 65520].");
6286 case Match_InvalidImm0_0:
6287 return Error(L: Loc, Msg: "immediate must be 0.");
6288 case Match_InvalidImm0_1:
6289 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 1].");
6290 case Match_InvalidImm0_3:
6291 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 3].");
6292 case Match_InvalidImm0_7:
6293 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 7].");
6294 case Match_InvalidImm0_15:
6295 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 15].");
6296 case Match_InvalidImm0_31:
6297 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 31].");
6298 case Match_InvalidImm0_63:
6299 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 63].");
6300 case Match_InvalidImm0_127:
6301 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 127].");
6302 case Match_InvalidImm0_255:
6303 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255].");
6304 case Match_InvalidImm0_65535:
6305 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 65535].");
6306 case Match_InvalidImm1_8:
6307 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 8].");
6308 case Match_InvalidImm1_16:
6309 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 16].");
6310 case Match_InvalidImm1_32:
6311 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 32].");
6312 case Match_InvalidImm1_64:
6313 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 64].");
6314 case Match_InvalidImmM1_62:
6315 return Error(L: Loc, Msg: "immediate must be an integer in range [-1, 62].");
6316 case Match_InvalidMemoryIndexedRange2UImm0:
6317 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:1.");
6318 case Match_InvalidMemoryIndexedRange2UImm1:
6319 return Error(L: Loc, Msg: "vector select offset must be an immediate range of the "
6320 "form <immf>:<imml>, where the first "
6321 "immediate is a multiple of 2 in the range [0, 2], and "
6322 "the second immediate is immf + 1.");
6323 case Match_InvalidMemoryIndexedRange2UImm2:
6324 case Match_InvalidMemoryIndexedRange2UImm3:
6325 return Error(
6326 L: Loc,
6327 Msg: "vector select offset must be an immediate range of the form "
6328 "<immf>:<imml>, "
6329 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6330 "[0, 14] "
6331 "depending on the instruction, and the second immediate is immf + 1.");
6332 case Match_InvalidMemoryIndexedRange4UImm0:
6333 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:3.");
6334 case Match_InvalidMemoryIndexedRange4UImm1:
6335 case Match_InvalidMemoryIndexedRange4UImm2:
6336 return Error(
6337 L: Loc,
6338 Msg: "vector select offset must be an immediate range of the form "
6339 "<immf>:<imml>, "
6340 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6341 "[0, 12] "
6342 "depending on the instruction, and the second immediate is immf + 3.");
6343 case Match_InvalidSVEAddSubImm8:
6344 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255]"
6345 " with a shift amount of 0");
6346 case Match_InvalidSVEAddSubImm16:
6347 case Match_InvalidSVEAddSubImm32:
6348 case Match_InvalidSVEAddSubImm64:
6349 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255] or a "
6350 "multiple of 256 in range [256, 65280]");
6351 case Match_InvalidSVECpyImm8:
6352 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 255]"
6353 " with a shift amount of 0");
6354 case Match_InvalidSVECpyImm16:
6355 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6356 "multiple of 256 in range [-32768, 65280]");
6357 case Match_InvalidSVECpyImm32:
6358 case Match_InvalidSVECpyImm64:
6359 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6360 "multiple of 256 in range [-32768, 32512]");
6361 case Match_InvalidIndexRange0_0:
6362 return Error(L: Loc, Msg: "expected lane specifier '[0]'");
6363 case Match_InvalidIndexRange1_1:
6364 return Error(L: Loc, Msg: "expected lane specifier '[1]'");
6365 case Match_InvalidIndexRange0_15:
6366 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6367 case Match_InvalidIndexRange0_7:
6368 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6369 case Match_InvalidIndexRange0_3:
6370 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6371 case Match_InvalidIndexRange0_1:
6372 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 1].");
6373 case Match_InvalidSVEIndexRange0_63:
6374 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 63].");
6375 case Match_InvalidSVEIndexRange0_31:
6376 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 31].");
6377 case Match_InvalidSVEIndexRange0_15:
6378 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6379 case Match_InvalidSVEIndexRange0_7:
6380 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6381 case Match_InvalidSVEIndexRange0_3:
6382 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6383 case Match_InvalidLabel:
6384 return Error(L: Loc, Msg: "expected label or encodable integer pc offset");
6385 case Match_MRS:
6386 return Error(L: Loc, Msg: "expected readable system register");
6387 case Match_MSR:
6388 case Match_InvalidSVCR:
6389 return Error(L: Loc, Msg: "expected writable system register or pstate");
6390 case Match_InvalidComplexRotationEven:
6391 return Error(L: Loc, Msg: "complex rotation must be 0, 90, 180 or 270.");
6392 case Match_InvalidComplexRotationOdd:
6393 return Error(L: Loc, Msg: "complex rotation must be 90 or 270.");
6394 case Match_MnemonicFail: {
6395 std::string Suggestion = AArch64MnemonicSpellCheck(
6396 S: ((AArch64Operand &)*Operands[0]).getToken(),
6397 FBS: ComputeAvailableFeatures(FB: STI->getFeatureBits()));
6398 return Error(L: Loc, Msg: "unrecognized instruction mnemonic" + Suggestion);
6399 }
6400 case Match_InvalidGPR64shifted8:
6401 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, without shift");
6402 case Match_InvalidGPR64shifted16:
6403 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6404 case Match_InvalidGPR64shifted32:
6405 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6406 case Match_InvalidGPR64shifted64:
6407 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6408 case Match_InvalidGPR64shifted128:
6409 return Error(
6410 L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6411 case Match_InvalidGPR64NoXZRshifted8:
6412 return Error(L: Loc, Msg: "register must be x0..x30 without shift");
6413 case Match_InvalidGPR64NoXZRshifted16:
6414 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #1'");
6415 case Match_InvalidGPR64NoXZRshifted32:
6416 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #2'");
6417 case Match_InvalidGPR64NoXZRshifted64:
6418 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #3'");
6419 case Match_InvalidGPR64NoXZRshifted128:
6420 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #4'");
6421 case Match_InvalidZPR32UXTW8:
6422 case Match_InvalidZPR32SXTW8:
6423 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6424 case Match_InvalidZPR32UXTW16:
6425 case Match_InvalidZPR32SXTW16:
6426 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6427 case Match_InvalidZPR32UXTW32:
6428 case Match_InvalidZPR32SXTW32:
6429 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6430 case Match_InvalidZPR32UXTW64:
6431 case Match_InvalidZPR32SXTW64:
6432 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6433 case Match_InvalidZPR64UXTW8:
6434 case Match_InvalidZPR64SXTW8:
6435 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6436 case Match_InvalidZPR64UXTW16:
6437 case Match_InvalidZPR64SXTW16:
6438 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6439 case Match_InvalidZPR64UXTW32:
6440 case Match_InvalidZPR64SXTW32:
6441 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6442 case Match_InvalidZPR64UXTW64:
6443 case Match_InvalidZPR64SXTW64:
6444 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6445 case Match_InvalidZPR32LSL8:
6446 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s'");
6447 case Match_InvalidZPR32LSL16:
6448 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6449 case Match_InvalidZPR32LSL32:
6450 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6451 case Match_InvalidZPR32LSL64:
6452 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6453 case Match_InvalidZPR64LSL8:
6454 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d'");
6455 case Match_InvalidZPR64LSL16:
6456 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6457 case Match_InvalidZPR64LSL32:
6458 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6459 case Match_InvalidZPR64LSL64:
6460 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6461 case Match_InvalidZPR0:
6462 return Error(L: Loc, Msg: "expected register without element width suffix");
6463 case Match_InvalidZPR8:
6464 case Match_InvalidZPR16:
6465 case Match_InvalidZPR32:
6466 case Match_InvalidZPR64:
6467 case Match_InvalidZPR128:
6468 return Error(L: Loc, Msg: "invalid element width");
6469 case Match_InvalidZPR_3b8:
6470 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.b..z7.b");
6471 case Match_InvalidZPR_3b16:
6472 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z7.h");
6473 case Match_InvalidZPR_3b32:
6474 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z7.s");
6475 case Match_InvalidZPR_4b8:
6476 return Error(L: Loc,
6477 Msg: "Invalid restricted vector register, expected z0.b..z15.b");
6478 case Match_InvalidZPR_4b16:
6479 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z15.h");
6480 case Match_InvalidZPR_4b32:
6481 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z15.s");
6482 case Match_InvalidZPR_4b64:
6483 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.d..z15.d");
6484 case Match_InvalidZPRMul2_Lo8:
6485 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6486 "register in z0.b..z14.b");
6487 case Match_InvalidZPRMul2_Hi8:
6488 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6489 "register in z16.b..z30.b");
6490 case Match_InvalidZPRMul2_Lo16:
6491 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6492 "register in z0.h..z14.h");
6493 case Match_InvalidZPRMul2_Hi16:
6494 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6495 "register in z16.h..z30.h");
6496 case Match_InvalidZPRMul2_Lo32:
6497 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6498 "register in z0.s..z14.s");
6499 case Match_InvalidZPRMul2_Hi32:
6500 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6501 "register in z16.s..z30.s");
6502 case Match_InvalidZPRMul2_Lo64:
6503 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6504 "register in z0.d..z14.d");
6505 case Match_InvalidZPRMul2_Hi64:
6506 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6507 "register in z16.d..z30.d");
6508 case Match_InvalidZPR_K0:
6509 return Error(L: Loc, Msg: "invalid restricted vector register, expected register "
6510 "in z20..z23 or z28..z31");
6511 case Match_InvalidSVEPattern:
6512 return Error(L: Loc, Msg: "invalid predicate pattern");
6513 case Match_InvalidSVEPPRorPNRAnyReg:
6514 case Match_InvalidSVEPPRorPNRBReg:
6515 case Match_InvalidSVEPredicateAnyReg:
6516 case Match_InvalidSVEPredicateBReg:
6517 case Match_InvalidSVEPredicateHReg:
6518 case Match_InvalidSVEPredicateSReg:
6519 case Match_InvalidSVEPredicateDReg:
6520 return Error(L: Loc, Msg: "invalid predicate register.");
6521 case Match_InvalidSVEPredicate3bAnyReg:
6522 return Error(L: Loc, Msg: "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6523 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6524 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6525 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6526 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6527 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6528 "pn8..pn15 with element suffix.");
6529 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6530 return Error(L: Loc, Msg: "invalid restricted predicate-as-counter register "
6531 "expected pn8..pn15");
6532 case Match_InvalidSVEPNPredicateBReg:
6533 case Match_InvalidSVEPNPredicateHReg:
6534 case Match_InvalidSVEPNPredicateSReg:
6535 case Match_InvalidSVEPNPredicateDReg:
6536 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6537 "pn0..pn15 with element suffix.");
6538 case Match_InvalidSVEVecLenSpecifier:
6539 return Error(L: Loc, Msg: "Invalid vector length specifier, expected VLx2 or VLx4");
6540 case Match_InvalidSVEPredicateListMul2x8:
6541 case Match_InvalidSVEPredicateListMul2x16:
6542 case Match_InvalidSVEPredicateListMul2x32:
6543 case Match_InvalidSVEPredicateListMul2x64:
6544 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6545 "predicate registers, where the first vector is a multiple of 2 "
6546 "and with correct element type");
6547 case Match_InvalidSVEExactFPImmOperandHalfOne:
6548 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 1.0.");
6549 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6550 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 2.0.");
6551 case Match_InvalidSVEExactFPImmOperandZeroOne:
6552 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.0 or 1.0.");
6553 case Match_InvalidMatrixTileVectorH8:
6554 case Match_InvalidMatrixTileVectorV8:
6555 return Error(L: Loc, Msg: "invalid matrix operand, expected za0h.b or za0v.b");
6556 case Match_InvalidMatrixTileVectorH16:
6557 case Match_InvalidMatrixTileVectorV16:
6558 return Error(L: Loc,
6559 Msg: "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6560 case Match_InvalidMatrixTileVectorH32:
6561 case Match_InvalidMatrixTileVectorV32:
6562 return Error(L: Loc,
6563 Msg: "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6564 case Match_InvalidMatrixTileVectorH64:
6565 case Match_InvalidMatrixTileVectorV64:
6566 return Error(L: Loc,
6567 Msg: "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6568 case Match_InvalidMatrixTileVectorH128:
6569 case Match_InvalidMatrixTileVectorV128:
6570 return Error(L: Loc,
6571 Msg: "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6572 case Match_InvalidMatrixTile16:
6573 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-1].h");
6574 case Match_InvalidMatrixTile32:
6575 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-3].s");
6576 case Match_InvalidMatrixTile64:
6577 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-7].d");
6578 case Match_InvalidMatrix:
6579 return Error(L: Loc, Msg: "invalid matrix operand, expected za");
6580 case Match_InvalidMatrix8:
6581 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .b");
6582 case Match_InvalidMatrix16:
6583 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .h");
6584 case Match_InvalidMatrix32:
6585 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .s");
6586 case Match_InvalidMatrix64:
6587 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .d");
6588 case Match_InvalidMatrixIndexGPR32_12_15:
6589 return Error(L: Loc, Msg: "operand must be a register in range [w12, w15]");
6590 case Match_InvalidMatrixIndexGPR32_8_11:
6591 return Error(L: Loc, Msg: "operand must be a register in range [w8, w11]");
6592 case Match_InvalidSVEVectorList2x8Mul2:
6593 case Match_InvalidSVEVectorList2x16Mul2:
6594 case Match_InvalidSVEVectorList2x32Mul2:
6595 case Match_InvalidSVEVectorList2x64Mul2:
6596 case Match_InvalidSVEVectorList2x128Mul2:
6597 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6598 "SVE vectors, where the first vector is a multiple of 2 "
6599 "and with matching element types");
6600 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6601 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6602 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6603 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6604 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6605 "SVE vectors in the range z0-z14, where the first vector "
6606 "is a multiple of 2 "
6607 "and with matching element types");
6608 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6609 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6610 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6611 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6612 return Error(L: Loc,
6613 Msg: "Invalid vector list, expected list with 2 consecutive "
6614 "SVE vectors in the range z16-z30, where the first vector "
6615 "is a multiple of 2 "
6616 "and with matching element types");
6617 case Match_InvalidSVEVectorList4x8Mul4:
6618 case Match_InvalidSVEVectorList4x16Mul4:
6619 case Match_InvalidSVEVectorList4x32Mul4:
6620 case Match_InvalidSVEVectorList4x64Mul4:
6621 case Match_InvalidSVEVectorList4x128Mul4:
6622 return Error(L: Loc, Msg: "Invalid vector list, expected list with 4 consecutive "
6623 "SVE vectors, where the first vector is a multiple of 4 "
6624 "and with matching element types");
6625 case Match_InvalidLookupTable:
6626 return Error(L: Loc, Msg: "Invalid lookup table, expected zt0");
6627 case Match_InvalidSVEVectorListStrided2x8:
6628 case Match_InvalidSVEVectorListStrided2x16:
6629 case Match_InvalidSVEVectorListStrided2x32:
6630 case Match_InvalidSVEVectorListStrided2x64:
6631 return Error(
6632 L: Loc,
6633 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6634 "8 registers apart, and the first register in the range [z0, z7] or "
6635 "[z16, z23] and with correct element type");
6636 case Match_InvalidSVEVectorListStrided4x8:
6637 case Match_InvalidSVEVectorListStrided4x16:
6638 case Match_InvalidSVEVectorListStrided4x32:
6639 case Match_InvalidSVEVectorListStrided4x64:
6640 return Error(
6641 L: Loc,
6642 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6643 "4 registers apart, and the first register in the range [z0, z3] or "
6644 "[z16, z19] and with correct element type");
6645 case Match_AddSubLSLImm3ShiftLarge:
6646 return Error(L: Loc,
6647 Msg: "expected 'lsl' with optional integer in range [0, 7]");
6648 default:
6649 llvm_unreachable("unexpected error code!");
6650 }
6651}
6652
6653static const char *getSubtargetFeatureName(uint64_t Val);
6654
6655bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6656 OperandVector &Operands,
6657 MCStreamer &Out,
6658 uint64_t &ErrorInfo,
6659 bool MatchingInlineAsm) {
6660 assert(!Operands.empty() && "Unexpected empty operand list!");
6661 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6662 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6663
6664 StringRef Tok = Op.getToken();
6665 unsigned NumOperands = Operands.size();
6666
6667 if (NumOperands == 4 && Tok == "lsl") {
6668 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6669 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6670 if (Op2.isScalarReg() && Op3.isImm()) {
6671 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6672 if (Op3CE) {
6673 uint64_t Op3Val = Op3CE->getValue();
6674 uint64_t NewOp3Val = 0;
6675 uint64_t NewOp4Val = 0;
6676 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6677 Reg: Op2.getReg())) {
6678 NewOp3Val = (32 - Op3Val) & 0x1f;
6679 NewOp4Val = 31 - Op3Val;
6680 } else {
6681 NewOp3Val = (64 - Op3Val) & 0x3f;
6682 NewOp4Val = 63 - Op3Val;
6683 }
6684
6685 const MCExpr *NewOp3 = MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6686 const MCExpr *NewOp4 = MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6687
6688 Operands[0] =
6689 AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(), Ctx&: getContext());
6690 Operands.push_back(Elt: AArch64Operand::CreateImm(
6691 Val: NewOp4, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext()));
6692 Operands[3] = AArch64Operand::CreateImm(Val: NewOp3, S: Op3.getStartLoc(),
6693 E: Op3.getEndLoc(), Ctx&: getContext());
6694 }
6695 }
6696 } else if (NumOperands == 4 && Tok == "bfc") {
6697 // FIXME: Horrible hack to handle BFC->BFM alias.
6698 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6699 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6700 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6701
6702 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6703 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(Val: LSBOp.getImm());
6704 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(Val: WidthOp.getImm());
6705
6706 if (LSBCE && WidthCE) {
6707 uint64_t LSB = LSBCE->getValue();
6708 uint64_t Width = WidthCE->getValue();
6709
6710 uint64_t RegWidth = 0;
6711 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6712 Reg: Op1.getReg()))
6713 RegWidth = 64;
6714 else
6715 RegWidth = 32;
6716
6717 if (LSB >= RegWidth)
6718 return Error(L: LSBOp.getStartLoc(),
6719 Msg: "expected integer in range [0, 31]");
6720 if (Width < 1 || Width > RegWidth)
6721 return Error(L: WidthOp.getStartLoc(),
6722 Msg: "expected integer in range [1, 32]");
6723
6724 uint64_t ImmR = 0;
6725 if (RegWidth == 32)
6726 ImmR = (32 - LSB) & 0x1f;
6727 else
6728 ImmR = (64 - LSB) & 0x3f;
6729
6730 uint64_t ImmS = Width - 1;
6731
6732 if (ImmR != 0 && ImmS >= ImmR)
6733 return Error(L: WidthOp.getStartLoc(),
6734 Msg: "requested insert overflows register");
6735
6736 const MCExpr *ImmRExpr = MCConstantExpr::create(Value: ImmR, Ctx&: getContext());
6737 const MCExpr *ImmSExpr = MCConstantExpr::create(Value: ImmS, Ctx&: getContext());
6738 Operands[0] =
6739 AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(), Ctx&: getContext());
6740 Operands[2] = AArch64Operand::CreateReg(
6741 Reg: RegWidth == 32 ? AArch64::WZR : AArch64::XZR, Kind: RegKind::Scalar,
6742 S: SMLoc(), E: SMLoc(), Ctx&: getContext());
6743 Operands[3] = AArch64Operand::CreateImm(
6744 Val: ImmRExpr, S: LSBOp.getStartLoc(), E: LSBOp.getEndLoc(), Ctx&: getContext());
6745 Operands.emplace_back(
6746 Args: AArch64Operand::CreateImm(Val: ImmSExpr, S: WidthOp.getStartLoc(),
6747 E: WidthOp.getEndLoc(), Ctx&: getContext()));
6748 }
6749 }
6750 } else if (NumOperands == 5) {
6751 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6752 // UBFIZ -> UBFM aliases.
6753 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6754 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6755 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6756 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6757
6758 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6759 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6760 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6761
6762 if (Op3CE && Op4CE) {
6763 uint64_t Op3Val = Op3CE->getValue();
6764 uint64_t Op4Val = Op4CE->getValue();
6765
6766 uint64_t RegWidth = 0;
6767 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6768 Reg: Op1.getReg()))
6769 RegWidth = 64;
6770 else
6771 RegWidth = 32;
6772
6773 if (Op3Val >= RegWidth)
6774 return Error(L: Op3.getStartLoc(),
6775 Msg: "expected integer in range [0, 31]");
6776 if (Op4Val < 1 || Op4Val > RegWidth)
6777 return Error(L: Op4.getStartLoc(),
6778 Msg: "expected integer in range [1, 32]");
6779
6780 uint64_t NewOp3Val = 0;
6781 if (RegWidth == 32)
6782 NewOp3Val = (32 - Op3Val) & 0x1f;
6783 else
6784 NewOp3Val = (64 - Op3Val) & 0x3f;
6785
6786 uint64_t NewOp4Val = Op4Val - 1;
6787
6788 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6789 return Error(L: Op4.getStartLoc(),
6790 Msg: "requested insert overflows register");
6791
6792 const MCExpr *NewOp3 =
6793 MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6794 const MCExpr *NewOp4 =
6795 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6796 Operands[3] = AArch64Operand::CreateImm(
6797 Val: NewOp3, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext());
6798 Operands[4] = AArch64Operand::CreateImm(
6799 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6800 if (Tok == "bfi")
6801 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6802 Ctx&: getContext());
6803 else if (Tok == "sbfiz")
6804 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6805 Ctx&: getContext());
6806 else if (Tok == "ubfiz")
6807 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6808 Ctx&: getContext());
6809 else
6810 llvm_unreachable("No valid mnemonic for alias?");
6811 }
6812 }
6813
6814 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6815 // UBFX -> UBFM aliases.
6816 } else if (NumOperands == 5 &&
6817 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6818 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6819 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6820 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6821
6822 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6823 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6824 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6825
6826 if (Op3CE && Op4CE) {
6827 uint64_t Op3Val = Op3CE->getValue();
6828 uint64_t Op4Val = Op4CE->getValue();
6829
6830 uint64_t RegWidth = 0;
6831 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6832 Reg: Op1.getReg()))
6833 RegWidth = 64;
6834 else
6835 RegWidth = 32;
6836
6837 if (Op3Val >= RegWidth)
6838 return Error(L: Op3.getStartLoc(),
6839 Msg: "expected integer in range [0, 31]");
6840 if (Op4Val < 1 || Op4Val > RegWidth)
6841 return Error(L: Op4.getStartLoc(),
6842 Msg: "expected integer in range [1, 32]");
6843
6844 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6845
6846 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6847 return Error(L: Op4.getStartLoc(),
6848 Msg: "requested extract overflows register");
6849
6850 const MCExpr *NewOp4 =
6851 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6852 Operands[4] = AArch64Operand::CreateImm(
6853 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6854 if (Tok == "bfxil")
6855 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6856 Ctx&: getContext());
6857 else if (Tok == "sbfx")
6858 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6859 Ctx&: getContext());
6860 else if (Tok == "ubfx")
6861 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6862 Ctx&: getContext());
6863 else
6864 llvm_unreachable("No valid mnemonic for alias?");
6865 }
6866 }
6867 }
6868 }
6869
6870 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6871 // instruction for FP registers correctly in some rare circumstances. Convert
6872 // it to a safe instruction and warn (because silently changing someone's
6873 // assembly is rude).
6874 if (getSTI().hasFeature(Feature: AArch64::FeatureZCZeroingFPWorkaround) &&
6875 NumOperands == 4 && Tok == "movi") {
6876 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6877 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6878 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6879 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6880 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6881 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6882 if (Suffix.lower() == ".2d" &&
6883 cast<MCConstantExpr>(Val: Op3.getImm())->getValue() == 0) {
6884 Warning(L: IDLoc, Msg: "instruction movi.2d with immediate #0 may not function"
6885 " correctly on this CPU, converting to equivalent movi.16b");
6886 // Switch the suffix to .16b.
6887 unsigned Idx = Op1.isToken() ? 1 : 2;
6888 Operands[Idx] =
6889 AArch64Operand::CreateToken(Str: ".16b", S: IDLoc, Ctx&: getContext());
6890 }
6891 }
6892 }
6893
6894 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6895 // InstAlias can't quite handle this since the reg classes aren't
6896 // subclasses.
6897 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6898 // The source register can be Wn here, but the matcher expects a
6899 // GPR64. Twiddle it here if necessary.
6900 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6901 if (Op.isScalarReg()) {
6902 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6903 Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6904 S: Op.getStartLoc(), E: Op.getEndLoc(),
6905 Ctx&: getContext());
6906 }
6907 }
6908 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6909 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6910 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6911 if (Op.isScalarReg() &&
6912 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6913 Reg: Op.getReg())) {
6914 // The source register can be Wn here, but the matcher expects a
6915 // GPR64. Twiddle it here if necessary.
6916 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6917 if (Op.isScalarReg()) {
6918 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6919 Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6920 S: Op.getStartLoc(),
6921 E: Op.getEndLoc(), Ctx&: getContext());
6922 }
6923 }
6924 }
6925 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6926 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6927 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6928 if (Op.isScalarReg() &&
6929 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6930 Reg: Op.getReg())) {
6931 // The source register can be Wn here, but the matcher expects a
6932 // GPR32. Twiddle it here if necessary.
6933 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6934 if (Op.isScalarReg()) {
6935 MCRegister Reg = getWRegFromXReg(Reg: Op.getReg());
6936 Operands[1] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6937 S: Op.getStartLoc(),
6938 E: Op.getEndLoc(), Ctx&: getContext());
6939 }
6940 }
6941 }
6942
6943 MCInst Inst;
6944 FeatureBitset MissingFeatures;
6945 // First try to match against the secondary set of tables containing the
6946 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6947 unsigned MatchResult =
6948 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6949 matchingInlineAsm: MatchingInlineAsm, VariantID: 1);
6950
6951 // If that fails, try against the alternate table containing long-form NEON:
6952 // "fadd v0.2s, v1.2s, v2.2s"
6953 if (MatchResult != Match_Success) {
6954 // But first, save the short-form match result: we can use it in case the
6955 // long-form match also fails.
6956 auto ShortFormNEONErrorInfo = ErrorInfo;
6957 auto ShortFormNEONMatchResult = MatchResult;
6958 auto ShortFormNEONMissingFeatures = MissingFeatures;
6959
6960 MatchResult =
6961 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6962 matchingInlineAsm: MatchingInlineAsm, VariantID: 0);
6963
6964 // Now, both matches failed, and the long-form match failed on the mnemonic
6965 // suffix token operand. The short-form match failure is probably more
6966 // relevant: use it instead.
6967 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6968 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6969 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6970 MatchResult = ShortFormNEONMatchResult;
6971 ErrorInfo = ShortFormNEONErrorInfo;
6972 MissingFeatures = ShortFormNEONMissingFeatures;
6973 }
6974 }
6975
6976 switch (MatchResult) {
6977 case Match_Success: {
6978 // Perform range checking and other semantic validations
6979 SmallVector<SMLoc, 8> OperandLocs;
6980 NumOperands = Operands.size();
6981 for (unsigned i = 1; i < NumOperands; ++i)
6982 OperandLocs.push_back(Elt: Operands[i]->getStartLoc());
6983 if (validateInstruction(Inst, IDLoc, Loc&: OperandLocs))
6984 return true;
6985
6986 Inst.setLoc(IDLoc);
6987 Out.emitInstruction(Inst, STI: getSTI());
6988 return false;
6989 }
6990 case Match_MissingFeature: {
6991 assert(MissingFeatures.any() && "Unknown missing feature!");
6992 // Special case the error message for the very common case where only
6993 // a single subtarget feature is missing (neon, e.g.).
6994 std::string Msg = "instruction requires:";
6995 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6996 if (MissingFeatures[i]) {
6997 Msg += " ";
6998 Msg += getSubtargetFeatureName(Val: i);
6999 }
7000 }
7001 return Error(L: IDLoc, Msg);
7002 }
7003 case Match_MnemonicFail:
7004 return showMatchError(Loc: IDLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7005 case Match_InvalidOperand: {
7006 SMLoc ErrorLoc = IDLoc;
7007
7008 if (ErrorInfo != ~0ULL) {
7009 if (ErrorInfo >= Operands.size())
7010 return Error(L: IDLoc, Msg: "too few operands for instruction",
7011 Range: SMRange(IDLoc, getTok().getLoc()));
7012
7013 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7014 if (ErrorLoc == SMLoc())
7015 ErrorLoc = IDLoc;
7016 }
7017 // If the match failed on a suffix token operand, tweak the diagnostic
7018 // accordingly.
7019 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7020 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7021 MatchResult = Match_InvalidSuffix;
7022
7023 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7024 }
7025 case Match_InvalidTiedOperand:
7026 case Match_InvalidMemoryIndexed1:
7027 case Match_InvalidMemoryIndexed2:
7028 case Match_InvalidMemoryIndexed4:
7029 case Match_InvalidMemoryIndexed8:
7030 case Match_InvalidMemoryIndexed16:
7031 case Match_InvalidCondCode:
7032 case Match_AddSubLSLImm3ShiftLarge:
7033 case Match_AddSubRegExtendSmall:
7034 case Match_AddSubRegExtendLarge:
7035 case Match_AddSubSecondSource:
7036 case Match_LogicalSecondSource:
7037 case Match_AddSubRegShift32:
7038 case Match_AddSubRegShift64:
7039 case Match_InvalidMovImm32Shift:
7040 case Match_InvalidMovImm64Shift:
7041 case Match_InvalidFPImm:
7042 case Match_InvalidMemoryWExtend8:
7043 case Match_InvalidMemoryWExtend16:
7044 case Match_InvalidMemoryWExtend32:
7045 case Match_InvalidMemoryWExtend64:
7046 case Match_InvalidMemoryWExtend128:
7047 case Match_InvalidMemoryXExtend8:
7048 case Match_InvalidMemoryXExtend16:
7049 case Match_InvalidMemoryXExtend32:
7050 case Match_InvalidMemoryXExtend64:
7051 case Match_InvalidMemoryXExtend128:
7052 case Match_InvalidMemoryIndexed1SImm4:
7053 case Match_InvalidMemoryIndexed2SImm4:
7054 case Match_InvalidMemoryIndexed3SImm4:
7055 case Match_InvalidMemoryIndexed4SImm4:
7056 case Match_InvalidMemoryIndexed1SImm6:
7057 case Match_InvalidMemoryIndexed16SImm4:
7058 case Match_InvalidMemoryIndexed32SImm4:
7059 case Match_InvalidMemoryIndexed4SImm7:
7060 case Match_InvalidMemoryIndexed8SImm7:
7061 case Match_InvalidMemoryIndexed16SImm7:
7062 case Match_InvalidMemoryIndexed8UImm5:
7063 case Match_InvalidMemoryIndexed8UImm3:
7064 case Match_InvalidMemoryIndexed4UImm5:
7065 case Match_InvalidMemoryIndexed2UImm5:
7066 case Match_InvalidMemoryIndexed1UImm6:
7067 case Match_InvalidMemoryIndexed2UImm6:
7068 case Match_InvalidMemoryIndexed4UImm6:
7069 case Match_InvalidMemoryIndexed8UImm6:
7070 case Match_InvalidMemoryIndexed16UImm6:
7071 case Match_InvalidMemoryIndexedSImm6:
7072 case Match_InvalidMemoryIndexedSImm5:
7073 case Match_InvalidMemoryIndexedSImm8:
7074 case Match_InvalidMemoryIndexedSImm9:
7075 case Match_InvalidMemoryIndexed16SImm9:
7076 case Match_InvalidMemoryIndexed8SImm10:
7077 case Match_InvalidImm0_0:
7078 case Match_InvalidImm0_1:
7079 case Match_InvalidImm0_3:
7080 case Match_InvalidImm0_7:
7081 case Match_InvalidImm0_15:
7082 case Match_InvalidImm0_31:
7083 case Match_InvalidImm0_63:
7084 case Match_InvalidImm0_127:
7085 case Match_InvalidImm0_255:
7086 case Match_InvalidImm0_65535:
7087 case Match_InvalidImm1_8:
7088 case Match_InvalidImm1_16:
7089 case Match_InvalidImm1_32:
7090 case Match_InvalidImm1_64:
7091 case Match_InvalidImmM1_62:
7092 case Match_InvalidMemoryIndexedRange2UImm0:
7093 case Match_InvalidMemoryIndexedRange2UImm1:
7094 case Match_InvalidMemoryIndexedRange2UImm2:
7095 case Match_InvalidMemoryIndexedRange2UImm3:
7096 case Match_InvalidMemoryIndexedRange4UImm0:
7097 case Match_InvalidMemoryIndexedRange4UImm1:
7098 case Match_InvalidMemoryIndexedRange4UImm2:
7099 case Match_InvalidSVEAddSubImm8:
7100 case Match_InvalidSVEAddSubImm16:
7101 case Match_InvalidSVEAddSubImm32:
7102 case Match_InvalidSVEAddSubImm64:
7103 case Match_InvalidSVECpyImm8:
7104 case Match_InvalidSVECpyImm16:
7105 case Match_InvalidSVECpyImm32:
7106 case Match_InvalidSVECpyImm64:
7107 case Match_InvalidIndexRange0_0:
7108 case Match_InvalidIndexRange1_1:
7109 case Match_InvalidIndexRange0_15:
7110 case Match_InvalidIndexRange0_7:
7111 case Match_InvalidIndexRange0_3:
7112 case Match_InvalidIndexRange0_1:
7113 case Match_InvalidSVEIndexRange0_63:
7114 case Match_InvalidSVEIndexRange0_31:
7115 case Match_InvalidSVEIndexRange0_15:
7116 case Match_InvalidSVEIndexRange0_7:
7117 case Match_InvalidSVEIndexRange0_3:
7118 case Match_InvalidLabel:
7119 case Match_InvalidComplexRotationEven:
7120 case Match_InvalidComplexRotationOdd:
7121 case Match_InvalidGPR64shifted8:
7122 case Match_InvalidGPR64shifted16:
7123 case Match_InvalidGPR64shifted32:
7124 case Match_InvalidGPR64shifted64:
7125 case Match_InvalidGPR64shifted128:
7126 case Match_InvalidGPR64NoXZRshifted8:
7127 case Match_InvalidGPR64NoXZRshifted16:
7128 case Match_InvalidGPR64NoXZRshifted32:
7129 case Match_InvalidGPR64NoXZRshifted64:
7130 case Match_InvalidGPR64NoXZRshifted128:
7131 case Match_InvalidZPR32UXTW8:
7132 case Match_InvalidZPR32UXTW16:
7133 case Match_InvalidZPR32UXTW32:
7134 case Match_InvalidZPR32UXTW64:
7135 case Match_InvalidZPR32SXTW8:
7136 case Match_InvalidZPR32SXTW16:
7137 case Match_InvalidZPR32SXTW32:
7138 case Match_InvalidZPR32SXTW64:
7139 case Match_InvalidZPR64UXTW8:
7140 case Match_InvalidZPR64SXTW8:
7141 case Match_InvalidZPR64UXTW16:
7142 case Match_InvalidZPR64SXTW16:
7143 case Match_InvalidZPR64UXTW32:
7144 case Match_InvalidZPR64SXTW32:
7145 case Match_InvalidZPR64UXTW64:
7146 case Match_InvalidZPR64SXTW64:
7147 case Match_InvalidZPR32LSL8:
7148 case Match_InvalidZPR32LSL16:
7149 case Match_InvalidZPR32LSL32:
7150 case Match_InvalidZPR32LSL64:
7151 case Match_InvalidZPR64LSL8:
7152 case Match_InvalidZPR64LSL16:
7153 case Match_InvalidZPR64LSL32:
7154 case Match_InvalidZPR64LSL64:
7155 case Match_InvalidZPR0:
7156 case Match_InvalidZPR8:
7157 case Match_InvalidZPR16:
7158 case Match_InvalidZPR32:
7159 case Match_InvalidZPR64:
7160 case Match_InvalidZPR128:
7161 case Match_InvalidZPR_3b8:
7162 case Match_InvalidZPR_3b16:
7163 case Match_InvalidZPR_3b32:
7164 case Match_InvalidZPR_4b8:
7165 case Match_InvalidZPR_4b16:
7166 case Match_InvalidZPR_4b32:
7167 case Match_InvalidZPR_4b64:
7168 case Match_InvalidSVEPPRorPNRAnyReg:
7169 case Match_InvalidSVEPPRorPNRBReg:
7170 case Match_InvalidSVEPredicateAnyReg:
7171 case Match_InvalidSVEPattern:
7172 case Match_InvalidSVEVecLenSpecifier:
7173 case Match_InvalidSVEPredicateBReg:
7174 case Match_InvalidSVEPredicateHReg:
7175 case Match_InvalidSVEPredicateSReg:
7176 case Match_InvalidSVEPredicateDReg:
7177 case Match_InvalidSVEPredicate3bAnyReg:
7178 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7179 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7180 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7181 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7182 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7183 case Match_InvalidSVEPNPredicateBReg:
7184 case Match_InvalidSVEPNPredicateHReg:
7185 case Match_InvalidSVEPNPredicateSReg:
7186 case Match_InvalidSVEPNPredicateDReg:
7187 case Match_InvalidSVEPredicateListMul2x8:
7188 case Match_InvalidSVEPredicateListMul2x16:
7189 case Match_InvalidSVEPredicateListMul2x32:
7190 case Match_InvalidSVEPredicateListMul2x64:
7191 case Match_InvalidSVEExactFPImmOperandHalfOne:
7192 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7193 case Match_InvalidSVEExactFPImmOperandZeroOne:
7194 case Match_InvalidMatrixTile16:
7195 case Match_InvalidMatrixTile32:
7196 case Match_InvalidMatrixTile64:
7197 case Match_InvalidMatrix:
7198 case Match_InvalidMatrix8:
7199 case Match_InvalidMatrix16:
7200 case Match_InvalidMatrix32:
7201 case Match_InvalidMatrix64:
7202 case Match_InvalidMatrixTileVectorH8:
7203 case Match_InvalidMatrixTileVectorH16:
7204 case Match_InvalidMatrixTileVectorH32:
7205 case Match_InvalidMatrixTileVectorH64:
7206 case Match_InvalidMatrixTileVectorH128:
7207 case Match_InvalidMatrixTileVectorV8:
7208 case Match_InvalidMatrixTileVectorV16:
7209 case Match_InvalidMatrixTileVectorV32:
7210 case Match_InvalidMatrixTileVectorV64:
7211 case Match_InvalidMatrixTileVectorV128:
7212 case Match_InvalidSVCR:
7213 case Match_InvalidMatrixIndexGPR32_12_15:
7214 case Match_InvalidMatrixIndexGPR32_8_11:
7215 case Match_InvalidLookupTable:
7216 case Match_InvalidZPRMul2_Lo8:
7217 case Match_InvalidZPRMul2_Hi8:
7218 case Match_InvalidZPRMul2_Lo16:
7219 case Match_InvalidZPRMul2_Hi16:
7220 case Match_InvalidZPRMul2_Lo32:
7221 case Match_InvalidZPRMul2_Hi32:
7222 case Match_InvalidZPRMul2_Lo64:
7223 case Match_InvalidZPRMul2_Hi64:
7224 case Match_InvalidZPR_K0:
7225 case Match_InvalidSVEVectorList2x8Mul2:
7226 case Match_InvalidSVEVectorList2x16Mul2:
7227 case Match_InvalidSVEVectorList2x32Mul2:
7228 case Match_InvalidSVEVectorList2x64Mul2:
7229 case Match_InvalidSVEVectorList2x128Mul2:
7230 case Match_InvalidSVEVectorList4x8Mul4:
7231 case Match_InvalidSVEVectorList4x16Mul4:
7232 case Match_InvalidSVEVectorList4x32Mul4:
7233 case Match_InvalidSVEVectorList4x64Mul4:
7234 case Match_InvalidSVEVectorList4x128Mul4:
7235 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7236 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7237 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7238 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7239 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7240 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7241 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7242 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7243 case Match_InvalidSVEVectorListStrided2x8:
7244 case Match_InvalidSVEVectorListStrided2x16:
7245 case Match_InvalidSVEVectorListStrided2x32:
7246 case Match_InvalidSVEVectorListStrided2x64:
7247 case Match_InvalidSVEVectorListStrided4x8:
7248 case Match_InvalidSVEVectorListStrided4x16:
7249 case Match_InvalidSVEVectorListStrided4x32:
7250 case Match_InvalidSVEVectorListStrided4x64:
7251 case Match_MSR:
7252 case Match_MRS: {
7253 if (ErrorInfo >= Operands.size())
7254 return Error(L: IDLoc, Msg: "too few operands for instruction", Range: SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7255 // Any time we get here, there's nothing fancy to do. Just get the
7256 // operand SMLoc and display the diagnostic.
7257 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7258 if (ErrorLoc == SMLoc())
7259 ErrorLoc = IDLoc;
7260 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7261 }
7262 }
7263
7264 llvm_unreachable("Implement any new match types added!");
7265}
7266
7267/// ParseDirective parses the arm specific directives
7268bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7269 const MCContext::Environment Format = getContext().getObjectFileType();
7270 bool IsMachO = Format == MCContext::IsMachO;
7271 bool IsCOFF = Format == MCContext::IsCOFF;
7272 bool IsELF = Format == MCContext::IsELF;
7273
7274 auto IDVal = DirectiveID.getIdentifier().lower();
7275 SMLoc Loc = DirectiveID.getLoc();
7276 if (IDVal == ".arch")
7277 parseDirectiveArch(L: Loc);
7278 else if (IDVal == ".cpu")
7279 parseDirectiveCPU(L: Loc);
7280 else if (IDVal == ".tlsdesccall")
7281 parseDirectiveTLSDescCall(L: Loc);
7282 else if (IDVal == ".ltorg" || IDVal == ".pool")
7283 parseDirectiveLtorg(L: Loc);
7284 else if (IDVal == ".unreq")
7285 parseDirectiveUnreq(L: Loc);
7286 else if (IDVal == ".inst")
7287 parseDirectiveInst(L: Loc);
7288 else if (IDVal == ".cfi_negate_ra_state")
7289 parseDirectiveCFINegateRAState();
7290 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7291 parseDirectiveCFINegateRAStateWithPC();
7292 else if (IDVal == ".cfi_b_key_frame")
7293 parseDirectiveCFIBKeyFrame();
7294 else if (IDVal == ".cfi_mte_tagged_frame")
7295 parseDirectiveCFIMTETaggedFrame();
7296 else if (IDVal == ".arch_extension")
7297 parseDirectiveArchExtension(L: Loc);
7298 else if (IDVal == ".variant_pcs")
7299 parseDirectiveVariantPCS(L: Loc);
7300 else if (IsMachO) {
7301 if (IDVal == MCLOHDirectiveName())
7302 parseDirectiveLOH(LOH: IDVal, L: Loc);
7303 else
7304 return true;
7305 } else if (IsCOFF) {
7306 if (IDVal == ".seh_stackalloc")
7307 parseDirectiveSEHAllocStack(L: Loc);
7308 else if (IDVal == ".seh_endprologue")
7309 parseDirectiveSEHPrologEnd(L: Loc);
7310 else if (IDVal == ".seh_save_r19r20_x")
7311 parseDirectiveSEHSaveR19R20X(L: Loc);
7312 else if (IDVal == ".seh_save_fplr")
7313 parseDirectiveSEHSaveFPLR(L: Loc);
7314 else if (IDVal == ".seh_save_fplr_x")
7315 parseDirectiveSEHSaveFPLRX(L: Loc);
7316 else if (IDVal == ".seh_save_reg")
7317 parseDirectiveSEHSaveReg(L: Loc);
7318 else if (IDVal == ".seh_save_reg_x")
7319 parseDirectiveSEHSaveRegX(L: Loc);
7320 else if (IDVal == ".seh_save_regp")
7321 parseDirectiveSEHSaveRegP(L: Loc);
7322 else if (IDVal == ".seh_save_regp_x")
7323 parseDirectiveSEHSaveRegPX(L: Loc);
7324 else if (IDVal == ".seh_save_lrpair")
7325 parseDirectiveSEHSaveLRPair(L: Loc);
7326 else if (IDVal == ".seh_save_freg")
7327 parseDirectiveSEHSaveFReg(L: Loc);
7328 else if (IDVal == ".seh_save_freg_x")
7329 parseDirectiveSEHSaveFRegX(L: Loc);
7330 else if (IDVal == ".seh_save_fregp")
7331 parseDirectiveSEHSaveFRegP(L: Loc);
7332 else if (IDVal == ".seh_save_fregp_x")
7333 parseDirectiveSEHSaveFRegPX(L: Loc);
7334 else if (IDVal == ".seh_set_fp")
7335 parseDirectiveSEHSetFP(L: Loc);
7336 else if (IDVal == ".seh_add_fp")
7337 parseDirectiveSEHAddFP(L: Loc);
7338 else if (IDVal == ".seh_nop")
7339 parseDirectiveSEHNop(L: Loc);
7340 else if (IDVal == ".seh_save_next")
7341 parseDirectiveSEHSaveNext(L: Loc);
7342 else if (IDVal == ".seh_startepilogue")
7343 parseDirectiveSEHEpilogStart(L: Loc);
7344 else if (IDVal == ".seh_endepilogue")
7345 parseDirectiveSEHEpilogEnd(L: Loc);
7346 else if (IDVal == ".seh_trap_frame")
7347 parseDirectiveSEHTrapFrame(L: Loc);
7348 else if (IDVal == ".seh_pushframe")
7349 parseDirectiveSEHMachineFrame(L: Loc);
7350 else if (IDVal == ".seh_context")
7351 parseDirectiveSEHContext(L: Loc);
7352 else if (IDVal == ".seh_ec_context")
7353 parseDirectiveSEHECContext(L: Loc);
7354 else if (IDVal == ".seh_clear_unwound_to_call")
7355 parseDirectiveSEHClearUnwoundToCall(L: Loc);
7356 else if (IDVal == ".seh_pac_sign_lr")
7357 parseDirectiveSEHPACSignLR(L: Loc);
7358 else if (IDVal == ".seh_save_any_reg")
7359 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: false);
7360 else if (IDVal == ".seh_save_any_reg_p")
7361 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: false);
7362 else if (IDVal == ".seh_save_any_reg_x")
7363 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: true);
7364 else if (IDVal == ".seh_save_any_reg_px")
7365 parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: true);
7366 else if (IDVal == ".seh_allocz")
7367 parseDirectiveSEHAllocZ(L: Loc);
7368 else if (IDVal == ".seh_save_zreg")
7369 parseDirectiveSEHSaveZReg(L: Loc);
7370 else if (IDVal == ".seh_save_preg")
7371 parseDirectiveSEHSavePReg(L: Loc);
7372 else
7373 return true;
7374 } else if (IsELF) {
7375 if (IDVal == ".aeabi_subsection")
7376 parseDirectiveAeabiSubSectionHeader(L: Loc);
7377 else if (IDVal == ".aeabi_attribute")
7378 parseDirectiveAeabiAArch64Attr(L: Loc);
7379 else
7380 return true;
7381 } else
7382 return true;
7383 return false;
7384}
7385
7386static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7387 SmallVector<StringRef, 4> &RequestedExtensions) {
7388 const bool NoCrypto = llvm::is_contained(Range&: RequestedExtensions, Element: "nocrypto");
7389 const bool Crypto = llvm::is_contained(Range&: RequestedExtensions, Element: "crypto");
7390
7391 if (!NoCrypto && Crypto) {
7392 // Map 'generic' (and others) to sha2 and aes, because
7393 // that was the traditional meaning of crypto.
7394 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7395 ArchInfo == AArch64::ARMV8_3A) {
7396 RequestedExtensions.push_back(Elt: "sha2");
7397 RequestedExtensions.push_back(Elt: "aes");
7398 }
7399 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7400 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7401 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7402 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7403 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7404 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7405 RequestedExtensions.push_back(Elt: "sm4");
7406 RequestedExtensions.push_back(Elt: "sha3");
7407 RequestedExtensions.push_back(Elt: "sha2");
7408 RequestedExtensions.push_back(Elt: "aes");
7409 }
7410 } else if (NoCrypto) {
7411 // Map 'generic' (and others) to sha2 and aes, because
7412 // that was the traditional meaning of crypto.
7413 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7414 ArchInfo == AArch64::ARMV8_3A) {
7415 RequestedExtensions.push_back(Elt: "nosha2");
7416 RequestedExtensions.push_back(Elt: "noaes");
7417 }
7418 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7419 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7420 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7421 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7422 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7423 ArchInfo == AArch64::ARMV9_4A) {
7424 RequestedExtensions.push_back(Elt: "nosm4");
7425 RequestedExtensions.push_back(Elt: "nosha3");
7426 RequestedExtensions.push_back(Elt: "nosha2");
7427 RequestedExtensions.push_back(Elt: "noaes");
7428 }
7429 }
7430}
7431
7432static SMLoc incrementLoc(SMLoc L, int Offset) {
7433 return SMLoc::getFromPointer(Ptr: L.getPointer() + Offset);
7434}
7435
7436/// parseDirectiveArch
7437/// ::= .arch token
7438bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7439 SMLoc CurLoc = getLoc();
7440
7441 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7442 StringRef Arch, ExtensionString;
7443 std::tie(args&: Arch, args&: ExtensionString) = Name.split(Separator: '+');
7444
7445 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7446 if (!ArchInfo)
7447 return Error(L: CurLoc, Msg: "unknown arch name");
7448
7449 if (parseToken(T: AsmToken::EndOfStatement))
7450 return true;
7451
7452 // Get the architecture and extension features.
7453 std::vector<StringRef> AArch64Features;
7454 AArch64Features.push_back(x: ArchInfo->ArchFeature);
7455 AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: AArch64Features);
7456
7457 MCSubtargetInfo &STI = copySTI();
7458 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7459 STI.setDefaultFeatures(CPU: "generic", /*TuneCPU*/ "generic",
7460 FS: join(Begin: ArchFeatures.begin(), End: ArchFeatures.end(), Separator: ","));
7461
7462 SmallVector<StringRef, 4> RequestedExtensions;
7463 if (!ExtensionString.empty())
7464 ExtensionString.split(A&: RequestedExtensions, Separator: '+');
7465
7466 ExpandCryptoAEK(ArchInfo: *ArchInfo, RequestedExtensions);
7467 CurLoc = incrementLoc(L: CurLoc, Offset: Arch.size());
7468
7469 for (auto Name : RequestedExtensions) {
7470 // Advance source location past '+'.
7471 CurLoc = incrementLoc(L: CurLoc, Offset: 1);
7472
7473 bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
7474
7475 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7476 return Extension.Name == Name;
7477 });
7478
7479 if (It == std::end(arr: ExtensionMap))
7480 return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);
7481
7482 if (EnableFeature)
7483 STI.SetFeatureBitsTransitively(It->Features);
7484 else
7485 STI.ClearFeatureBitsTransitively(FB: It->Features);
7486 CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
7487 }
7488 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7489 setAvailableFeatures(Features);
7490
7491 getTargetStreamer().emitDirectiveArch(Name);
7492 return false;
7493}
7494
7495/// parseDirectiveArchExtension
7496/// ::= .arch_extension [no]feature
7497bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7498 SMLoc ExtLoc = getLoc();
7499
7500 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7501
7502 if (parseEOL())
7503 return true;
7504
7505 bool EnableFeature = true;
7506 StringRef Name = FullName;
7507 if (Name.starts_with_insensitive(Prefix: "no")) {
7508 EnableFeature = false;
7509 Name = Name.substr(Start: 2);
7510 }
7511
7512 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7513 return Extension.Name == Name;
7514 });
7515
7516 if (It == std::end(arr: ExtensionMap))
7517 return Error(L: ExtLoc, Msg: "unsupported architectural extension: " + Name);
7518
7519 MCSubtargetInfo &STI = copySTI();
7520 if (EnableFeature)
7521 STI.SetFeatureBitsTransitively(It->Features);
7522 else
7523 STI.ClearFeatureBitsTransitively(FB: It->Features);
7524 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7525 setAvailableFeatures(Features);
7526
7527 getTargetStreamer().emitDirectiveArchExtension(Name: FullName);
7528 return false;
7529}
7530
7531/// parseDirectiveCPU
7532/// ::= .cpu id
7533bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7534 SMLoc CurLoc = getLoc();
7535
7536 StringRef CPU, ExtensionString;
7537 std::tie(args&: CPU, args&: ExtensionString) =
7538 getParser().parseStringToEndOfStatement().trim().split(Separator: '+');
7539
7540 if (parseToken(T: AsmToken::EndOfStatement))
7541 return true;
7542
7543 SmallVector<StringRef, 4> RequestedExtensions;
7544 if (!ExtensionString.empty())
7545 ExtensionString.split(A&: RequestedExtensions, Separator: '+');
7546
7547 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7548 if (!CpuArch) {
7549 Error(L: CurLoc, Msg: "unknown CPU name");
7550 return false;
7551 }
7552 ExpandCryptoAEK(ArchInfo: *CpuArch, RequestedExtensions);
7553
7554 MCSubtargetInfo &STI = copySTI();
7555 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, FS: "");
7556 CurLoc = incrementLoc(L: CurLoc, Offset: CPU.size());
7557
7558 for (auto Name : RequestedExtensions) {
7559 // Advance source location past '+'.
7560 CurLoc = incrementLoc(L: CurLoc, Offset: 1);
7561
7562 bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
7563
7564 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7565 return Extension.Name == Name;
7566 });
7567
7568 if (It == std::end(arr: ExtensionMap))
7569 return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);
7570
7571 if (EnableFeature)
7572 STI.SetFeatureBitsTransitively(It->Features);
7573 else
7574 STI.ClearFeatureBitsTransitively(FB: It->Features);
7575 CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
7576 }
7577 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7578 setAvailableFeatures(Features);
7579 return false;
7580}
7581
7582/// parseDirectiveInst
7583/// ::= .inst opcode [, ...]
7584bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7585 if (getLexer().is(K: AsmToken::EndOfStatement))
7586 return Error(L: Loc, Msg: "expected expression following '.inst' directive");
7587
7588 auto parseOp = [&]() -> bool {
7589 SMLoc L = getLoc();
7590 const MCExpr *Expr = nullptr;
7591 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
7592 return true;
7593 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
7594 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
7595 return true;
7596 getTargetStreamer().emitInst(Inst: Value->getValue());
7597 return false;
7598 };
7599
7600 return parseMany(parseOne: parseOp);
7601}
7602
7603// parseDirectiveTLSDescCall:
7604// ::= .tlsdesccall symbol
7605bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7606 StringRef Name;
7607 if (check(P: getParser().parseIdentifier(Res&: Name), Loc: L, Msg: "expected symbol") ||
7608 parseToken(T: AsmToken::EndOfStatement))
7609 return true;
7610
7611 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7612 const MCExpr *Expr = MCSymbolRefExpr::create(Symbol: Sym, Ctx&: getContext());
7613 Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_TLSDESC, Ctx&: getContext());
7614
7615 MCInst Inst;
7616 Inst.setOpcode(AArch64::TLSDESCCALL);
7617 Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
7618
7619 getParser().getStreamer().emitInstruction(Inst, STI: getSTI());
7620 return false;
7621}
7622
7623/// ::= .loh <lohName | lohId> label1, ..., labelN
7624/// The number of arguments depends on the loh identifier.
7625bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7626 MCLOHType Kind;
7627 if (getTok().isNot(K: AsmToken::Identifier)) {
7628 if (getTok().isNot(K: AsmToken::Integer))
7629 return TokError(Msg: "expected an identifier or a number in directive");
7630 // We successfully get a numeric value for the identifier.
7631 // Check if it is valid.
7632 int64_t Id = getTok().getIntVal();
7633 if (Id <= -1U && !isValidMCLOHType(Kind: Id))
7634 return TokError(Msg: "invalid numeric identifier in directive");
7635 Kind = (MCLOHType)Id;
7636 } else {
7637 StringRef Name = getTok().getIdentifier();
7638 // We successfully parse an identifier.
7639 // Check if it is a recognized one.
7640 int Id = MCLOHNameToId(Name);
7641
7642 if (Id == -1)
7643 return TokError(Msg: "invalid identifier in directive");
7644 Kind = (MCLOHType)Id;
7645 }
7646 // Consume the identifier.
7647 Lex();
7648 // Get the number of arguments of this LOH.
7649 int NbArgs = MCLOHIdToNbArgs(Kind);
7650
7651 assert(NbArgs != -1 && "Invalid number of arguments");
7652
7653 SmallVector<MCSymbol *, 3> Args;
7654 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7655 StringRef Name;
7656 if (getParser().parseIdentifier(Res&: Name))
7657 return TokError(Msg: "expected identifier in directive");
7658 Args.push_back(Elt: getContext().getOrCreateSymbol(Name));
7659
7660 if (Idx + 1 == NbArgs)
7661 break;
7662 if (parseComma())
7663 return true;
7664 }
7665 if (parseEOL())
7666 return true;
7667
7668 getStreamer().emitLOHDirective(Kind, Args);
7669 return false;
7670}
7671
7672/// parseDirectiveLtorg
7673/// ::= .ltorg | .pool
7674bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7675 if (parseEOL())
7676 return true;
7677 getTargetStreamer().emitCurrentConstantPool();
7678 return false;
7679}
7680
7681/// parseDirectiveReq
7682/// ::= name .req registername
7683bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7684 Lex(); // Eat the '.req' token.
7685 SMLoc SRegLoc = getLoc();
7686 RegKind RegisterKind = RegKind::Scalar;
7687 MCRegister RegNum;
7688 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7689
7690 if (!ParseRes.isSuccess()) {
7691 StringRef Kind;
7692 RegisterKind = RegKind::NeonVector;
7693 ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::NeonVector);
7694
7695 if (ParseRes.isFailure())
7696 return true;
7697
7698 if (ParseRes.isSuccess() && !Kind.empty())
7699 return Error(L: SRegLoc, Msg: "vector register without type specifier expected");
7700 }
7701
7702 if (!ParseRes.isSuccess()) {
7703 StringRef Kind;
7704 RegisterKind = RegKind::SVEDataVector;
7705 ParseRes =
7706 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
7707
7708 if (ParseRes.isFailure())
7709 return true;
7710
7711 if (ParseRes.isSuccess() && !Kind.empty())
7712 return Error(L: SRegLoc,
7713 Msg: "sve vector register without type specifier expected");
7714 }
7715
7716 if (!ParseRes.isSuccess()) {
7717 StringRef Kind;
7718 RegisterKind = RegKind::SVEPredicateVector;
7719 ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);
7720
7721 if (ParseRes.isFailure())
7722 return true;
7723
7724 if (ParseRes.isSuccess() && !Kind.empty())
7725 return Error(L: SRegLoc,
7726 Msg: "sve predicate register without type specifier expected");
7727 }
7728
7729 if (!ParseRes.isSuccess())
7730 return Error(L: SRegLoc, Msg: "register name or alias expected");
7731
7732 // Shouldn't be anything else.
7733 if (parseEOL())
7734 return true;
7735
7736 auto pair = std::make_pair(x&: RegisterKind, y&: RegNum);
7737 if (RegisterReqs.insert(KV: std::make_pair(x&: Name, y&: pair)).first->second != pair)
7738 Warning(L, Msg: "ignoring redefinition of register alias '" + Name + "'");
7739
7740 return false;
7741}
7742
7743/// parseDirectiveUneq
7744/// ::= .unreq registername
7745bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7746 if (getTok().isNot(K: AsmToken::Identifier))
7747 return TokError(Msg: "unexpected input in .unreq directive.");
7748 RegisterReqs.erase(Key: getTok().getIdentifier().lower());
7749 Lex(); // Eat the identifier.
7750 return parseToken(T: AsmToken::EndOfStatement);
7751}
7752
7753bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7754 if (parseEOL())
7755 return true;
7756 getStreamer().emitCFINegateRAState();
7757 return false;
7758}
7759
7760bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7761 if (parseEOL())
7762 return true;
7763 getStreamer().emitCFINegateRAStateWithPC();
7764 return false;
7765}
7766
7767/// parseDirectiveCFIBKeyFrame
7768/// ::= .cfi_b_key
7769bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7770 if (parseEOL())
7771 return true;
7772 getStreamer().emitCFIBKeyFrame();
7773 return false;
7774}
7775
7776/// parseDirectiveCFIMTETaggedFrame
7777/// ::= .cfi_mte_tagged_frame
7778bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7779 if (parseEOL())
7780 return true;
7781 getStreamer().emitCFIMTETaggedFrame();
7782 return false;
7783}
7784
7785/// parseDirectiveVariantPCS
7786/// ::= .variant_pcs symbolname
7787bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7788 StringRef Name;
7789 if (getParser().parseIdentifier(Res&: Name))
7790 return TokError(Msg: "expected symbol name");
7791 if (parseEOL())
7792 return true;
7793 getTargetStreamer().emitDirectiveVariantPCS(
7794 Symbol: getContext().getOrCreateSymbol(Name));
7795 return false;
7796}
7797
7798/// parseDirectiveSEHAllocStack
7799/// ::= .seh_stackalloc
7800bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7801 int64_t Size;
7802 if (parseImmExpr(Out&: Size))
7803 return true;
7804 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7805 return false;
7806}
7807
7808/// parseDirectiveSEHPrologEnd
7809/// ::= .seh_endprologue
7810bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7811 getTargetStreamer().emitARM64WinCFIPrologEnd();
7812 return false;
7813}
7814
7815/// parseDirectiveSEHSaveR19R20X
7816/// ::= .seh_save_r19r20_x
7817bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7818 int64_t Offset;
7819 if (parseImmExpr(Out&: Offset))
7820 return true;
7821 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7822 return false;
7823}
7824
7825/// parseDirectiveSEHSaveFPLR
7826/// ::= .seh_save_fplr
7827bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7828 int64_t Offset;
7829 if (parseImmExpr(Out&: Offset))
7830 return true;
7831 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7832 return false;
7833}
7834
7835/// parseDirectiveSEHSaveFPLRX
7836/// ::= .seh_save_fplr_x
7837bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7838 int64_t Offset;
7839 if (parseImmExpr(Out&: Offset))
7840 return true;
7841 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7842 return false;
7843}
7844
7845/// parseDirectiveSEHSaveReg
7846/// ::= .seh_save_reg
7847bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7848 unsigned Reg;
7849 int64_t Offset;
7850 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7851 parseComma() || parseImmExpr(Out&: Offset))
7852 return true;
7853 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7854 return false;
7855}
7856
7857/// parseDirectiveSEHSaveRegX
7858/// ::= .seh_save_reg_x
7859bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7860 unsigned Reg;
7861 int64_t Offset;
7862 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7863 parseComma() || parseImmExpr(Out&: Offset))
7864 return true;
7865 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7866 return false;
7867}
7868
7869/// parseDirectiveSEHSaveRegP
7870/// ::= .seh_save_regp
7871bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7872 unsigned Reg;
7873 int64_t Offset;
7874 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7875 parseComma() || parseImmExpr(Out&: Offset))
7876 return true;
7877 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7878 return false;
7879}
7880
7881/// parseDirectiveSEHSaveRegPX
7882/// ::= .seh_save_regp_x
7883bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7884 unsigned Reg;
7885 int64_t Offset;
7886 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7887 parseComma() || parseImmExpr(Out&: Offset))
7888 return true;
7889 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7890 return false;
7891}
7892
7893/// parseDirectiveSEHSaveLRPair
7894/// ::= .seh_save_lrpair
7895bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7896 unsigned Reg;
7897 int64_t Offset;
7898 L = getLoc();
7899 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7900 parseComma() || parseImmExpr(Out&: Offset))
7901 return true;
7902 if (check(P: ((Reg - 19) % 2 != 0), Loc: L,
7903 Msg: "expected register with even offset from x19"))
7904 return true;
7905 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7906 return false;
7907}
7908
7909/// parseDirectiveSEHSaveFReg
7910/// ::= .seh_save_freg
7911bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7912 unsigned Reg;
7913 int64_t Offset;
7914 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7915 parseComma() || parseImmExpr(Out&: Offset))
7916 return true;
7917 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7918 return false;
7919}
7920
7921/// parseDirectiveSEHSaveFRegX
7922/// ::= .seh_save_freg_x
7923bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7924 unsigned Reg;
7925 int64_t Offset;
7926 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7927 parseComma() || parseImmExpr(Out&: Offset))
7928 return true;
7929 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7930 return false;
7931}
7932
7933/// parseDirectiveSEHSaveFRegP
7934/// ::= .seh_save_fregp
7935bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7936 unsigned Reg;
7937 int64_t Offset;
7938 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7939 parseComma() || parseImmExpr(Out&: Offset))
7940 return true;
7941 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7942 return false;
7943}
7944
7945/// parseDirectiveSEHSaveFRegPX
7946/// ::= .seh_save_fregp_x
7947bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7948 unsigned Reg;
7949 int64_t Offset;
7950 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7951 parseComma() || parseImmExpr(Out&: Offset))
7952 return true;
7953 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7954 return false;
7955}
7956
7957/// parseDirectiveSEHSetFP
7958/// ::= .seh_set_fp
7959bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7960 getTargetStreamer().emitARM64WinCFISetFP();
7961 return false;
7962}
7963
7964/// parseDirectiveSEHAddFP
7965/// ::= .seh_add_fp
7966bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7967 int64_t Size;
7968 if (parseImmExpr(Out&: Size))
7969 return true;
7970 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7971 return false;
7972}
7973
7974/// parseDirectiveSEHNop
7975/// ::= .seh_nop
7976bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7977 getTargetStreamer().emitARM64WinCFINop();
7978 return false;
7979}
7980
7981/// parseDirectiveSEHSaveNext
7982/// ::= .seh_save_next
7983bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7984 getTargetStreamer().emitARM64WinCFISaveNext();
7985 return false;
7986}
7987
7988/// parseDirectiveSEHEpilogStart
7989/// ::= .seh_startepilogue
7990bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7991 getTargetStreamer().emitARM64WinCFIEpilogStart();
7992 return false;
7993}
7994
7995/// parseDirectiveSEHEpilogEnd
7996/// ::= .seh_endepilogue
7997bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7998 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7999 return false;
8000}
8001
8002/// parseDirectiveSEHTrapFrame
8003/// ::= .seh_trap_frame
8004bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
8005 getTargetStreamer().emitARM64WinCFITrapFrame();
8006 return false;
8007}
8008
8009/// parseDirectiveSEHMachineFrame
8010/// ::= .seh_pushframe
8011bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
8012 getTargetStreamer().emitARM64WinCFIMachineFrame();
8013 return false;
8014}
8015
8016/// parseDirectiveSEHContext
8017/// ::= .seh_context
8018bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
8019 getTargetStreamer().emitARM64WinCFIContext();
8020 return false;
8021}
8022
8023/// parseDirectiveSEHECContext
8024/// ::= .seh_ec_context
8025bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
8026 getTargetStreamer().emitARM64WinCFIECContext();
8027 return false;
8028}
8029
8030/// parseDirectiveSEHClearUnwoundToCall
8031/// ::= .seh_clear_unwound_to_call
8032bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
8033 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
8034 return false;
8035}
8036
8037/// parseDirectiveSEHPACSignLR
8038/// ::= .seh_pac_sign_lr
8039bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
8040 getTargetStreamer().emitARM64WinCFIPACSignLR();
8041 return false;
8042}
8043
8044/// parseDirectiveSEHSaveAnyReg
8045/// ::= .seh_save_any_reg
8046/// ::= .seh_save_any_reg_p
8047/// ::= .seh_save_any_reg_x
8048/// ::= .seh_save_any_reg_px
8049bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
8050 bool Writeback) {
8051 MCRegister Reg;
8052 SMLoc Start, End;
8053 int64_t Offset;
8054 if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register") ||
8055 parseComma() || parseImmExpr(Out&: Offset))
8056 return true;
8057
8058 if (Reg == AArch64::FP || Reg == AArch64::LR ||
8059 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
8060 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
8061 return Error(L, Msg: "invalid save_any_reg offset");
8062 unsigned EncodedReg;
8063 if (Reg == AArch64::FP)
8064 EncodedReg = 29;
8065 else if (Reg == AArch64::LR)
8066 EncodedReg = 30;
8067 else
8068 EncodedReg = Reg - AArch64::X0;
8069 if (Paired) {
8070 if (Reg == AArch64::LR)
8071 return Error(L: Start, Msg: "lr cannot be paired with another register");
8072 if (Writeback)
8073 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(Reg: EncodedReg, Offset);
8074 else
8075 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(Reg: EncodedReg, Offset);
8076 } else {
8077 if (Writeback)
8078 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(Reg: EncodedReg, Offset);
8079 else
8080 getTargetStreamer().emitARM64WinCFISaveAnyRegI(Reg: EncodedReg, Offset);
8081 }
8082 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
8083 unsigned EncodedReg = Reg - AArch64::D0;
8084 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
8085 return Error(L, Msg: "invalid save_any_reg offset");
8086 if (Paired) {
8087 if (Reg == AArch64::D31)
8088 return Error(L: Start, Msg: "d31 cannot be paired with another register");
8089 if (Writeback)
8090 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(Reg: EncodedReg, Offset);
8091 else
8092 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(Reg: EncodedReg, Offset);
8093 } else {
8094 if (Writeback)
8095 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(Reg: EncodedReg, Offset);
8096 else
8097 getTargetStreamer().emitARM64WinCFISaveAnyRegD(Reg: EncodedReg, Offset);
8098 }
8099 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
8100 unsigned EncodedReg = Reg - AArch64::Q0;
8101 if (Offset < 0 || Offset % 16)
8102 return Error(L, Msg: "invalid save_any_reg offset");
8103 if (Paired) {
8104 if (Reg == AArch64::Q31)
8105 return Error(L: Start, Msg: "q31 cannot be paired with another register");
8106 if (Writeback)
8107 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(Reg: EncodedReg, Offset);
8108 else
8109 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(Reg: EncodedReg, Offset);
8110 } else {
8111 if (Writeback)
8112 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(Reg: EncodedReg, Offset);
8113 else
8114 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(Reg: EncodedReg, Offset);
8115 }
8116 } else {
8117 return Error(L: Start, Msg: "save_any_reg register must be x, q or d register");
8118 }
8119 return false;
8120}
8121
8122/// parseDirectiveAllocZ
8123/// ::= .seh_allocz
8124bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8125 int64_t Offset;
8126 if (parseImmExpr(Out&: Offset))
8127 return true;
8128 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8129 return false;
8130}
8131
8132/// parseDirectiveSEHSaveZReg
8133/// ::= .seh_save_zreg
8134bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8135 MCRegister RegNum;
8136 StringRef Kind;
8137 int64_t Offset;
8138 ParseStatus Res =
8139 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
8140 if (!Res.isSuccess())
8141 return true;
8142 if (check(P: RegNum < AArch64::Z8 || RegNum > AArch64::Z23, Loc: L,
8143 Msg: "expected register in range z8 to z23"))
8144 return true;
8145 if (parseComma() || parseImmExpr(Out&: Offset))
8146 return true;
8147 getTargetStreamer().emitARM64WinCFISaveZReg(Reg: RegNum - AArch64::Z0, Offset);
8148 return false;
8149}
8150
8151/// parseDirectiveSEHSavePReg
8152/// ::= .seh_save_preg
8153bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8154 MCRegister RegNum;
8155 StringRef Kind;
8156 int64_t Offset;
8157 ParseStatus Res =
8158 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);
8159 if (!Res.isSuccess())
8160 return true;
8161 if (check(P: RegNum < AArch64::P4 || RegNum > AArch64::P15, Loc: L,
8162 Msg: "expected register in range p4 to p15"))
8163 return true;
8164 if (parseComma() || parseImmExpr(Out&: Offset))
8165 return true;
8166 getTargetStreamer().emitARM64WinCFISavePReg(Reg: RegNum - AArch64::P0, Offset);
8167 return false;
8168}
8169
8170bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
8171 // Handle parsing of .aeabi_subsection directives
8172 // - On first declaration of a subsection, expect exactly three identifiers
8173 // after `.aeabi_subsection`: the subsection name and two parameters.
8174 // - When switching to an existing subsection, it is valid to provide only
8175 // the subsection name, or the name together with the two parameters.
8176 MCAsmParser &Parser = getParser();
8177
8178 // Consume the name (subsection name)
8179 StringRef SubsectionName;
8180 AArch64BuildAttributes::VendorID SubsectionNameID;
8181 if (Parser.getTok().is(K: AsmToken::Identifier)) {
8182 SubsectionName = Parser.getTok().getIdentifier();
8183 SubsectionNameID = AArch64BuildAttributes::getVendorID(Vendor: SubsectionName);
8184 } else {
8185 Error(L: Parser.getTok().getLoc(), Msg: "subsection name not found");
8186 return true;
8187 }
8188 Parser.Lex();
8189
8190 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
8191 getTargetStreamer().getAttributesSubsectionByName(Name: SubsectionName);
8192 // Check whether only the subsection name was provided.
8193 // If so, the user is trying to switch to a subsection that should have been
8194 // declared before.
8195 if (Parser.getTok().is(K: llvm::AsmToken::EndOfStatement)) {
8196 if (SubsectionExists) {
8197 getTargetStreamer().emitAttributesSubsection(
8198 VendorName: SubsectionName,
8199 IsOptional: static_cast<AArch64BuildAttributes::SubsectionOptional>(
8200 SubsectionExists->IsOptional),
8201 ParameterType: static_cast<AArch64BuildAttributes::SubsectionType>(
8202 SubsectionExists->ParameterType));
8203 return false;
8204 }
8205 // If subsection does not exists, report error.
8206 else {
8207 Error(L: Parser.getTok().getLoc(),
8208 Msg: "Could not switch to subsection '" + SubsectionName +
8209 "' using subsection name, subsection has not been defined");
8210 return true;
8211 }
8212 }
8213
8214 // Otherwise, expecting 2 more parameters: consume a comma
8215 // parseComma() return *false* on success, and call Lex(), no need to call
8216 // Lex() again.
8217 if (Parser.parseComma()) {
8218 return true;
8219 }
8220
8221 // Consume the first parameter (optionality parameter)
8222 AArch64BuildAttributes::SubsectionOptional IsOptional;
8223 // options: optional/required
8224 if (Parser.getTok().is(K: AsmToken::Identifier)) {
8225 StringRef Optionality = Parser.getTok().getIdentifier();
8226 IsOptional = AArch64BuildAttributes::getOptionalID(Optional: Optionality);
8227 if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
8228 Error(L: Parser.getTok().getLoc(),
8229 Msg: AArch64BuildAttributes::getSubsectionOptionalUnknownError());
8230 return true;
8231 }
8232 if (SubsectionExists) {
8233 if (IsOptional != SubsectionExists->IsOptional) {
8234 Error(L: Parser.getTok().getLoc(),
8235 Msg: "optionality mismatch! subsection '" + SubsectionName +
8236 "' already exists with optionality defined as '" +
8237 AArch64BuildAttributes::getOptionalStr(
8238 Optional: SubsectionExists->IsOptional) +
8239 "' and not '" +
8240 AArch64BuildAttributes::getOptionalStr(Optional: IsOptional) + "'");
8241 return true;
8242 }
8243 }
8244 } else {
8245 Error(L: Parser.getTok().getLoc(),
8246 Msg: "optionality parameter not found, expected required|optional");
8247 return true;
8248 }
8249 // Check for possible IsOptional unaccepted values for known subsections
8250 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
8251 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
8252 Error(L: Parser.getTok().getLoc(),
8253 Msg: "aeabi_feature_and_bits must be marked as optional");
8254 return true;
8255 }
8256 }
8257 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8258 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
8259 Error(L: Parser.getTok().getLoc(),
8260 Msg: "aeabi_pauthabi must be marked as required");
8261 return true;
8262 }
8263 }
8264 Parser.Lex();
8265 // consume a comma
8266 if (Parser.parseComma()) {
8267 return true;
8268 }
8269
8270 // Consume the second parameter (type parameter)
8271 AArch64BuildAttributes::SubsectionType Type;
8272 if (Parser.getTok().is(K: AsmToken::Identifier)) {
8273 StringRef Name = Parser.getTok().getIdentifier();
8274 Type = AArch64BuildAttributes::getTypeID(Type: Name);
8275 if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
8276 Error(L: Parser.getTok().getLoc(),
8277 Msg: AArch64BuildAttributes::getSubsectionTypeUnknownError());
8278 return true;
8279 }
8280 if (SubsectionExists) {
8281 if (Type != SubsectionExists->ParameterType) {
8282 Error(L: Parser.getTok().getLoc(),
8283 Msg: "type mismatch! subsection '" + SubsectionName +
8284 "' already exists with type defined as '" +
8285 AArch64BuildAttributes::getTypeStr(
8286 Type: SubsectionExists->ParameterType) +
8287 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8288 "'");
8289 return true;
8290 }
8291 }
8292 } else {
8293 Error(L: Parser.getTok().getLoc(),
8294 Msg: "type parameter not found, expected uleb128|ntbs");
8295 return true;
8296 }
8297 // Check for possible unaccepted 'type' values for known subsections
8298 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8299 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8300 if (AArch64BuildAttributes::NTBS == Type) {
8301 Error(L: Parser.getTok().getLoc(),
8302 Msg: SubsectionName + " must be marked as ULEB128");
8303 return true;
8304 }
8305 }
8306 Parser.Lex();
8307
8308 // Parsing finished, check for trailing tokens.
8309 if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
8310 Error(L: Parser.getTok().getLoc(), Msg: "unexpected token for AArch64 build "
8311 "attributes subsection header directive");
8312 return true;
8313 }
8314
8315 getTargetStreamer().emitAttributesSubsection(VendorName: SubsectionName, IsOptional, ParameterType: Type);
8316
8317 return false;
8318}
8319
8320bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8321 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8322 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8323 // separated by a comma.
8324 MCAsmParser &Parser = getParser();
8325
8326 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8327 getTargetStreamer().getActiveAttributesSubsection();
8328 if (nullptr == ActiveSubsection) {
8329 Error(L: Parser.getTok().getLoc(),
8330 Msg: "no active subsection, build attribute can not be added");
8331 return true;
8332 }
8333 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8334 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8335
8336 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8337 if (AArch64BuildAttributes::getVendorName(
8338 Vendor: AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8339 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8340 if (AArch64BuildAttributes::getVendorName(
8341 Vendor: AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8342 ActiveSubsectionName)
8343 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8344
8345 StringRef TagStr = "";
8346 unsigned Tag;
8347 if (Parser.getTok().is(K: AsmToken::Integer)) {
8348 Tag = getTok().getIntVal();
8349 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8350 TagStr = Parser.getTok().getIdentifier();
8351 switch (ActiveSubsectionID) {
8352 case AArch64BuildAttributes::VENDOR_UNKNOWN:
8353 // Tag was provided as an unrecognized string instead of an unsigned
8354 // integer
8355 Error(L: Parser.getTok().getLoc(), Msg: "unrecognized Tag: '" + TagStr +
8356 "' \nExcept for public subsections, "
8357 "tags have to be an unsigned int.");
8358 return true;
8359 break;
8360 case AArch64BuildAttributes::AEABI_PAUTHABI:
8361 Tag = AArch64BuildAttributes::getPauthABITagsID(PauthABITag: TagStr);
8362 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8363 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8364 TagStr + "' for subsection '" +
8365 ActiveSubsectionName + "'");
8366 return true;
8367 }
8368 break;
8369 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8370 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(FeatureAndBitsTag: TagStr);
8371 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8372 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8373 TagStr + "' for subsection '" +
8374 ActiveSubsectionName + "'");
8375 return true;
8376 }
8377 break;
8378 }
8379 } else {
8380 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes tag not found");
8381 return true;
8382 }
8383 Parser.Lex();
8384 // consume a comma
8385 // parseComma() return *false* on success, and call Lex(), no need to call
8386 // Lex() again.
8387 if (Parser.parseComma()) {
8388 return true;
8389 }
8390
8391 // Consume the second parameter (attribute value)
8392 unsigned ValueInt = unsigned(-1);
8393 std::string ValueStr = "";
8394 if (Parser.getTok().is(K: AsmToken::Integer)) {
8395 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8396 Error(
8397 L: Parser.getTok().getLoc(),
8398 Msg: "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8399 return true;
8400 }
8401 ValueInt = getTok().getIntVal();
8402 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8403 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8404 Error(
8405 L: Parser.getTok().getLoc(),
8406 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8407 return true;
8408 }
8409 ValueStr = Parser.getTok().getIdentifier();
8410 } else if (Parser.getTok().is(K: AsmToken::String)) {
8411 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8412 Error(
8413 L: Parser.getTok().getLoc(),
8414 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8415 return true;
8416 }
8417 ValueStr = Parser.getTok().getString();
8418 } else {
8419 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes value not found");
8420 return true;
8421 }
8422 // Check for possible unaccepted values for known tags
8423 // (AEABI_FEATURE_AND_BITS)
8424 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8425 if (0 != ValueInt && 1 != ValueInt) {
8426 Error(L: Parser.getTok().getLoc(),
8427 Msg: "unknown AArch64 build attributes Value for Tag '" + TagStr +
8428 "' options are 0|1");
8429 return true;
8430 }
8431 }
8432 Parser.Lex();
8433
8434 // Parsing finished. Check for trailing tokens.
8435 if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
8436 Error(L: Parser.getTok().getLoc(),
8437 Msg: "unexpected token for AArch64 build attributes tag and value "
8438 "attribute directive");
8439 return true;
8440 }
8441
8442 if (unsigned(-1) != ValueInt) {
8443 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: ValueInt, String: "");
8444 }
8445 if ("" != ValueStr) {
8446 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: unsigned(-1),
8447 String: ValueStr);
8448 }
8449 return false;
8450}
8451
8452bool AArch64AsmParser::parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E) {
8453 SMLoc Loc = getLoc();
8454 if (getLexer().getKind() != AsmToken::Identifier)
8455 return TokError(Msg: "expected '%' relocation specifier");
8456 StringRef Identifier = getParser().getTok().getIdentifier();
8457 auto Spec = AArch64::parsePercentSpecifierName(Identifier);
8458 if (!Spec)
8459 return TokError(Msg: "invalid relocation specifier");
8460
8461 getParser().Lex(); // Eat the identifier
8462 if (parseToken(T: AsmToken::LParen, Msg: "expected '('"))
8463 return true;
8464
8465 const MCExpr *SubExpr;
8466 if (getParser().parseParenExpression(Res&: SubExpr, EndLoc&: E))
8467 return true;
8468
8469 Res = MCSpecifierExpr::create(Expr: SubExpr, S: Spec, Ctx&: getContext(), Loc);
8470 return false;
8471}
8472
8473bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8474 SMLoc EndLoc;
8475 if (parseOptionalToken(T: AsmToken::Percent))
8476 return parseExprWithSpecifier(Res, E&: EndLoc);
8477
8478 if (getParser().parseExpression(Res))
8479 return true;
8480 MCAsmParser &Parser = getParser();
8481 if (!parseOptionalToken(T: AsmToken::At))
8482 return false;
8483 if (getLexer().getKind() != AsmToken::Identifier)
8484 return Error(L: getLoc(), Msg: "expected relocation specifier");
8485
8486 std::string Identifier = Parser.getTok().getIdentifier().lower();
8487 SMLoc Loc = getLoc();
8488 Lex();
8489 if (Identifier == "auth")
8490 return parseAuthExpr(Res, EndLoc);
8491
8492 auto Spec = AArch64::S_None;
8493 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8494 if (Identifier == "got")
8495 Spec = AArch64::S_MACHO_GOT;
8496 }
8497 if (Spec == AArch64::S_None)
8498 return Error(L: Loc, Msg: "invalid relocation specifier");
8499 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val: Res))
8500 Res = MCSymbolRefExpr::create(Symbol: &SRE->getSymbol(), specifier: Spec, Ctx&: getContext(),
8501 Loc: SRE->getLoc());
8502 else
8503 return Error(L: Loc, Msg: "@ specifier only allowed after a symbol");
8504
8505 for (;;) {
8506 std::optional<MCBinaryExpr::Opcode> Opcode;
8507 if (parseOptionalToken(T: AsmToken::Plus))
8508 Opcode = MCBinaryExpr::Add;
8509 else if (parseOptionalToken(T: AsmToken::Minus))
8510 Opcode = MCBinaryExpr::Sub;
8511 else
8512 break;
8513 const MCExpr *Term;
8514 if (getParser().parsePrimaryExpr(Res&: Term, EndLoc, TypeInfo: nullptr))
8515 return true;
8516 Res = MCBinaryExpr::create(Op: *Opcode, LHS: Res, RHS: Term, Ctx&: getContext(), Loc: Res->getLoc());
8517 }
8518 return false;
8519}
8520
8521/// parseAuthExpr
8522/// ::= _sym@AUTH(ib,123[,addr])
8523/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8524/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8525bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8526 MCAsmParser &Parser = getParser();
8527 MCContext &Ctx = getContext();
8528 AsmToken Tok = Parser.getTok();
8529
8530 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8531 if (parseToken(T: AsmToken::LParen, Msg: "expected '('"))
8532 return true;
8533
8534 if (Parser.getTok().isNot(K: AsmToken::Identifier))
8535 return TokError(Msg: "expected key name");
8536
8537 StringRef KeyStr = Parser.getTok().getIdentifier();
8538 auto KeyIDOrNone = AArch64StringToPACKeyID(Name: KeyStr);
8539 if (!KeyIDOrNone)
8540 return TokError(Msg: "invalid key '" + KeyStr + "'");
8541 Parser.Lex();
8542
8543 if (parseToken(T: AsmToken::Comma, Msg: "expected ','"))
8544 return true;
8545
8546 if (Parser.getTok().isNot(K: AsmToken::Integer))
8547 return TokError(Msg: "expected integer discriminator");
8548 int64_t Discriminator = Parser.getTok().getIntVal();
8549
8550 if (!isUInt<16>(x: Discriminator))
8551 return TokError(Msg: "integer discriminator " + Twine(Discriminator) +
8552 " out of range [0, 0xFFFF]");
8553 Parser.Lex();
8554
8555 bool UseAddressDiversity = false;
8556 if (Parser.getTok().is(K: AsmToken::Comma)) {
8557 Parser.Lex();
8558 if (Parser.getTok().isNot(K: AsmToken::Identifier) ||
8559 Parser.getTok().getIdentifier() != "addr")
8560 return TokError(Msg: "expected 'addr'");
8561 UseAddressDiversity = true;
8562 Parser.Lex();
8563 }
8564
8565 EndLoc = Parser.getTok().getEndLoc();
8566 if (parseToken(T: AsmToken::RParen, Msg: "expected ')'"))
8567 return true;
8568
8569 Res = AArch64AuthMCExpr::create(Expr: Res, Discriminator, Key: *KeyIDOrNone,
8570 HasAddressDiversity: UseAddressDiversity, Ctx, Loc: Res->getLoc());
8571 return false;
8572}
8573
8574bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8575 AArch64::Specifier &ELFSpec,
8576 AArch64::Specifier &DarwinSpec,
8577 int64_t &Addend) {
8578 ELFSpec = AArch64::S_INVALID;
8579 DarwinSpec = AArch64::S_None;
8580 Addend = 0;
8581
8582 if (auto *AE = dyn_cast<MCSpecifierExpr>(Val: Expr)) {
8583 ELFSpec = AE->getSpecifier();
8584 Expr = AE->getSubExpr();
8585 }
8586
8587 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Val: Expr);
8588 if (SE) {
8589 // It's a simple symbol reference with no addend.
8590 DarwinSpec = AArch64::Specifier(SE->getKind());
8591 return true;
8592 }
8593
8594 // Check that it looks like a symbol + an addend
8595 MCValue Res;
8596 bool Relocatable = Expr->evaluateAsRelocatable(Res, Asm: nullptr);
8597 if (!Relocatable || Res.getSubSym())
8598 return false;
8599
8600 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8601 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8602 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8603 return false;
8604
8605 if (Res.getAddSym())
8606 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8607 Addend = Res.getConstant();
8608
8609 // It's some symbol reference + a constant addend, but really
8610 // shouldn't use both Darwin and ELF syntax.
8611 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8612}
8613
8614/// Force static initialization.
8615extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8616LLVMInitializeAArch64AsmParser() {
8617 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8618 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8619 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8620 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8621 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8622}
8623
8624#define GET_REGISTER_MATCHER
8625#define GET_SUBTARGET_FEATURE_NAME
8626#define GET_MATCHER_IMPLEMENTATION
8627#define GET_MNEMONIC_SPELL_CHECKER
8628#include "AArch64GenAsmMatcher.inc"
8629
8630// Define this matcher function after the auto-generated include so we
8631// have the match class enum definitions.
8632unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8633 unsigned Kind) {
8634 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8635
8636 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8637 if (!Op.isImm())
8638 return Match_InvalidOperand;
8639 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
8640 if (!CE)
8641 return Match_InvalidOperand;
8642 if (CE->getValue() == ExpectedVal)
8643 return Match_Success;
8644 return Match_InvalidOperand;
8645 };
8646
8647 switch (Kind) {
8648 default:
8649 return Match_InvalidOperand;
8650 case MCK_MPR:
8651 // If the Kind is a token for the MPR register class which has the "za"
8652 // register (SME accumulator array), check if the asm is a literal "za"
8653 // token. This is for the "smstart za" alias that defines the register
8654 // as a literal token.
8655 if (Op.isTokenEqual(Str: "za"))
8656 return Match_Success;
8657 return Match_InvalidOperand;
8658
8659 // If the kind is a token for a literal immediate, check if our asm operand
8660 // matches. This is for InstAliases which have a fixed-value immediate in
8661 // the asm string, such as hints which are parsed into a specific
8662 // instruction definition.
8663#define MATCH_HASH(N) \
8664 case MCK__HASH_##N: \
8665 return MatchesOpImmediate(N);
8666 MATCH_HASH(0)
8667 MATCH_HASH(1)
8668 MATCH_HASH(2)
8669 MATCH_HASH(3)
8670 MATCH_HASH(4)
8671 MATCH_HASH(6)
8672 MATCH_HASH(7)
8673 MATCH_HASH(8)
8674 MATCH_HASH(10)
8675 MATCH_HASH(12)
8676 MATCH_HASH(14)
8677 MATCH_HASH(16)
8678 MATCH_HASH(24)
8679 MATCH_HASH(25)
8680 MATCH_HASH(26)
8681 MATCH_HASH(27)
8682 MATCH_HASH(28)
8683 MATCH_HASH(29)
8684 MATCH_HASH(30)
8685 MATCH_HASH(31)
8686 MATCH_HASH(32)
8687 MATCH_HASH(40)
8688 MATCH_HASH(48)
8689 MATCH_HASH(64)
8690#undef MATCH_HASH
8691#define MATCH_HASH_MINUS(N) \
8692 case MCK__HASH__MINUS_##N: \
8693 return MatchesOpImmediate(-N);
8694 MATCH_HASH_MINUS(4)
8695 MATCH_HASH_MINUS(8)
8696 MATCH_HASH_MINUS(16)
8697#undef MATCH_HASH_MINUS
8698 }
8699}
8700
8701ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8702
8703 SMLoc S = getLoc();
8704
8705 if (getTok().isNot(K: AsmToken::Identifier))
8706 return Error(L: S, Msg: "expected register");
8707
8708 MCRegister FirstReg;
8709 ParseStatus Res = tryParseScalarRegister(RegNum&: FirstReg);
8710 if (!Res.isSuccess())
8711 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8712 "even/odd register pair");
8713
8714 const MCRegisterClass &WRegClass =
8715 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8716 const MCRegisterClass &XRegClass =
8717 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8718
8719 bool isXReg = XRegClass.contains(Reg: FirstReg),
8720 isWReg = WRegClass.contains(Reg: FirstReg);
8721 if (!isXReg && !isWReg)
8722 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8723 "even/odd register pair");
8724
8725 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8726 unsigned FirstEncoding = RI->getEncodingValue(Reg: FirstReg);
8727
8728 if (FirstEncoding & 0x1)
8729 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8730 "even/odd register pair");
8731
8732 if (getTok().isNot(K: AsmToken::Comma))
8733 return Error(L: getLoc(), Msg: "expected comma");
8734 // Eat the comma
8735 Lex();
8736
8737 SMLoc E = getLoc();
8738 MCRegister SecondReg;
8739 Res = tryParseScalarRegister(RegNum&: SecondReg);
8740 if (!Res.isSuccess())
8741 return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
8742 "even/odd register pair");
8743
8744 if (RI->getEncodingValue(Reg: SecondReg) != FirstEncoding + 1 ||
8745 (isXReg && !XRegClass.contains(Reg: SecondReg)) ||
8746 (isWReg && !WRegClass.contains(Reg: SecondReg)))
8747 return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
8748 "even/odd register pair");
8749
8750 MCRegister Pair;
8751 if (isXReg) {
8752 Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube64,
8753 RC: &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8754 } else {
8755 Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube32,
8756 RC: &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8757 }
8758
8759 Operands.push_back(Elt: AArch64Operand::CreateReg(Reg: Pair, Kind: RegKind::Scalar, S,
8760 E: getLoc(), Ctx&: getContext()));
8761
8762 return ParseStatus::Success;
8763}
8764
8765template <bool ParseShiftExtend, bool ParseSuffix>
8766ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8767 const SMLoc S = getLoc();
8768 // Check for a SVE vector register specifier first.
8769 MCRegister RegNum;
8770 StringRef Kind;
8771
8772 ParseStatus Res =
8773 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
8774
8775 if (!Res.isSuccess())
8776 return Res;
8777
8778 if (ParseSuffix && Kind.empty())
8779 return ParseStatus::NoMatch;
8780
8781 const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::SVEDataVector);
8782 if (!KindRes)
8783 return ParseStatus::NoMatch;
8784
8785 unsigned ElementWidth = KindRes->second;
8786
8787 // No shift/extend is the default.
8788 if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
8789 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
8790 Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: S, Ctx&: getContext()));
8791
8792 ParseStatus Res = tryParseVectorIndex(Operands);
8793 if (Res.isFailure())
8794 return ParseStatus::Failure;
8795 return ParseStatus::Success;
8796 }
8797
8798 // Eat the comma
8799 Lex();
8800
8801 // Match the shift
8802 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8803 Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
8804 if (!Res.isSuccess())
8805 return Res;
8806
8807 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8808 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
8809 Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: Ext->getEndLoc(),
8810 Ctx&: getContext(), ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
8811 HasExplicitAmount: Ext->hasShiftExtendAmount()));
8812
8813 return ParseStatus::Success;
8814}
8815
8816ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8817 MCAsmParser &Parser = getParser();
8818
8819 SMLoc SS = getLoc();
8820 const AsmToken &TokE = getTok();
8821 bool IsHash = TokE.is(K: AsmToken::Hash);
8822
8823 if (!IsHash && TokE.isNot(K: AsmToken::Identifier))
8824 return ParseStatus::NoMatch;
8825
8826 int64_t Pattern;
8827 if (IsHash) {
8828 Lex(); // Eat hash
8829
8830 // Parse the immediate operand.
8831 const MCExpr *ImmVal;
8832 SS = getLoc();
8833 if (Parser.parseExpression(Res&: ImmVal))
8834 return ParseStatus::Failure;
8835
8836 auto *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
8837 if (!MCE)
8838 return TokError(Msg: "invalid operand for instruction");
8839
8840 Pattern = MCE->getValue();
8841 } else {
8842 // Parse the pattern
8843 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(Name: TokE.getString());
8844 if (!Pat)
8845 return ParseStatus::NoMatch;
8846
8847 Lex();
8848 Pattern = Pat->Encoding;
8849 assert(Pattern >= 0 && Pattern < 32);
8850 }
8851
8852 Operands.push_back(
8853 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8854 S: SS, E: getLoc(), Ctx&: getContext()));
8855
8856 return ParseStatus::Success;
8857}
8858
8859ParseStatus
8860AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8861 int64_t Pattern;
8862 SMLoc SS = getLoc();
8863 const AsmToken &TokE = getTok();
8864 // Parse the pattern
8865 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8866 Name: TokE.getString());
8867 if (!Pat)
8868 return ParseStatus::NoMatch;
8869
8870 Lex();
8871 Pattern = Pat->Encoding;
8872 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8873
8874 Operands.push_back(
8875 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8876 S: SS, E: getLoc(), Ctx&: getContext()));
8877
8878 return ParseStatus::Success;
8879}
8880
8881ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8882 SMLoc SS = getLoc();
8883
8884 MCRegister XReg;
8885 if (!tryParseScalarRegister(RegNum&: XReg).isSuccess())
8886 return ParseStatus::NoMatch;
8887
8888 MCContext &ctx = getContext();
8889 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8890 MCRegister X8Reg = RI->getMatchingSuperReg(
8891 Reg: XReg, SubIdx: AArch64::x8sub_0,
8892 RC: &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8893 if (!X8Reg)
8894 return Error(L: SS,
8895 Msg: "expected an even-numbered x-register in the range [x0,x22]");
8896
8897 Operands.push_back(
8898 Elt: AArch64Operand::CreateReg(Reg: X8Reg, Kind: RegKind::Scalar, S: SS, E: getLoc(), Ctx&: ctx));
8899 return ParseStatus::Success;
8900}
8901
8902ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8903 SMLoc S = getLoc();
8904
8905 if (getTok().isNot(K: AsmToken::Integer))
8906 return ParseStatus::NoMatch;
8907
8908 if (getLexer().peekTok().isNot(K: AsmToken::Colon))
8909 return ParseStatus::NoMatch;
8910
8911 const MCExpr *ImmF;
8912 if (getParser().parseExpression(Res&: ImmF))
8913 return ParseStatus::NoMatch;
8914
8915 if (getTok().isNot(K: AsmToken::Colon))
8916 return ParseStatus::NoMatch;
8917
8918 Lex(); // Eat ':'
8919 if (getTok().isNot(K: AsmToken::Integer))
8920 return ParseStatus::NoMatch;
8921
8922 SMLoc E = getTok().getLoc();
8923 const MCExpr *ImmL;
8924 if (getParser().parseExpression(Res&: ImmL))
8925 return ParseStatus::NoMatch;
8926
8927 unsigned ImmFVal = cast<MCConstantExpr>(Val: ImmF)->getValue();
8928 unsigned ImmLVal = cast<MCConstantExpr>(Val: ImmL)->getValue();
8929
8930 Operands.push_back(
8931 Elt: AArch64Operand::CreateImmRange(First: ImmFVal, Last: ImmLVal, S, E, Ctx&: getContext()));
8932 return ParseStatus::Success;
8933}
8934
8935template <int Adj>
8936ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8937 SMLoc S = getLoc();
8938
8939 parseOptionalToken(T: AsmToken::Hash);
8940 bool IsNegative = parseOptionalToken(T: AsmToken::Minus);
8941
8942 if (getTok().isNot(K: AsmToken::Integer))
8943 return ParseStatus::NoMatch;
8944
8945 const MCExpr *Ex;
8946 if (getParser().parseExpression(Res&: Ex))
8947 return ParseStatus::NoMatch;
8948
8949 int64_t Imm = dyn_cast<MCConstantExpr>(Val: Ex)->getValue();
8950 if (IsNegative)
8951 Imm = -Imm;
8952
8953 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8954 // return a value, which is certain to trigger a error message about invalid
8955 // immediate range instead of a non-descriptive invalid operand error.
8956 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8957 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8958 Imm = -2;
8959 else
8960 Imm += Adj;
8961
8962 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
8963 Operands.push_back(Elt: AArch64Operand::CreateImm(
8964 Val: MCConstantExpr::create(Value: Imm, Ctx&: getContext()), S, E, Ctx&: getContext()));
8965
8966 return ParseStatus::Success;
8967}
8968